author		Paolo Valente <paolo.valente@linaro.org>	2021-11-25 19:15:10 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-04-15 14:18:23 +0200
commit		931aff627469a75c77b9fd3823146d0575afffd6 (patch)
tree		b50e6dfc327292afba6e31486bdcdf073a8dff98 /block
parent		7a7b11d694eda5059bca2017e0afe7df0a13f4db (diff)
Revert "Revert "block, bfq: honor already-setup queue merges""
[ Upstream commit 15729ff8143f8135b03988a100a19e66d7cb7ecd ]

A crash [1] happened to be triggered in conjunction with commit
2d52c58b9c9b ("block, bfq: honor already-setup queue merges"). The
latter was then reverted by commit ebc69e897e17 ("Revert "block, bfq:
honor already-setup queue merges""). Yet, the reverted commit was not
the one introducing the bug. In fact, it actually triggered a UAF
introduced by a different commit, and now fixed by commit d29bd41428cf
("block, bfq: reset last_bfqq_created on group change").

So, there is no point in keeping commit 2d52c58b9c9b ("block, bfq:
honor already-setup queue merges") out. This commit restores it.

[1] https://bugzilla.kernel.org/show_bug.cgi?id=214503

Reported-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Link: https://lore.kernel.org/r/20211125181510.15004-1-paolo.valente@linaro.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'block')
-rw-r--r--	block/bfq-iosched.c	16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 38c39d50ae04..1d443d17cf7c 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2523,6 +2523,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
* are likely to increase the throughput.
*/
bfqq->new_bfqq = new_bfqq;
+ /*
+ * The above assignment schedules the following redirections:
+ * each time some I/O for bfqq arrives, the process that
+ * generated that I/O is disassociated from bfqq and
+ * associated with new_bfqq. Here we increase new_bfqq->ref
+ * in advance, adding the number of processes that are
+ * expected to be associated with new_bfqq as they happen to
+ * issue I/O.
+ */
new_bfqq->ref += process_refs;
return new_bfqq;
}
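The comment added above describes the reference accounting behind a scheduled merge: new_bfqq->ref is inflated up front by the number of process references expected to migrate, so the destination queue stays alive while I/O is gradually redirected to it. The following minimal userspace C sketch illustrates only that accounting idea; struct queue, setup_merge and its fields are simplified stand-ins, not the kernel's bfq_queue or bfq_setup_merge.

#include <stdio.h>

/* Simplified stand-ins for the bfq queue structures; not the kernel types. */
struct queue {
	int ref;                 /* total references held on this queue */
	int process_refs;        /* references held by processes issuing I/O */
	struct queue *new_queue; /* merge destination, once a merge is scheduled */
};

/*
 * Schedule a merge of @src into @dst: record the destination and bump its
 * reference count by the number of processes expected to be redirected,
 * mirroring the accounting described in the comment above.
 */
static struct queue *setup_merge(struct queue *src, struct queue *dst)
{
	src->new_queue = dst;
	dst->ref += src->process_refs;
	return dst;
}

int main(void)
{
	struct queue src = { .ref = 3, .process_refs = 2, .new_queue = NULL };
	struct queue dst = { .ref = 1, .process_refs = 1, .new_queue = NULL };

	setup_merge(&src, &dst);

	/* dst.ref is now 3: its own reference plus the two process references
	 * expected to move over from src as those processes issue new I/O. */
	printf("dst.ref = %d\n", dst.ref);
	return 0;
}

Because the references are taken in advance, the merge must later be honoured; otherwise the pre-added references are never consumed, which is the situation the next two hunks address.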
@@ -2582,6 +2591,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
{
struct bfq_queue *in_service_bfqq, *new_bfqq;
+ /* if a merge has already been set up, then proceed with that first */
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
/*
* Do not perform queue merging if the device is non
* rotational and performs internal queueing. In fact, such a
@@ -2636,9 +2649,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (bfq_too_late_for_merging(bfqq))
return NULL;
- if (bfqq->new_bfqq)
- return bfqq->new_bfqq;
-
if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
return NULL;
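For context, the two hunks above move the bfqq->new_bfqq check to the top of bfq_setup_cooperator, ahead of the other early returns, so that a merge whose reference accounting has already been done is always honoured. The standalone C sketch below illustrates that ordering under simplified assumptions; setup_cooperator, device_is_fast_nonrot and too_late_for_merging are hypothetical stand-ins, not the real kernel functions.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical, simplified stand-ins; not the actual bfq API. */
struct queue {
	struct queue *new_queue; /* non-NULL once a merge has been set up */
};

static bool device_is_fast_nonrot(void)           { return false; }
static bool too_late_for_merging(struct queue *q)  { (void)q; return false; }

/*
 * An already-scheduled merge is honoured before any other early return, so
 * the destination queue's reference count, bumped in advance when the merge
 * was set up, cannot be stranded by a later "do not merge" decision.
 */
static struct queue *setup_cooperator(struct queue *q)
{
	if (q->new_queue)              /* merge already set up: honour it first */
		return q->new_queue;

	if (device_is_fast_nonrot())   /* merging deemed not worthwhile */
		return NULL;

	if (too_late_for_merging(q))   /* before the patch, checks like this ran first */
		return NULL;

	/* ... candidate selection for a new merge would follow here ... */
	return NULL;
}

int main(void)
{
	struct queue dst = { .new_queue = NULL };
	struct queue src = { .new_queue = &dst };

	/* src already has a merge scheduled, so it is returned immediately. */
	return setup_cooperator(&src) == &dst ? 0 : 1;
}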