blk-mq-sched: add new parameter nr_requests in blk_mq_alloc_sched_tags()

This helper only supports allocating the default number of requests;
add a new parameter so that callers can ask for a specific number of
requests.

This prepares for fixing a potential deadlock in the case where
nr_requests grows.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Yu Kuai
Date: 2025-09-10 16:04:42 +08:00
Committer: Jens Axboe
Commit: 6293e336f6
Parent: e632004044
4 changed files with 19 additions and 11 deletions
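
As a quick orientation before the diff: the allocator's signature gains a third argument, and existing callers keep today's behaviour by passing the new blk_mq_default_nr_requests() helper. Below is a minimal hypothetical caller sketch; it assumes only the two functions this patch touches, and the wrapper name and the "0 means default" convention are illustrative, not part of the kernel API:

	/*
	 * Hypothetical wrapper, for illustration only: allocate scheduler
	 * tags for every hw queue at a caller-chosen depth, falling back
	 * to the historical default of
	 * 2 * min(queue_depth, BLKDEV_DEFAULT_RQ).
	 */
	static struct elevator_tags *
	sched_tags_alloc(struct blk_mq_tag_set *set, unsigned int nr_requests)
	{
		/* 0 means "use the default depth" in this sketch. */
		if (!nr_requests)
			nr_requests = blk_mq_default_nr_requests(set);

		return blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
					       nr_requests);
	}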

block/blk-mq-sched.c

@@ -454,7 +454,7 @@ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
 }
 
 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
-		unsigned int nr_hw_queues)
+		unsigned int nr_hw_queues, unsigned int nr_requests)
 {
 	unsigned int nr_tags;
 	int i;
@@ -470,13 +470,8 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
 			nr_tags * sizeof(struct blk_mq_tags *), gfp);
 	if (!et)
 		return NULL;
 
-	/*
-	 * Default to double of smaller one between hw queue_depth and
-	 * 128, since we don't split into sync/async like the old code
-	 * did. Additionally, this is a per-hw queue depth.
-	 */
-	et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
-				    BLKDEV_DEFAULT_RQ);
+	et->nr_requests = nr_requests;
 	et->nr_hw_queues = nr_hw_queues;
 
 	if (blk_mq_is_shared_tags(set->flags)) {
@@ -521,7 +516,8 @@ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
 		 * concurrently.
 		 */
 		if (q->elevator) {
-			et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
+			et = blk_mq_alloc_sched_tags(set, nr_hw_queues,
+					blk_mq_default_nr_requests(set));
 			if (!et)
 				goto out_unwind;
 			if (xa_insert(et_table, q->id, et, gfp))

block/blk-mq-sched.h

@@ -24,7 +24,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 void blk_mq_sched_free_rqs(struct request_queue *q);
 
 struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
-		unsigned int nr_hw_queues);
+		unsigned int nr_hw_queues, unsigned int nr_requests);
 int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
 		struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
 void blk_mq_free_sched_tags(struct elevator_tags *et,

block/blk-mq.h

@@ -109,6 +109,17 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
 	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
 }
 
+/*
+ * Default to double of smaller one between hw queue_depth and
+ * 128, since we don't split into sync/async like the old code
+ * did. Additionally, this is a per-hw queue depth.
+ */
+static inline unsigned int blk_mq_default_nr_requests(
+		struct blk_mq_tag_set *set)
+{
+	return 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ);
+}
+
 /*
  * sysfs helpers
  */
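
Sanity-checking the helper's arithmetic with illustrative depths: a tag set with queue_depth = 64 gets a per-hw-queue default of 2 * min(64, 128) = 128 requests, while queue_depth = 1024 is clamped to 2 * min(1024, 128) = 256, so BLKDEV_DEFAULT_RQ (128) bounds how much a very deep hardware queue can contribute.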

block/elevator.c

@@ -669,7 +669,8 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
 	lockdep_assert_held(&set->update_nr_hwq_lock);
 
 	if (strncmp(ctx->name, "none", 4)) {
-		ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
+		ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues,
+				blk_mq_default_nr_requests(set));
		if (!ctx->et)
			return -ENOMEM;
	}