blk-mq: avoid excessive boot delays with large lun counts

Zhangqing Luo reported long boot times on a system with thousands of
LUNs when scsi-mq was enabled.  He narrowed the problem down to
blk_mq_add_queue_tag_set, where every queue is frozen in order to set
the BLK_MQ_F_TAG_SHARED flag.  Each added device will freeze all queues
added before it in sequence, which involves waiting for an RCU grace
period for each one.  We don't need to do this.  After the second queue
is added, only new queues need to be initialized with the shared tag.
We can do that by percolating the flag up to the blk_mq_tag_set, and
updating the newly added queue's hctxs if the flag is set.
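(For scale: with a thousand LUNs, the old scheme freezes on the order of
1000 * 999 / 2, roughly half a million, queues over the course of the scan,
each freeze waiting out an RCU grace period; with this change the existing
queues are frozen only once, when the set first transitions from one queue
to two.)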

This problem was introduced by commit 0d2602ca30 ("blk-mq: improve
support for shared tags maps").

Reported-and-tested-by: Jason Luo <zhangqing.luo@oracle.com>
Reviewed-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1695,7 +1695,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	INIT_LIST_HEAD(&hctx->dispatch);
 	hctx->queue = q;
 	hctx->queue_num = hctx_idx;
-	hctx->flags = set->flags;
+	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
 	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 					blk_mq_hctx_notify, hctx);
@@ -1882,27 +1882,26 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 	}
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
 	struct blk_mq_hw_ctx *hctx;
-	struct request_queue *q;
-	bool shared;
 	int i;
 
-	if (set->tag_list.next == set->tag_list.prev)
-		shared = false;
-	else
-		shared = true;
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (shared)
+			hctx->flags |= BLK_MQ_F_TAG_SHARED;
+		else
+			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+	}
+}
+
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+{
+	struct request_queue *q;
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_freeze_queue(q);
-
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (shared)
-				hctx->flags |= BLK_MQ_F_TAG_SHARED;
-			else
-				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
-		}
+		queue_set_hctx_shared(q, shared);
 		blk_mq_unfreeze_queue(q);
 	}
 }
@@ -1913,7 +1912,12 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
 	mutex_lock(&set->tag_list_lock);
 	list_del_init(&q->tag_set_list);
-	blk_mq_update_tag_set_depth(set);
+	if (list_is_singular(&set->tag_list)) {
+		/* just transitioned to unshared */
+		set->flags &= ~BLK_MQ_F_TAG_SHARED;
+		/* update existing queue */
+		blk_mq_update_tag_set_depth(set, false);
+	}
 	mutex_unlock(&set->tag_list_lock);
 }
 
@@ -1923,8 +1927,17 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	q->tag_set = set;
 
 	mutex_lock(&set->tag_list_lock);
+
+	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+		set->flags |= BLK_MQ_F_TAG_SHARED;
+		/* update existing queue */
+		blk_mq_update_tag_set_depth(set, true);
+	}
+	if (set->flags & BLK_MQ_F_TAG_SHARED)
+		queue_set_hctx_shared(q, true);
 	list_add_tail(&q->tag_set_list, &set->tag_list);
-	blk_mq_update_tag_set_depth(set);
+
 	mutex_unlock(&set->tag_list_lock);
 }
 