nvme: update node paths after adding new path
The nvme namespace paths were being updated only when the current path was not set or was non-optimized. If a new path comes online that is a better path for its NUMA node, the multipath selector may continue using the previously set path on a potentially more distant node. This patch re-runs the path assignment after successfully adding a new optimized path.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
parent
0585b75437
commit
886fabf693
1 changed file with 9 additions and 0 deletions
|
@@ -321,6 +321,15 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
|
||||||
device_add_disk(&head->subsys->dev, head->disk,
|
device_add_disk(&head->subsys->dev, head->disk,
|
||||||
nvme_ns_id_attr_groups);
|
nvme_ns_id_attr_groups);
|
||||||
|
|
||||||
|
if (nvme_path_is_optimized(ns)) {
|
||||||
|
int node, srcu_idx;
|
||||||
|
|
||||||
|
srcu_idx = srcu_read_lock(&head->srcu);
|
||||||
|
for_each_node(node)
|
||||||
|
__nvme_find_path(head, node);
|
||||||
|
srcu_read_unlock(&head->srcu, srcu_idx);
|
||||||
|
}
|
||||||
|
|
||||||
kblockd_schedule_work(&ns->head->requeue_work);
|
kblockd_schedule_work(&ns->head->requeue_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue