target: Only reset specific dynamic entries during lun_group creation
This patch changes core_tpg_add_node_to_devs() to avoid unnecessarily resetting every se_dev_entry in se_node_acl->tpg_lun_hlist when the operation is driven by an explicit configfs se_lun->lun_group creation via core_dev_add_lun(); in that case only the single se_lun is updated. Otherwise, for the second core_tpg_check_initiator_node_acl() case, continue to scan the full set of currently active se_lun in se_portal_group->tpg_lun_hlist. Reviewed-by: Hannes Reinecke <hare@suse.de> Cc: Christoph Hellwig <hch@lst.de> Cc: Sagi Grimberg <sagig@mellanox.com> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
This commit is contained in:
parent
84786546b6
commit
df9766ca9d
3 changed files with 8 additions and 4 deletions
|
@ -1197,7 +1197,7 @@ int core_dev_add_lun(
|
||||||
if (acl->dynamic_node_acl &&
|
if (acl->dynamic_node_acl &&
|
||||||
(!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
|
(!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
|
||||||
!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
|
!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
|
||||||
core_tpg_add_node_to_devs(acl, tpg);
|
core_tpg_add_node_to_devs(acl, tpg, lun);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
mutex_unlock(&tpg->acl_node_mutex);
|
mutex_unlock(&tpg->acl_node_mutex);
|
||||||
|
|
|
@ -64,7 +64,8 @@ extern struct se_device *g_lun0_dev;
|
||||||
|
|
||||||
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
|
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
|
||||||
const char *);
|
const char *);
|
||||||
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
|
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
|
||||||
|
struct se_lun *);
|
||||||
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
|
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
|
||||||
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
|
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
|
||||||
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
|
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
|
||||||
|
|
|
@ -89,7 +89,8 @@ EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
|
||||||
*/
|
*/
|
||||||
void core_tpg_add_node_to_devs(
|
void core_tpg_add_node_to_devs(
|
||||||
struct se_node_acl *acl,
|
struct se_node_acl *acl,
|
||||||
struct se_portal_group *tpg)
|
struct se_portal_group *tpg,
|
||||||
|
struct se_lun *lun_orig)
|
||||||
{
|
{
|
||||||
u32 lun_access = 0;
|
u32 lun_access = 0;
|
||||||
struct se_lun *lun;
|
struct se_lun *lun;
|
||||||
|
@ -99,6 +100,8 @@ void core_tpg_add_node_to_devs(
|
||||||
hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
|
hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
|
||||||
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
|
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
|
||||||
continue;
|
continue;
|
||||||
|
if (lun_orig && lun != lun_orig)
|
||||||
|
continue;
|
||||||
|
|
||||||
dev = lun->lun_se_dev;
|
dev = lun->lun_se_dev;
|
||||||
/*
|
/*
|
||||||
|
@ -238,7 +241,7 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
|
||||||
*/
|
*/
|
||||||
if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
|
if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
|
||||||
(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
|
(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
|
||||||
core_tpg_add_node_to_devs(acl, tpg);
|
core_tpg_add_node_to_devs(acl, tpg, NULL);
|
||||||
|
|
||||||
target_add_node_acl(acl);
|
target_add_node_acl(acl);
|
||||||
return acl;
|
return acl;
|
||||||
|
|
Loading…
Reference in a new issue