Merge branch 'nvme-4.16' of git://git.infradead.org/nvme into for-4.16/block
Pull NVMe fixes from Christoph: "Below are the pending nvme updates for Linux 4.16. Just fixes and cleanups from various contributors this time around."
commit 550203e64c

13 changed files with 307 additions and 117 deletions
@@ -232,6 +232,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 
 	old_state = ctrl->state;
 	switch (new_state) {
+	case NVME_CTRL_ADMIN_ONLY:
+		switch (old_state) {
+		case NVME_CTRL_RESETTING:
+			changed = true;
+			/* FALLTHRU */
+		default:
+			break;
+		}
+		break;
 	case NVME_CTRL_LIVE:
 		switch (old_state) {
 		case NVME_CTRL_NEW:
@@ -247,6 +256,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		switch (old_state) {
 		case NVME_CTRL_NEW:
 		case NVME_CTRL_LIVE:
+		case NVME_CTRL_ADMIN_ONLY:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -266,6 +276,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 	case NVME_CTRL_DELETING:
 		switch (old_state) {
 		case NVME_CTRL_LIVE:
+		case NVME_CTRL_ADMIN_ONLY:
 		case NVME_CTRL_RESETTING:
 		case NVME_CTRL_RECONNECTING:
 			changed = true;
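The hunks above whitelist transitions into and out of the new NVME_CTRL_ADMIN_ONLY state using nested switch statements rather than a transition table. A minimal, self-contained sketch of that validation pattern (illustrative names only, not the kernel code):

    #include <stdbool.h>

    enum ctrl_state { ST_NEW, ST_LIVE, ST_ADMIN_ONLY, ST_RESETTING, ST_DELETING };

    /* Return true only for transitions that are explicitly allowed. */
    static bool transition_allowed(enum ctrl_state old, enum ctrl_state new)
    {
        bool changed = false;

        switch (new) {
        case ST_ADMIN_ONLY:
            switch (old) {
            case ST_RESETTING:
                changed = true;
                break;
            default:
                break;
            }
            break;
        case ST_LIVE:
            switch (old) {
            case ST_NEW:
            case ST_RESETTING:
                changed = true;
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }
        return changed;
    }

Anything not explicitly listed falls through to the default arms and is rejected, which is why adding a single case label per target state is enough to open up a new transition.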
@@ -1217,16 +1228,27 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
 #ifdef CONFIG_NVME_MULTIPATH
 	/* should never be called due to GENHD_FL_HIDDEN */
 	if (WARN_ON_ONCE(ns->head->disk))
-		return -ENXIO;
+		goto fail;
 #endif
 	if (!kref_get_unless_zero(&ns->kref))
-		return -ENXIO;
+		goto fail;
+	if (!try_module_get(ns->ctrl->ops->module))
+		goto fail_put_ns;
+
 	return 0;
+
+fail_put_ns:
+	nvme_put_ns(ns);
+fail:
+	return -ENXIO;
 }
 
 static void nvme_release(struct gendisk *disk, fmode_t mode)
 {
-	nvme_put_ns(disk->private_data);
+	struct nvme_ns *ns = disk->private_data;
+
+	module_put(ns->ctrl->ops->module);
+	nvme_put_ns(ns);
 }
 
 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -2047,6 +2069,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
 	NULL,
 };
 
+static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+{
+	int count = 0;
+	struct nvme_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		if (ctrl->state != NVME_CTRL_DELETING &&
+		    ctrl->state != NVME_CTRL_DEAD)
+			count++;
+	}
+	mutex_unlock(&subsys->lock);
+
+	return count;
+}
+
 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
 	struct nvme_subsystem *subsys, *found;
@@ -2085,7 +2123,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 		 * Verify that the subsystem actually supports multiple
 		 * controllers, else bail out.
 		 */
-		if (!(id->cmic & (1 << 1))) {
+		if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
 			dev_err(ctrl->device,
 				"ignoring ctrl due to duplicate subnqn (%s).\n",
 				found->subnqn);
@@ -2252,7 +2290,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 				 shutdown_timeout, 60);
 
 		if (ctrl->shutdown_timeout != shutdown_timeout)
-			dev_warn(ctrl->device,
+			dev_info(ctrl->device,
 				 "Shutdown timeout set to %u seconds\n",
 				 ctrl->shutdown_timeout);
 	} else
@@ -2336,8 +2374,14 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
 	struct nvme_ctrl *ctrl =
 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
 
-	if (ctrl->state != NVME_CTRL_LIVE)
+	switch (ctrl->state) {
+	case NVME_CTRL_LIVE:
+	case NVME_CTRL_ADMIN_ONLY:
+		break;
+	default:
 		return -EWOULDBLOCK;
+	}
+
 	file->private_data = ctrl;
 	return 0;
 }
@@ -2601,6 +2645,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
 	static const char *const state_name[] = {
 		[NVME_CTRL_NEW]		= "new",
 		[NVME_CTRL_LIVE]	= "live",
+		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
 		[NVME_CTRL_RESETTING]	= "resetting",
 		[NVME_CTRL_RECONNECTING]= "reconnecting",
 		[NVME_CTRL_DELETING]	= "deleting",
@@ -3073,6 +3118,8 @@ static void nvme_scan_work(struct work_struct *work)
 	if (ctrl->state != NVME_CTRL_LIVE)
 		return;
 
+	WARN_ON_ONCE(!ctrl->tagset);
+
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
 
@@ -3093,8 +3140,7 @@ static void nvme_scan_work(struct work_struct *work)
 void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
 	/*
-	 * Do not queue new scan work when a controller is reset during
-	 * removal.
+	 * Only new queue scan work when admin and IO queues are both alive
 	 */
 	if (ctrl->state == NVME_CTRL_LIVE)
 		queue_work(nvme_wq, &ctrl->scan_work);
@@ -492,7 +492,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
  */
 int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
-	if (!ops->create_ctrl)
+	if (!ops->create_ctrl || !ops->module)
 		return -EINVAL;
 
 	down_write(&nvmf_transports_rwsem);
@@ -868,32 +868,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		goto out_unlock;
 	}
 
+	if (!try_module_get(ops->module)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
 	ret = nvmf_check_required_opts(opts, ops->required_opts);
 	if (ret)
-		goto out_unlock;
+		goto out_module_put;
 	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
 				ops->allowed_opts | ops->required_opts);
 	if (ret)
-		goto out_unlock;
+		goto out_module_put;
 
 	ctrl = ops->create_ctrl(dev, opts);
 	if (IS_ERR(ctrl)) {
 		ret = PTR_ERR(ctrl);
-		goto out_unlock;
+		goto out_module_put;
 	}
 
 	if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
 		dev_warn(ctrl->device,
			"controller returned incorrect NQN: \"%s\".\n",
			ctrl->subsys->subnqn);
+		module_put(ops->module);
 		up_read(&nvmf_transports_rwsem);
 		nvme_delete_ctrl_sync(ctrl);
 		return ERR_PTR(-EINVAL);
 	}
 
+	module_put(ops->module);
 	up_read(&nvmf_transports_rwsem);
 	return ctrl;
 
+out_module_put:
+	module_put(ops->module);
 out_unlock:
 	up_read(&nvmf_transports_rwsem);
 out_free_opts:
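The hunk above pins the transport module with try_module_get() before calling into it and drops that reference on every exit path via the new out_module_put label. A rough user-space sketch of the same acquire-then-unwind shape, with hypothetical get_provider()/put_provider() helpers standing in for the module reference (not kernel APIs):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for try_module_get()/module_put(). */
    static int get_provider(void)  { return 1; }   /* nonzero on success */
    static void put_provider(void) { }

    static int check_opts(void)    { return 0; }   /* 0 on success */
    static int start_ctrl(void)    { return 0; }

    static int create_ctrl(void)
    {
        int ret;

        if (!get_provider())
            return -EBUSY;

        ret = check_opts();
        if (ret)
            goto out_put;

        ret = start_ctrl();
        if (ret)
            goto out_put;

        /* success: the reference was only needed for the duration of the call */
        put_provider();
        return 0;

    out_put:
        put_provider();
        return ret;
    }

    int main(void)
    {
        printf("create_ctrl() = %d\n", create_ctrl());
        return 0;
    }

The key point the diff enforces is symmetry: once the reference is taken, every return path, success or failure, must release it exactly once.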
@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
  * fabric implementation of NVMe fabrics.
  * @entry:		Used by the fabrics library to add the new
  *			registration entry to its linked-list internal tree.
+ * @module:		Transport module reference
  * @name:		Name of the NVMe fabric driver implementation.
  * @required_opts:	sysfs command-line options that must be specified
  *			when adding a new NVMe controller.
@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
  */
 struct nvmf_transport_ops {
 	struct list_head	entry;
+	struct module		*module;
 	const char		*name;
 	int			required_opts;
 	int			allowed_opts;
@@ -3381,6 +3381,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 
 static struct nvmf_transport_ops nvme_fc_transport = {
 	.name		= "fc",
+	.module		= THIS_MODULE,
 	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
 	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
 	.create_ctrl	= nvme_fc_create_ctrl,
@@ -119,6 +119,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,
 	NVME_CTRL_LIVE,
+	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
 	NVME_CTRL_RESETTING,
 	NVME_CTRL_RECONNECTING,
 	NVME_CTRL_DELETING,
@@ -1770,7 +1770,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
-	u64 size = 0, tmp;
+	u64 size, tmp;
 
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
@@ -1853,7 +1853,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
 	u64 min = (u64)dev->ctrl.hmmin * 4096;
 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
-	int ret = 0;
+	int ret;
 
 	preferred = min(preferred, max);
 	if (min > max) {
@@ -2035,13 +2035,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 }
 
 /*
- * Return: error value if an error occurred setting up the queues or calling
- * Identify Device. 0 if these succeeded, even if adding some of the
- * namespaces failed. At the moment, these failures are silent. TBD which
- * failures should be reported.
+ * return error value only when tagset allocation failed
  */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
+	int ret;
+
 	if (!dev->ctrl.tagset) {
 		dev->tagset.ops = &nvme_mq_ops;
 		dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2057,8 +2056,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
 		dev->tagset.driver_data = dev;
 
-		if (blk_mq_alloc_tag_set(&dev->tagset))
-			return 0;
+		ret = blk_mq_alloc_tag_set(&dev->tagset);
+		if (ret) {
+			dev_warn(dev->ctrl.device,
+				"IO queues tagset allocation failed %d\n", ret);
+			return ret;
+		}
 		dev->ctrl.tagset = &dev->tagset;
 
 		nvme_dbbuf_set(dev);
@@ -2291,6 +2294,7 @@ static void nvme_reset_work(struct work_struct *work)
 		container_of(work, struct nvme_dev, ctrl.reset_work);
 	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
 	int result = -ENODEV;
+	enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
 
 	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
 		goto out;
@@ -2354,15 +2358,23 @@ static void nvme_reset_work(struct work_struct *work)
 		dev_warn(dev->ctrl.device, "IO queues not created\n");
 		nvme_kill_queues(&dev->ctrl);
 		nvme_remove_namespaces(&dev->ctrl);
+		new_state = NVME_CTRL_ADMIN_ONLY;
 	} else {
 		nvme_start_queues(&dev->ctrl);
 		nvme_wait_freeze(&dev->ctrl);
-		nvme_dev_add(dev);
+		/* hit this only when allocate tagset fails */
+		if (nvme_dev_add(dev))
+			new_state = NVME_CTRL_ADMIN_ONLY;
 		nvme_unfreeze(&dev->ctrl);
 	}
 
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
-		dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+	/*
+	 * If only admin queue live, keep it to do further investigation or
+	 * recovery.
+	 */
+	if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller state %d\n", new_state);
 		goto out;
 	}
 
@@ -2498,10 +2510,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto release_pools;
 
-	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
-	queue_work(nvme_wq, &dev->ctrl.reset_work);
+	nvme_reset_ctrl(&dev->ctrl);
+
 	return 0;
 
 release_pools:
@@ -2006,6 +2006,7 @@ out_free_ctrl:
 
 static struct nvmf_transport_ops nvme_rdma_transport = {
 	.name		= "rdma",
+	.module		= THIS_MODULE,
 	.required_opts	= NVMF_OPT_TRADDR,
 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
@@ -830,7 +830,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	/* Don't accept keep-alive timeout for discovery controllers */
 	if (kato) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
-		goto out_free_sqs;
+		goto out_remove_ida;
 	}
 
 	/*
@@ -860,6 +862,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	*ctrlp = ctrl;
 	return 0;
 
+out_remove_ida:
+	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 out_free_sqs:
 	kfree(ctrl->sqs);
 out_free_cqs:
@@ -877,21 +879,22 @@ static void nvmet_ctrl_free(struct kref *ref)
 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
 	struct nvmet_subsys *subsys = ctrl->subsys;
 
-	nvmet_stop_keep_alive_timer(ctrl);
-
 	mutex_lock(&subsys->lock);
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
+	nvmet_stop_keep_alive_timer(ctrl);
+
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fatal_err_work);
 
 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
-	nvmet_subsys_put(subsys);
 
 	kfree(ctrl->sqs);
 	kfree(ctrl->cqs);
 	kfree(ctrl);
+
+	nvmet_subsys_put(subsys);
 }
 
 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
@@ -225,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 		goto out_ctrl_put;
 	}
 
-	pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
 
 out:
 	kfree(d);
@@ -2490,14 +2490,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
-			/* a FC port can only be 1 nvmet port id */
-			if (!tgtport->port) {
-				tgtport->port = port;
-				port->priv = tgtport;
-				nvmet_fc_tgtport_get(tgtport);
-				ret = 0;
-			} else
-				ret = -EALREADY;
+			tgtport->port = port;
+			ret = 0;
 			break;
 		}
 	}
@@ -2508,19 +2502,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
 static void
 nvmet_fc_remove_port(struct nvmet_port *port)
 {
-	struct nvmet_fc_tgtport *tgtport = port->priv;
-	unsigned long flags;
-	bool matched = false;
-
-	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
-	if (tgtport->port == port) {
-		matched = true;
-		tgtport->port = NULL;
-	}
-	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
-
-	if (matched)
-		nvmet_fc_tgtport_put(tgtport);
+	/* nothing to do */
 }
 
 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
@@ -204,6 +204,10 @@ struct fcloop_lport {
 	struct completion unreg_done;
 };
 
+struct fcloop_lport_priv {
+	struct fcloop_lport *lport;
+};
+
 struct fcloop_rport {
 	struct nvme_fc_remote_port	*remoteport;
 	struct nvmet_fc_target_port	*targetport;
@@ -238,21 +242,32 @@ struct fcloop_lsreq {
 	int status;
 };
 
+enum {
+	INI_IO_START		= 0,
+	INI_IO_ACTIVE		= 1,
+	INI_IO_ABORTED		= 2,
+	INI_IO_COMPLETED	= 3,
+};
+
 struct fcloop_fcpreq {
 	struct fcloop_tport		*tport;
 	struct nvmefc_fcp_req		*fcpreq;
 	spinlock_t			reqlock;
 	u16				status;
+	u32				inistate;
 	bool				active;
 	bool				aborted;
-	struct work_struct		work;
+	struct kref			ref;
+	struct work_struct		fcp_rcv_work;
+	struct work_struct		abort_rcv_work;
+	struct work_struct		tio_done_work;
 	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
 };
 
 struct fcloop_ini_fcpreq {
 	struct nvmefc_fcp_req	*fcpreq;
 	struct fcloop_fcpreq	*tfcp_req;
-	struct work_struct	iniwork;
+	spinlock_t		inilock;
 };
 
 static inline struct fcloop_lsreq *
@@ -343,17 +358,122 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
 	return 0;
 }
 
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
 static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_tfcp_req_free(struct kref *ref)
 {
-	struct fcloop_ini_fcpreq *inireq =
-		container_of(work, struct fcloop_ini_fcpreq, iniwork);
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(ref, struct fcloop_fcpreq, ref);
 
-	inireq->fcpreq->done(inireq->fcpreq);
+	kfree(tfcp_req);
 }
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+	return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+			struct fcloop_fcpreq *tfcp_req, int status)
+{
+	struct fcloop_ini_fcpreq *inireq = NULL;
+
+	if (fcpreq) {
+		inireq = fcpreq->private;
+		spin_lock(&inireq->inilock);
+		inireq->tfcp_req = NULL;
+		spin_unlock(&inireq->inilock);
+
+		fcpreq->status = status;
+		fcpreq->done(fcpreq);
+	}
+
+	/* release original io reference on tgt struct */
+	fcloop_tfcp_req_put(tfcp_req);
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	int ret = 0;
+	bool aborted = false;
+
+	spin_lock(&tfcp_req->reqlock);
+	switch (tfcp_req->inistate) {
+	case INI_IO_START:
+		tfcp_req->inistate = INI_IO_ACTIVE;
+		break;
+	case INI_IO_ABORTED:
+		aborted = true;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(aborted))
+		ret = -ECANCELED;
+	else
+		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+				&tfcp_req->tgt_fcp_req,
+				fcpreq->cmdaddr, fcpreq->cmdlen);
+	if (ret)
+		fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+	return;
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+	struct nvmefc_fcp_req *fcpreq;
+	bool completed = false;
+
+	spin_lock(&tfcp_req->reqlock);
+	fcpreq = tfcp_req->fcpreq;
+	switch (tfcp_req->inistate) {
+	case INI_IO_ABORTED:
+		break;
+	case INI_IO_COMPLETED:
+		completed = true;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
+	spin_unlock(&tfcp_req->reqlock);
+
+	if (unlikely(completed)) {
+		/* remove reference taken in original abort downcall */
+		fcloop_tfcp_req_put(tfcp_req);
+		return;
+	}
+
+	if (tfcp_req->tport->targetport)
+		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+					&tfcp_req->tgt_fcp_req);
+
+	spin_lock(&tfcp_req->reqlock);
+	tfcp_req->fcpreq = NULL;
+	spin_unlock(&tfcp_req->reqlock);
+
+	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+	/* call_host_done releases reference for abort downcall */
+}
 
 /*
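The fcloop rework above keys the request lifetime to a struct kref: the I/O path holds the initial reference, the abort path only gets an extra reference via kref_get_unless_zero(), and the final kref_put() frees the request. A minimal user-space sketch of that get-unless-zero/put pattern using C11 atomics (names are illustrative, not the kernel's kref API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct req {
        atomic_int ref;     /* starts at 1: the owner's reference */
        /* ... payload ... */
    };

    static struct req *req_alloc(void)
    {
        struct req *r = calloc(1, sizeof(*r));

        if (r)
            atomic_store(&r->ref, 1);
        return r;
    }

    /* Take a reference only if the object is still live (ref > 0). */
    static bool req_get_unless_zero(struct req *r)
    {
        int old = atomic_load(&r->ref);

        while (old > 0) {
            if (atomic_compare_exchange_weak(&r->ref, &old, old + 1))
                return true;
        }
        return false;
    }

    /* Drop a reference; the last put frees the object. */
    static void req_put(struct req *r)
    {
        if (atomic_fetch_sub(&r->ref, 1) == 1)
            free(r);
    }

The "unless zero" variant is what lets the abort path race safely with completion: if the request has already dropped to zero and is being freed, the abort path simply fails to get a reference and backs off instead of touching freed memory.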
@@ -364,20 +484,15 @@ static void
 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
 {
 	struct fcloop_fcpreq *tfcp_req =
-		container_of(work, struct fcloop_fcpreq, work);
-	struct fcloop_tport *tport = tfcp_req->tport;
+		container_of(work, struct fcloop_fcpreq, tio_done_work);
 	struct nvmefc_fcp_req *fcpreq;
 
 	spin_lock(&tfcp_req->reqlock);
 	fcpreq = tfcp_req->fcpreq;
+	tfcp_req->inistate = INI_IO_COMPLETED;
 	spin_unlock(&tfcp_req->reqlock);
 
-	if (tport->remoteport && fcpreq) {
-		fcpreq->status = tfcp_req->status;
-		fcpreq->done(fcpreq);
-	}
-
-	kfree(tfcp_req);
+	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
 }
 
 
@@ -390,7 +505,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 	struct fcloop_rport *rport = remoteport->private;
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
 	struct fcloop_fcpreq *tfcp_req;
-	int ret = 0;
 
 	if (!rport->targetport)
 		return -ECONNREFUSED;
@@ -401,16 +515,20 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 
 	inireq->fcpreq = fcpreq;
 	inireq->tfcp_req = tfcp_req;
-	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
+	spin_lock_init(&inireq->inilock);
 
 	tfcp_req->fcpreq = fcpreq;
 	tfcp_req->tport = rport->targetport->private;
+	tfcp_req->inistate = INI_IO_START;
 	spin_lock_init(&tfcp_req->reqlock);
-	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+	kref_init(&tfcp_req->ref);
 
-	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
-				fcpreq->cmdaddr, fcpreq->cmdlen);
+	schedule_work(&tfcp_req->fcp_rcv_work);
 
-	return ret;
+	return 0;
 }
 
 static void
@@ -589,7 +707,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-	schedule_work(&tfcp_req->work);
+	schedule_work(&tfcp_req->tio_done_work);
 }
 
 static void
@@ -605,27 +723,47 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 			void *hw_queue_handle,
 			struct nvmefc_fcp_req *fcpreq)
 {
-	struct fcloop_rport *rport = remoteport->private;
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
-	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+	struct fcloop_fcpreq *tfcp_req;
+	bool abortio = true;
 
+	spin_lock(&inireq->inilock);
+	tfcp_req = inireq->tfcp_req;
+	if (tfcp_req)
+		fcloop_tfcp_req_get(tfcp_req);
+	spin_unlock(&inireq->inilock);
+
 	if (!tfcp_req)
 		/* abort has already been called */
 		return;
 
-	if (rport->targetport)
-		nvmet_fc_rcv_fcp_abort(rport->targetport,
-					&tfcp_req->tgt_fcp_req);
-
 	/* break initiator/target relationship for io */
 	spin_lock(&tfcp_req->reqlock);
-	inireq->tfcp_req = NULL;
-	tfcp_req->fcpreq = NULL;
+	switch (tfcp_req->inistate) {
+	case INI_IO_START:
+	case INI_IO_ACTIVE:
+		tfcp_req->inistate = INI_IO_ABORTED;
+		break;
+	case INI_IO_COMPLETED:
+		abortio = false;
+		break;
+	default:
+		spin_unlock(&tfcp_req->reqlock);
+		WARN_ON(1);
+		return;
+	}
 	spin_unlock(&tfcp_req->reqlock);
 
-	/* post the aborted io completion */
-	fcpreq->status = -ECANCELED;
-	schedule_work(&inireq->iniwork);
+	if (abortio)
+		/* leave the reference while the work item is scheduled */
+		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+	else {
+		/*
+		 * as the io has already had the done callback made,
+		 * nothing more to do. So release the reference taken above
+		 */
+		fcloop_tfcp_req_put(tfcp_req);
+	}
 }
 
 static void
@@ -657,7 +795,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
 static void
 fcloop_localport_delete(struct nvme_fc_local_port *localport)
 {
-	struct fcloop_lport *lport = localport->private;
+	struct fcloop_lport_priv *lport_priv = localport->private;
+	struct fcloop_lport *lport = lport_priv->lport;
 
 	/* release any threads waiting for the unreg to complete */
 	complete(&lport->unreg_done);
@@ -697,7 +836,7 @@ static struct nvme_fc_port_template fctemplate = {
 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
 	.dma_boundary		= FCLOOP_DMABOUND_4G,
 	/* sizes of additional private data for data structures */
-	.local_priv_sz		= sizeof(struct fcloop_lport),
+	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
 	.remote_priv_sz		= sizeof(struct fcloop_rport),
 	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
 	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
@@ -714,8 +853,7 @@ static struct nvmet_fc_target_template tgttemplate = {
 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
 	.dma_boundary		= FCLOOP_DMABOUND_4G,
 	/* optional features */
-	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
-				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+	.target_features	= 0,
 	/* sizes of additional private data for data structures */
 	.target_priv_sz		= sizeof(struct fcloop_tport),
 };
@@ -728,11 +866,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
 	struct fcloop_ctrl_options *opts;
 	struct nvme_fc_local_port *localport;
 	struct fcloop_lport *lport;
-	int ret;
+	struct fcloop_lport_priv *lport_priv;
+	unsigned long flags;
+	int ret = -ENOMEM;
 
+	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+	if (!lport)
+		return -ENOMEM;
+
 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
 	if (!opts)
-		return -ENOMEM;
+		goto out_free_lport;
 
 	ret = fcloop_parse_options(opts, buf);
 	if (ret)
@@ -752,23 +896,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
 
 	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
 	if (!ret) {
-		unsigned long flags;
-
 		/* success */
-		lport = localport->private;
+		lport_priv = localport->private;
+		lport_priv->lport = lport;
+
 		lport->localport = localport;
 		INIT_LIST_HEAD(&lport->lport_list);
 
 		spin_lock_irqsave(&fcloop_lock, flags);
 		list_add_tail(&lport->lport_list, &fcloop_lports);
 		spin_unlock_irqrestore(&fcloop_lock, flags);
-
-		/* mark all of the input buffer consumed */
-		ret = count;
 	}
 
 out_free_opts:
 	kfree(opts);
+out_free_lport:
+	/* free only if we're going to fail */
+	if (ret)
+		kfree(lport);
+
 	return ret ? ret : count;
 }
 
@@ -790,6 +936,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
 
 	wait_for_completion(&lport->unreg_done);
 
+	kfree(lport);
+
 	return ret;
 }
 
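The fcloop create/unreg hunks above move the lport out of the localport private area: the lport is now allocated up front, ownership passes to the registered port on success, and it is freed on the error path of the store handler (or later by __wait_localport_unreg() once the unregistration completes). A small sketch of that allocate-early, free-only-on-failure shape, with hypothetical names standing in for the fcloop helpers:

    #include <errno.h>
    #include <stdlib.h>

    struct port { int dummy; };

    static int register_port(struct port *p) { (void)p; return 0; }  /* 0 on success */

    static int create_port(struct port **out)
    {
        struct port *p;
        int ret;

        p = calloc(1, sizeof(*p));      /* allocated before the outcome is known */
        if (!p)
            return -ENOMEM;

        ret = register_port(p);
        if (!ret)
            *out = p;                   /* success: ownership moves to the caller */

        /* free only if we're going to fail */
        if (ret)
            free(p);

        return ret;
    }

Because the memory no longer lives inside the registered object, the waiter in __wait_localport_unreg() can safely touch lport->unreg_done after the localport itself has been torn down, and then free it.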
@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
 
 static struct nvmf_transport_ops nvme_loop_transport = {
 	.name		= "loop",
+	.module		= THIS_MODULE,
 	.create_ctrl	= nvme_loop_create_ctrl,
 };
 
@@ -921,7 +921,7 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 
 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
 {
-	pr_info("freeing queue %d\n", queue->idx);
+	pr_debug("freeing queue %d\n", queue->idx);
 
 	nvmet_sq_destroy(&queue->nvme_sq);
 
@@ -1503,25 +1503,9 @@ err_ib_client:
 
 static void __exit nvmet_rdma_exit(void)
 {
-	struct nvmet_rdma_queue *queue;
-
 	nvmet_unregister_transport(&nvmet_rdma_ops);
-
-	flush_scheduled_work();
-
-	mutex_lock(&nvmet_rdma_queue_mutex);
-	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
-			struct nvmet_rdma_queue, queue_list))) {
-		list_del_init(&queue->queue_list);
-
-		mutex_unlock(&nvmet_rdma_queue_mutex);
-		__nvmet_rdma_queue_disconnect(queue);
-		mutex_lock(&nvmet_rdma_queue_mutex);
-	}
-	mutex_unlock(&nvmet_rdma_queue_mutex);
-
-	flush_scheduled_work();
 	ib_unregister_client(&nvmet_rdma_ib_client);
+	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
 	ida_destroy(&nvmet_rdma_queue_ida);
 }
 
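The removed loop above used the classic "drain a list under a mutex" pattern: pop the first entry while holding the lock, drop the lock for the (possibly sleeping) disconnect call, then re-take it; the new code instead relies on ib_unregister_client() tearing the queues down and only asserts the list is empty. A small pthread sketch of the drain loop that was removed (illustrative types, not the nvmet code):

    #include <pthread.h>
    #include <stdlib.h>

    struct queue {
        struct queue *next;
        /* ... */
    };

    static struct queue *queue_list;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static void disconnect_queue(struct queue *q)
    {
        /* may sleep or take other locks, so it runs without queue_lock held */
        free(q);
    }

    static void drain_queues(void)
    {
        struct queue *q;

        pthread_mutex_lock(&queue_lock);
        while ((q = queue_list) != NULL) {
            queue_list = q->next;           /* unlink while holding the lock */

            pthread_mutex_unlock(&queue_lock);
            disconnect_queue(q);            /* heavy work outside the lock */
            pthread_mutex_lock(&queue_lock);
        }
        pthread_mutex_unlock(&queue_lock);
    }

Dropping and re-taking the lock around the callback avoids holding a mutex across code that can block, at the cost of having to restart from the list head each iteration.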