Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6: (32 commits)
  [SCSI] aacraid: prevent panic on adapter resource failure
  [SCSI] aha152x: use data accessors and !use_sg cleanup
  [SCSI] aha152x: Fix check_condition code-path
  [SCSI] aha152x: Clean Reset path
  [SCSI] aha152x: preliminary fixes and some comments
  [SCSI] aha152x: use bounce buffer
  [SCSI] aha152x: fix debug mode symbol conflict
  [SCSI] sd: disentangle barriers in SCSI
  [SCSI] lpfc : scsi command accessor fix for 8.2.2
  [SCSI] qlogicpti: Some cosmetic changes
  [SCSI] lpfc 8.2.2 : Change version number to 8.2.2
  [SCSI] lpfc 8.2.2 : Style cleanups
  [SCSI] lpfc 8.2.2 : Miscellaneous Bug Fixes
  [SCSI] lpfc 8.2.2 : Miscellaneous management and logging mods
  [SCSI] lpfc 8.2.2 : Rework the lpfc_printf_log() macro
  [SCSI] lpfc 8.2.2 : Attribute and Parameter splits for vport and physical port
  [SCSI] lpfc 8.2.2 : Fix locking around HBA's port_list
  [SCSI] lpfc 8.2.2 : Error messages and debugfs updates
  [SCSI] initialize shost_data to zero
  [SCSI] mptsas: add SMP passthrough support via bsg
  ...
Linus Torvalds 2007-08-06 17:48:34 -07:00
commit bced137384
46 changed files with 2847 additions and 2060 deletions

@ -1,5 +1,5 @@
/*
* bsg.c - block layer implementation of the sg v3 interface
* bsg.c - block layer implementation of the sg v4 interface
*
* Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
* Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
@ -421,7 +421,6 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
hdr->info = 0;
if (hdr->device_status || hdr->transport_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
hdr->din_resid = rq->data_len;
hdr->response_len = 0;
if (rq->sense_len && hdr->response) {
@ -437,9 +436,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
}
if (rq->next_rq) {
hdr->dout_resid = rq->data_len;
hdr->din_resid = rq->next_rq->data_len;
blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq);
}
} else if (rq_data_dir(rq) == READ)
hdr->din_resid = rq->data_len;
else
hdr->dout_resid = rq->data_len;
blk_rq_unmap_user(bio);
blk_put_request(rq);
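
The residual handling above means a bidirectional request reports dout_resid from the main request and din_resid from its paired next_rq, while a unidirectional request reports a single residual according to its data direction. For context, a minimal user-space sketch of driving the sg v4 interface through a bsg node (the device path, the INQUIRY CDB, and the lack of error handling are illustrative assumptions, not part of this commit):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>        /* SG_IO */
#include <linux/bsg.h>      /* struct sg_io_v4 */

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY, 96 bytes */
	unsigned char buf[96], sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);         /* assumed node name */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';                                   /* marks a v4 header */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (__u64)(unsigned long)cdb;
	hdr.din_xfer_len = sizeof(buf);                    /* device-to-host data */
	hdr.din_xferp = (__u64)(unsigned long)buf;
	hdr.max_response_len = sizeof(sense);
	hdr.response = (__u64)(unsigned long)sense;
	hdr.timeout = 30000;                               /* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		return 1;
	/* din_resid is filled in by blk_complete_sgv4_hdr_rq() above */
	printf("device_status 0x%x, din_resid %d\n",
	       hdr.device_status, hdr.din_resid);
	return 0;
}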

@ -88,7 +88,9 @@ module_param(mpt_channel_mapping, int, 0);
MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
static int mpt_debug_level;
module_param(mpt_debug_level, int, 0);
static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
&mpt_debug_level, 0600);
MODULE_PARM_DESC(mpt_debug_level, " debug level - refer to mptdebug.h - (default=0)");
#ifdef MFCNT
@ -220,6 +222,19 @@ pci_enable_io_access(struct pci_dev *pdev)
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
static int mpt_set_debug_level(const char *val, struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
MPT_ADAPTER *ioc;
if (ret)
return ret;
list_for_each_entry(ioc, &ioc_list, list)
ioc->debug_level = mpt_debug_level;
return 0;
}
/*
* Process turbo (context) reply...
*/
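
The change above converts mpt_debug_level from a plain module_param() to module_param_call(), so a runtime write to the parameter also propagates the new value to debug_level on every adapter in ioc_list. The same pattern in isolation looks roughly like this (module and symbol names are hypothetical, not from the driver):

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int example_level;

static int example_set_level(const char *val, struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);   /* parse and store into example_level */

	if (ret)
		return ret;
	/* propagate the new value to whatever runtime state depends on it */
	return 0;
}

module_param_call(example_level, example_set_level, param_get_int,
		  &example_level, 0600);
MODULE_PARM_DESC(example_level, " example debug level (default=0)");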

@ -1312,11 +1312,137 @@ mptsas_get_bay_identifier(struct sas_rphy *rphy)
return rc;
}
static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
struct request *req)
{
MPT_ADAPTER *ioc = ((MPT_SCSI_HOST *) shost->hostdata)->ioc;
MPT_FRAME_HDR *mf;
SmpPassthroughRequest_t *smpreq;
struct request *rsp = req->next_rq;
int ret;
int flagsLength;
unsigned long timeleft;
char *psge;
dma_addr_t dma_addr_in = 0;
dma_addr_t dma_addr_out = 0;
u64 sas_address = 0;
if (!rsp) {
printk(KERN_ERR "%s: the smp response space is missing\n",
__FUNCTION__);
return -EINVAL;
}
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(KERN_ERR "%s: multiple segments req %u %u, rsp %u %u\n",
__FUNCTION__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
return -EINVAL;
}
ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
if (ret)
goto out;
mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
if (!mf) {
ret = -ENOMEM;
goto out_unlock;
}
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
if (rphy)
sas_address = rphy->identify.sas_address;
else {
struct mptsas_portinfo *port_info;
mutex_lock(&ioc->sas_topology_mutex);
port_info = mptsas_find_portinfo_by_handle(ioc, ioc->handle);
if (port_info && port_info->phy_info)
sas_address =
port_info->phy_info[0].phy->identify.sas_address;
mutex_unlock(&ioc->sas_topology_mutex);
}
*((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
psge = (char *)
(((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
/* request */
flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_DIRECTION |
mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
flagsLength |= (req->data_len - 4);
dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
req->data_len, PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out)
goto put_mf;
mpt_add_sge(psge, flagsLength, dma_addr_out);
psge += (sizeof(u32) + sizeof(dma_addr_t));
/* response */
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
flagsLength |= rsp->data_len + 4;
dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
rsp->data_len, PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in)
goto unmap;
mpt_add_sge(psge, flagsLength, dma_addr_in);
mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
if (!timeleft) {
printk(KERN_ERR "%s: smp timeout!\n", __FUNCTION__);
/* On timeout reset the board */
mpt_HardResetHandler(ioc, CAN_SLEEP);
ret = -ETIMEDOUT;
goto unmap;
}
mf = NULL;
if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) {
SmpPassthroughReply_t *smprep;
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep);
} else {
printk(KERN_ERR "%s: smp passthru reply failed to be returned\n",
__FUNCTION__);
ret = -ENXIO;
}
unmap:
if (dma_addr_out)
pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
PCI_DMA_BIDIRECTIONAL);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
out_unlock:
mutex_unlock(&ioc->sas_mgmt.mutex);
out:
return ret;
}
static struct sas_function_template mptsas_transport_functions = {
.get_linkerrors = mptsas_get_linkerrors,
.get_enclosure_identifier = mptsas_get_enclosure_identifier,
.get_bay_identifier = mptsas_get_bay_identifier,
.phy_reset = mptsas_phy_reset,
.smp_handler = mptsas_smp_handler,
};
static struct scsi_transport_template *mptsas_transport_template;
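
mptsas_smp_handler() is the hook that lets the SAS transport class forward SMP requests received over bsg: the request frame arrives in req->bio, the response is returned through req->next_rq. A user-space sketch of issuing an SMP REPORT GENERAL frame through such a node (the node path, buffer sizes, and lack of error handling are assumptions for illustration; this mirrors what tools like smp_utils do):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <linux/bsg.h>

int main(void)
{
	/* SMP REPORT GENERAL: frame type 0x40, function 0x00, 4 trailing CRC bytes */
	unsigned char req[8] = { 0x40, 0x00, };
	unsigned char rsp[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/expander-0:0", O_RDWR);    /* assumed node name */

	if (fd < 0)
		return 1;
	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	hdr.dout_xfer_len = sizeof(req);                   /* SMP request frame */
	hdr.dout_xferp = (__u64)(unsigned long)req;
	hdr.din_xfer_len = sizeof(rsp);                    /* SMP response frame */
	hdr.din_xferp = (__u64)(unsigned long)rsp;
	hdr.timeout = 10000;

	if (ioctl(fd, SG_IO, &hdr) < 0)
		return 1;
	/* byte 0: response frame type (0x41), byte 2: function result */
	printf("frame type 0x%02x, function result 0x%02x\n", rsp[0], rsp[2]);
	return 0;
}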

@ -4154,8 +4154,9 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
fcp_rsp_iu->fcp_resid,
(int) zfcp_get_fcp_dl(fcp_cmnd_iu));
scpnt->resid = fcp_rsp_iu->fcp_resid;
if (scpnt->request_bufflen - scpnt->resid < scpnt->underflow)
scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
scpnt->underflow)
set_host_byte(&scpnt->result, DID_ERROR);
}

@ -36,8 +36,6 @@ static void zfcp_qdio_sbale_fill
(struct zfcp_fsf_req *, unsigned long, void *, int);
static int zfcp_qdio_sbals_from_segment
(struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
static int zfcp_qdio_sbals_from_buffer
(struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
static qdio_handler_t zfcp_qdio_request_handler;
static qdio_handler_t zfcp_qdio_response_handler;
@ -631,28 +629,6 @@ out:
}
/**
* zfcp_qdio_sbals_from_buffer - fill SBALs from buffer
* @fsf_req: request to be processed
* @sbtype: SBALE flags
* @buffer: data buffer
* @length: length of buffer
* @max_sbals: upper bound for number of SBALs to be used
*/
static int
zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
void *buffer, unsigned long length, int max_sbals)
{
struct scatterlist sg_segment;
zfcp_address_to_sg(buffer, &sg_segment);
sg_segment.length = length;
return zfcp_qdio_sbals_from_sg(fsf_req, sbtype, &sg_segment, 1,
max_sbals);
}
/**
* zfcp_qdio_sbals_from_scsicmnd - fill SBALs from scsi command
* @fsf_req: request to be processed
@ -664,18 +640,13 @@ int
zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
{
if (scsi_cmnd->use_sg) {
if (scsi_sg_count(scsi_cmnd))
return zfcp_qdio_sbals_from_sg(fsf_req, sbtype,
(struct scatterlist *)
scsi_cmnd->request_buffer,
scsi_cmnd->use_sg,
ZFCP_MAX_SBALS_PER_REQ);
} else {
return zfcp_qdio_sbals_from_buffer(fsf_req, sbtype,
scsi_cmnd->request_buffer,
scsi_cmnd->request_bufflen,
ZFCP_MAX_SBALS_PER_REQ);
}
scsi_sglist(scsi_cmnd),
scsi_sg_count(scsi_cmnd),
ZFCP_MAX_SBALS_PER_REQ);
else
return 0;
}
/**

@ -1110,7 +1110,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
__aac_shutdown(aac);
out_unmap:
aac_fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
if (aac->comm_addr)
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
aac->comm_phys);
kfree(aac->queues);
aac_adapter_ioremap(aac, 0);
kfree(aac->fibs);

@ -289,18 +289,18 @@ static LIST_HEAD(aha152x_host_list);
if(spin_is_locked(&QLOCK)) { \
DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
} \
DPRINTK(debug_locks, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
spin_lock_irqsave(&QLOCK,flags); \
DPRINTK(debug_locks, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
QLOCKER=__FUNCTION__; \
QLOCKERL=__LINE__; \
} while(0)
#define DO_UNLOCK(flags) \
do { \
DPRINTK(debug_locks, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \
spin_unlock_irqrestore(&QLOCK,flags); \
DPRINTK(debug_locks, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \
QLOCKER="(not locked)"; \
QLOCKERL=0; \
} while(0)
@ -322,6 +322,12 @@ static LIST_HEAD(aha152x_host_list);
(cmd) ? ((cmd)->device->id & 0x0f) : -1, \
(cmd) ? ((cmd)->device->lun & 0x07) : -1
static inline void
CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
{
scsi_set_resid(cmd, scsi_get_resid(cmd) + inc);
}
#define DELAY_DEFAULT 1000
#if defined(PCMCIA)
@ -552,14 +558,11 @@ struct aha152x_hostdata {
struct aha152x_scdata {
Scsi_Cmnd *next; /* next sc in queue */
struct completion *done;/* semaphore to block on */
unsigned char cmd_len;
unsigned char cmnd[MAX_COMMAND_SIZE];
unsigned short use_sg;
unsigned request_bufflen;
void *request_buffer;
unsigned char aha_orig_cmd_len;
unsigned char aha_orig_cmnd[MAX_COMMAND_SIZE];
int aha_orig_resid;
};
/* access macros for hostdata */
#define HOSTDATA(shpnt) ((struct aha152x_hostdata *) &shpnt->hostdata)
@ -978,15 +981,15 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
#if defined(AHA152X_DEBUG)
if (HOSTDATA(shpnt)->debug & debug_queue) {
printk(INFO_LEAD "queue: %p; cmd_len=%d pieces=%d size=%u cmnd=",
CMDINFO(SCpnt), SCpnt, SCpnt->cmd_len, SCpnt->use_sg, SCpnt->request_bufflen);
CMDINFO(SCpnt), SCpnt, SCpnt->cmd_len,
scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
__scsi_print_command(SCpnt->cmnd);
}
#endif
SCpnt->scsi_done = done;
SCpnt->resid = SCpnt->request_bufflen;
SCpnt->SCp.phase = not_issued | phase;
SCpnt->SCp.Status = CHECK_CONDITION;
SCpnt->SCp.Status = 0x1; /* Illegal status by SCSI standard */
SCpnt->SCp.Message = 0;
SCpnt->SCp.have_data_in = 0;
SCpnt->SCp.sent_command = 0;
@ -997,20 +1000,11 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
return FAILED;
}
} else {
struct aha152x_scdata *sc;
SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
if(SCpnt->host_scribble==0) {
printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
return FAILED;
}
sc = SCDATA(SCpnt);
memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd));
sc->request_buffer = SCpnt->request_buffer;
sc->request_bufflen = SCpnt->request_bufflen;
sc->use_sg = SCpnt->use_sg;
sc->cmd_len = SCpnt->cmd_len;
}
SCNEXT(SCpnt) = NULL;
@ -1022,16 +1016,25 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
SCp.buffer : next buffer
SCp.buffers_residual : left buffers in list
SCp.phase : current state of the command */
if (SCpnt->use_sg) {
SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
} else {
SCpnt->SCp.ptr = (char *) SCpnt->request_buffer;
SCpnt->SCp.this_residual = SCpnt->request_bufflen;
if ((phase & (check_condition|resetting)) || !scsi_sglist(SCpnt)) {
if (phase & check_condition) {
SCpnt->SCp.ptr = SCpnt->sense_buffer;
SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer);
scsi_set_resid(SCpnt, sizeof(SCpnt->sense_buffer));
} else {
SCpnt->SCp.ptr = NULL;
SCpnt->SCp.this_residual = 0;
scsi_set_resid(SCpnt, 0);
}
SCpnt->SCp.buffer = NULL;
SCpnt->SCp.buffers_residual = 0;
} else {
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
SCpnt->SCp.buffer = scsi_sglist(SCpnt);
SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
}
DO_LOCK(flags);
@ -1150,9 +1153,6 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
DECLARE_COMPLETION(done);
int ret, issued, disconnected;
unsigned char old_cmd_len = SCpnt->cmd_len;
unsigned short old_use_sg = SCpnt->use_sg;
void *old_buffer = SCpnt->request_buffer;
unsigned old_bufflen = SCpnt->request_bufflen;
unsigned long flags;
unsigned long timeleft;
@ -1174,9 +1174,6 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
DO_UNLOCK(flags);
SCpnt->cmd_len = 0;
SCpnt->use_sg = 0;
SCpnt->request_buffer = NULL;
SCpnt->request_bufflen = 0;
aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
@ -1189,9 +1186,6 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
}
SCpnt->cmd_len = old_cmd_len;
SCpnt->use_sg = old_use_sg;
SCpnt->request_buffer = old_buffer;
SCpnt->request_bufflen = old_bufflen;
DO_LOCK(flags);
@ -1531,8 +1525,8 @@ static void busfree_run(struct Scsi_Host *shpnt)
/* target sent DISCONNECT */
DPRINTK(debug_selection, DEBUG_LEAD "target disconnected at %d/%d\n",
CMDINFO(CURRENT_SC),
CURRENT_SC->resid,
CURRENT_SC->request_bufflen);
scsi_get_resid(CURRENT_SC),
scsi_bufflen(CURRENT_SC));
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->disconnections++;
#endif
@ -1568,18 +1562,16 @@ static void busfree_run(struct Scsi_Host *shpnt)
#endif
/* restore old command */
memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd));
cmd->request_buffer = sc->request_buffer;
cmd->request_bufflen = sc->request_bufflen;
cmd->use_sg = sc->use_sg;
cmd->cmd_len = sc->cmd_len;
memcpy(cmd->cmnd, sc->aha_orig_cmnd, sizeof(cmd->cmnd));
cmd->cmd_len = sc->aha_orig_cmd_len;
scsi_set_resid(cmd, sc->aha_orig_resid);
cmd->SCp.Status = 0x02;
cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
} else if(DONE_SC->SCp.Status==0x02) {
} else if(DONE_SC->SCp.Status==SAM_STAT_CHECK_CONDITION) {
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->busfree_with_check_condition++;
#endif
@ -1587,13 +1579,23 @@ static void busfree_run(struct Scsi_Host *shpnt)
DPRINTK(debug_eh, ERR_LEAD "CHECK CONDITION found\n", CMDINFO(DONE_SC));
#endif
if(!(DONE_SC->SCp.Status & not_issued)) {
if(!(DONE_SC->SCp.phase & not_issued)) {
struct aha152x_scdata *sc;
Scsi_Cmnd *ptr = DONE_SC;
DONE_SC=NULL;
#if 0
DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr));
#endif
/* save old command */
sc = SCDATA(ptr);
/* It was allocated in aha152x_internal_queue? */
BUG_ON(!sc);
memcpy(sc->aha_orig_cmnd, ptr->cmnd,
sizeof(ptr->cmnd));
sc->aha_orig_cmd_len = ptr->cmd_len;
sc->aha_orig_resid = scsi_get_resid(ptr);
ptr->cmnd[0] = REQUEST_SENSE;
ptr->cmnd[1] = 0;
ptr->cmnd[2] = 0;
@ -1601,10 +1603,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
ptr->cmnd[4] = sizeof(ptr->sense_buffer);
ptr->cmnd[5] = 0;
ptr->cmd_len = 6;
ptr->use_sg = 0;
ptr->request_buffer = ptr->sense_buffer;
ptr->request_bufflen = sizeof(ptr->sense_buffer);
DO_UNLOCK(flags);
aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
DO_LOCK(flags);
@ -2180,7 +2179,8 @@ static void datai_init(struct Scsi_Host *shpnt)
DATA_LEN=0;
DPRINTK(debug_datai,
DEBUG_LEAD "datai_init: request_bufflen=%d resid=%d\n",
CMDINFO(CURRENT_SC), CURRENT_SC->request_bufflen, CURRENT_SC->resid);
CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC),
scsi_get_resid(CURRENT_SC));
}
static void datai_run(struct Scsi_Host *shpnt)
@ -2293,11 +2293,12 @@ static void datai_run(struct Scsi_Host *shpnt)
static void datai_end(struct Scsi_Host *shpnt)
{
CURRENT_SC->resid -= GETSTCNT();
CMD_INC_RESID(CURRENT_SC, -GETSTCNT());
DPRINTK(debug_datai,
DEBUG_LEAD "datai_end: request_bufflen=%d resid=%d stcnt=%d\n",
CMDINFO(CURRENT_SC), CURRENT_SC->request_bufflen, CURRENT_SC->resid, GETSTCNT());
CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC),
scsi_get_resid(CURRENT_SC), GETSTCNT());
SETPORT(SXFRCTL0, CH1|CLRSTCNT);
SETPORT(DMACNTRL0, 0);
@ -2318,11 +2319,12 @@ static void datao_init(struct Scsi_Host *shpnt)
SETPORT(SIMODE0, 0);
SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE );
DATA_LEN = CURRENT_SC->resid;
DATA_LEN = scsi_get_resid(CURRENT_SC);
DPRINTK(debug_datao,
DEBUG_LEAD "datao_init: request_bufflen=%d; resid=%d\n",
CMDINFO(CURRENT_SC), CURRENT_SC->request_bufflen, CURRENT_SC->resid);
CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC),
scsi_get_resid(CURRENT_SC));
}
static void datao_run(struct Scsi_Host *shpnt)
@ -2346,7 +2348,7 @@ static void datao_run(struct Scsi_Host *shpnt)
SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
CURRENT_SC->SCp.this_residual--;
CURRENT_SC->resid--;
CMD_INC_RESID(CURRENT_SC, -1);
SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
}
@ -2355,7 +2357,7 @@ static void datao_run(struct Scsi_Host *shpnt)
outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
CURRENT_SC->SCp.ptr += 2 * data_count;
CURRENT_SC->SCp.this_residual -= 2 * data_count;
CURRENT_SC->resid -= 2 * data_count;
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
}
if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) {
@ -2381,35 +2383,34 @@ static void datao_run(struct Scsi_Host *shpnt)
static void datao_end(struct Scsi_Host *shpnt)
{
if(TESTLO(DMASTAT, DFIFOEMP)) {
int data_count = (DATA_LEN - CURRENT_SC->resid) - GETSTCNT();
int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) -
GETSTCNT();
DPRINTK(debug_datao, DEBUG_LEAD "datao: %d bytes to resend (%d written, %d transferred)\n",
CMDINFO(CURRENT_SC),
data_count,
DATA_LEN-CURRENT_SC->resid,
DATA_LEN - scsi_get_resid(CURRENT_SC),
GETSTCNT());
CURRENT_SC->resid += data_count;
CMD_INC_RESID(CURRENT_SC, data_count);
if(CURRENT_SC->use_sg) {
data_count -= CURRENT_SC->SCp.ptr - SG_ADDRESS(CURRENT_SC->SCp.buffer);
while(data_count>0) {
CURRENT_SC->SCp.buffer--;
CURRENT_SC->SCp.buffers_residual++;
data_count -= CURRENT_SC->SCp.buffer->length;
}
CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) - data_count;
CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length + data_count;
} else {
CURRENT_SC->SCp.ptr -= data_count;
CURRENT_SC->SCp.this_residual += data_count;
data_count -= CURRENT_SC->SCp.ptr -
SG_ADDRESS(CURRENT_SC->SCp.buffer);
while(data_count>0) {
CURRENT_SC->SCp.buffer--;
CURRENT_SC->SCp.buffers_residual++;
data_count -= CURRENT_SC->SCp.buffer->length;
}
CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) -
data_count;
CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length +
data_count;
}
DPRINTK(debug_datao, DEBUG_LEAD "datao_end: request_bufflen=%d; resid=%d; stcnt=%d\n",
CMDINFO(CURRENT_SC),
CURRENT_SC->request_bufflen,
CURRENT_SC->resid,
scsi_bufflen(CURRENT_SC),
scsi_get_resid(CURRENT_SC),
GETSTCNT());
SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@ -2936,7 +2937,7 @@ static void show_command(Scsi_Cmnd *ptr)
__scsi_print_command(ptr->cmnd);
printk(KERN_DEBUG "); request_bufflen=%d; resid=%d; phase |",
ptr->request_bufflen, ptr->resid);
scsi_bufflen(ptr), scsi_get_resid(ptr));
if (ptr->SCp.phase & not_issued)
printk("not issued|");
@ -3006,7 +3007,8 @@ static int get_command(char *pos, Scsi_Cmnd * ptr)
SPRINTF("0x%02x ", ptr->cmnd[i]);
SPRINTF("); resid=%d; residual=%d; buffers=%d; phase |",
ptr->resid, ptr->SCp.this_residual, ptr->SCp.buffers_residual);
scsi_get_resid(ptr), ptr->SCp.this_residual,
ptr->SCp.buffers_residual);
if (ptr->SCp.phase & not_issued)
SPRINTF("not issued|");
@ -3395,7 +3397,7 @@ static int aha152x_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start
PDEBUG(debug_datai, "data in");
PDEBUG(debug_datao, "data out");
PDEBUG(debug_eh, "eh");
PDEBUG(debug_locks, "locks");
PDEBUG(debug_locking, "locks");
PDEBUG(debug_phases, "phases");
SPRINTF("\n");
@ -3474,6 +3476,12 @@ static int aha152x_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start
return thislength < length ? thislength : length;
}
static int aha152x_adjust_queue(struct scsi_device *device)
{
blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
return 0;
}
static struct scsi_host_template aha152x_driver_template = {
.module = THIS_MODULE,
.name = AHA152X_REVID,
@ -3490,6 +3498,7 @@ static struct scsi_host_template aha152x_driver_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = aha152x_adjust_queue,
};
#if !defined(PCMCIA)
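
The aha152x changes are part of the wider conversion from cmd->request_buffer/use_sg/resid to the scsi data accessors (scsi_sglist(), scsi_sg_count(), scsi_bufflen(), scsi_set_resid()). A minimal sketch of the accessor pattern as a PIO-style driver of this kernel generation might use it (the helper and its semantics are hypothetical, not taken from aha152x):

#include <linux/kernel.h>
#include <linux/mm.h>            /* page_address() */
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>

/*
 * Hypothetical PIO-style helper: copy 'len' bytes of device data into the
 * command's buffers using the accessors instead of request_buffer/use_sg.
 * (Pre-2.6.24 scatterlists, so sg->page is used directly.)
 */
static unsigned int example_copy_in(struct scsi_cmnd *cmd, const u8 *src,
				    unsigned int len)
{
	struct scatterlist *sg = scsi_sglist(cmd);
	unsigned int done = 0;
	int i;

	for (i = 0; i < scsi_sg_count(cmd) && done < len; i++) {
		unsigned int chunk = min(len - done, sg[i].length);

		memcpy(page_address(sg[i].page) + sg[i].offset,
		       src + done, chunk);
		done += chunk;
	}

	/* residual: bytes of the buffer the transfer did not fill */
	scsi_set_resid(cmd, scsi_bufflen(cmd) - done);
	return done;
}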

@ -298,7 +298,7 @@ typedef union {
enum {
debug_procinfo = 0x0001,
debug_queue = 0x0002,
debug_locks = 0x0004,
debug_locking = 0x0004,
debug_intr = 0x0008,
debug_selection = 0x0010,
debug_msgo = 0x0020,

@ -1701,7 +1701,16 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
&& maxsync < AHC_SYNCRATE_ULTRA2)
maxsync = AHC_SYNCRATE_ULTRA2;
/* Now set the maxsync based on the card capabilities
* DT is already done above */
if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
&& maxsync < AHC_SYNCRATE_ULTRA)
maxsync = AHC_SYNCRATE_ULTRA;
if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
&& maxsync < AHC_SYNCRATE_FAST)
maxsync = AHC_SYNCRATE_FAST;
for (syncrate = &ahc_syncrates[maxsync];
syncrate->rate != NULL;
syncrate++) {
@ -1765,6 +1774,17 @@ ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
else
scsirate &= SXFR;
/* now set maxsync based on card capabilities */
if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2)
maxsync = AHC_SYNCRATE_ULTRA2;
if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
&& maxsync < AHC_SYNCRATE_ULTRA)
maxsync = AHC_SYNCRATE_ULTRA;
if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
&& maxsync < AHC_SYNCRATE_FAST)
maxsync = AHC_SYNCRATE_FAST;
syncrate = &ahc_syncrates[maxsync];
while (syncrate->rate != NULL) {

@ -173,20 +173,20 @@ static struct pci_device_id dptids[] = {
};
MODULE_DEVICE_TABLE(pci,dptids);
static int adpt_detect(struct scsi_host_template* sht)
static void adpt_exit(void);
static int adpt_detect(void)
{
struct pci_dev *pDev = NULL;
adpt_hba* pHba;
adpt_init();
PINFO("Detecting Adaptec I2O RAID controllers...\n");
/* search for all Adaptec I2O RAID cards */
while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
if(pDev->device == PCI_DPT_DEVICE_ID ||
pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
if(adpt_install_hba(sht, pDev) ){
if(adpt_install_hba(pDev) ){
PERROR("Could not Init an I2O RAID device\n");
PERROR("Will not try to detect others.\n");
return hba_count-1;
@ -248,34 +248,33 @@ rebuild_sys_tab:
}
for (pHba = hba_chain; pHba; pHba = pHba->next) {
if( adpt_scsi_register(pHba,sht) < 0){
if (adpt_scsi_register(pHba) < 0) {
adpt_i2o_delete_hba(pHba);
continue;
}
pHba->initialized = TRUE;
pHba->state &= ~DPTI_STATE_RESET;
scsi_scan_host(pHba->host);
}
// Register our control device node
// nodes will need to be created in /dev to access this
// the nodes can not be created from within the driver
if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
adpt_i2o_sys_shutdown();
adpt_exit();
return 0;
}
return hba_count;
}
/*
* scsi_unregister will be called AFTER we return.
*/
static int adpt_release(struct Scsi_Host *host)
static int adpt_release(adpt_hba *pHba)
{
adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
struct Scsi_Host *shost = pHba->host;
scsi_remove_host(shost);
// adpt_i2o_quiesce_hba(pHba);
adpt_i2o_delete_hba(pHba);
scsi_unregister(host);
scsi_host_put(shost);
return 0;
}
@ -882,7 +881,7 @@ static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p)
#endif
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
static int adpt_install_hba(struct pci_dev* pDev)
{
adpt_hba* pHba = NULL;
@ -1031,8 +1030,6 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
mutex_lock(&adpt_configuration_lock);
// scsi_unregister calls our adpt_release which
// does a quiesce
if(pHba->host){
free_irq(pHba->host->irq, pHba);
}
@ -1084,17 +1081,6 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
}
static int adpt_init(void)
{
printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
#ifdef REBOOT_NOTIFIER
register_reboot_notifier(&adpt_reboot_notifier);
#endif
return 0;
}
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
struct adpt_device* d;
@ -2180,37 +2166,6 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
}
static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht)
{
struct Scsi_Host *host = NULL;
host = scsi_register(sht, sizeof(adpt_hba*));
if (host == NULL) {
printk ("%s: scsi_register returned NULL\n",pHba->name);
return -1;
}
host->hostdata[0] = (unsigned long)pHba;
pHba->host = host;
host->irq = pHba->pDev->irq;
/* no IO ports, so don't have to set host->io_port and
* host->n_io_port
*/
host->io_port = 0;
host->n_io_port = 0;
/* see comments in scsi_host.h */
host->max_id = 16;
host->max_lun = 256;
host->max_channel = pHba->top_scsi_channel + 1;
host->cmd_per_lun = 1;
host->unique_id = (uint) pHba;
host->sg_tablesize = pHba->sg_tablesize;
host->can_queue = pHba->post_fifo_size;
return 0;
}
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
adpt_hba* pHba;
@ -3329,12 +3284,10 @@ static void adpt_delay(int millisec)
#endif
static struct scsi_host_template driver_template = {
static struct scsi_host_template adpt_template = {
.name = "dpt_i2o",
.proc_name = "dpt_i2o",
.proc_info = adpt_proc_info,
.detect = adpt_detect,
.release = adpt_release,
.info = adpt_info,
.queuecommand = adpt_queue,
.eh_abort_handler = adpt_abort,
@ -3348,5 +3301,62 @@ static struct scsi_host_template driver_template = {
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
#include "scsi_module.c"
static s32 adpt_scsi_register(adpt_hba* pHba)
{
struct Scsi_Host *host;
host = scsi_host_alloc(&adpt_template, sizeof(adpt_hba*));
if (host == NULL) {
printk ("%s: scsi_host_alloc returned NULL\n",pHba->name);
return -1;
}
host->hostdata[0] = (unsigned long)pHba;
pHba->host = host;
host->irq = pHba->pDev->irq;
/* no IO ports, so don't have to set host->io_port and
* host->n_io_port
*/
host->io_port = 0;
host->n_io_port = 0;
/* see comments in scsi_host.h */
host->max_id = 16;
host->max_lun = 256;
host->max_channel = pHba->top_scsi_channel + 1;
host->cmd_per_lun = 1;
host->unique_id = (uint) pHba;
host->sg_tablesize = pHba->sg_tablesize;
host->can_queue = pHba->post_fifo_size;
if (scsi_add_host(host, &pHba->pDev->dev)) {
scsi_host_put(host);
return -1;
}
return 0;
}
static int __init adpt_init(void)
{
int count;
printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
#ifdef REBOOT_NOTIFIER
register_reboot_notifier(&adpt_reboot_notifier);
#endif
count = adpt_detect();
return count > 0 ? 0 : -ENODEV;
}
static void __exit adpt_exit(void)
{
while (hba_chain)
adpt_release(hba_chain);
}
module_init(adpt_init);
module_exit(adpt_exit);
MODULE_LICENSE("GPL");

@ -28,11 +28,9 @@
* SCSI interface function Prototypes
*/
static int adpt_detect(struct scsi_host_template * sht);
static int adpt_queue(struct scsi_cmnd * cmd, void (*cmdcomplete) (struct scsi_cmnd *));
static int adpt_abort(struct scsi_cmnd * cmd);
static int adpt_reset(struct scsi_cmnd* cmd);
static int adpt_release(struct Scsi_Host *host);
static int adpt_slave_configure(struct scsi_device *);
static const char *adpt_info(struct Scsi_Host *pSHost);
@ -49,8 +47,6 @@ static int adpt_device_reset(struct scsi_cmnd* cmd);
#define DPT_DRIVER_NAME "Adaptec I2O RAID"
#ifndef HOSTS_C
#include "dpt/sys_info.h"
#include <linux/wait.h>
#include "dpt/dpti_i2o.h"
@ -289,7 +285,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd);
static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht);
static s32 adpt_scsi_register(adpt_hba* pHba);
static s32 adpt_hba_reset(adpt_hba* pHba);
static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
static s32 adpt_rescan(adpt_hba* pHba);
@ -299,7 +295,7 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba);
static void adpt_inquiry(adpt_hba* pHba);
static void adpt_fail_posted_scbs(adpt_hba* pHba);
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun);
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
static int adpt_install_hba(struct pci_dev* pDev) ;
static int adpt_i2o_online_hba(adpt_hba* pHba);
static void adpt_i2o_post_wait_complete(u32, int);
static int adpt_i2o_systab_send(adpt_hba* pHba);
@ -343,5 +339,4 @@ static void adpt_i386_info(sysInfo_S* si);
#define FW_DEBUG_BLED_OFFSET 8
#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01
#endif /* !HOSTS_C */
#endif /* _DPT_H */

@ -220,7 +220,7 @@
#define ESP_BUSID_RESELID 0x10
#define ESP_BUSID_CTR32BIT 0x40
#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
#define ESP_BUS_TIMEOUT 250 /* In milli-seconds */
#define ESP_TIMEO_CONST 8192
#define ESP_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))

@ -902,11 +902,6 @@ static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt,
return;
/* GDT PCI controller found, resources are already in pdev */
pcistr[*cnt].pdev = pdev;
pcistr[*cnt].vendor_id = vendor;
pcistr[*cnt].device_id = device;
pcistr[*cnt].subdevice_id = pdev->subsystem_device;
pcistr[*cnt].bus = pdev->bus->number;
pcistr[*cnt].device_fn = pdev->devfn;
pcistr[*cnt].irq = pdev->irq;
base0 = pci_resource_flags(pdev, 0);
base1 = pci_resource_flags(pdev, 1);
@ -926,7 +921,8 @@ static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt,
pcistr[*cnt].io = pci_resource_start(pdev, 1);
}
TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
pcistr[*cnt].bus, PCI_SLOT(pcistr[*cnt].device_fn),
pcistr[*cnt].pdev->bus->number,
PCI_SLOT(pcistr[*cnt].pdev->devfn),
pcistr[*cnt].irq, pcistr[*cnt].dpmem));
(*cnt)++;
}
@ -946,20 +942,20 @@ static void __init gdth_sort_pci(gdth_pci_str *pcistr, int cnt)
changed = FALSE;
for (i = 0; i < cnt-1; ++i) {
if (!reverse_scan) {
if ((pcistr[i].bus > pcistr[i+1].bus) ||
(pcistr[i].bus == pcistr[i+1].bus &&
PCI_SLOT(pcistr[i].device_fn) >
PCI_SLOT(pcistr[i+1].device_fn))) {
if ((pcistr[i].pdev->bus->number > pcistr[i+1].pdev->bus->number) ||
(pcistr[i].pdev->bus->number == pcistr[i+1].pdev->bus->number &&
PCI_SLOT(pcistr[i].pdev->devfn) >
PCI_SLOT(pcistr[i+1].pdev->devfn))) {
temp = pcistr[i];
pcistr[i] = pcistr[i+1];
pcistr[i+1] = temp;
changed = TRUE;
}
} else {
if ((pcistr[i].bus < pcistr[i+1].bus) ||
(pcistr[i].bus == pcistr[i+1].bus &&
PCI_SLOT(pcistr[i].device_fn) <
PCI_SLOT(pcistr[i+1].device_fn))) {
if ((pcistr[i].pdev->bus->number < pcistr[i+1].pdev->bus->number) ||
(pcistr[i].pdev->bus->number == pcistr[i+1].pdev->bus->number &&
PCI_SLOT(pcistr[i].pdev->devfn) <
PCI_SLOT(pcistr[i+1].pdev->devfn))) {
temp = pcistr[i];
pcistr[i] = pcistr[i+1];
pcistr[i+1] = temp;
@ -1176,17 +1172,16 @@ static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha)
TRACE(("gdth_init_pci()\n"));
if (pcistr->vendor_id == PCI_VENDOR_ID_INTEL)
if (pcistr->pdev->vendor == PCI_VENDOR_ID_INTEL)
ha->oem_id = OEM_ID_INTEL;
else
ha->oem_id = OEM_ID_ICP;
ha->brd_phys = (pcistr->bus << 8) | (pcistr->device_fn & 0xf8);
ha->stype = (ulong32)pcistr->device_id;
ha->subdevice_id = pcistr->subdevice_id;
ha->brd_phys = (pcistr->pdev->bus->number << 8) | (pcistr->pdev->devfn & 0xf8);
ha->stype = (ulong32)pcistr->pdev->device;
ha->irq = pcistr->irq;
ha->pdev = pcistr->pdev;
if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000/B */
if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000/B */
TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
if (ha->brd == NULL) {
@ -1293,7 +1288,7 @@ static int __init gdth_init_pci(gdth_pci_str *pcistr,gdth_ha_str *ha)
ha->dma64_support = 0;
} else if (ha->stype <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
} else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
ha->plx = (gdt6c_plx_regs *)pcistr->io;
TRACE2(("init_pci_new() dpmem %lx irq %d\n",
pcistr->dpmem,ha->irq));
@ -4601,7 +4596,8 @@ static int __init gdth_detect(Scsi_Host_Template *shtp)
}
/* controller found and initialized */
printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
pcistr[ctr].bus,PCI_SLOT(pcistr[ctr].device_fn),ha->irq);
pcistr[ctr].pdev->bus->number,
PCI_SLOT(pcistr[ctr].pdev->devfn), ha->irq);
if (request_irq(ha->irq, gdth_interrupt,
IRQF_DISABLED|IRQF_SHARED, "gdth", ha))
@ -4637,7 +4633,7 @@ static int __init gdth_detect(Scsi_Host_Template *shtp)
#endif
ha->scratch_busy = FALSE;
ha->req_first = NULL;
ha->tid_cnt = pcistr[ctr].device_id >= 0x200 ? MAXID : MAX_HDRIVES;
ha->tid_cnt = pcistr[ctr].pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
if (max_ids > 0 && max_ids < ha->tid_cnt)
ha->tid_cnt = max_ids;
for (i=0; i<GDTH_MAXCMDS; ++i)
@ -4810,7 +4806,7 @@ static const char *gdth_ctr_name(int hanum)
} else if (ha->type == GDT_ISA) {
return("GDT2000/2020");
} else if (ha->type == GDT_PCI) {
switch (ha->stype) {
switch (ha->pdev->device) {
case PCI_DEVICE_ID_VORTEX_GDT60x0:
return("GDT6000/6020/6050");
case PCI_DEVICE_ID_VORTEX_GDT6000B:
@ -5448,12 +5444,12 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
ctrt.type =
(ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
if (ha->stype >= 0x300)
ctrt.ext_type = 0x6000 | ha->subdevice_id;
ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
else
ctrt.ext_type = 0x6000 | ha->stype;
}
ctrt.device_id = ha->stype;
ctrt.sub_device_id = ha->subdevice_id;
ctrt.device_id = ha->pdev->device;
ctrt.sub_device_id = ha->pdev->subsystem_device;
}
ctrt.info = ha->brd_phys;
ctrt.oem_id = ha->oem_id;

@ -845,11 +845,6 @@ typedef struct {
/* PCI resources */
typedef struct {
struct pci_dev *pdev;
ushort vendor_id; /* vendor (ICP, Intel, ..) */
ushort device_id; /* device ID (0,..,9) */
ushort subdevice_id; /* sub device ID */
unchar bus; /* PCI bus */
unchar device_fn; /* PCI device/function no. */
ulong dpmem; /* DPRAM address */
ulong io; /* IO address */
ulong io_mm; /* IO address mem. mapped */
@ -862,7 +857,6 @@ typedef struct {
ushort oem_id; /* OEM */
ushort type; /* controller class */
ulong32 stype; /* subtype (PCI: device ID) */
ushort subdevice_id; /* sub device ID (PCI) */
ushort fw_vers; /* firmware version */
ushort cache_feat; /* feat. cache serv. (s/g,..)*/
ushort raw_feat; /* feat. raw service (s/g,..)*/

@ -220,7 +220,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
get_device(&shost->shost_gendev);
if (shost->transportt->host_size &&
(shost->shost_data = kmalloc(shost->transportt->host_size,
(shost->shost_data = kzalloc(shost->transportt->host_size,
GFP_KERNEL)) == NULL)
goto out_del_classdev;

@ -393,12 +393,6 @@ static int map_sg_data(struct scsi_cmnd *cmd,
return 1;
else if (sg_mapped < 0)
return 0;
else if (sg_mapped > SG_ALL) {
printk(KERN_ERR
"ibmvscsi: More than %d mapped sg entries, got %d\n",
SG_ALL, sg_mapped);
return 0;
}
set_srp_direction(cmd, srp_cmd, sg_mapped);
@ -708,8 +702,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
struct srp_cmd *srp_cmd;
struct srp_event_struct *evt_struct;
struct srp_indirect_buf *indirect;
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
u16 lun = lun_from_dev(cmnd->device);
u8 out_fmt, in_fmt;
@ -960,8 +953,7 @@ static void sync_completion(struct srp_event_struct *evt_struct)
*/
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)cmd->device->host->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
struct srp_tsk_mgmt *tsk_mgmt;
struct srp_event_struct *evt;
struct srp_event_struct *tmp_evt, *found_evt;
@ -1084,9 +1076,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
*/
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)cmd->device->host->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
struct srp_tsk_mgmt *tsk_mgmt;
struct srp_event_struct *evt;
struct srp_event_struct *tmp_evt, *pos;
@ -1183,8 +1173,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
unsigned long wait_switch = 0;
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)cmd->device->host->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
@ -1412,8 +1401,7 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
static ssize_t show_host_srp_version(struct class_device *class_dev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(class_dev);
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)shost->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
len = snprintf(buf, PAGE_SIZE, "%s\n",
@ -1433,8 +1421,7 @@ static ssize_t show_host_partition_name(struct class_device *class_dev,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(class_dev);
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)shost->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
len = snprintf(buf, PAGE_SIZE, "%s\n",
@ -1454,8 +1441,7 @@ static ssize_t show_host_partition_number(struct class_device *class_dev,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(class_dev);
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)shost->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
len = snprintf(buf, PAGE_SIZE, "%d\n",
@ -1474,8 +1460,7 @@ static struct class_device_attribute ibmvscsi_host_partition_number = {
static ssize_t show_host_mad_version(struct class_device *class_dev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(class_dev);
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)shost->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
len = snprintf(buf, PAGE_SIZE, "%d\n",
@ -1494,8 +1479,7 @@ static struct class_device_attribute ibmvscsi_host_mad_version = {
static ssize_t show_host_os_type(struct class_device *class_dev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(class_dev);
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)shost->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
@ -1513,8 +1497,7 @@ static struct class_device_attribute ibmvscsi_host_os_type = {
static ssize_t show_host_config(struct class_device *class_dev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(class_dev);
struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)shost->hostdata;
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
/* returns null-terminated host config data */
if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
@ -1582,7 +1565,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto scsi_host_alloc_failed;
}
hostdata = (struct ibmvscsi_host_data *)host->hostdata;
hostdata = shost_priv(host);
memset(hostdata, 0x00, sizeof(*hostdata));
INIT_LIST_HEAD(&hostdata->sent);
hostdata->host = host;
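
The ibmvscsi hunks replace open-coded casts of shost->hostdata with shost_priv(), which simply returns the per-host private area allocated with the Scsi_Host. A small sketch of the idiom (the host-data structure and accessor are hypothetical):

#include <scsi/scsi_host.h>

struct example_host_data {
	int example_state;              /* per-adapter private state */
};

/* shost_priv() returns the hostdata area sized at scsi_host_alloc() time */
static int example_get_state(struct Scsi_Host *shost)
{
	struct example_host_data *hd = shost_priv(shost);

	return hd->example_state;
}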

@ -32,7 +32,8 @@ config SCSI_SAS_LIBSAS
config SCSI_SAS_ATA
bool "ATA support for libsas (requires libata)"
depends on SCSI_SAS_LIBSAS && ATA
depends on SCSI_SAS_LIBSAS
depends on ATA = y || ATA = SCSI_SAS_LIBSAS
help
Builds in ATA support into libsas. Will necessitate
the loading of libata along with libsas.

@ -45,7 +45,7 @@ struct lpfc_sli2_slim;
#define LPFC_DISC_IOCB_BUFF_COUNT 20
#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
/* Define macros for 64 bit support */
#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
@ -78,6 +78,7 @@ struct lpfc_dma_pool {
struct hbq_dmabuf {
struct lpfc_dmabuf dbuf;
uint32_t size;
uint32_t tag;
};
@ -329,13 +330,30 @@ struct lpfc_vport {
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
char *vname; /* Application assigned name */
/* Vport Config Parameters */
uint32_t cfg_scan_down;
uint32_t cfg_lun_queue_depth;
uint32_t cfg_nodev_tmo;
uint32_t cfg_devloss_tmo;
uint32_t cfg_restrict_login;
uint32_t cfg_peer_port_login;
uint32_t cfg_fcp_class;
uint32_t cfg_use_adisc;
uint32_t cfg_fdmi_on;
uint32_t cfg_discovery_threads;
uint32_t cfg_log_verbose;
uint32_t cfg_max_luns;
uint32_t dev_loss_tmo_changed;
struct fc_vport *fc_vport;
#ifdef CONFIG_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
struct dentry *vport_debugfs_root;
struct lpfc_disc_trc *disc_trc;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
};
@ -345,17 +363,25 @@ struct hbq_s {
uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
uint32_t hbqPutIdx; /* HBQ slot to use */
uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
void *hbq_virt; /* Virtual ptr to this hbq */
struct list_head hbq_buffer_list; /* buffers assigned to this HBQ */
/* Callback for HBQ buffer allocation */
struct hbq_dmabuf *(*hbq_alloc_buffer) (struct lpfc_hba *);
/* Callback for HBQ buffer free */
void (*hbq_free_buffer) (struct lpfc_hba *,
struct hbq_dmabuf *);
};
#define LPFC_MAX_HBQS 16
/* this matches the possition in the lpfc_hbq_defs array */
#define LPFC_MAX_HBQS 4
/* this matches the position in the lpfc_hbq_defs array */
#define LPFC_ELS_HBQ 0
#define LPFC_EXTRA_HBQ 1
struct lpfc_hba {
struct lpfc_sli sli;
uint32_t sli_rev; /* SLI2 or SLI3 */
uint32_t sli3_options; /* Mask of enabled SLI3 options */
#define LPFC_SLI3_ENABLED 0x01
#define LPFC_SLI3_ENABLED 0x01
#define LPFC_SLI3_HBQ_ENABLED 0x02
#define LPFC_SLI3_NPIV_ENABLED 0x04
#define LPFC_SLI3_VPORT_TEARDOWN 0x08
@ -364,7 +390,7 @@ struct lpfc_hba {
enum hba_state link_state;
uint32_t link_flag; /* link state flags */
#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
/* This flag is set while issuing */
/* INIT_LINK mailbox command */
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
@ -413,28 +439,16 @@ struct lpfc_hba {
uint8_t wwpn[8];
uint32_t RandomData[7];
uint32_t cfg_log_verbose;
uint32_t cfg_lun_queue_depth;
uint32_t cfg_nodev_tmo;
uint32_t cfg_devloss_tmo;
uint32_t cfg_hba_queue_depth;
uint32_t cfg_peer_port_login;
uint32_t cfg_vport_restrict_login;
uint32_t cfg_npiv_enable;
uint32_t cfg_fcp_class;
uint32_t cfg_use_adisc;
/* HBA Config Parameters */
uint32_t cfg_ack0;
uint32_t cfg_enable_npiv;
uint32_t cfg_topology;
uint32_t cfg_scan_down;
uint32_t cfg_link_speed;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
uint32_t cfg_multi_ring_rctl;
uint32_t cfg_multi_ring_type;
uint32_t cfg_fdmi_on;
uint32_t cfg_discovery_threads;
uint32_t cfg_max_luns;
uint32_t cfg_poll;
uint32_t cfg_poll_tmo;
uint32_t cfg_use_msi;
@ -442,8 +456,8 @@ struct lpfc_hba {
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
uint32_t cfg_hba_queue_depth;
uint32_t dev_loss_tmo_changed;
lpfc_vpd_t vpd; /* vital product data */
@ -457,7 +471,6 @@ struct lpfc_hba {
wait_queue_head_t *work_wait;
struct task_struct *worker_thread;
struct list_head hbq_buffer_list;
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
@ -526,12 +539,14 @@ struct lpfc_hba {
mempool_t *nlp_mem_pool;
struct fc_host_statistics link_stats;
uint8_t using_msi;
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
#define LPFC_MAX_VPI 100 /* Max number of VPorts supported */
unsigned long *vpi_bmask; /* vpi allocation table */
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
#define LPFC_MAX_VPI 100 /* Max number of VPI supported */
#define LPFC_MAX_VPORTS (LPFC_MAX_VPI+1)/* Max number of VPorts supported */
unsigned long *vpi_bmask; /* vpi allocation table */
/* Data structure used by fabric iocb scheduler */
struct list_head fabric_iocb_list;
@ -547,6 +562,11 @@ struct lpfc_hba {
#ifdef CONFIG_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpslim;
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
#endif
/* Fields used for heart beat. */

@ -67,12 +67,6 @@ lpfc_drvr_version_show(struct class_device *cdev, char *buf)
return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
}
static ssize_t
management_version_show(struct class_device *cdev, char *buf)
{
return snprintf(buf, PAGE_SIZE, DFC_API_VERSION "\n");
}
static ssize_t
lpfc_info_show(struct class_device *cdev, char *buf)
{
@ -319,9 +313,8 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
if (cnt++ > 3000) {
lpfc_printf_log(phba,
KERN_WARNING, LOG_INIT,
"%d:0466 Outstanding IO when "
"bringing Adapter offline\n",
phba->brd_no);
"0466 Outstanding IO when "
"bringing Adapter offline\n");
break;
}
}
@ -437,7 +430,7 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
return -EIO;
}
int
static int
lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mxri, uint32_t *axri,
uint32_t *mrpi, uint32_t *arpi,
@ -694,9 +687,8 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
return 0;\
}\
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
"%d:0449 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", \
phba->brd_no, val); \
"0449 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
phba->cfg_##attr = default;\
return -EINVAL;\
}
@ -710,9 +702,8 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
return 0;\
}\
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
"%d:0450 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", \
phba->brd_no, val); \
"0450 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
return -EINVAL;\
}
@ -734,6 +725,75 @@ lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
return -EINVAL;\
}
#define lpfc_vport_param_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct class_device *cdev, char *buf) \
{ \
struct Scsi_Host *shost = class_to_shost(cdev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
int val = 0;\
val = vport->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
#define lpfc_vport_param_hex_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct class_device *cdev, char *buf) \
{ \
struct Scsi_Host *shost = class_to_shost(cdev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
int val = 0;\
val = vport->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
#define lpfc_vport_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
{ \
if (val >= minval && val <= maxval) {\
vport->cfg_##attr = val;\
return 0;\
}\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
"0449 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
vport->cfg_##attr = default;\
return -EINVAL;\
}
#define lpfc_vport_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
{ \
if (val >= minval && val <= maxval) {\
vport->cfg_##attr = val;\
return 0;\
}\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
"0450 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
return -EINVAL;\
}
#define lpfc_vport_param_store(attr) \
static ssize_t \
lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
{ \
struct Scsi_Host *shost = class_to_shost(cdev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
int val=0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
if (sscanf(buf, "%i", &val) != 1)\
return -EINVAL;\
if (lpfc_##attr##_set(vport, val) == 0) \
return strlen(buf);\
else \
return -EINVAL;\
}
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
@ -778,6 +838,50 @@ lpfc_param_store(name)\
static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
lpfc_vport_param_set(name, defval, minval, maxval)\
lpfc_vport_param_store(name)\
static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static int lpfc_##name = defval;\
module_param(lpfc_##name, int, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
lpfc_vport_param_set(name, defval, minval, maxval)\
lpfc_vport_param_store(name)\
static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
static CLASS_DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
@ -794,8 +898,6 @@ static CLASS_DEVICE_ATTR(num_discovered_ports, S_IRUGO,
static CLASS_DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
NULL);
static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
NULL);
static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
lpfc_board_mode_show, lpfc_board_mode_store);
static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
@ -908,17 +1010,15 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (stat1)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0463 lpfc_soft_wwpn attribute set failed to reinit "
"adapter - %d\n", phba->brd_no, stat1);
"0463 lpfc_soft_wwpn attribute set failed to "
"reinit adapter - %d\n", stat1);
init_completion(&online_compl);
lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
wait_for_completion(&online_compl);
if (stat2)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0464 lpfc_soft_wwpn attribute set failed to reinit "
"adapter - %d\n", phba->brd_no, stat2);
"0464 lpfc_soft_wwpn attribute set failed to "
"reinit adapter - %d\n", stat2);
return (stat1 || stat2) ? -EIO : count;
}
static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
@ -927,8 +1027,8 @@ static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
static ssize_t
lpfc_soft_wwnn_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwnn);
}
@ -937,8 +1037,8 @@ lpfc_soft_wwnn_show(struct class_device *cdev, char *buf)
static ssize_t
lpfc_soft_wwnn_store(struct class_device *cdev, const char *buf, size_t count)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
unsigned int i, j, cnt=count;
u8 wwnn[8];
@ -1002,7 +1102,7 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
LPFC_ATTR_R(npiv_enable, 0, 0, 1, "Enable NPIV functionality");
LPFC_ATTR_R(enable_npiv, 0, 0, 1, "Enable NPIV functionality");
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@ -1019,90 +1119,75 @@ lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
int val = 0;
val = phba->cfg_devloss_tmo;
return snprintf(buf, PAGE_SIZE, "%d\n",
phba->cfg_devloss_tmo);
val = vport->cfg_devloss_tmo;
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
}
static int
lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
{
static int warned;
if (phba->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
phba->cfg_nodev_tmo = phba->cfg_devloss_tmo;
if (!warned && val != LPFC_DEF_DEVLOSS_TMO) {
warned = 1;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0402 Ignoring nodev_tmo module "
"parameter because devloss_tmo is"
" set.\n",
phba->brd_no);
}
if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
if (val != LPFC_DEF_DEVLOSS_TMO)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0402 Ignoring nodev_tmo module "
"parameter because devloss_tmo is "
"set.\n");
return 0;
}
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
phba->cfg_nodev_tmo = val;
phba->cfg_devloss_tmo = val;
vport->cfg_nodev_tmo = val;
vport->cfg_devloss_tmo = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0400 lpfc_nodev_tmo attribute cannot be set to %d, "
"allowed range is [%d, %d]\n",
phba->brd_no, val,
LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
phba->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0400 lpfc_nodev_tmo attribute cannot be set to"
" %d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
return -EINVAL;
}
static void
lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba)
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
struct lpfc_vport *vport;
struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
list_for_each_entry(vport, &phba->port_list, listentry) {
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
if (ndlp->rport)
ndlp->rport->dev_loss_tmo =
phba->cfg_devloss_tmo;
spin_unlock_irq(shost->host_lock);
}
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
spin_unlock_irq(shost->host_lock);
}
static int
lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
{
if (phba->dev_loss_tmo_changed ||
(lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0401 Ignoring change to nodev_tmo "
"because devloss_tmo is set.\n",
phba->brd_no);
if (vport->dev_loss_tmo_changed ||
(lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0401 Ignoring change to nodev_tmo "
"because devloss_tmo is set.\n");
return 0;
}
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
phba->cfg_nodev_tmo = val;
phba->cfg_devloss_tmo = val;
lpfc_update_rport_devloss_tmo(phba);
vport->cfg_nodev_tmo = val;
vport->cfg_devloss_tmo = val;
lpfc_update_rport_devloss_tmo(vport);
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0403 lpfc_nodev_tmo attribute cannot be set to %d, "
"allowed range is [%d, %d]\n",
phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
LPFC_MAX_DEVLOSS_TMO);
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0403 lpfc_nodev_tmo attribute cannot be set to"
"%d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
}
lpfc_param_store(nodev_tmo)
lpfc_vport_param_store(nodev_tmo)
static CLASS_DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
@ -1116,29 +1201,28 @@ module_param(lpfc_devloss_tmo, int, 0);
MODULE_PARM_DESC(lpfc_devloss_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
lpfc_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
lpfc_param_show(devloss_tmo)
lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
lpfc_vport_param_show(devloss_tmo)
static int
lpfc_devloss_tmo_set(struct lpfc_hba *phba, int val)
lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
{
if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
phba->cfg_nodev_tmo = val;
phba->cfg_devloss_tmo = val;
phba->dev_loss_tmo_changed = 1;
lpfc_update_rport_devloss_tmo(phba);
vport->cfg_nodev_tmo = val;
vport->cfg_devloss_tmo = val;
vport->dev_loss_tmo_changed = 1;
lpfc_update_rport_devloss_tmo(vport);
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0404 lpfc_devloss_tmo attribute cannot be set to"
" %d, allowed range is [%d, %d]\n",
phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
LPFC_MAX_DEVLOSS_TMO);
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0404 lpfc_devloss_tmo attribute cannot be set to"
" %d, allowed range is [%d, %d]\n",
val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
return -EINVAL;
}
lpfc_param_store(devloss_tmo)
lpfc_vport_param_store(devloss_tmo)
static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
@ -1160,14 +1244,15 @@ static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
# LOG_LIBDFC 0x2000 LIBDFC events
# LOG_ALL_MSG 0xffff LOG all messages
*/
LPFC_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask");
LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
"Verbose logging bit-mask");
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
# commands per FCP LUN. Value range is [1,128]. Default value is 30.
*/
LPFC_ATTR_R(lun_queue_depth, 30, 1, 128,
"Max number of FCP commands we can queue to a specific LUN");
LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
"Max number of FCP commands we can queue to a specific LUN");
/*
# hba_queue_depth: This parameter is used to limit the number of outstanding
@ -1188,12 +1273,12 @@ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
# are allowed to login to each other.
# Default value of this parameter is 0.
*/
LPFC_ATTR_R(peer_port_login, 0, 0, 1,
"Allow peer ports on the same physical port to login to each "
"other.");
LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
"Allow peer ports on the same physical port to login to each "
"other.");
/*
# vport_restrict_login: This parameter allows/prevents logins
# restrict_login: This parameter allows/prevents logins
# between Virtual Ports and remote initiators.
# When this parameter is not set (0) Virtual Ports will accept PLOGIs from
# other initiators and will attempt to PLOGI all remote ports.
@ -1203,8 +1288,55 @@ LPFC_ATTR_R(peer_port_login, 0, 0, 1,
# This parameter does not restrict logins to Fabric resident remote ports.
# Default value of this parameter is 1.
*/
LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1,
"Restrict virtual ports login to remote initiators.");
static int lpfc_restrict_login = 1;
module_param(lpfc_restrict_login, int, 0);
MODULE_PARM_DESC(lpfc_restrict_login,
"Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);
static int
lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
{
if (val < 0 || val > 1) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0449 lpfc_restrict_login attribute cannot "
"be set to %d, allowed range is [0, 1]\n",
val);
vport->cfg_restrict_login = 1;
return -EINVAL;
}
if (vport->port_type == LPFC_PHYSICAL_PORT) {
vport->cfg_restrict_login = 0;
return 0;
}
vport->cfg_restrict_login = val;
return 0;
}
static int
lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
{
if (val < 0 || val > 1) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0450 lpfc_restrict_login attribute cannot "
"be set to %d, allowed range is [0, 1]\n",
val);
vport->cfg_restrict_login = 1;
return -EINVAL;
}
if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0468 lpfc_restrict_login must be 0 for "
"Physical ports.\n");
vport->cfg_restrict_login = 0;
return 0;
}
vport->cfg_restrict_login = val;
return 0;
}
lpfc_vport_param_store(restrict_login);
static CLASS_DEVICE_ATTR(lpfc_restrict_login, S_IRUGO | S_IWUSR,
lpfc_restrict_login_show, lpfc_restrict_login_store);
/*
# Some disk devices have a "select ID" or "select Target" capability.
@ -1223,8 +1355,8 @@ LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1,
# and will not work across a fabric. Also this parameter will take
# effect only in the case when ALPA map is not available.)
*/
LPFC_ATTR_R(scan_down, 1, 0, 1,
"Start scanning for devices from highest ALPA to lowest");
LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
"Start scanning for devices from highest ALPA to lowest");
/*
# lpfc_topology: link topology for init link
@ -1255,15 +1387,15 @@ LPFC_ATTR_R(link_speed, 0, 0, 8, "Select link speed");
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
*/
LPFC_ATTR_R(fcp_class, 3, 2, 3,
"Select Fibre Channel class of service for FCP sequences");
LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
"Select Fibre Channel class of service for FCP sequences");
/*
# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
# is [0,1]. Default value is 0.
*/
LPFC_ATTR_RW(use_adisc, 0, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
@ -1315,13 +1447,13 @@ LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
# 2 = support FDMI with attribute of hostname
# Value range [0,2]. Default value is 0.
*/
LPFC_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
LPFC_VPORT_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
# discovery). Value range is [1,64]. Default value = 32.
*/
LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
"during discovery");
/*
@ -1329,8 +1461,7 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
# Value range is [0,65535]. Default value is 255.
# NOTE: The SCSI layer might probe all allowed LUN on some old targets.
*/
LPFC_ATTR_R(max_luns, 255, 0, 65535,
"Maximum allowed LUN");
LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN");
/*
# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
@ -1367,7 +1498,6 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_lpfc_lun_queue_depth,
&class_device_attr_lpfc_hba_queue_depth,
&class_device_attr_lpfc_peer_port_login,
&class_device_attr_lpfc_vport_restrict_login,
&class_device_attr_lpfc_nodev_tmo,
&class_device_attr_lpfc_devloss_tmo,
&class_device_attr_lpfc_fcp_class,
@ -1383,9 +1513,8 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_lpfc_multi_ring_type,
&class_device_attr_lpfc_fdmi_on,
&class_device_attr_lpfc_max_luns,
&class_device_attr_lpfc_npiv_enable,
&class_device_attr_lpfc_enable_npiv,
&class_device_attr_nport_evt_cnt,
&class_device_attr_management_version,
&class_device_attr_board_mode,
&class_device_attr_max_vpi,
&class_device_attr_used_vpi,
@ -1404,6 +1533,28 @@ struct class_device_attribute *lpfc_hba_attrs[] = {
NULL,
};
struct class_device_attribute *lpfc_vport_attrs[] = {
&class_device_attr_info,
&class_device_attr_state,
&class_device_attr_num_discovered_ports,
&class_device_attr_lpfc_drvr_version,
&class_device_attr_lpfc_log_verbose,
&class_device_attr_lpfc_lun_queue_depth,
&class_device_attr_lpfc_nodev_tmo,
&class_device_attr_lpfc_devloss_tmo,
&class_device_attr_lpfc_hba_queue_depth,
&class_device_attr_lpfc_peer_port_login,
&class_device_attr_lpfc_restrict_login,
&class_device_attr_lpfc_fcp_class,
&class_device_attr_lpfc_use_adisc,
&class_device_attr_lpfc_fdmi_on,
&class_device_attr_lpfc_max_luns,
&class_device_attr_nport_evt_cnt,
&class_device_attr_npiv_info,
NULL,
};
static ssize_t
sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@ -2243,7 +2394,6 @@ struct fc_function_template lpfc_vport_transport_functions = {
.get_starget_port_name = lpfc_get_starget_port_name,
.show_starget_port_name = 1,
.issue_fc_host_lip = lpfc_issue_lip,
.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
.terminate_rport_io = lpfc_terminate_rport_io,
@ -2253,39 +2403,25 @@ struct fc_function_template lpfc_vport_transport_functions = {
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
lpfc_log_verbose_init(phba, lpfc_log_verbose);
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth);
lpfc_fcp_class_init(phba, lpfc_fcp_class);
lpfc_use_adisc_init(phba, lpfc_use_adisc);
lpfc_ack0_init(phba, lpfc_ack0);
lpfc_topology_init(phba, lpfc_topology);
lpfc_scan_down_init(phba, lpfc_scan_down);
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
lpfc_max_luns_init(phba, lpfc_max_luns);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_peer_port_login_init(phba, lpfc_peer_port_login);
lpfc_npiv_enable_init(phba, lpfc_npiv_enable);
lpfc_vport_restrict_login_init(phba, lpfc_vport_restrict_login);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
/*
* The total number of segments is the configuration value plus 2
* since the IOCB need a command and response bde.
*/
phba->cfg_sg_seg_cnt = LPFC_SG_SEG_CNT + 2;
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated
@ -2293,9 +2429,24 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
(phba->cfg_sg_seg_cnt * sizeof(struct ulp_bde64));
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
return;
}
void
lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
{
lpfc_log_verbose_init(vport, lpfc_log_verbose);
lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
lpfc_restrict_login_init(vport, lpfc_restrict_login);
lpfc_fcp_class_init(vport, lpfc_fcp_class);
lpfc_use_adisc_init(vport, lpfc_use_adisc);
lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
lpfc_max_luns_init(vport, lpfc_max_luns);
lpfc_scan_down_init(vport, lpfc_scan_down);
return;
}


@ -40,6 +40,7 @@ void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@ -101,7 +102,7 @@ int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
struct lpfc_nodelist *, LPFC_MBOXQ_t *);
int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
struct lpfc_nodelist *, LPFC_MBOXQ_t *);
int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
@ -117,6 +118,7 @@ void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_els_handle_rscn(struct lpfc_vport *);
void lpfc_els_flush_rscn(struct lpfc_vport *);
int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
void lpfc_els_flush_all_cmd(struct lpfc_hba *);
void lpfc_els_flush_cmd(struct lpfc_vport *);
int lpfc_els_disc_adisc(struct lpfc_vport *);
int lpfc_els_disc_plogi(struct lpfc_vport *);
@ -161,9 +163,11 @@ LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t ,
LPFC_MBOXQ_t *);
void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
uint32_t , LPFC_MBOXQ_t *);
struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
int lpfc_mem_alloc(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
@ -200,6 +204,7 @@ int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
struct lpfc_sli_ring *,
dma_addr_t);
int lpfc_sli_hbq_count(void);
int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
@ -207,10 +212,9 @@ struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
uint64_t, lpfc_ctx_cmd);
int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
uint64_t, uint32_t, lpfc_ctx_cmd);
int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
uint64_t, lpfc_ctx_cmd);
void lpfc_mbox_timeout(unsigned long);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
@ -234,8 +238,6 @@ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb);
void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *);
void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
@ -248,10 +250,13 @@ const char* lpfc_info(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
void lpfc_get_cfgparam(struct lpfc_hba *);
void lpfc_get_vport_cfgparam(struct lpfc_vport *);
int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct class_device_attribute *lpfc_hba_attrs[];
extern struct class_device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
extern int lpfc_sli_mode;
@ -260,7 +265,7 @@ int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
void lpfc_terminate_rport_io(struct fc_rport *);
void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct fc_vport *);
struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
void destroy_port(struct lpfc_vport *);
@ -271,6 +276,9 @@ extern void lpfc_debugfs_initialize(struct lpfc_vport *);
extern void lpfc_debugfs_terminate(struct lpfc_vport *);
extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
uint32_t, uint32_t);
extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
/* Interface exported by fabric iocb scheduler */
int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);


@ -257,6 +257,10 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
struct lpfc_dmabuf *buf_ptr;
if (ctiocb->context_un.ndlp) {
lpfc_nlp_put(ctiocb->context_un.ndlp);
ctiocb->context_un.ndlp = NULL;
}
if (ctiocb->context1) {
buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
@ -314,6 +318,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Save for completion so we can release these resources */
geniocb->context1 = (uint8_t *) inp;
geniocb->context2 = (uint8_t *) outp;
geniocb->context_un.ndlp = ndlp;
/* Fill in payload, bp points to frame payload */
icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
@ -341,11 +346,11 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
}
/* Issue GEN REQ IOCB for NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d (%d):0119 Issue GEN REQ IOCB to NPORT x%x "
"Data: x%x x%x\n", phba->brd_no, vport->vpi,
ndlp->nlp_DID, icmd->ulpIoTag,
vport->port_state);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0119 Issue GEN REQ IOCB to NPORT x%x "
"Data: x%x x%x\n",
ndlp->nlp_DID, icmd->ulpIoTag,
vport->port_state);
geniocb->iocb_cmpl = cmpl;
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
@ -390,17 +395,19 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
return 0;
}
static struct lpfc_vport *
struct lpfc_vport *
lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
struct lpfc_vport *vport_curr;
unsigned long flags;
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport_curr, &phba->port_list, listentry) {
if ((vport_curr->fc_myDID) &&
(vport_curr->fc_myDID == did))
if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return vport_curr;
}
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return NULL;
}
@ -449,10 +456,10 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
*/
if ((Did != vport->fc_myDID) &&
((lpfc_find_vport_by_did(phba, Did) == NULL) ||
phba->cfg_peer_port_login)) {
vport->cfg_peer_port_login)) {
if ((vport->port_type != LPFC_NPIV_PORT) ||
(vport->fc_flag & FC_RFF_NOT_SUPPORTED) ||
(!phba->cfg_vport_restrict_login)) {
(!vport->cfg_restrict_login)) {
ndlp = lpfc_setup_disc_node(vport, Did);
if (ndlp) {
lpfc_debugfs_disc_trc(vport,
@ -462,14 +469,13 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
Did, ndlp->nlp_flag,
vport->fc_flag);
lpfc_printf_log(phba, KERN_INFO,
lpfc_printf_vlog(vport,
KERN_INFO,
LOG_DISCOVERY,
"%d (%d):0238 Process "
"0238 Process "
"x%x NameServer Rsp"
"Data: x%x x%x x%x\n",
phba->brd_no,
vport->vpi, Did,
ndlp->nlp_flag,
Did, ndlp->nlp_flag,
vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
@ -480,14 +486,13 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
lpfc_printf_log(phba, KERN_INFO,
lpfc_printf_vlog(vport,
KERN_INFO,
LOG_DISCOVERY,
"%d (%d):0239 Skip x%x "
"0239 Skip x%x "
"NameServer Rsp Data: "
"x%x x%x\n",
phba->brd_no,
vport->vpi, Did,
vport->fc_flag,
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
}
@ -514,14 +519,13 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
lpfc_printf_log(phba, KERN_INFO,
lpfc_printf_vlog(vport,
KERN_INFO,
LOG_DISCOVERY,
"%d (%d):0245 Skip x%x "
"0245 Skip x%x "
"NameServer Rsp Data: "
"x%x x%x\n",
phba->brd_no,
vport->vpi, Did,
vport->fc_flag,
Did, vport->fc_flag,
vport->fc_rscn_id_cnt);
}
}
@ -549,8 +553,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *bmp;
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp;
int rc;
/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@ -568,9 +576,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0216 Link event during NS query\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Link event during NS query\n");
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
@ -588,46 +595,61 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0257 GID_FT Query error: 0x%x 0x%x\n",
phba->brd_no, vport->vpi, irsp->ulpStatus,
vport->fc_ns_retry);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0257 GID_FT Query error: 0x%x 0x%x\n",
irsp->ulpStatus, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0208 NameServer Rsp "
"Data: x%x\n",
phba->brd_no, vport->vpi,
vport->fc_flag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0208 NameServer Rsp Data: x%x\n",
vport->fc_flag);
lpfc_ns_rsp(vport, outp,
(uint32_t) (irsp->un.genreq64.bdl.bdeSize));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0240 NameServer Rsp Error "
if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
&& (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0269 No NameServer Entries "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
vport->fc_flag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_FT no entry cmd:x%x rsn:x%x exp:x%x",
(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation);
} else {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0240 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
vport->fc_flag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x",
(uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation);
}
} else {
/* NameServer Rsp Error */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0241 NameServer Rsp Error "
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0241 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
@ -661,11 +683,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_start(vport);
}
out:
cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
void
static void
lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
@ -695,40 +718,37 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
if ((fbits & FC4_FEATURE_INIT) &&
!(fbits & FC4_FEATURE_TARGET)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0245 Skip x%x GFF "
"NameServer Rsp Data: (init) "
"x%x x%x\n", phba->brd_no,
vport->vpi, did, fbits,
vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0270 Skip x%x GFF "
"NameServer Rsp Data: (init) "
"x%x x%x\n", did, fbits,
vport->fc_rscn_id_cnt);
goto out;
}
}
}
else {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0267 NameServer GFF Rsp"
" x%x Error (%d %d) Data: x%x x%x\n",
phba->brd_no, vport->vpi, did,
irsp->ulpStatus, irsp->un.ulpWord[4],
vport->fc_flag, vport->fc_rscn_id_cnt)
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0267 NameServer GFF Rsp "
"x%x Error (%d %d) Data: x%x x%x\n",
did, irsp->ulpStatus, irsp->un.ulpWord[4],
vport->fc_flag, vport->fc_rscn_id_cnt)
}
/* This is a target port, unregistered port, or the GFF_ID failed */
ndlp = lpfc_setup_disc_node(vport, did);
if (ndlp) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0242 Process x%x GFF "
"NameServer Rsp Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi,
did, ndlp->nlp_flag, vport->fc_flag,
vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0242 Process x%x GFF "
"NameServer Rsp Data: x%x x%x x%x\n",
did, ndlp->nlp_flag, vport->fc_flag,
vport->fc_rscn_id_cnt);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0243 Skip x%x GFF "
"NameServer Rsp Data: x%x x%x\n",
phba->brd_no, vport->vpi, did,
vport->fc_flag, vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0243 Skip x%x GFF "
"NameServer Rsp Data: x%x x%x\n", did,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
out:
/* Link up / RSCN discovery */
@ -766,10 +786,14 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *outp;
IOCB_t *irsp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp;
int cmdcode, rc;
uint8_t retry;
uint32_t latt;
/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@ -784,22 +808,21 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
latt = lpfc_els_chk_latt(vport);
/* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0209 RFT request completes, latt %d, "
"ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
phba->brd_no, vport->vpi, latt, irsp->ulpStatus,
CTrsp->CommandResponse.bits.CmdRsp,
cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0209 RFT request completes, latt %d, "
"ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
latt, irsp->ulpStatus,
CTrsp->CommandResponse.bits.CmdRsp,
cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"CT cmd cmpl: status:x%x/x%x cmd:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
if (irsp->ulpStatus) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0268 NS cmd %x Error (%d %d)\n",
phba->brd_no, vport->vpi, cmdcode,
irsp->ulpStatus, irsp->un.ulpWord[4]);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0268 NS cmd %x Error (%d %d)\n",
cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
@ -811,15 +834,15 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
retry++;
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0216 Retrying NS cmd %x\n",
phba->brd_no, vport->vpi, cmdcode);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Retrying NS cmd %x\n", cmdcode);
rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
if (rc == 0)
goto out;
}
out:
cmdiocb->context_un.ndlp = ndlp; /* Now restore ndlp for free */
lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
@ -862,7 +885,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
int
static int
lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
@ -957,10 +980,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
}
/* NameServer Req */
lpfc_printf_log(phba, KERN_INFO ,LOG_DISCOVERY,
"%d (%d):0236 NameServer Req Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi, cmdcode, vport->fc_flag,
vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY,
"0236 NameServer Req Data: x%x x%x x%x\n",
cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt);
bpl = (struct ulp_bde64 *) bmp->virt;
memset(bpl, 0, sizeof(struct ulp_bde64));
@ -1059,6 +1081,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
}
lpfc_nlp_get(ndlp);
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
/* On success, The cmpl function will free the buffers */
@ -1069,6 +1092,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
}
rc=6;
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
kfree(bmp);
@ -1077,10 +1101,9 @@ ns_cmd_free_mpvirt:
ns_cmd_free_mp:
kfree(mp);
ns_cmd_exit:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
phba->brd_no, vport->vpi, cmdcode, rc, vport->fc_flag,
vport->fc_rscn_id_cnt);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
return 1;
}
@ -1106,12 +1129,11 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], latt);
if (latt || irsp->ulpStatus) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0229 FDMI cmd %04x failed, latt = %d "
"ulpStatus: x%x, rid x%x\n",
phba->brd_no, vport->vpi,
be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
irsp->un.ulpWord[4]);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0229 FDMI cmd %04x failed, latt = %d "
"ulpStatus: x%x, rid x%x\n",
be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
irsp->un.ulpWord[4]);
lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
@ -1119,10 +1141,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = lpfc_findnode_did(vport, FDMI_DID);
if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0220 FDMI rsp failed Data: x%x\n",
phba->brd_no, vport->vpi,
be16_to_cpu(fdmi_cmd));
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0220 FDMI rsp failed Data: x%x\n",
be16_to_cpu(fdmi_cmd));
}
switch (be16_to_cpu(fdmi_cmd)) {
@ -1185,11 +1206,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
INIT_LIST_HEAD(&bmp->list);
/* FDMI request */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0218 FDMI Request Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi, vport->fc_flag,
vport->port_state, cmdcode);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0218 FDMI Request Data: x%x x%x x%x\n",
vport->fc_flag, vport->port_state, cmdcode);
CtReq = (struct lpfc_sli_ct_request *) mp->virt;
memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
@ -1449,7 +1468,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
pab->ab.EntryCnt++;
size += FOURBYTES + len;
if (phba->cfg_fdmi_on == 2) {
if (vport->cfg_fdmi_on == 2) {
/* #6 Port attribute entry */
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
size);
@ -1499,10 +1518,12 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
bpl->tus.w = le32_to_cpu(bpl->tus.w);
cmpl = lpfc_cmpl_ct_cmd_fdmi;
lpfc_nlp_get(ndlp);
if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
return 0;
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
fdmi_cmd_free_bmp:
kfree(bmp);
@ -1512,9 +1533,9 @@ fdmi_cmd_free_mp:
kfree(mp);
fdmi_cmd_exit:
/* Issue FDMI request failed */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0244 Issue FDMI request failed Data: x%x\n",
phba->brd_no, vport->vpi, cmdcode);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0244 Issue FDMI request failed Data: x%x\n",
cmdcode);
return 1;
}


@ -71,15 +71,22 @@
* lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
* lpfc_debugfs.h .
*/
static int lpfc_debugfs_enable = 0;
static int lpfc_debugfs_enable = 1;
module_param(lpfc_debugfs_enable, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
static int lpfc_debugfs_max_disc_trc = 0; /* This MUST be a power of 2 */
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_disc_trc = 0;
module_param(lpfc_debugfs_max_disc_trc, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
"Set debugfs discovery trace depth");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_slow_ring_trc = 0;
module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
"Set debugfs slow ring trace depth");
static int lpfc_debugfs_mask_disc_trc = 0;
module_param(lpfc_debugfs_mask_disc_trc, int, 0);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
@ -87,28 +94,34 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
#include <linux/debugfs.h>
/* size of discovery_trace output line */
#define LPFC_DISC_TRC_ENTRY_SIZE 80
/* size of output line, for discovery_trace and slow_ring_trace */
#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
/* nodelist output buffer size */
#define LPFC_NODELIST_SIZE 8192
#define LPFC_NODELIST_ENTRY_SIZE 120
/* dumpslim output buffer size */
#define LPFC_DUMPSLIM_SIZE 4096
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
struct lpfc_debug {
char *buffer;
int len;
};
atomic_t lpfc_debugfs_disc_trc_cnt = ATOMIC_INIT(0);
unsigned long lpfc_debugfs_start_time = 0L;
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
static int
lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
{
int i, index, len, enable;
uint32_t ms;
struct lpfc_disc_trc *dtp;
char buffer[80];
struct lpfc_debugfs_trc *dtp;
char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
enable = lpfc_debugfs_enable;
@ -122,7 +135,8 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
if (!dtp->fmt)
continue;
ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
snprintf(buffer, 80, "%010d:%010d ms:%s\n",
snprintf(buffer,
LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
dtp->seq_cnt, ms, dtp->fmt);
len += snprintf(buf+len, size-len, buffer,
dtp->data1, dtp->data2, dtp->data3);
@ -132,7 +146,8 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
if (!dtp->fmt)
continue;
ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
snprintf(buffer, 80, "%010d:%010d ms:%s\n",
snprintf(buffer,
LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
dtp->seq_cnt, ms, dtp->fmt);
len += snprintf(buf+len, size-len, buffer,
dtp->data1, dtp->data2, dtp->data3);
@ -142,6 +157,236 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
return len;
}
static int
lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
{
int i, index, len, enable;
uint32_t ms;
struct lpfc_debugfs_trc *dtp;
char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
enable = lpfc_debugfs_enable;
lpfc_debugfs_enable = 0;
len = 0;
index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
(lpfc_debugfs_max_slow_ring_trc - 1);
for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
dtp = phba->slow_ring_trc + i;
if (!dtp->fmt)
continue;
ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
snprintf(buffer,
LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
dtp->seq_cnt, ms, dtp->fmt);
len += snprintf(buf+len, size-len, buffer,
dtp->data1, dtp->data2, dtp->data3);
}
for (i = 0; i < index; i++) {
dtp = phba->slow_ring_trc + i;
if (!dtp->fmt)
continue;
ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
snprintf(buffer,
LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
dtp->seq_cnt, ms, dtp->fmt);
len += snprintf(buf+len, size-len, buffer,
dtp->data1, dtp->data2, dtp->data3);
}
lpfc_debugfs_enable = enable;
return len;
}
static int lpfc_debugfs_last_hbq = -1;
static int
lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
{
int len = 0;
int cnt, i, j, found, posted, low;
uint32_t phys, raw_index, getidx;
struct lpfc_hbq_init *hip;
struct hbq_s *hbqs;
struct lpfc_hbq_entry *hbqe;
struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *hbq_buf;
cnt = LPFC_HBQINFO_SIZE;
spin_lock_irq(&phba->hbalock);
/* toggle between multiple hbqs, if any */
i = lpfc_sli_hbq_count();
if (i > 1) {
lpfc_debugfs_last_hbq++;
if (lpfc_debugfs_last_hbq >= i)
lpfc_debugfs_last_hbq = 0;
}
else
lpfc_debugfs_last_hbq = 0;
i = lpfc_debugfs_last_hbq;
len += snprintf(buf+len, size-len, "HBQ %d Info\n", i);
hbqs = &phba->hbqs[i];
posted = 0;
list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list)
posted++;
hip = lpfc_hbq_defs[i];
len += snprintf(buf+len, size-len,
"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
hip->hbq_index, hip->profile, hip->rn,
hip->buffer_count, hip->init_count, hip->add_count, posted);
raw_index = phba->hbq_get[i];
getidx = le32_to_cpu(raw_index);
len += snprintf(buf+len, size-len,
"entrys:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
hbqs->entry_count, hbqs->hbqPutIdx, hbqs->next_hbqPutIdx,
hbqs->local_hbqGetIdx, getidx);
hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
for (j=0; j<hbqs->entry_count; j++) {
len += snprintf(buf+len, size-len,
"%03d: %08x %04x %05x ", j,
hbqe->bde.addrLow, hbqe->bde.tus.w, hbqe->buffer_tag);
i = 0;
found = 0;
/* First calculate if slot has an associated posted buffer */
low = hbqs->hbqPutIdx - posted;
if (low >= 0) {
if ((j >= hbqs->hbqPutIdx) || (j < low)) {
len += snprintf(buf+len, size-len, "Unused\n");
goto skipit;
}
}
else {
if ((j >= hbqs->hbqPutIdx) &&
(j < (hbqs->entry_count+low))) {
len += snprintf(buf+len, size-len, "Unused\n");
goto skipit;
}
}
/* Get the Buffer info for the posted buffer */
list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
if (phys == hbqe->bde.addrLow) {
len += snprintf(buf+len, size-len,
"Buf%d: %p %06x\n", i,
hbq_buf->dbuf.virt, hbq_buf->tag);
found = 1;
break;
}
i++;
}
if (!found) {
len += snprintf(buf+len, size-len, "No DMAinfo?\n");
}
skipit:
hbqe++;
if (len > LPFC_HBQINFO_SIZE - 54)
break;
}
spin_unlock_irq(&phba->hbalock);
return len;
}
static int
lpfc_debugfs_dumpslim_data(struct lpfc_hba *phba, char *buf, int size)
{
int len = 0;
int cnt, i, off;
uint32_t word0, word1, word2, word3;
uint32_t *ptr;
struct lpfc_pgp *pgpp;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
cnt = LPFC_DUMPSLIM_SIZE;
off = 0;
spin_lock_irq(&phba->hbalock);
len += snprintf(buf+len, size-len, "SLIM Mailbox\n");
ptr = (uint32_t *)phba->slim2p;
i = sizeof(MAILBOX_t);
while (i > 0) {
len += snprintf(buf+len, size-len,
"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
*(ptr+5), *(ptr+6), *(ptr+7));
ptr += 8;
i -= (8 * sizeof(uint32_t));
off += (8 * sizeof(uint32_t));
}
len += snprintf(buf+len, size-len, "SLIM PCB\n");
ptr = (uint32_t *)&phba->slim2p->pcb;
i = sizeof(PCB_t);
while (i > 0) {
len += snprintf(buf+len, size-len,
"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
*(ptr+5), *(ptr+6), *(ptr+7));
ptr += 8;
i -= (8 * sizeof(uint32_t));
off += (8 * sizeof(uint32_t));
}
pgpp = (struct lpfc_pgp *)&phba->slim2p->mbx.us.s3_pgp.port;
pring = &psli->ring[0];
len += snprintf(buf+len, size-len,
"Ring 0: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx, pring->flag,
pgpp->rspPutInx, pring->numRiocb);
pgpp++;
pring = &psli->ring[1];
len += snprintf(buf+len, size-len,
"Ring 1: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx, pring->flag,
pgpp->rspPutInx, pring->numRiocb);
pgpp++;
pring = &psli->ring[2];
len += snprintf(buf+len, size-len,
"Ring 2: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx, pring->flag,
pgpp->rspPutInx, pring->numRiocb);
pgpp++;
pring = &psli->ring[3];
len += snprintf(buf+len, size-len,
"Ring 3: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
"RSP PutInx:%d Max:%d\n",
pgpp->cmdGetInx, pring->numCiocb,
pring->next_cmdidx, pring->local_getidx, pring->flag,
pgpp->rspPutInx, pring->numRiocb);
ptr = (uint32_t *)&phba->slim2p->mbx.us.s3_pgp.hbq_get;
word0 = readl(phba->HAregaddr);
word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr);
word3 = readl(phba->HCregaddr);
len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
word0, word1, word2, word3);
spin_unlock_irq(&phba->hbalock);
return len;
}
static int
lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
{
@ -204,7 +449,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
ndlp->nlp_rpi, ndlp->nlp_flag);
if (!ndlp->nlp_type)
len += snprintf(buf+len, size-len, "UNKNOWN_TYPE");
len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
if (ndlp->nlp_type & NLP_FC_NODE)
len += snprintf(buf+len, size-len, "FC_NODE ");
if (ndlp->nlp_type & NLP_FABRIC)
@ -213,7 +458,9 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
ndlp->nlp_sid);
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
len += snprintf(buf+len, size-len, "FCP_INITIATOR");
len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
len += snprintf(buf+len, size-len, "refcnt:%x",
atomic_read(&ndlp->kref.refcount));
len += snprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
@ -227,7 +474,7 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
{
#ifdef CONFIG_LPFC_DEBUG_FS
struct lpfc_disc_trc *dtp;
struct lpfc_debugfs_trc *dtp;
int index;
if (!(lpfc_debugfs_mask_disc_trc & mask))
@ -244,7 +491,32 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
dtp->data1 = data1;
dtp->data2 = data2;
dtp->data3 = data3;
dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_disc_trc_cnt);
dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
dtp->jif = jiffies;
#endif
return;
}
inline void
lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
{
#ifdef CONFIG_LPFC_DEBUG_FS
struct lpfc_debugfs_trc *dtp;
int index;
if (!lpfc_debugfs_enable || !lpfc_debugfs_max_slow_ring_trc ||
!phba || !phba->slow_ring_trc)
return;
index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
(lpfc_debugfs_max_slow_ring_trc - 1);
dtp = phba->slow_ring_trc + index;
dtp->fmt = fmt;
dtp->data1 = data1;
dtp->data2 = data2;
dtp->data3 = data3;
dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
dtp->jif = jiffies;
#endif
return;
@ -269,7 +541,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundry */
size = (lpfc_debugfs_max_disc_trc * LPFC_DISC_TRC_ENTRY_SIZE);
size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
size = PAGE_ALIGN(size);
debug->buffer = kmalloc(size, GFP_KERNEL);
@ -286,6 +558,95 @@ out:
return rc;
}
static int
lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
{
struct lpfc_hba *phba = inode->i_private;
struct lpfc_debug *debug;
int size;
int rc = -ENOMEM;
if (!lpfc_debugfs_max_slow_ring_trc) {
rc = -ENOSPC;
goto out;
}
debug = kmalloc(sizeof(*debug), GFP_KERNEL);
if (!debug)
goto out;
/* Round to page boundry */
size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
size = PAGE_ALIGN(size);
debug->buffer = kmalloc(size, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
debug->len = lpfc_debugfs_slow_ring_trc_data(phba, debug->buffer, size);
file->private_data = debug;
rc = 0;
out:
return rc;
}
static int
lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
{
struct lpfc_hba *phba = inode->i_private;
struct lpfc_debug *debug;
int rc = -ENOMEM;
debug = kmalloc(sizeof(*debug), GFP_KERNEL);
if (!debug)
goto out;
/* Round to page boundry */
debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
debug->len = lpfc_debugfs_hbqinfo_data(phba, debug->buffer,
LPFC_HBQINFO_SIZE);
file->private_data = debug;
rc = 0;
out:
return rc;
}
static int
lpfc_debugfs_dumpslim_open(struct inode *inode, struct file *file)
{
struct lpfc_hba *phba = inode->i_private;
struct lpfc_debug *debug;
int rc = -ENOMEM;
debug = kmalloc(sizeof(*debug), GFP_KERNEL);
if (!debug)
goto out;
/* Round to page boundry */
debug->buffer = kmalloc(LPFC_DUMPSLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
debug->len = lpfc_debugfs_dumpslim_data(phba, debug->buffer,
LPFC_DUMPSLIM_SIZE);
file->private_data = debug;
rc = 0;
out:
return rc;
}
static int
lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
{
@ -372,6 +733,33 @@ static struct file_operations lpfc_debugfs_op_nodelist = {
.release = lpfc_debugfs_release,
};
#undef lpfc_debugfs_op_hbqinfo
static struct file_operations lpfc_debugfs_op_hbqinfo = {
.owner = THIS_MODULE,
.open = lpfc_debugfs_hbqinfo_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
.release = lpfc_debugfs_release,
};
#undef lpfc_debugfs_op_dumpslim
static struct file_operations lpfc_debugfs_op_dumpslim = {
.owner = THIS_MODULE,
.open = lpfc_debugfs_dumpslim_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
.release = lpfc_debugfs_release,
};
#undef lpfc_debugfs_op_slow_ring_trc
static struct file_operations lpfc_debugfs_op_slow_ring_trc = {
.owner = THIS_MODULE,
.open = lpfc_debugfs_slow_ring_trc_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
.release = lpfc_debugfs_release,
};
static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
#endif
@ -387,6 +775,116 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!lpfc_debugfs_enable)
return;
/* Setup lpfc root directory */
if (!lpfc_debugfs_root) {
lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
atomic_set(&lpfc_debugfs_hba_count, 0);
if (!lpfc_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs root\n");
goto debug_failed;
}
}
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
/* Setup lpfcX directory for specific HBA */
snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
if (!phba->hba_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs hba\n");
goto debug_failed;
}
atomic_inc(&lpfc_debugfs_hba_count);
atomic_set(&phba->debugfs_vport_count, 0);
/* Setup hbqinfo */
snprintf(name, sizeof(name), "hbqinfo");
phba->debug_hbqinfo =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_hbqinfo);
if (!phba->debug_hbqinfo) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs hbqinfo\n");
goto debug_failed;
}
/* Setup dumpslim */
snprintf(name, sizeof(name), "dumpslim");
phba->debug_dumpslim =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpslim);
if (!phba->debug_dumpslim) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs dumpslim\n");
goto debug_failed;
}
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
num = lpfc_debugfs_max_slow_ring_trc - 1;
if (num & lpfc_debugfs_max_slow_ring_trc) {
/* Change to be a power of 2 */
num = lpfc_debugfs_max_slow_ring_trc;
i = 0;
while (num > 1) {
num = num >> 1;
i++;
}
lpfc_debugfs_max_slow_ring_trc = (1 << i);
printk(KERN_ERR
"lpfc_debugfs_max_disc_trc changed to "
"%d\n", lpfc_debugfs_max_disc_trc);
}
}
snprintf(name, sizeof(name), "slow_ring_trace");
phba->debug_slow_ring_trc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_slow_ring_trc);
if (!phba->debug_slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs "
"slow_ring_trace\n");
goto debug_failed;
}
if (!phba->slow_ring_trc) {
phba->slow_ring_trc = kmalloc(
(sizeof(struct lpfc_debugfs_trc) *
lpfc_debugfs_max_slow_ring_trc),
GFP_KERNEL);
if (!phba->slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs "
"slow_ring buffer\n");
goto debug_failed;
}
atomic_set(&phba->slow_ring_trc_cnt, 0);
memset(phba->slow_ring_trc, 0,
(sizeof(struct lpfc_debugfs_trc) *
lpfc_debugfs_max_slow_ring_trc));
}
}
snprintf(name, sizeof(name), "vport%d", vport->vpi);
if (!vport->vport_debugfs_root) {
vport->vport_debugfs_root =
debugfs_create_dir(name, phba->hba_debugfs_root);
if (!vport->vport_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cant create debugfs");
goto debug_failed;
}
atomic_inc(&phba->debugfs_vport_count);
}
if (lpfc_debugfs_max_disc_trc) {
num = lpfc_debugfs_max_disc_trc - 1;
if (num & lpfc_debugfs_max_disc_trc) {
@ -399,48 +897,24 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
lpfc_debugfs_max_disc_trc = (1 << i);
printk(KERN_ERR
"lpfc_debugfs_max_disc_trc changed to %d\n",
lpfc_debugfs_max_disc_trc);
"lpfc_debugfs_max_disc_trc changed to %d\n",
lpfc_debugfs_max_disc_trc);
}
}
if (!lpfc_debugfs_root) {
lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
atomic_set(&lpfc_debugfs_hba_count, 0);
if (!lpfc_debugfs_root)
goto debug_failed;
}
snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
if (!phba->hba_debugfs_root)
goto debug_failed;
atomic_inc(&lpfc_debugfs_hba_count);
atomic_set(&phba->debugfs_vport_count, 0);
}
snprintf(name, sizeof(name), "vport%d", vport->vpi);
if (!vport->vport_debugfs_root) {
vport->vport_debugfs_root =
debugfs_create_dir(name, phba->hba_debugfs_root);
if (!vport->vport_debugfs_root)
goto debug_failed;
atomic_inc(&phba->debugfs_vport_count);
}
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
vport->disc_trc = kmalloc(
(sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc),
(sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc),
GFP_KERNEL);
if (!vport->disc_trc)
if (!vport->disc_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs disc trace "
"buffer\n");
goto debug_failed;
}
atomic_set(&vport->disc_trc_cnt, 0);
memset(vport->disc_trc, 0,
(sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc));
(sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_disc_trc));
snprintf(name, sizeof(name), "discovery_trace");
vport->debug_disc_trc =
@ -448,9 +922,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_disc_trc);
if (!vport->debug_disc_trc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0409 Cannot create debugfs",
phba->brd_no);
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cannot create debugfs "
"discovery_trace\n");
goto debug_failed;
}
snprintf(name, sizeof(name), "nodelist");
@ -459,9 +933,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_nodelist);
if (!vport->debug_nodelist) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0409 Cannot create debugfs",
phba->brd_no);
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0409 Cant create debugfs nodelist");
goto debug_failed;
}
debug_failed:
@ -488,21 +961,45 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(vport->debug_nodelist); /* nodelist */
vport->debug_nodelist = NULL;
}
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
vport->vport_debugfs_root = NULL;
atomic_dec(&phba->debugfs_vport_count);
}
if (atomic_read(&phba->debugfs_vport_count) == 0) {
debugfs_remove(vport->phba->hba_debugfs_root); /* lpfcX */
vport->phba->hba_debugfs_root = NULL;
atomic_dec(&lpfc_debugfs_hba_count);
if (phba->debug_hbqinfo) {
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
}
if (phba->debug_dumpslim) {
debugfs_remove(phba->debug_dumpslim); /* dumpslim */
phba->debug_dumpslim = NULL;
}
if (phba->slow_ring_trc) {
kfree(phba->slow_ring_trc);
phba->slow_ring_trc = NULL;
}
if (phba->debug_slow_ring_trc) {
/* slow_ring_trace */
debugfs_remove(phba->debug_slow_ring_trc);
phba->debug_slow_ring_trc = NULL;
}
if (phba->hba_debugfs_root) {
debugfs_remove(phba->hba_debugfs_root); /* lpfcX */
phba->hba_debugfs_root = NULL;
atomic_dec(&lpfc_debugfs_hba_count);
}
if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
debugfs_remove(lpfc_debugfs_root); /* lpfc */
lpfc_debugfs_root = NULL;
}
}
#endif
return;
}


@ -22,7 +22,7 @@
#define _H_LPFC_DEBUG_FS
#ifdef CONFIG_LPFC_DEBUG_FS
struct lpfc_disc_trc {
struct lpfc_debugfs_trc {
char *fmt;
uint32_t data1;
uint32_t data2;

File diff suppressed because it is too large


@ -83,10 +83,17 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
if (ndlp->nlp_sid != NLP_NO_SID) {
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
lpfc_sli_abort_iocb(ndlp->vport,
&phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
/*
* A device is normally blocked for rediscovery and unblocked when
* devloss timeout happens. In case a vport is removed or driver
* unloaded before devloss timeout happens, we need to unblock here.
*/
scsi_target_unblock(&rport->dev);
return;
}
@ -194,32 +201,30 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
/* flush the target */
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
if (vport->load_flag & FC_UNLOADING)
warn_on = 0;
if (warn_on) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0203 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi,
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0203 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0204 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi,
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0204 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
}
if (!(vport->load_flag & FC_UNLOADING) &&
@ -344,12 +349,14 @@ lpfc_work_list_done(struct lpfc_hba *phba)
}
void
static void
lpfc_work_done(struct lpfc_hba *phba)
{
struct lpfc_sli_ring *pring;
uint32_t ha_copy, status, control, work_port_events;
struct lpfc_vport **vports;
struct lpfc_vport *vport;
int i;
spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
@ -364,48 +371,41 @@ lpfc_work_done(struct lpfc_hba *phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
spin_lock_irq(&phba->hbalock);
list_for_each_entry(vport, &phba->port_list, listentry) {
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (!scsi_host_get(shost)) {
continue;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS; i++) {
/*
* We could have no vports in array if unloading, so if
* this happens then just use the pport
*/
if (vports[i] == NULL && i == 0)
vport = phba->pport;
else
vport = vports[i];
if (vport == NULL)
break;
work_port_events = vport->work_port_events;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
lpfc_els_timeout_handler(vport);
if (work_port_events & WORKER_HB_TMO)
lpfc_hb_timeout_handler(phba);
if (work_port_events & WORKER_MBOX_TMO)
lpfc_mbox_timeout_handler(phba);
if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
lpfc_unblock_fabric_iocbs(phba);
if (work_port_events & WORKER_FDMI_TMO)
lpfc_fdmi_timeout_handler(vport);
if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
lpfc_ramp_down_queue_handler(phba);
if (work_port_events & WORKER_RAMP_UP_QUEUE)
lpfc_ramp_up_queue_handler(phba);
spin_lock_irq(&vport->work_port_lock);
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
}
spin_unlock_irq(&phba->hbalock);
work_port_events = vport->work_port_events;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
lpfc_els_timeout_handler(vport);
if (work_port_events & WORKER_HB_TMO)
lpfc_hb_timeout_handler(phba);
if (work_port_events & WORKER_MBOX_TMO)
lpfc_mbox_timeout_handler(phba);
if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
lpfc_unblock_fabric_iocbs(phba);
if (work_port_events & WORKER_FDMI_TMO)
lpfc_fdmi_timeout_handler(vport);
if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
lpfc_ramp_down_queue_handler(phba);
if (work_port_events & WORKER_RAMP_UP_QUEUE)
lpfc_ramp_up_queue_handler(phba);
spin_lock_irq(&vport->work_port_lock);
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
scsi_host_put(shost);
spin_lock_irq(&phba->hbalock);
}
spin_unlock_irq(&phba->hbalock);
lpfc_destroy_vport_work_array(vports);
pring = &phba->sli.ring[LPFC_ELS_RING];
status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
@ -426,10 +426,19 @@ lpfc_work_done(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
control = readl(phba->HCregaddr);
if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
lpfc_debugfs_slow_ring_trc(phba,
"WRK Enable ring: cntl:x%x hacopy:x%x",
control, ha_copy, 0);
control |= (HC_R0INT_ENA << LPFC_ELS_RING);
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
else {
lpfc_debugfs_slow_ring_trc(phba,
"WRK Ring ok: cntl:x%x hacopy:x%x",
control, ha_copy, 0);
}
spin_unlock_irq(&phba->hbalock);
}
lpfc_work_list_done(phba);
@ -439,32 +448,22 @@ static int
check_work_wait_done(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
struct lpfc_sli_ring *pring;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
int rc = 0;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport->work_port_events) {
rc = 1;
goto exit;
break;
}
}
if (phba->work_ha || (!list_empty(&phba->work_list)) ||
kthread_should_stop()) {
if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
rc = 1;
goto exit;
}
pring = &phba->sli.ring[LPFC_ELS_RING];
if (pring->flag & LPFC_DEFERRED_RING_EVENT)
rc = 1;
exit:
if (rc)
phba->work_found++;
else
} else
phba->work_found = 0;
spin_unlock_irq(&phba->hbalock);
return rc;
}
@ -592,7 +591,6 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
/* free any ndlp's on unused list */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
/* free any ndlp's in unused state */
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
lpfc_drop_node(vport, ndlp);
@ -605,8 +603,9 @@ lpfc_linkdown(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_vport *port_iterator;
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
if (phba->link_state == LPFC_LINK_DOWN) {
return 0;
@ -617,13 +616,13 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->pport->fc_flag &= ~FC_LBIT;
}
spin_unlock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
/* Issue a LINK DOWN event to all nodes */
lpfc_linkdown_port(port_iterator);
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
/* Issue a LINK DOWN event to all nodes */
lpfc_linkdown_port(vports[i]);
}
lpfc_destroy_vport_work_array(vports);
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
@ -724,7 +723,8 @@ lpfc_linkup_port(struct lpfc_vport *vport)
static int
lpfc_linkup(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
struct lpfc_vport **vports;
int i;
phba->link_state = LPFC_LINK_UP;
@ -732,9 +732,11 @@ lpfc_linkup(struct lpfc_hba *phba)
clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
del_timer_sync(&phba->fabric_block_timer);
list_for_each_entry(vport, &phba->port_list, listentry) {
lpfc_linkup_port(vport);
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
lpfc_linkup_port(vports[i]);
lpfc_destroy_vport_work_array(vports);
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
lpfc_issue_clear_la(phba, phba->pport);
@ -764,12 +766,10 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check for error */
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
"state x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus,
vport->port_state);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0320 CLEAR_LA mbxStatus error x%x hba "
"state x%x\n",
mb->mbxStatus, vport->port_state);
phba->link_state = LPFC_HBA_ERROR;
goto out;
}
@ -801,10 +801,8 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
out:
/* Device Discovery completes */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0225 Device Discovery completes\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0225 Device Discovery completes\n");
mempool_free(pmb, phba->mbox_mem_pool);
spin_lock_irq(shost->host_lock);
@ -861,19 +859,17 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
out:
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
"HBA state x%x\n",
phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
vport->port_state);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0306 CONFIG_LINK mbxStatus error x%x "
"HBA state x%x\n",
pmb->mb.mbxStatus, vport->port_state);
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_linkdown(phba);
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
phba->brd_no, vport->vpi, vport->port_state);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0200 CONFIG_LINK bad hba state x%x\n",
vport->port_state);
lpfc_issue_clear_la(phba, vport);
return;
@ -890,12 +886,10 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check for error */
if (mb->mbxStatus) {
/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0319 READ_SPARAM mbxStatus error x%x "
"hba state x%x>\n",
phba->brd_no, vport->vpi, mb->mbxStatus,
vport->port_state);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0319 READ_SPARAM mbxStatus error x%x "
"hba state x%x>\n",
mb->mbxStatus, vport->port_state);
lpfc_linkdown(phba);
goto out;
}
@ -978,7 +972,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
if (i == 0) {
phba->alpa_map[0] = 0;
} else {
if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
int numalpa, j, k;
union {
uint8_t pamap[16];
@ -1004,10 +998,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
lpfc_printf_log(phba,
KERN_WARNING,
LOG_LINK_EVENT,
"%d:1304 Link Up Event "
"1304 Link Up Event "
"ALPA map Data: x%x "
"x%x x%x x%x\n",
phba->brd_no,
un.pa.wd1, un.pa.wd2,
un.pa.wd3, un.pa.wd4);
}
@ -1015,7 +1008,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
}
} else {
if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
if (phba->max_vpi && phba->cfg_npiv_enable &&
if (phba->max_vpi && phba->cfg_enable_npiv &&
(phba->sli_rev == 3))
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
@ -1055,11 +1048,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
}
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
phba->brd_no, vport->vpi,
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_issue_clear_la(phba, vport);
return;
}
@ -1100,8 +1091,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Check for error */
if (mb->mbxStatus) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1307 READ_LA mbox error x%x state x%x\n",
phba->brd_no, mb->mbxStatus, vport->port_state);
"1307 READ_LA mbox error x%x state x%x\n",
mb->mbxStatus, vport->port_state);
lpfc_mbx_issue_link_down(phba);
phba->link_state = LPFC_HBA_ERROR;
goto lpfc_mbx_cmpl_read_la_free_mbuf;
@ -1132,26 +1123,26 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1306 Link Up Event in loop back mode "
"x%x received Data: x%x x%x x%x x%x\n",
phba->brd_no, la->eventTag, phba->fc_eventTag,
la->granted_AL_PA, la->UlnkSpeed,
phba->alpa_map[0]);
"1306 Link Up Event in loop back mode "
"x%x received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
la->granted_AL_PA, la->UlnkSpeed,
phba->alpa_map[0]);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"%d:1303 Link Up Event x%x received "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, la->eventTag, phba->fc_eventTag,
la->granted_AL_PA, la->UlnkSpeed,
phba->alpa_map[0]);
"1303 Link Up Event x%x received "
"Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
la->granted_AL_PA, la->UlnkSpeed,
phba->alpa_map[0]);
}
lpfc_mbx_process_link_up(phba, la);
} else {
phba->fc_stat.LinkDown++;
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"%d:1305 Link Down Event x%x received "
"1305 Link Down Event x%x received "
"Data: x%x x%x x%x\n",
phba->brd_no, la->eventTag, phba->fc_eventTag,
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
lpfc_mbx_issue_link_down(phba);
}
@ -1199,10 +1190,9 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
case 0x0011:
case 0x0020:
case 0x9700:
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d (%d):0911 cmpl_unreg_vpi, "
"mb status = 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0911 cmpl_unreg_vpi, mb status = 0x%x\n",
mb->mbxStatus);
break;
}
vport->unreg_vpi_cmpl = VPORT_OK;
@ -1231,9 +1221,8 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
"%d (%d):1800 Could not issue unreg_vpi\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
vport->unreg_vpi_cmpl = VPORT_ERROR;
}
@ -1250,9 +1239,9 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
case 0x0011:
case 0x9601:
case 0x9602:
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0912 cmpl_reg_vpi, mb status = 0x%x\n",
mb->mbxStatus);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@ -1289,15 +1278,15 @@ void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct lpfc_vport *next_vport;
MAILBOX_t *mb = &pmb->mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp;
ndlp = (struct lpfc_nodelist *) pmb->context2;
struct lpfc_vport **vports;
int i;
ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
pmb->context2 = NULL;
if (mb->mbxStatus) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@ -1314,10 +1303,9 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d (%d):0258 Register Fabric login error: 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
"0258 Register Fabric login error: 0x%x\n",
mb->mbxStatus);
return;
}
@ -1328,21 +1316,26 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
list_for_each_entry(next_vport, &phba->port_list, listentry) {
if (next_vport->port_type == LPFC_PHYSICAL_PORT)
continue;
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(next_vport);
else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_vport_set_state(vport,
FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0259 No NPIV Fabric "
"support\n",
phba->brd_no, vport->vpi);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0;
i < LPFC_MAX_VPORTS && vports[i] != NULL;
i++) {
if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
continue;
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vports[i]);
else if (phba->sli3_options &
LPFC_SLI3_NPIV_ENABLED) {
lpfc_vport_set_state(vports[i],
FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_vlog(vport, KERN_ERR,
LOG_ELS,
"0259 No NPIV "
"Fabric support\n");
}
}
}
lpfc_destroy_vport_work_array(vports);
lpfc_do_scr_ns_plogi(phba, vport);
}
@ -1386,9 +1379,9 @@ out:
return;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0260 Register NameServer error: 0x%x\n",
phba->brd_no, vport->vpi, mb->mbxStatus);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0260 Register NameServer error: 0x%x\n",
mb->mbxStatus);
return;
}
@ -1598,7 +1591,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
[NLP_STE_NPR_NODE] = "NPR",
};
if (state < ARRAY_SIZE(states) && states[state])
if (state < NLP_STE_MAX_STATE && states[state])
strlcpy(buffer, states[state], size);
else
snprintf(buffer, size, "unknown (%d)", state);
@ -1613,12 +1606,11 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state = ndlp->nlp_state;
char name1[16], name2[16];
lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
"%d (%d):0904 NPort state transition x%06x, %s -> %s\n",
vport->phba->brd_no, vport->vpi,
ndlp->nlp_DID,
lpfc_nlp_state_name(name1, sizeof(name1), old_state),
lpfc_nlp_state_name(name2, sizeof(name2), state));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0904 NPort state transition x%06x, %s -> %s\n",
ndlp->nlp_DID,
lpfc_nlp_state_name(name1, sizeof(name1), old_state),
lpfc_nlp_state_name(name2, sizeof(name2), state));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node statechg did:x%x old:%d ste:%d",
@ -1664,16 +1656,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
spin_lock_irq(shost->host_lock);
list_del_init(&ndlp->nlp_listp);
ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
spin_unlock_irq(shost->host_lock);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
lpfc_nlp_put(ndlp);
}
@ -1710,12 +1693,12 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
spin_unlock_irq(shost->host_lock);
/* Start Discovery Timer state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0247 Start Discovery Timer state x%x "
"Data: x%x x%lx x%x x%x\n",
phba->brd_no, vport->vpi, vport->port_state, tmo,
(unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
vport->fc_adisc_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0247 Start Discovery Timer state x%x "
"Data: x%x x%lx x%x x%x\n",
vport->port_state, tmo,
(unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
vport->fc_adisc_cnt);
return;
}
@ -1727,7 +1710,6 @@ int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
unsigned long iflags;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@ -1746,13 +1728,11 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
}
/* Cancel Discovery Timer state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0248 Cancel Discovery Timer state x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi, vport->port_state,
vport->fc_flag, vport->fc_plogi_cnt,
vport->fc_adisc_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0248 Cancel Discovery Timer state x%x "
"Data: x%x x%x x%x\n",
vport->port_state, vport->fc_flag,
vport->fc_plogi_cnt, vport->fc_adisc_cnt);
return 0;
}
@ -1935,10 +1915,9 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
rc = lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
"%d (%d):1815 Could not issue "
"unreg_did (default rpis)\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
"1815 Could not issue "
"unreg_did (default rpis)\n");
mempool_free(mbox, phba->mbox_mem_pool);
}
}
@ -1957,12 +1936,11 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_dmabuf *mp;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d (%d):0900 Cleanup node for NPort x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0900 Cleanup node for NPort x%x "
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_dequeue_node(vport, ndlp);
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
@ -2094,7 +2072,6 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
uint32_t data1;
@ -2104,20 +2081,18 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d (%d):0929 FIND node DID "
" Data: x%p x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
"Data: x%p x%x x%x x%x\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
return ndlp;
}
}
/* FIND node did <did> NOT FOUND */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
"%d (%d):0932 FIND node did x%x NOT FOUND.\n",
phba->brd_no, vport->vpi, did);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0932 FIND node did x%x NOT FOUND.\n", did);
return NULL;
}
@ -2208,7 +2183,7 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport)
/* If cfg_scan_down is set, start from highest
* ALPA (0xef) to lowest (0x1).
*/
if (phba->cfg_scan_down)
if (vport->cfg_scan_down)
index = j;
else
index = FC_MAXLOOP - j - 1;
@ -2309,12 +2284,11 @@ lpfc_disc_start(struct lpfc_vport *vport)
vport->num_disc_nodes = 0;
/* Start Discovery state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0202 Start Discovery hba state x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, vport->vpi, vport->port_state,
vport->fc_flag, vport->fc_plogi_cnt,
vport->fc_adisc_cnt);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0202 Start Discovery hba state x%x "
"Data: x%x x%x x%x\n",
vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
vport->fc_adisc_cnt);
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@ -2532,10 +2506,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
* FAN
*/
/* FAN timeout */
lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
"%d (%d):0221 FAN timeout\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0221 FAN timeout\n");
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
@ -2562,10 +2534,9 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_FLOGI:
/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
/* Initial FLOGI timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0222 Initial %s timeout\n",
phba->brd_no, vport->vpi,
vport->vpi ? "FLOGI" : "FDISC");
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0222 Initial %s timeout\n",
vport->vpi ? "FLOGI" : "FDISC");
/* Assume no Fabric and go on with discovery.
* Check for outstanding ELS FLOGI to abort.
@ -2581,11 +2552,9 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_FABRIC_CFG_LINK:
/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
NameServer login */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0223 Timeout while waiting for "
"NameServer login\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0223 Timeout while waiting for "
"NameServer login\n");
/* Next look for NameServer ndlp */
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp)
@ -2596,11 +2565,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_NS_QRY:
/* Check for wait for NameServer Rsp timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0224 NameServer Query timeout "
"Data: x%x x%x\n",
phba->brd_no, vport->vpi,
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0224 NameServer Query timeout "
"Data: x%x x%x\n",
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
/* Try it one more time */
@ -2627,10 +2595,9 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
/* Setup and issue mailbox INITIALIZE LINK command */
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0206 Device Discovery "
"completion error\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0206 Device Discovery "
"completion error\n");
phba->link_state = LPFC_HBA_ERROR;
break;
}
@ -2651,9 +2618,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_DISC_AUTH:
/* Node Authentication timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0227 Node Authentication timeout\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0227 Node Authentication timeout\n");
lpfc_disc_flush_list(vport);
/*
@ -2670,11 +2636,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_VPORT_READY:
if (vport->fc_flag & FC_RSCN_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0231 RSCN timeout Data: x%x "
"x%x\n",
phba->brd_no, vport->vpi,
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0231 RSCN timeout Data: x%x "
"x%x\n",
vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(vport);
@ -2685,20 +2650,17 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0229 Unexpected discovery timeout, "
"vport State x%x\n",
phba->brd_no, vport->vpi, vport->port_state);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0229 Unexpected discovery timeout, "
"vport State x%x\n", vport->port_state);
break;
}
switch (phba->link_state) {
case LPFC_CLEAR_LA:
/* CLEAR LA timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0228 CLEAR LA timeout\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0228 CLEAR LA timeout\n");
clrlaerr = 1;
break;
@ -2709,10 +2671,9 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
case LPFC_LINK_DOWN:
case LPFC_LINK_UP:
case LPFC_HBA_ERROR:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0230 Unexpected timeout, hba link "
"state x%x\n",
phba->brd_no, vport->vpi, phba->link_state);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0230 Unexpected timeout, hba link "
"state x%x\n", phba->link_state);
clrlaerr = 1;
break;
@ -2757,7 +2718,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* fdmi-on=2 (supporting RPA/hostname)
*/
if (phba->cfg_fdmi_on == 1)
if (vport->cfg_fdmi_on == 1)
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
else
mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
@ -2853,32 +2814,6 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
return ndlp;
}
void
lpfc_dev_loss_delay(unsigned long ptr)
{
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
unsigned long flags;
evtp = &ndlp->dev_loss_evt;
spin_lock_irqsave(&phba->hbalock, flags);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
evtp->evt_arg1 = ndlp;
evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
@ -2902,7 +2837,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return;
}
void
static void
lpfc_nlp_release(struct kref *kref)
{
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,

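The discovery and worker-thread changes above repeatedly replace direct walks of phba->port_list with a snapshot taken via lpfc_create_vport_work_array(). As a hedged illustration of that idiom, using only the helpers and the LPFC_MAX_VPORTS bound visible in this diff (lpfc_do_something_per_vport() is a placeholder, not a real lpfc symbol):

/* Sketch of the vport work-array idiom used throughout the hunks above. */
static void lpfc_for_each_vport_example(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);	/* snapshot of active vports */
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
			lpfc_do_something_per_vport(vports[i]);	/* placeholder per-vport work */
	lpfc_destroy_vport_work_array(vports);		/* release the snapshot */
}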
View file

@ -150,7 +150,11 @@ struct lpfc_sli_ct_request {
struct gff_acc {
uint8_t fbits[128];
} gff_acc;
#ifdef __BIG_ENDIAN_BITFIELD
#define FCP_TYPE_FEATURE_OFFSET 7
#else /* __LITTLE_ENDIAN_BITFIELD */
#define FCP_TYPE_FEATURE_OFFSET 4
#endif
struct rff {
uint32_t PortId;
uint8_t reserved[2];
@ -805,7 +809,7 @@ typedef struct _RNID { /* Structure is in Big Endian format */
} un;
} RNID;
typedef struct _RPS { /* Structure is in Big Endian format */
typedef struct _RPS { /* Structure is in Big Endian format */
union {
uint32_t portNum;
struct lpfc_name portName;
@ -823,7 +827,7 @@ typedef struct _RPS_RSP { /* Structure is in Big Endian format */
uint32_t crcCnt;
} RPS_RSP;
typedef struct _RPL { /* Structure is in Big Endian format */
typedef struct _RPL { /* Structure is in Big Endian format */
uint32_t maxsize;
uint32_t index;
} RPL;
@ -834,7 +838,7 @@ typedef struct _PORT_NUM_BLK {
struct lpfc_name portName;
} PORT_NUM_BLK;
typedef struct _RPL_RSP { /* Structure is in Big Endian format */
typedef struct _RPL_RSP { /* Structure is in Big Endian format */
uint32_t listLen;
uint32_t index;
PORT_NUM_BLK port_num_blk;
@ -2613,8 +2617,8 @@ typedef union {
LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */

View file

@ -53,8 +53,6 @@ static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
/************************************************************************/
/* */
/* lpfc_config_port_prep */
@ -107,10 +105,9 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d:0324 Config Port initialization "
"0324 Config Port initialization "
"error, mbxCmd x%x READ_NVPARM, "
"mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
mempool_free(pmb, phba->mbox_mem_pool);
return -ERESTART;
@ -128,9 +125,8 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0439 Adapter failed to init, mbxCmd x%x "
"0439 Adapter failed to init, mbxCmd x%x "
"READ_REV, mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
mempool_free( pmb, phba->mbox_mem_pool);
return -ERESTART;
@ -144,9 +140,8 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
if (mb->un.varRdRev.rr == 0) {
vp->rev.rBit = 0;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0440 Adapter failed to init, READ_REV has "
"missing revision information.\n",
phba->brd_no);
"0440 Adapter failed to init, READ_REV has "
"missing revision information.\n");
mempool_free(pmb, phba->mbox_mem_pool);
return -ERESTART;
}
@ -197,9 +192,8 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0441 VPD not present on adapter, "
"0441 VPD not present on adapter, "
"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
mb->un.varDmp.word_cnt = 0;
}
@ -253,9 +247,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0448 Adapter failed init, mbxCmd x%x "
"0448 Adapter failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
phba->link_state = LPFC_HBA_ERROR;
mp = (struct lpfc_dmabuf *) pmb->context1;
@ -312,9 +305,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0453 Adapter failed to init, mbxCmd x%x "
"0453 Adapter failed to init, mbxCmd x%x "
"READ_CONFIG, mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
phba->link_state = LPFC_HBA_ERROR;
mempool_free( pmb, phba->mbox_mem_pool);
@ -344,9 +336,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
&& !(phba->lmt & LMT_10Gb))) {
/* Reset link speed to auto */
lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
"%d:1302 Invalid speed for this board: "
"1302 Invalid speed for this board: "
"Reset link speed to auto: x%x\n",
phba->brd_no,
phba->cfg_link_speed);
phba->cfg_link_speed = LINK_SPEED_AUTO;
}
@ -402,9 +393,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_set_loopback_flag(phba);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0454 Adapter failed to init, mbxCmd x%x "
"0454 Adapter failed to init, mbxCmd x%x "
"INIT_LINK, mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
/* Clear all interrupt enable conditions */
@ -437,16 +427,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
/* Disable interrupts */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
list_for_each_entry(vport, &phba->port_list, listentry) {
lpfc_cleanup_discovery_resources(vport);
}
lpfc_cleanup_discovery_resources(phba->pport);
return 0;
}
@ -518,7 +503,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
!(phba->link_state == LPFC_HBA_ERROR) &&
!(phba->pport->fc_flag & FC_UNLOADING))
!(phba->pport->load_flag & FC_UNLOADING))
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
return;
@ -532,7 +517,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
if ((phba->link_state == LPFC_HBA_ERROR) ||
(phba->pport->fc_flag & FC_UNLOADING) ||
(phba->pport->load_flag & FC_UNLOADING) ||
(phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
@ -586,8 +571,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
* need to take the HBA offline.
*/
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0459 Adapter heartbeat failure, taking "
"this port offline.\n", phba->brd_no);
"0459 Adapter heartbeat failure, taking "
"this port offline.\n");
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
@ -615,9 +600,10 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_vport *port_iterator;
struct lpfc_vport **vports;
uint32_t event_data;
struct Scsi_Host *shost;
int i;
/* If the pci channel is offline, ignore possible errors,
* since we cannot communicate with the pci card anyway. */
@ -628,18 +614,21 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
phba->work_hs & HS_FFER5) {
/* Re-establishing Link */
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1301 Re-establishing Link "
"1301 Re-establishing Link "
"Data: x%x x%x x%x\n",
phba->brd_no, phba->work_hs,
phba->work_hs,
phba->work_status[0], phba->work_status[1]);
list_for_each_entry(port_iterator, &phba->port_list,
listentry) {
shost = lpfc_shost_from_vport(port_iterator);
spin_lock_irq(shost->host_lock);
port_iterator->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(shost->host_lock);
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0;
i < LPFC_MAX_VPORTS && vports[i] != NULL;
i++){
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(vports);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
@ -673,9 +662,9 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
* twice. This is the adapter hardware error path.
*/
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0457 Adapter Hardware Error "
"0457 Adapter Hardware Error "
"Data: x%x x%x x%x\n",
phba->brd_no, phba->work_hs,
phba->work_hs,
phba->work_status[0], phba->work_status[1]);
event_data = FC_REG_DUMP_EVENT;
@ -708,7 +697,6 @@ lpfc_handle_latt(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_vport *port_iterator;
LPFC_MBOXQ_t *pmb;
volatile uint32_t control;
struct lpfc_dmabuf *mp;
@ -729,8 +717,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
rc = -EIO;
/* Cleanup any outstanding ELS commands */
list_for_each_entry(port_iterator, &phba->port_list, listentry)
lpfc_els_flush_cmd(port_iterator);
lpfc_els_flush_all_cmd(phba);
psli->slistat.link_event++;
lpfc_read_la(phba, pmb, mp);
@ -773,8 +760,7 @@ lpfc_handle_latt_err_exit:
/* The other case is an error from issue_mbox */
if (rc == -ENOMEM)
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"%d:0300 READ_LA: no buffers\n",
phba->brd_no);
"0300 READ_LA: no buffers\n");
return;
}
@ -799,8 +785,7 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
/* Vital Product */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
phba->brd_no,
"0455 Vital Product Data: x%x x%x x%x x%x\n",
(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
(uint32_t) vpd[3]);
while (!finished && (index < (len - 4))) {
@ -1313,22 +1298,25 @@ static void
lpfc_establish_link_tmo(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
struct lpfc_vport *vport = phba->pport;
struct lpfc_vport **vports;
unsigned long iflag;
int i;
/* Re-establishing Link, timer expired */
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"%d:1300 Re-establishing Link, timer expired "
"1300 Re-establishing Link, timer expired "
"Data: x%x x%x\n",
phba->brd_no, vport->fc_flag,
vport->port_state);
list_for_each_entry(vport, &phba->port_list, listentry) {
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
spin_lock_irqsave(shost->host_lock, iflag);
vport->fc_flag &= ~FC_ESTABLISH_LINK;
spin_unlock_irqrestore(shost->host_lock, iflag);
}
phba->pport->fc_flag, phba->pport->port_state);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irqsave(shost->host_lock, iflag);
vports[i]->fc_flag &= ~FC_ESTABLISH_LINK;
spin_unlock_irqrestore(shost->host_lock, iflag);
}
lpfc_destroy_vport_work_array(vports);
}
void
@ -1343,12 +1331,9 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
static void
lpfc_stop_phba_timers(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
del_timer_sync(&phba->fcp_poll_timer);
del_timer_sync(&phba->fc_estabtmo);
list_for_each_entry(vport, &phba->port_list, listentry)
lpfc_stop_vport_timers(vport);
lpfc_stop_vport_timers(phba->pport);
del_timer_sync(&phba->sli.mbox_tmo);
del_timer_sync(&phba->fabric_block_timer);
phba->hb_outstanding = 0;
@ -1360,6 +1345,8 @@ int
lpfc_online(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_vport **vports;
int i;
if (!phba)
return 0;
@ -1368,8 +1355,7 @@ lpfc_online(struct lpfc_hba *phba)
return 0;
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"%d:0458 Bring Adapter online\n",
phba->brd_no);
"0458 Bring Adapter online\n");
lpfc_block_mgmt_io(phba);
@ -1383,14 +1369,18 @@ lpfc_online(struct lpfc_hba *phba)
return 1;
}
list_for_each_entry(vport, &phba->port_list, listentry) {
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_OFFLINE_MODE;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(vports);
lpfc_unblock_mgmt_io(phba);
return 0;
@ -1440,39 +1430,39 @@ lpfc_offline_prep(struct lpfc_hba * phba)
void
lpfc_offline(struct lpfc_hba *phba)
{
struct lpfc_vport *vport = phba->pport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_vport *port_iterator;
struct Scsi_Host *shost;
struct lpfc_vport **vports;
int i;
if (vport->fc_flag & FC_OFFLINE_MODE)
if (phba->pport->fc_flag & FC_OFFLINE_MODE)
return;
/* stop all timers associated with this hba */
lpfc_stop_phba_timers(phba);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
port_iterator->work_port_events = 0;
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
lpfc_stop_vport_timers(vports[i]);
lpfc_destroy_vport_work_array(vports);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"%d:0460 Bring Adapter offline\n",
phba->brd_no);
"0460 Bring Adapter offline\n");
/* Bring down the SLI Layer and cleanup. The HBA is offline
now. */
lpfc_sli_hba_down(phba);
spin_lock_irq(&phba->hbalock);
phba->work_ha = 0;
vport->fc_flag |= FC_OFFLINE_MODE;
spin_unlock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
shost = lpfc_shost_from_vport(port_iterator);
lpfc_cleanup(port_iterator);
spin_lock_irq(shost->host_lock);
vport->work_port_events = 0;
vport->fc_flag |= FC_OFFLINE_MODE;
spin_unlock_irq(shost->host_lock);
}
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
lpfc_cleanup(vports[i]);
spin_lock_irq(shost->host_lock);
vports[i]->work_port_events = 0;
vports[i]->fc_flag |= FC_OFFLINE_MODE;
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(vports);
}
/******************************************************************************
@ -1509,15 +1499,19 @@ lpfc_scsi_free(struct lpfc_hba *phba)
return 0;
}
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
struct lpfc_vport *vport;
struct Scsi_Host *shost;
int error = 0;
shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
if (dev != &phba->pcidev->dev)
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
else
shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
if (!shost)
goto out;
@ -1527,9 +1521,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
lpfc_get_vport_cfgparam(vport);
shost->unique_id = instance;
shost->max_id = LPFC_MAX_TARGET;
shost->max_lun = phba->cfg_max_luns;
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
shost->max_cmd_len = 16;
/*
@ -1538,7 +1533,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
* max xri value determined in hba setup.
*/
shost->can_queue = phba->cfg_hba_queue_depth - 10;
if (fc_vport != NULL) {
if (dev != &phba->pcidev->dev) {
shost->transportt = lpfc_vport_transport_template;
vport->port_type = LPFC_NPIV_PORT;
} else {
@ -1562,15 +1557,13 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
if (fc_vport != NULL) {
error = scsi_add_host(shost, &fc_vport->dev);
} else {
error = scsi_add_host(shost, &phba->pcidev->dev);
}
error = scsi_add_host(shost, dev);
if (error)
goto out_put_shost;
spin_lock_irq(&phba->hbalock);
list_add_tail(&vport->listentry, &phba->port_list);
spin_unlock_irq(&phba->hbalock);
return vport;
out_put_shost:
@ -1625,23 +1618,21 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
spin_lock_irq(shost->host_lock);
if (vport->fc_flag & FC_UNLOADING) {
if (vport->load_flag & FC_UNLOADING) {
stat = 1;
goto finished;
}
if (time >= 30 * HZ) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0461 Scanning longer than 30 "
"seconds. Continuing initialization\n",
phba->brd_no);
"0461 Scanning longer than 30 "
"seconds. Continuing initialization\n");
stat = 1;
goto finished;
}
if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0465 Link down longer than 15 "
"seconds. Continuing initialization\n",
phba->brd_no);
"0465 Link down longer than 15 "
"seconds. Continuing initialization\n");
stat = 1;
goto finished;
}
@ -1704,7 +1695,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
fc_host_max_npiv_vports(shost) = phba->max_vpi;
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_LOADING;
vport->load_flag &= ~FC_LOADING;
spin_unlock_irq(shost->host_lock);
}
@ -1716,9 +1707,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
struct lpfc_sli *psli;
struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
struct Scsi_Host *shost = NULL;
void *ptr;
unsigned long bar0map_len, bar2map_len;
int error = -ENODEV;
int i;
int i, hbq_count;
uint16_t iotag;
if (pci_enable_device(pdev))
@ -1739,7 +1731,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_phba;
INIT_LIST_HEAD(&phba->port_list);
INIT_LIST_HEAD(&phba->hbq_buffer_list);
/*
* Get all the module params for configuring this host and then
* establish the host.
@ -1817,6 +1808,17 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (!phba->hbqslimp.virt)
goto out_free_slim;
hbq_count = lpfc_sli_hbq_count();
ptr = phba->hbqslimp.virt;
for (i = 0; i < hbq_count; ++i) {
phba->hbqs[i].hbq_virt = ptr;
INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
ptr += (lpfc_hbq_defs[i]->entry_count *
sizeof(struct lpfc_hbq_entry));
}
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
/* Initialize the SLI Layer to run with lpfc HBAs. */
@ -1880,7 +1882,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Initialize list of fabric iocbs */
INIT_LIST_HEAD(&phba->fabric_iocb_list);
vport = lpfc_create_port(phba, phba->brd_no, NULL);
vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
if (!vport)
goto out_kthread_stop;
@ -1892,18 +1894,19 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (phba->cfg_use_msi) {
error = pci_enable_msi(phba->pcidev);
if (error)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
"Enable MSI failed, continuing with "
"IRQ\n", phba->brd_no);
if (!error)
phba->using_msi = 1;
else
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0452 Enable MSI failed, continuing "
"with IRQ\n");
}
error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
LPFC_DRIVER_NAME, phba);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0451 Enable interrupt handler failed\n",
phba->brd_no);
"0451 Enable interrupt handler failed\n");
goto out_disable_msi;
}
@ -1940,14 +1943,15 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
out_remove_device:
lpfc_free_sysfs_attr(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_UNLOADING;
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(shost->host_lock);
out_free_irq:
lpfc_stop_phba_timers(phba);
phba->pport->work_port_events = 0;
free_irq(phba->pcidev->irq, phba);
out_disable_msi:
pci_disable_msi(phba->pcidev);
if (phba->using_msi)
pci_disable_msi(phba->pcidev);
destroy_port(vport);
out_kthread_stop:
kthread_stop(phba->worker_thread);
@ -1989,16 +1993,15 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_vport *port_iterator;
list_for_each_entry(port_iterator, &phba->port_list, listentry)
port_iterator->load_flag |= FC_UNLOADING;
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
kfree(vport->vname);
lpfc_free_sysfs_attr(vport);
fc_remove_host(shost);
scsi_remove_host(shost);
/*
* Bring down the SLI Layer. This step disable all interrupts,
* clears the rings, discards all mailbox commands, and resets
@ -2012,7 +2015,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->hbalock);
lpfc_debugfs_terminate(vport);
lpfc_cleanup(vport);
@ -2020,7 +2022,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
/* Release the irq reservation */
free_irq(phba->pcidev->irq, phba);
pci_disable_msi(phba->pcidev);
if (phba->using_msi)
pci_disable_msi(phba->pcidev);
pci_set_drvdata(pdev, NULL);
scsi_host_put(shost);
@ -2062,8 +2065,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
@ -2079,6 +2082,11 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring);
/* Release the irq reservation */
free_irq(phba->pcidev->irq, phba);
if (phba->using_msi)
pci_disable_msi(phba->pcidev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@ -2091,8 +2099,8 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
@ -2106,9 +2114,9 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
/* Re-establishing Link */
spin_lock_irq(host->host_lock);
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag |= FC_ESTABLISH_LINK;
spin_unlock_irq(host->host_lock);
spin_unlock_irq(shost->host_lock);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
@ -2131,8 +2139,8 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
*/
static void lpfc_io_resume(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
if (lpfc_online(phba) == 0) {
mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);

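The probe, remove, and PCI error paths above now remember whether MSI was actually enabled (phba->using_msi) and only then call pci_disable_msi(). A condensed, non-verbatim sketch of that pairing:

/* Enable side (probe), as introduced above. */
if (phba->cfg_use_msi) {
	if (pci_enable_msi(phba->pcidev) == 0)
		phba->using_msi = 1;	/* record that MSI is really active */
	else
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0452 Enable MSI failed, continuing with IRQ\n");
}

/* Teardown side (remove/error paths): disable only what was set up. */
if (phba->using_msi)
	pci_disable_msi(phba->pcidev);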
View file

@ -33,6 +33,12 @@
#define LOG_VPORT 0x4000 /* NPIV events */
#define LOG_ALL_MSG 0xffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
{ if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
fmt, (vport)->phba->brd_no, vport->vpi, ##arg); }
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
{ if (((mask) &(phba)->cfg_log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((phba)->pcidev)->dev, fmt, ##arg); }
{ if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
fmt, phba->brd_no, ##arg); }
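For reference, this is how a converted call site uses the new vport-aware macro (mirroring one of the conversions earlier in this diff): the macro itself prepends "brd_no:(vpi):", so the format string begins directly with the four-digit message ID and no longer passes phba->brd_no or vport->vpi.

/* Call site after conversion; the "%d (%d):" prefix and its arguments are gone. */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		 "0260 Register NameServer error: 0x%x\n",
		 mb->mbxStatus);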

View file

@ -275,11 +275,8 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
kfree(mp);
mb->mbxCommand = MBX_READ_SPARM64;
/* READ_SPARAM: no buffers */
lpfc_printf_log(phba,
KERN_WARNING,
LOG_MBOX,
"%d:0301 READ_SPARAM: no buffers\n",
phba->brd_no);
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0301 READ_SPARAM: no buffers\n");
return (1);
}
INIT_LIST_HEAD(&mp->list);
@ -378,9 +375,8 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"%d (%d):0302 REG_LOGIN: no buffers, DID x%x, "
"flag x%x\n",
phba->brd_no, vpi, did, flag);
"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
"flag x%x\n", vpi, did, flag);
return (1);
}
INIT_LIST_HEAD(&mp->list);
@ -564,7 +560,8 @@ lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
}
void
lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
struct lpfc_hbq_init *hbq_desc,
uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
int i;
@ -572,6 +569,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
hbqmb->hbqId = id;
hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
hbqmb->recvNotify = hbq_desc->rn; /* Receive
* Notification */
@ -691,8 +689,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */
if (phba->max_vpi && phba->cfg_npiv_enable &&
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
if (phba->max_vpi && phba->cfg_enable_npiv &&
phba->vpd.sli3Feat.cmv) {
mb->un.varCfgPort.max_vpi = phba->max_vpi;
mb->un.varCfgPort.cmv = 1;

View file

@ -231,21 +231,34 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
void *
lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
void *ret;
ret = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_ATOMIC, handle);
return ret;
struct hbq_dmabuf *hbqbp;
hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
if (!hbqbp)
return NULL;
hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
&hbqbp->dbuf.phys);
if (!hbqbp->dbuf.virt) {
kfree(hbqbp);
return NULL;
}
hbqbp->size = LPFC_BPL_SIZE;
return hbqbp;
}
void
lpfc_hbq_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
pci_pool_free(phba->lpfc_hbq_pool, virt, dma);
pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
kfree(hbqbp);
return;
}
/* This is ONLY called for the LPFC_ELS_HBQ */
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
@ -254,9 +267,8 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
if (hbq_entry->tag == -1) {
lpfc_hbq_free(phba, hbq_entry->dbuf.virt,
hbq_entry->dbuf.phys);
kfree(hbq_entry);
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
(phba, hbq_entry);
} else {
lpfc_sli_free_hbq(phba, hbq_entry);
}

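The memory-pool changes above replace the generic lpfc_hbq_alloc()/lpfc_hbq_free() pair with per-HBQ callbacks; lpfc_pci_probe_one() earlier in this diff wires lpfc_els_hbq_alloc/lpfc_els_hbq_free into phba->hbqs[LPFC_ELS_HBQ]. A minimal sketch of calling through those pointers, assuming nothing beyond the fields shown in this diff:

/* Illustrative only, not a verbatim excerpt from the driver. */
struct hbq_dmabuf *hbqbp;

hbqbp = (phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer)(phba);	/* -> lpfc_els_hbq_alloc() */
if (hbqbp) {
	/* ... post hbqbp->dbuf.phys to the ELS HBQ, or on the error path: ... */
	(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)(phba, hbqbp);	/* -> lpfc_els_hbq_free() */
}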
View file

@ -133,15 +133,15 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
return 1;
bad_service_param:
lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0207 Device %x "
"(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
"invalid service parameters. Ignoring device.\n",
vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0207 Device %x "
"(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
"invalid service parameters. Ignoring device.\n",
ndlp->nlp_DID,
sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
return 0;
}
@ -194,11 +194,11 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
IOCB_t *cmd;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0205 Abort outstanding I/O on NPort x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
"0205 Abort outstanding I/O on NPort x%x "
"Data: x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
lpfc_fabric_abort_nport(ndlp);
@ -298,13 +298,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
icmd = &cmdiocb->iocb;
/* PLOGI chkparm OK */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
ndlp->nlp_rpi);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
ndlp->nlp_rpi);
if (phba->cfg_fcp_class == 2 && sp->cls2.classValid)
if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
ndlp->nlp_fcp_info |= CLASS2;
else
ndlp->nlp_fcp_info |= CLASS3;
@ -330,7 +329,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
return 1;
}
@ -392,7 +391,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if ((vport->port_type == LPFC_NPIV_PORT &&
phba->cfg_vport_restrict_login)) {
vport->cfg_restrict_login)) {
/* In order to preserve RPIs, we want to cleanup
* the default RPI the firmware created to rcv
@ -408,7 +407,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp, mbox);
return 1;
}
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
return 1;
out:
@ -452,7 +451,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
} else {
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
NULL, 0);
NULL);
}
return 1;
}
@ -489,9 +488,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_LOGO_ACC;
spin_unlock_irq(shost->host_lock);
if (els_cmd == ELS_CMD_PRLO)
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if (!(ndlp->nlp_type & NLP_FABRIC) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
@ -564,10 +563,14 @@ static uint32_t
lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
if (!ndlp->nlp_rpi) {
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
return 0;
}
/* Check config parameter use-adisc or FCP-2 */
if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
@ -583,12 +586,11 @@ static uint32_t
lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0253 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
vport->phba->brd_no, vport->vpi,
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
ndlp->nlp_flag);
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0253 Illegal State Transition: node x%x "
"event x%x, state x%x Data: x%x x%x\n",
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
ndlp->nlp_flag);
return ndlp->nlp_state;
}
@ -630,7 +632,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
spin_unlock_irq(shost->host_lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
@ -726,7 +728,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_abort(phba, ndlp);
if (evt == NLP_EVT_RCV_LOGO) {
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
} else {
lpfc_issue_els_logo(vport, ndlp, 0);
}
@ -778,16 +780,12 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
goto out;
/* PLOGI chkparm OK */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d (%d):0121 PLOGI chkparm OK "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
if (phba->cfg_fcp_class == 2 && (sp->cls2.classValid))
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
ndlp->nlp_fcp_info |= CLASS2;
else
ndlp->nlp_fcp_info |= CLASS3;
@ -806,10 +804,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0133 PLOGI: no memory for reg_login "
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0133 PLOGI: no memory for reg_login "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
goto out;
@ -844,30 +841,27 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
kfree(mp);
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0134 PLOGI: cannot issue reg_login "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0134 PLOGI: cannot issue reg_login "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
} else {
mempool_free(mbox, phba->mbox_mem_pool);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0135 PLOGI: cannot format reg_login "
"Data: x%x x%x x%x x%x\n",
phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0135 PLOGI: cannot format reg_login "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
}
out:
if (ndlp->nlp_DID == NameServer_DID) {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0261 Cannot Register NameServer login\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0261 Cannot Register NameServer login\n");
}
/* Free this node since the driver cannot login or has the wrong
@ -1178,7 +1172,7 @@ lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
@ -1189,19 +1183,15 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->mb;
uint32_t did = mb->un.varWords[1];
if (mb->mbxStatus) {
/* RegLogin failed */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"%d (%d):0246 RegLogin failed Data: x%x x%x "
"x%x\n",
phba->brd_no, vport->vpi,
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
"0246 RegLogin failed Data: x%x x%x x%x\n",
did, mb->mbxStatus, vport->port_state);
/*
* If RegLogin failed due to lack of HBA resources do not
* retry discovery.
@ -1337,7 +1327,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
@ -1358,7 +1348,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
if ((vport->port_type == LPFC_NPIV_PORT) &&
phba->cfg_vport_restrict_login) {
vport->cfg_restrict_login) {
goto out;
}
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
@ -1380,7 +1370,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
phba->cfg_vport_restrict_login) {
vport->cfg_restrict_login) {
out:
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_TARGET_REMOVE;
@ -1529,7 +1519,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
return ndlp->nlp_state;
}
@ -1600,8 +1590,8 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* flush the target */
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
@ -1734,7 +1724,7 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_LOGO_ACC;
spin_unlock_irq(shost->host_lock);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
@ -2047,7 +2037,6 @@ int
lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
struct lpfc_hba *phba = vport->phba;
uint32_t cur_state, rc;
uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
@ -2056,11 +2045,10 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
cur_state = ndlp->nlp_state;
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0211 DSM in event x%x on NPort x%x in "
"state %d Data: x%x\n",
phba->brd_no, vport->vpi,
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0211 DSM in event x%x on NPort x%x in "
"state %d Data: x%x\n",
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM in: evt:%d ste:%d did:x%x",
@ -2070,11 +2058,9 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
rc = (func) (vport, ndlp, arg, evt);
/* DSM out state <rc> on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
"%d (%d):0212 DSM out state %d on NPort x%x "
"Data: x%x\n",
phba->brd_no, vport->vpi,
rc, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0212 DSM out state %d on NPort x%x Data: x%x\n",
rc, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",


@ -84,22 +84,21 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
* SCSI command completion.
*/
static inline void
lpfc_rampup_queue_depth(struct lpfc_hba *phba,
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
struct scsi_device *sdev)
{
unsigned long flags;
struct lpfc_hba *phba = vport->phba;
atomic_inc(&phba->num_cmd_success);
if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
return;
spin_lock_irqsave(&phba->hbalock, flags);
if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
phba->last_ramp_up_time = jiffies;
spin_unlock_irqrestore(&phba->hbalock, flags);
@ -119,43 +118,40 @@ lpfc_rampup_queue_depth(struct lpfc_hba *phba,
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
struct Scsi_Host *host;
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct scsi_device *sdev;
unsigned long new_queue_depth;
unsigned long num_rsrc_err, num_cmd_success;
int i;
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
num_cmd_success = atomic_read(&phba->num_cmd_success);
spin_lock_irq(&phba->hbalock);
list_for_each_entry(vport, &phba->port_list, listentry) {
host = lpfc_shost_from_vport(vport);
if (!scsi_host_get(host))
continue;
spin_unlock_irq(&phba->hbalock);
shost_for_each_device(sdev, host) {
new_queue_depth = sdev->queue_depth * num_rsrc_err /
(num_rsrc_err + num_cmd_success);
if (!new_queue_depth)
new_queue_depth = sdev->queue_depth - 1;
else
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
new_queue_depth =
sdev->queue_depth - new_queue_depth;
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
new_queue_depth);
else
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
new_queue_depth);
sdev->queue_depth * num_rsrc_err /
(num_rsrc_err + num_cmd_success);
if (!new_queue_depth)
new_queue_depth = sdev->queue_depth - 1;
else
new_queue_depth = sdev->queue_depth -
new_queue_depth;
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev,
MSG_ORDERED_TAG,
new_queue_depth);
else
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
new_queue_depth);
}
}
spin_lock_irq(&phba->hbalock);
scsi_host_put(host);
}
spin_unlock_irq(&phba->hbalock);
lpfc_destroy_vport_work_array(vports);
atomic_set(&phba->num_rsrc_err, 0);
atomic_set(&phba->num_cmd_success, 0);
}
@ -163,29 +159,27 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
struct Scsi_Host *host;
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct scsi_device *sdev;
int i;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(vport, &phba->port_list, listentry) {
host = lpfc_shost_from_vport(vport);
if (!scsi_host_get(host))
continue;
spin_unlock_irq(&phba->hbalock);
shost_for_each_device(sdev, host) {
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
sdev->queue_depth+1);
else
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
sdev->queue_depth+1);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
shost_for_each_device(sdev, shost) {
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev,
MSG_ORDERED_TAG,
sdev->queue_depth+1);
else
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
sdev->queue_depth+1);
}
}
spin_lock_irq(&phba->hbalock);
scsi_host_put(host);
}
spin_unlock_irq(&phba->hbalock);
lpfc_destroy_vport_work_array(vports);
atomic_set(&phba->num_rsrc_err, 0);
atomic_set(&phba->num_cmd_success, 0);
}
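
Note: both ramp handlers now snapshot the vports with lpfc_create_vport_work_array() instead of walking phba->port_list while repeatedly dropping and re-taking hbalock. The helper's body is not part of this hunk; a minimal sketch of the contract it implies (an array of referenced vports, NULL-terminated within LPFC_MAX_VPORTS entries and released later by lpfc_destroy_vport_work_array()) might look like:

/* Sketch under assumptions -- the real helper in the patch may differ:
 * capture the current vports under hbalock, holding a Scsi_Host
 * reference on each, so callers can iterate and sleep lock-free.
 */
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport, **vports;
	int index = 0;

	vports = kzalloc(LPFC_MAX_VPORTS * sizeof(struct lpfc_vport *),
			 GFP_KERNEL);
	if (vports == NULL)
		return NULL;
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (index >= LPFC_MAX_VPORTS)
			break;
		if (!scsi_host_get(lpfc_shost_from_vport(vport)))
			continue;
		vports[index++] = vport;
	}
	spin_unlock_irq(&phba->hbalock);
	return vports;
}
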
@ -411,9 +405,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
struct lpfc_hba *phba = vport->phba;
uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
uint32_t vpi = vport->vpi;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
@ -445,15 +437,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (!scsi_status && (resp_info & RESID_UNDER))
logit = LOG_FCP;
lpfc_printf_log(phba, KERN_WARNING, logit,
"%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
be32_to_cpu(fcprsp->rspResId),
be32_to_cpu(fcprsp->rspSnsLen),
be32_to_cpu(fcprsp->rspRspLen),
fcprsp->rspInfo3);
lpfc_printf_vlog(vport, KERN_WARNING, logit,
"0730 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
be32_to_cpu(fcprsp->rspResId),
be32_to_cpu(fcprsp->rspSnsLen),
be32_to_cpu(fcprsp->rspRspLen),
fcprsp->rspInfo3);
if (resp_info & RSP_LEN_VALID) {
rsplen = be32_to_cpu(fcprsp->rspRspLen);
@ -468,12 +460,12 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (resp_info & RESID_UNDER) {
scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d (%d):0716 FCP Read Underrun, expected %d, "
"residual %d Data: x%x x%x x%x\n",
phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
cmnd->underflow);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0716 FCP Read Underrun, expected %d, "
"residual %d Data: x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
cmnd->underflow);
/*
* If there is an under run check if under run reported by
@ -483,14 +475,13 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
fcpi_parm &&
(scsi_get_resid(cmnd) != fcpi_parm)) {
lpfc_printf_log(phba, KERN_WARNING,
LOG_FCP | LOG_FCP_ERROR,
"%d (%d):0735 FCP Read Check Error "
"and Underrun Data: x%x x%x x%x x%x\n",
phba->brd_no, vpi,
be32_to_cpu(fcpcmd->fcpDl),
scsi_get_resid(cmnd), fcpi_parm,
cmnd->cmnd[0]);
lpfc_printf_vlog(vport, KERN_WARNING,
LOG_FCP | LOG_FCP_ERROR,
"0735 FCP Read Check Error "
"and Underrun Data: x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
scsi_get_resid(cmnd), fcpi_parm,
cmnd->cmnd[0]);
scsi_set_resid(cmnd, scsi_bufflen(cmnd));
host_status = DID_ERROR;
}
@ -504,21 +495,19 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
(scsi_status == SAM_STAT_GOOD) &&
(scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
< cmnd->underflow)) {
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d (%d):0717 FCP command x%x residual "
"underrun converted to error "
"Data: x%x x%x x%x\n",
phba->brd_no, vpi, cmnd->cmnd[0],
scsi_bufflen(cmnd),
scsi_get_resid(cmnd), cmnd->underflow);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0717 FCP command x%x residual "
"underrun converted to error "
"Data: x%x x%x x%x\n",
cmnd->cmnd[0], scsi_bufflen(cmnd),
scsi_get_resid(cmnd), cmnd->underflow);
host_status = DID_ERROR;
}
} else if (resp_info & RESID_OVER) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d (%d):0720 FCP command x%x residual "
"overrun error. Data: x%x x%x \n",
phba->brd_no, vpi, cmnd->cmnd[0],
scsi_bufflen(cmnd), scsi_get_resid(cmnd));
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0720 FCP command x%x residual overrun error. "
"Data: x%x x%x \n", cmnd->cmnd[0],
scsi_bufflen(cmnd), scsi_get_resid(cmnd));
host_status = DID_ERROR;
/*
@ -527,13 +516,12 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
*/
} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
"%d (%d):0734 FCP Read Check Error Data: "
"x%x x%x x%x x%x\n",
phba->brd_no, vpi,
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
fcpi_parm, cmnd->cmnd[0]);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
"0734 FCP Read Check Error Data: "
"x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
fcpi_parm, cmnd->cmnd[0]);
host_status = DID_ERROR;
scsi_set_resid(cmnd, scsi_bufflen(cmnd));
}
@ -552,9 +540,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
? lpfc_cmd->cur_iocbq.vport->vpi
: 0);
int result;
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
@ -569,15 +554,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
else if (lpfc_cmd->status >= IOSTAT_CNT)
lpfc_cmd->status = IOSTAT_DEFAULT;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d (%d):0729 FCP cmd x%x failed <%d/%d> "
"status: x%x result: x%x Data: x%x x%x\n",
phba->brd_no, vpi, cmd->cmnd[0],
cmd->device ? cmd->device->id : 0xffff,
cmd->device ? cmd->device->lun : 0xffff,
lpfc_cmd->status, lpfc_cmd->result,
pIocbOut->iocb.ulpContext,
lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0729 FCP cmd x%x failed <%d/%d> "
"status: x%x result: x%x Data: x%x x%x\n",
cmd->cmnd[0],
cmd->device ? cmd->device->id : 0xffff,
cmd->device ? cmd->device->lun : 0xffff,
lpfc_cmd->status, lpfc_cmd->result,
pIocbOut->iocb.ulpContext,
lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
switch (lpfc_cmd->status) {
case IOSTAT_FCP_RSP_ERROR:
@ -610,13 +595,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d (%d):0710 Iodone <%d/%d> cmd %p, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
phba->brd_no, vpi, cmd->device->id,
cmd->device->lun, cmd, cmd->result,
*lp, *(lp + 3), cmd->retries,
scsi_get_resid(cmd));
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0710 Iodone <%d/%d> cmd %p, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3), cmd->retries,
scsi_get_resid(cmd));
}
result = cmd->result;
@ -631,16 +615,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (!result)
lpfc_rampup_queue_depth(phba, sdev);
lpfc_rampup_queue_depth(vport, sdev);
if (!result && pnode != NULL &&
((jiffies - pnode->last_ramp_up_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
((jiffies - pnode->last_q_full_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
(phba->cfg_lun_queue_depth > sdev->queue_depth)) {
(vport->cfg_lun_queue_depth > sdev->queue_depth)) {
shost_for_each_device(tmp_sdev, sdev->host) {
if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
if (tmp_sdev->id != sdev->id)
continue;
if (tmp_sdev->ordered_tags)
@ -680,10 +664,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
depth = sdev->host->cmd_per_lun;
if (depth) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d (%d):0711 detected queue full - "
"lun queue depth adjusted to %d.\n",
phba->brd_no, vpi, depth);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0711 detected queue full - lun queue "
"depth adjusted to %d.\n", depth);
}
}
@ -853,12 +836,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
return FAILED;
/* Issue Target Reset to TGT <num> */
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d (%d):0702 Issue Target Reset to TGT %d "
"Data: x%x x%x\n",
phba->brd_no, vport->vpi, tgt_id,
rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
ret = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
@ -965,10 +945,9 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (lpfc_cmd == NULL) {
lpfc_adjust_queue_depth(phba);
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d (%d):0707 driver's buffer pool is empty, "
"IO busied\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0707 driver's buffer pool is empty, "
"IO busied\n");
goto out_host_busy;
}
@ -1103,28 +1082,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
if (++loop_count
> (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
> (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
break;
}
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0748 abort handler timed out waiting "
"for abort to complete: ret %#x, ID %d, "
"LUN %d, snum %#lx\n",
phba->brd_no, vport->vpi, ret,
cmnd->device->id, cmnd->device->lun,
cmnd->serial_number);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0748 abort handler timed out waiting "
"for abort to complete: ret %#x, ID %d, "
"LUN %d, snum %#lx\n",
ret, cmnd->device->id, cmnd->device->lun,
cmnd->serial_number);
}
out:
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d (%d):0749 SCSI Layer I/O Abort Request "
"Status x%x ID %d LUN %d snum %#lx\n",
phba->brd_no, vport->vpi, ret, cmnd->device->id,
cmnd->device->lun, cmnd->serial_number);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0749 SCSI Layer I/O Abort Request Status x%x ID %d "
"LUN %d snum %#lx\n", ret, cmnd->device->id,
cmnd->device->lun, cmnd->serial_number);
return ret;
}
@ -1158,12 +1134,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
loopcnt++;
rdata = cmnd->device->hostdata;
if (!rdata ||
(loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0721 LUN Reset rport "
"failure: cnt x%x rdata x%p\n",
phba->brd_no, vport->vpi,
loopcnt, rdata);
(loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0721 LUN Reset rport "
"failure: cnt x%x rdata x%p\n",
loopcnt, rdata);
goto out;
}
pnode = rdata->pnode;
@ -1193,12 +1168,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
if (iocbqrsp == NULL)
goto out_free_scsi_buf;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
"%d (%d):0703 Issue target reset to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n",
phba->brd_no, vport->vpi, cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0703 Issue target reset to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n", cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
iocb_status = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
@ -1221,33 +1194,28 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
* Unfortunately, some targets do not abide by this forcing the driver
* to double check.
*/
cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
LPFC_CTX_LUN);
if (cnt)
lpfc_sli_abort_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring],
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
0, LPFC_CTX_LUN);
LPFC_CTX_LUN);
loopcnt = 0;
while(cnt) {
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
if (++loopcnt
> (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
> (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
break;
cnt = lpfc_sli_sum_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
LPFC_CTX_LUN);
cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
cmnd->device->lun, LPFC_CTX_LUN);
}
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0719 device reset I/O flush failure: "
"cnt x%x\n",
phba->brd_no, vport->vpi, cnt);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0719 device reset I/O flush failure: "
"cnt x%x\n", cnt);
ret = FAILED;
}
@ -1255,12 +1223,11 @@ out_free_scsi_buf:
if (iocb_status != IOCB_TIMEDOUT) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0713 SCSI layer issued device reset (%d, %d) "
"return x%x status x%x result x%x\n",
phba->brd_no, vport->vpi, cmnd->device->id,
cmnd->device->lun, ret, cmd_status, cmd_result);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0713 SCSI layer issued device reset (%d, %d) "
"return x%x status x%x result x%x\n",
cmnd->device->id, cmnd->device->lun, ret,
cmd_status, cmd_result);
out:
return ret;
}
@ -1311,10 +1278,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
cmnd->device->lun,
ndlp->rport->dd_data);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0700 Bus Reset on target %d "
"failed\n",
phba->brd_no, vport->vpi, i);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0700 Bus Reset on target %d failed\n",
i);
err_count++;
break;
}
@ -1333,35 +1299,30 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
* the targets. Unfortunately, some targets do not abide by
* this forcing the driver to double check.
*/
cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
if (cnt)
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, 0, LPFC_CTX_HOST);
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
loopcnt = 0;
while(cnt) {
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
if (++loopcnt
> (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
> (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
break;
cnt = lpfc_sli_sum_iocb(phba,
&phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
}
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0715 Bus Reset I/O flush failure: "
"cnt x%x left x%x\n",
phba->brd_no, vport->vpi, cnt, i);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0715 Bus Reset I/O flush failure: "
"cnt x%x left x%x\n", cnt, i);
ret = FAILED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
phba->brd_no, vport->vpi, ret);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
out:
return ret;
}
@ -1390,36 +1351,32 @@ lpfc_slave_alloc(struct scsi_device *sdev)
* extra. This list of scsi bufs exists for the lifetime of the driver.
*/
total = phba->total_scsi_bufs;
num_to_alloc = phba->cfg_lun_queue_depth + 2;
num_to_alloc = vport->cfg_lun_queue_depth + 2;
/* Allow some exchanges to be available always to complete discovery */
if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d (%d):0704 At limitation of %d "
"preallocated command buffers\n",
phba->brd_no, vport->vpi, total);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0704 At limitation of %d preallocated "
"command buffers\n", total);
return 0;
/* Allow some exchanges to be available always to complete discovery */
} else if (total + num_to_alloc >
phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
"%d (%d):0705 Allocation request of %d "
"command buffers will exceed max of %d. "
"Reducing allocation request to %d.\n",
phba->brd_no, vport->vpi, num_to_alloc,
phba->cfg_hba_queue_depth,
(phba->cfg_hba_queue_depth - total));
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0705 Allocation request of %d "
"command buffers will exceed max of %d. "
"Reducing allocation request to %d.\n",
num_to_alloc, phba->cfg_hba_queue_depth,
(phba->cfg_hba_queue_depth - total));
num_to_alloc = phba->cfg_hba_queue_depth - total;
}
for (i = 0; i < num_to_alloc; i++) {
scsi_buf = lpfc_new_scsi_buf(vport);
if (!scsi_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d (%d):0706 Failed to allocate "
"command buffer\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0706 Failed to allocate "
"command buffer\n");
break;
}
@ -1439,9 +1396,9 @@ lpfc_slave_configure(struct scsi_device *sdev)
struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
if (sdev->tagged_supported)
scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
else
scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);
scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
/*
* Initialize the fc transport attributes for the target
@ -1449,7 +1406,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
* target pointer is stored in the starget_data for the
* driver's sysfs entry point functions.
*/
rport->dev_loss_tmo = phba->cfg_devloss_tmo;
rport->dev_loss_tmo = vport->cfg_devloss_tmo;
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_poll_fcp_ring(phba);
@ -1487,3 +1444,23 @@ struct scsi_host_template lpfc_template = {
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
};
struct scsi_host_template lpfc_vport_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler= lpfc_device_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
};


@ -49,9 +49,8 @@
lpfc_printf_log(phba, \
KERN_INFO, \
LOG_MBOX | LOG_SLI, \
"%d (%d):0311 Mailbox command x%x cannot " \
"(%d):0311 Mailbox command x%x cannot " \
"issue Data: x%x x%x x%x\n", \
phba->brd_no, \
pmbox->vport ? pmbox->vport->vpi : 0, \
pmbox->mb.mbxCommand, \
phba->pport->port_state, \
@ -231,13 +230,11 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0446 Adapter failed to init (%d), "
"0446 Adapter failed to init (%d), "
"mbxCmd x%x CFG_RING, mbxStatus x%x, "
"ring %d\n",
phba->brd_no, rc,
pmbox->mbxCommand,
pmbox->mbxStatus,
i);
rc, pmbox->mbxCommand,
pmbox->mbxStatus, i);
phba->link_state = LPFC_HBA_ERROR;
ret = -ENXIO;
break;
@ -296,9 +293,9 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
if (unlikely(pring->local_getidx >= max_cmd_idx)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0315 Ring %d issue: portCmdGet %d "
"0315 Ring %d issue: portCmdGet %d "
"is bigger then cmd ring %d\n",
phba->brd_no, pring->ringno,
pring->ringno,
pring->local_getidx, max_cmd_idx);
phba->link_state = LPFC_HBA_ERROR;
@ -366,7 +363,7 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (psli->iocbq_lookup)
memcpy(new_arr, old_arr,
((psli->last_iotag + 1) *
sizeof (struct lpfc_iocbq *)));
sizeof (struct lpfc_iocbq *)));
psli->iocbq_lookup = new_arr;
psli->iocbq_lookup_len = new_len;
psli->last_iotag = iotag;
@ -380,8 +377,8 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
"%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
phba->brd_no, psli->last_iotag);
"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
psli->last_iotag);
return 0;
}
@ -395,6 +392,14 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_debugfs_slow_ring_trc(phba,
"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
*(((uint32_t *) &nextiocb->iocb) + 4),
*(((uint32_t *) &nextiocb->iocb) + 6),
*(((uint32_t *) &nextiocb->iocb) + 7));
}
/*
* Issue iocb command to adapter
*/
@ -527,10 +532,9 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI | LOG_VPORT,
"%d:1802 HBQ %d: local_hbqGetIdx "
"1802 HBQ %d: local_hbqGetIdx "
"%u is > than hbqp->entry_count %u\n",
phba->brd_no, hbqno,
hbqp->local_hbqGetIdx,
hbqno, hbqp->local_hbqGetIdx,
hbqp->entry_count);
phba->link_state = LPFC_HBA_ERROR;
@ -541,7 +545,8 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
return NULL;
}
return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx;
return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
hbqp->hbqPutIdx;
}
void
@ -549,18 +554,21 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
struct lpfc_dmabuf *dmabuf, *next_dmabuf;
struct hbq_dmabuf *hbq_buf;
int i, hbq_count;
hbq_count = lpfc_sli_hbq_count();
/* Return all memory used by all HBQs */
list_for_each_entry_safe(dmabuf, next_dmabuf,
&phba->hbq_buffer_list, list) {
hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
list_del(&hbq_buf->dbuf.list);
lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
kfree(hbq_buf);
for (i = 0; i < hbq_count; ++i) {
list_for_each_entry_safe(dmabuf, next_dmabuf,
&phba->hbqs[i].hbq_buffer_list, list) {
hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
list_del(&hbq_buf->dbuf.list);
(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
}
}
}
static void
static struct lpfc_hbq_entry *
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
@ -574,7 +582,7 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
hbqe->bde.tus.f.bdeSize = FCELSSIZE;
hbqe->bde.tus.f.bdeSize = hbq_buf->size;
hbqe->bde.tus.f.bdeFlags = 0;
hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
@ -583,8 +591,9 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
/* flush */
readl(phba->hbq_put + hbqno);
list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
}
return hbqe;
}
static struct lpfc_hbq_init lpfc_els_hbq = {
@ -592,22 +601,38 @@ static struct lpfc_hbq_init lpfc_els_hbq = {
.entry_count = 200,
.mask_count = 0,
.profile = 0,
.ring_mask = 1 << LPFC_ELS_RING,
.ring_mask = (1 << LPFC_ELS_RING),
.buffer_count = 0,
.init_count = 20,
.add_count = 5,
};
static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
&lpfc_els_hbq,
static struct lpfc_hbq_init lpfc_extra_hbq = {
.rn = 1,
.entry_count = 200,
.mask_count = 0,
.profile = 0,
.ring_mask = (1 << LPFC_EXTRA_RING),
.buffer_count = 0,
.init_count = 0,
.add_count = 5,
};
int
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
&lpfc_els_hbq,
&lpfc_extra_hbq,
};
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
uint32_t i, start, end;
struct hbq_dmabuf *hbq_buffer;
if (!phba->hbqs[hbqno].hbq_alloc_buffer) {
return 0;
}
start = lpfc_hbq_defs[hbqno]->buffer_count;
end = count + lpfc_hbq_defs[hbqno]->buffer_count;
if (end > lpfc_hbq_defs[hbqno]->entry_count) {
@ -616,17 +641,14 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
/* Populate HBQ entries */
for (i = start; i < end; i++) {
hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
GFP_KERNEL);
hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
if (!hbq_buffer)
return 1;
hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
&hbq_buffer->dbuf.phys);
if (hbq_buffer->dbuf.virt == NULL)
return 1;
hbq_buffer->tag = (i | (hbqno << 16));
lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
lpfc_hbq_defs[hbqno]->buffer_count++;
if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
lpfc_hbq_defs[hbqno]->buffer_count++;
else
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
return 0;
}
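
Note: the tag assigned above packs the HBQ number into the upper 16 bits and the buffer index into the lower 16 bits, which is what lets lpfc_sli_hbqbuf_find() below recover the owning queue with tag >> 16, and lpfc_sli_free_hbq() do the same. A tiny illustration of that encoding (the helper names here are made up for the example):

/* Illustration only: how the HBQ buffer tag is packed and unpacked. */
static inline uint32_t lpfc_hbq_tag_make(uint32_t hbqno, uint32_t index)
{
	return index | (hbqno << 16);	/* mirrors tag = (i | (hbqno << 16)) */
}

static inline uint32_t lpfc_hbq_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;		/* mirrors hbqno = tag >> 16 */
}
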
@ -650,28 +672,34 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *hbq_buf;
uint32_t hbqno;
list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
hbqno = tag >> 16;
if (hbqno > LPFC_MAX_HBQS)
return NULL;
list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
if ((hbq_buf->tag & 0xffff) == tag) {
if (hbq_buf->tag == tag) {
return hbq_buf;
}
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
"%d:1803 Bad hbq tag. Data: x%x x%x\n",
phba->brd_no, tag,
lpfc_hbq_defs[tag >> 16]->buffer_count);
"1803 Bad hbq tag. Data: x%x x%x\n",
tag, lpfc_hbq_defs[tag >> 16]->buffer_count);
return NULL;
}
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp)
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
uint32_t hbqno;
if (sp) {
hbqno = sp->tag >> 16;
lpfc_sli_hbq_to_firmware(phba, hbqno, sp);
if (hbq_buffer) {
hbqno = hbq_buffer->tag >> 16;
if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
}
}
@ -837,12 +865,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
*/
if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
MBX_SHUTDOWN) {
/* Unknow mailbox command compl */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"%d (%d):0323 Unknown Mailbox command "
"(%d):0323 Unknown Mailbox command "
"%x Cmpl\n",
phba->brd_no,
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand);
phba->link_state = LPFC_HBA_ERROR;
@ -857,10 +883,9 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mbox cmd cmpl error - RETRYing */
lpfc_printf_log(phba, KERN_INFO,
LOG_MBOX | LOG_SLI,
"%d (%d):0305 Mbox cmd cmpl "
"(%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
"x%x x%x x%x\n",
phba->brd_no,
pmb->vport ? pmb->vport->vpi :0,
pmbox->mbxCommand,
pmbox->mbxStatus,
@ -879,9 +904,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
"(%d):0307 Mailbox cmd x%x Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no,
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
pmb->mbox_cmpl,
@ -905,21 +929,26 @@ static struct lpfc_dmabuf *
lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
{
struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
uint32_t hbqno;
void *virt; /* virtual address ptr */
dma_addr_t phys; /* mapped address */
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
if (hbq_entry == NULL)
return NULL;
list_del(&hbq_entry->dbuf.list);
new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
hbqno = tag >> 16;
new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
if (new_hbq_entry == NULL)
return &hbq_entry->dbuf;
new_hbq_entry->dbuf = hbq_entry->dbuf;
new_hbq_entry->tag = -1;
hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
if (hbq_entry->dbuf.virt == NULL) {
kfree(new_hbq_entry);
return &hbq_entry->dbuf;
}
phys = new_hbq_entry->dbuf.phys;
virt = new_hbq_entry->dbuf.virt;
new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
hbq_entry->dbuf.phys = phys;
hbq_entry->dbuf.virt = virt;
lpfc_sli_free_hbq(phba, hbq_entry);
return &new_hbq_entry->dbuf;
}
@ -965,7 +994,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
if (irsp->ulpBdeCount == 2)
saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
irsp->un.ulpWord[15]);
irsp->unsli3.sli3Words[7]);
}
/* unSolicited Responses */
@ -996,12 +1025,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Ring <ringno> handler: unexpected
Rctl <Rctl> Type <Type> received */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0313 Ring %d handler: unexpected Rctl x%x "
"0313 Ring %d handler: unexpected Rctl x%x "
"Type x%x received\n",
phba->brd_no,
pring->ringno,
Rctl,
Type);
pring->ringno, Rctl, Type);
}
return 1;
}
@ -1024,10 +1050,9 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0317 iotag x%x is out off "
"0317 iotag x%x is out off "
"range: max iotag x%x wd0 x%x\n",
phba->brd_no, iotag,
phba->sli.last_iotag,
iotag, phba->sli.last_iotag,
*(((uint32_t *) &prspiocb->iocb) + 7));
return NULL;
}
@ -1075,18 +1100,16 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* Ring <ringno> handler: unexpected completion IoTag
* <IoTag>
*/
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d (%d):0322 Ring %d handler: "
"unexpected completion IoTag x%x "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
cmdiocbp->vport->vpi,
pring->ringno,
saveq->iocb.ulpIoTag,
saveq->iocb.ulpStatus,
saveq->iocb.un.ulpWord[4],
saveq->iocb.ulpCommand,
saveq->iocb.ulpContext);
lpfc_printf_vlog(cmdiocbp->vport, KERN_WARNING, LOG_SLI,
"0322 Ring %d handler: "
"unexpected completion IoTag x%x "
"Data: x%x x%x x%x x%x\n",
pring->ringno,
saveq->iocb.ulpIoTag,
saveq->iocb.ulpStatus,
saveq->iocb.un.ulpWord[4],
saveq->iocb.ulpCommand,
saveq->iocb.ulpContext);
}
}
@ -1104,10 +1127,9 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* rsp ring <portRspMax>
*/
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0312 Ring %d handler: portRspPut %d "
"0312 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
phba->brd_no, pring->ringno,
le32_to_cpu(pgp->rspPutInx),
pring->ringno, le32_to_cpu(pgp->rspPutInx),
pring->numRiocb);
phba->link_state = LPFC_HBA_ERROR;
@ -1177,9 +1199,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
if (unlikely(irsp->ulpStatus)) {
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0326 Rsp Ring %d error: IOCB Data: "
"0326 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no, pring->ringno,
pring->ringno,
irsp->un.ulpWord[0],
irsp->un.ulpWord[1],
irsp->un.ulpWord[2],
@ -1199,9 +1221,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0314 IOCB cmd 0x%x"
" processed. Skipping"
" completion", phba->brd_no,
"0314 IOCB cmd 0x%x "
"processed. Skipping "
"completion",
irsp->ulpCommand);
break;
}
@ -1226,10 +1248,9 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0321 Unknown IOCB command "
"0321 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
phba->brd_no, type,
irsp->ulpCommand,
type, irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
@ -1353,9 +1374,9 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0336 Rsp Ring %d error: IOCB Data: "
"0336 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no, pring->ringno,
pring->ringno,
irsp->un.ulpWord[0],
irsp->un.ulpWord[1],
irsp->un.ulpWord[2],
@ -1375,10 +1396,9 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0333 IOCB cmd 0x%x"
"0333 IOCB cmd 0x%x"
" processed. Skipping"
" completion\n",
phba->brd_no,
irsp->ulpCommand);
break;
}
@ -1415,10 +1435,9 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0334 Unknown IOCB command "
"0334 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
phba->brd_no, type,
irsp->ulpCommand,
type, irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
@ -1496,10 +1515,9 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
* rsp ring <portRspMax>
*/
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0303 Ring %d handler: portRspPut %d "
"0303 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
phba->brd_no, pring->ringno, portRspPut,
portRspMax);
pring->ringno, portRspPut, portRspMax);
phba->link_state = LPFC_HBA_ERROR;
spin_unlock_irqrestore(&phba->hbalock, iflag);
@ -1542,6 +1560,14 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_debugfs_slow_ring_trc(phba,
"IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
*(((uint32_t *) irsp) + 4),
*(((uint32_t *) irsp) + 6),
*(((uint32_t *) irsp) + 7));
}
writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (list_empty(&(pring->iocb_continueq))) {
@ -1580,13 +1606,12 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
if (irsp->ulpStatus) {
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0328 Rsp Ring %d error: "
"0328 Rsp Ring %d error: "
"IOCB Data: "
"x%x x%x x%x x%x "
"x%x x%x x%x x%x "
"x%x x%x x%x x%x "
"x%x x%x x%x x%x\n",
phba->brd_no,
pring->ringno,
irsp->un.ulpWord[0],
irsp->un.ulpWord[1],
@ -1661,10 +1686,9 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0335 Unknown IOCB "
"0335 Unknown IOCB "
"command Data: x%x "
"x%x x%x x%x\n",
phba->brd_no,
irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
@ -1892,8 +1916,8 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
/* Kill HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0329 Kill HBA Data: x%x x%x\n",
phba->brd_no, phba->pport->port_state, psli->sli_flag);
"0329 Kill HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL)) == 0)
@ -1966,7 +1990,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
"0325 Reset HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
/* perform board reset */
@ -2021,7 +2045,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
"0337 Restart HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
word0 = 0;
@ -2086,9 +2110,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
/* Adapter failed to init, timeout, status reg
<status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0436 Adapter failed to init, "
"timeout, status reg x%x\n",
phba->brd_no, status);
"0436 Adapter failed to init, "
"timeout, status reg x%x\n", status);
phba->link_state = LPFC_HBA_ERROR;
return -ETIMEDOUT;
}
@ -2099,10 +2122,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
/* Adapter failed to init, chipset, status reg
<status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0437 Adapter failed to init, "
"chipset, status reg x%x\n",
phba->brd_no,
status);
"0437 Adapter failed to init, "
"chipset, status reg x%x\n", status);
phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
@ -2129,10 +2150,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg <status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0438 Adapter failed to init, chipset, "
"status reg x%x\n",
phba->brd_no,
status);
"0438 Adapter failed to init, chipset, "
"status reg x%x\n", status);
phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
@ -2147,7 +2166,7 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return 0;
}
static int
int
lpfc_sli_hbq_count(void)
{
return ARRAY_SIZE(lpfc_hbq_defs);
@ -2200,8 +2219,8 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
phba->hbqs[hbqno].local_hbqGetIdx = 0;
phba->hbqs[hbqno].entry_count =
lpfc_hbq_defs[hbqno]->entry_count;
lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
pmb);
lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
hbq_entry_index, pmb);
hbq_entry_index += phba->hbqs[hbqno].entry_count;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
@ -2210,9 +2229,9 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI | LOG_VPORT,
"%d:1805 Adapter failed to init. "
"1805 Adapter failed to init. "
"Data: x%x x%x x%x\n",
phba->brd_no, pmbox->mbxCommand,
pmbox->mbxCommand,
pmbox->mbxStatus, hbqno);
phba->link_state = LPFC_HBA_ERROR;
@ -2279,10 +2298,9 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0442 Adapter failed to init, mbxCmd x%x "
"0442 Adapter failed to init, mbxCmd x%x "
"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
phba->brd_no, pmb->mb.mbxCommand,
pmb->mb.mbxStatus, 0);
pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
@ -2321,11 +2339,11 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
switch (lpfc_sli_mode) {
case 2:
if (phba->cfg_npiv_enable) {
if (phba->cfg_enable_npiv) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"%d:1824 NPIV enabled: Override lpfc_sli_mode "
"1824 NPIV enabled: Override lpfc_sli_mode "
"parameter (%d) to auto (0).\n",
phba->brd_no, lpfc_sli_mode);
lpfc_sli_mode);
break;
}
mode = 2;
@ -2335,9 +2353,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"%d:1819 Unrecognized lpfc_sli_mode "
"parameter: %d.\n",
phba->brd_no, lpfc_sli_mode);
"1819 Unrecognized lpfc_sli_mode "
"parameter: %d.\n", lpfc_sli_mode);
break;
}
@ -2345,9 +2362,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
rc = lpfc_do_config_port(phba, mode);
if (rc && lpfc_sli_mode == 3)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"%d:1820 Unable to select SLI-3. "
"Not supported by adapter.\n",
phba->brd_no);
"1820 Unable to select SLI-3. "
"Not supported by adapter.\n");
if (rc && mode != 2)
rc = lpfc_do_config_port(phba, 2);
if (rc)
@ -2366,8 +2382,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
phba->brd_no, phba->sli_rev, phba->max_vpi);
"0444 Firmware in SLI %x mode. Max_vpi %d\n",
phba->sli_rev, phba->max_vpi);
rc = lpfc_sli_ring_map(phba);
if (rc)
@ -2392,8 +2408,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
lpfc_sli_hba_setup_error:
phba->link_state = LPFC_HBA_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0445 Firmware initialization failed\n",
phba->brd_no);
"0445 Firmware initialization failed\n");
return rc;
}
@ -2445,9 +2460,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"%d:0310 Mailbox command x%x timeout Data: x%x x%x "
"x%p\n",
phba->brd_no,
"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
mb->mbxCommand,
phba->pport->port_state,
phba->sli.sli_flag,
@ -2470,8 +2483,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
lpfc_sli_abort_iocb_ring(phba, pring);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"%d:0316 Resetting board due to mailbox timeout\n",
phba->brd_no);
"0316 Resetting board due to mailbox timeout\n");
/*
* lpfc_offline calls lpfc_sli_hba_down which will clean up
* on oustanding mailbox commands.
@ -2502,8 +2514,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if(!pmbox->vport) {
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_VPORT,
"%d:1806 Mbox x%x failed. No vport\n",
phba->brd_no,
"1806 Mbox x%x failed. No vport\n",
pmbox->mb.mbxCommand);
dump_stack();
return MBXERR_ERROR;
@ -2580,9 +2591,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* Mbox cmd issue - BUSY */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"%d (%d):0308 Mbox cmd issue - BUSY Data: "
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
phba->brd_no,
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
mb->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
@ -2644,9 +2654,9 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
/* Mailbox cmd <cmd> issue */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
pmbox->vport ? pmbox->vport->vpi : 0,
mb->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
@ -2848,8 +2858,7 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI | LOG_VPORT,
"%d:1807 IOCB x%x failed. No vport\n",
phba->brd_no,
"1807 IOCB x%x failed. No vport\n",
piocb->iocb.ulpCommand);
dump_stack();
return IOCB_ERROR;
@ -3080,11 +3089,10 @@ lpfc_sli_setup(struct lpfc_hba *phba)
}
if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
/* Too many cmd / rsp ring entries in SLI2 SLIM */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0462 Too many cmd / rsp ring entries in "
"SLI2 SLIM Data: x%x x%lx\n",
phba->brd_no, totiocbsize,
(unsigned long) MAX_SLIM_IOCB_SIZE);
printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
"SLI2 SLIM Data: x%x x%lx\n",
phba->brd_no, totiocbsize,
(unsigned long) MAX_SLIM_IOCB_SIZE);
}
if (phba->cfg_multi_ring_support == 2)
lpfc_extra_ring_setup(phba);
@ -3305,9 +3313,9 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0410 Cannot find virtual addr for mapped buf on "
"0410 Cannot find virtual addr for mapped buf on "
"ring %d Data x%llx x%p x%p x%x\n",
phba->brd_no, pring->ringno, (unsigned long long)phys,
pring->ringno, (unsigned long long)phys,
slp->next, slp->prev, pring->postbufq_cnt);
return NULL;
}
@ -3332,12 +3340,11 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
"%d:0327 Cannot abort els iocb %p "
"0327 Cannot abort els iocb %p "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
phba->brd_no, abort_iocb, abort_iotag,
abort_context, irsp->ulpStatus,
irsp->un.ulpWord[4]);
abort_iocb, abort_iotag, abort_context,
irsp->ulpStatus, irsp->un.ulpWord[4]);
/*
* make sure we have the right iocbq before taking it
@ -3371,9 +3378,9 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
"0133 Ignoring ELS cmd tag x%x completion Data: "
"x%x x%x x%x\n",
phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
@ -3439,12 +3446,11 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d (%d):0339 Abort xri x%x, original iotag x%x, "
"abort cmd iotag x%x\n",
phba->brd_no, vport->vpi,
iabt->un.acxri.abortContextTag,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0339 Abort xri x%x, original iotag x%x, "
"abort cmd iotag x%x\n",
iabt->un.acxri.abortContextTag,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
abort_iotag_exit:
@ -3457,8 +3463,8 @@ abort_iotag_exit:
}
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
uint64_t lun_id, uint32_t ctx,
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
uint16_t tgt_id, uint64_t lun_id,
lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_scsi_buf *lpfc_cmd;
@ -3468,6 +3474,9 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
if (!(iocbq->iocb_flag & LPFC_IO_FCP))
return rc;
if (iocbq->vport != vport)
return rc;
lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
cmnd = lpfc_cmd->pCmd;
@ -3484,10 +3493,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
if (cmnd->device->id == tgt_id)
rc = 0;
break;
case LPFC_CTX_CTX:
if (iocbq->iocb.ulpContext == ctx)
rc = 0;
break;
case LPFC_CTX_HOST:
rc = 0;
break;
@ -3501,17 +3506,18 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
}
int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
int sum, i;
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
0, ctx_cmd) == 0)
if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
ctx_cmd) == 0)
sum++;
}
@ -3527,10 +3533,10 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
int
lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
lpfc_ctx_cmd abort_cmd)
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *abtsiocb;
IOCB_t *cmd = NULL;
@ -3540,7 +3546,7 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
abort_cmd) != 0)
continue;
@ -3647,25 +3653,23 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
if (piocb->iocb_flag & LPFC_IO_WAKE) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0331 IOCB wake signaled\n",
phba->brd_no);
"0331 IOCB wake signaled\n");
} else if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0338 IOCB wait timeout error - no "
"wake response Data x%x\n",
phba->brd_no, timeout);
"0338 IOCB wait timeout error - no "
"wake response Data x%x\n", timeout);
retval = IOCB_TIMEDOUT;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0330 IOCB wake NOT set, "
"Data x%x x%lx\n", phba->brd_no,
"0330 IOCB wake NOT set, "
"Data x%x x%lx\n",
timeout, (timeleft / jiffies));
retval = IOCB_TIMEDOUT;
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0332 IOCB wait issue failed, Data x%x\n",
phba->brd_no, retval);
":0332 IOCB wait issue failed, Data x%x\n",
retval);
retval = IOCB_ERROR;
}
@ -3850,12 +3854,33 @@ lpfc_intr_handler(int irq, void *dev_id)
if (status & HA_RXMASK) {
spin_lock(&phba->hbalock);
control = readl(phba->HCregaddr);
lpfc_debugfs_slow_ring_trc(phba,
"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
control, status,
(uint32_t)phba->sli.slistat.sli_intr);
if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
lpfc_debugfs_slow_ring_trc(phba,
"ISR Disable ring:"
"pwork:x%x hawork:x%x wait:x%x",
phba->work_ha, work_ha_copy,
(uint32_t)((unsigned long)
phba->work_wait));
control &=
~(HC_R0INT_ENA << LPFC_ELS_RING);
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
else {
lpfc_debugfs_slow_ring_trc(phba,
"ISR slow ring: pwork:"
"x%x hawork:x%x wait:x%x",
phba->work_ha, work_ha_copy,
(uint32_t)((unsigned long)
phba->work_wait));
}
spin_unlock(&phba->hbalock);
}
}
@ -3895,12 +3920,10 @@ lpfc_intr_handler(int irq, void *dev_id)
*/
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
LOG_SLI,
"%d (%d):0304 Stray Mailbox "
"(%d):0304 Stray Mailbox "
"Interrupt mbxCommand x%x "
"mbxStatus x%x\n",
phba->brd_no,
(vport
? vport->vpi : 0),
(vport ? vport->vpi : 0),
pmbox->mbxCommand,
pmbox->mbxStatus);
}

View file

@ -26,7 +26,6 @@ struct lpfc_vport;
typedef enum _lpfc_ctx_cmd {
LPFC_CTX_LUN,
LPFC_CTX_TGT,
LPFC_CTX_CTX,
LPFC_CTX_HOST
} lpfc_ctx_cmd;
@ -54,9 +53,10 @@ struct lpfc_iocbq {
void *context2; /* caller context information */
void *context3; /* caller context information */
union {
wait_queue_head_t *wait_queue;
struct lpfc_iocbq *rsp_iocb;
struct lpfcMboxq *mbox;
wait_queue_head_t *wait_queue;
struct lpfc_iocbq *rsp_iocb;
struct lpfcMboxq *mbox;
struct lpfc_nodelist *ndlp;
} context_un;
void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
@ -143,7 +143,7 @@ struct lpfc_sli_ring {
uint16_t numCiocb; /* number of command iocb's per ring */
uint16_t numRiocb; /* number of rsp iocb's per ring */
uint16_t sizeCiocb; /* Size of command iocb's in this ring */
uint16_t sizeRiocb; /* Size of response iocb's in this ring */
uint16_t sizeRiocb; /* Size of response iocb's in this ring */
uint32_t fast_iotag; /* max fastlookup based iotag */
uint32_t iotag_ctr; /* keeps track of the next iotag to use */

View file

@ -18,12 +18,10 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.2.1"
#define LPFC_DRIVER_VERSION "8.2.2"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
#define LPFC_COPYRIGHT "Copyright(c) 2004-2007 Emulex. All rights reserved."
#define DFC_API_VERSION "0.0.0"

View file

@ -125,11 +125,10 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
pmb->vport = vport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"%d (%d):1818 VPort failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x, rc = x%x\n",
phba->brd_no, vport->vpi,
mb->mbxCommand, mb->mbxStatus, rc);
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
"1818 VPort failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x, rc = x%x\n",
mb->mbxCommand, mb->mbxStatus, rc);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
if (rc != MBX_TIMEOUT)
@ -162,9 +161,9 @@ lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
return 1;
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1822 Invalid %s: %02x:%02x:%02x:%02x:"
"1822 Invalid %s: %02x:%02x:%02x:%02x:"
"%02x:%02x:%02x:%02x\n",
phba->brd_no, name_type,
name_type,
wwn->u.wwn[0], wwn->u.wwn[1],
wwn->u.wwn[2], wwn->u.wwn[3],
wwn->u.wwn[4], wwn->u.wwn[5],
@ -176,16 +175,21 @@ static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
struct lpfc_vport *vport;
unsigned long flags;
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport == new_vport)
continue;
/* If they match, return not unique */
if (memcmp(&vport->fc_sparam.portName,
&new_vport->fc_sparam.portName,
sizeof(struct lpfc_name)) == 0)
&new_vport->fc_sparam.portName,
sizeof(struct lpfc_name)) == 0) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0;
}
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 1;
}
@ -193,8 +197,8 @@ int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
struct lpfc_nodelist *ndlp;
struct lpfc_vport *pport =
(struct lpfc_vport *) fc_vport->shost->hostdata;
struct Scsi_Host *shost = fc_vport->shost;
struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = pport->phba;
struct lpfc_vport *vport = NULL;
int instance;
@ -204,9 +208,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
if ((phba->sli_rev < 3) ||
!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1808 Create VPORT failed: "
"1808 Create VPORT failed: "
"NPIV is not enabled: SLImode:%d\n",
phba->brd_no, phba->sli_rev);
phba->sli_rev);
rc = VPORT_INVAL;
goto error_out;
}
@ -214,9 +218,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vpi = lpfc_alloc_vpi(phba);
if (vpi == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1809 Create VPORT failed: "
"1809 Create VPORT failed: "
"Max VPORTs (%d) exceeded\n",
phba->brd_no, phba->max_vpi);
phba->max_vpi);
rc = VPORT_NORESOURCES;
goto error_out;
}
@ -225,18 +229,17 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* Assign an unused board number */
if ((instance = lpfc_get_instance()) < 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1810 Create VPORT failed: Cannot get "
"instance number\n", phba->brd_no);
"1810 Create VPORT failed: Cannot get "
"instance number\n");
lpfc_free_vpi(phba, vpi);
rc = VPORT_NORESOURCES;
goto error_out;
}
vport = lpfc_create_port(phba, instance, fc_vport);
vport = lpfc_create_port(phba, instance, &fc_vport->dev);
if (!vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1811 Create VPORT failed: vpi x%x\n",
phba->brd_no, vpi);
"1811 Create VPORT failed: vpi x%x\n", vpi);
lpfc_free_vpi(phba, vpi);
rc = VPORT_NORESOURCES;
goto error_out;
@ -246,10 +249,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
lpfc_debugfs_initialize(vport);
if (lpfc_vport_sparm(phba, vport)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1813 Create VPORT failed: vpi:%d "
"Cannot get sparam\n",
phba->brd_no, vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1813 Create VPORT failed. "
"Cannot get sparam\n");
lpfc_free_vpi(phba, vpi);
destroy_port(vport);
rc = VPORT_NORESOURCES;
@ -269,10 +271,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
!lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1821 Create VPORT failed: vpi:%d "
"Invalid WWN format\n",
phba->brd_no, vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1821 Create VPORT failed. "
"Invalid WWN format\n");
lpfc_free_vpi(phba, vpi);
destroy_port(vport);
rc = VPORT_INVAL;
@ -280,10 +281,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (!lpfc_unique_wwpn(phba, vport)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1823 Create VPORT failed: vpi:%d "
"Duplicate WWN on HBA\n",
phba->brd_no, vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1823 Create VPORT failed. "
"Duplicate WWN on HBA\n");
lpfc_free_vpi(phba, vpi);
destroy_port(vport);
rc = VPORT_INVAL;
@ -315,10 +315,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
lpfc_initial_fdisc(vport);
} else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0262 No NPIV Fabric "
"support\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0262 No NPIV Fabric support\n");
}
} else {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
@ -326,12 +324,14 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
rc = VPORT_OK;
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1825 Vport Created.\n");
lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
return rc;
}
int
static int
disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
@ -371,10 +371,12 @@ disable_vport(struct fc_vport *fc_vport)
lpfc_mbx_unreg_vpi(vport);
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1826 Vport Disabled.\n");
return VPORT_OK;
}
int
static int
enable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
@ -400,15 +402,14 @@ enable_vport(struct fc_vport *fc_vport)
lpfc_initial_fdisc(vport);
} else {
lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
"%d (%d):0264 No NPIV Fabric "
"support\n",
phba->brd_no, vport->vpi);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0264 No NPIV Fabric support\n");
}
} else {
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1827 Vport Enabled.\n");
return VPORT_OK;
}
@ -431,8 +432,29 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
long timeout;
int rc = VPORT_ERROR;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1812 vport_delete failed: Cannot delete "
"physical host\n");
return VPORT_ERROR;
}
/*
* If we are not unloading the driver then prevent the vport_delete
* from happening until after this vport's discovery is finished.
*/
if (!(phba->pport->load_flag & FC_UNLOADING)) {
int check_count = 0;
while (check_count < ((phba->fc_ratov * 3) + 3) &&
vport->port_state > LPFC_VPORT_FAILED &&
vport->port_state < LPFC_VPORT_READY) {
check_count++;
msleep(1000);
}
if (vport->port_state > LPFC_VPORT_FAILED &&
vport->port_state < LPFC_VPORT_READY)
return -EAGAIN;
}
/*
* This is a bit of a mess. We want to ensure the shost doesn't get
* torn down until we're done with the embedded lpfc_vport structure.
@ -450,16 +472,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
*/
if (!scsi_host_get(shost) || !scsi_host_get(shost))
return VPORT_INVAL;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"%d:1812 vport_delete failed: Cannot delete "
"physical host\n", phba->brd_no);
goto out;
}
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
kfree(vport->vname);
lpfc_debugfs_terminate(vport);
fc_remove_host(lpfc_shost_from_vport(vport));
@ -511,13 +526,46 @@ skip_logo:
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->hbalock);
rc = VPORT_OK;
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1828 Vport Deleted.\n");
scsi_host_put(shost);
return rc;
return VPORT_OK;
}
EXPORT_SYMBOL(lpfc_vport_create);
EXPORT_SYMBOL(lpfc_vport_delete);
struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
struct lpfc_vport *port_iterator;
struct lpfc_vport **vports;
int index = 0;
vports = kzalloc(LPFC_MAX_VPORTS * sizeof(struct lpfc_vport *),
GFP_KERNEL);
if (vports == NULL)
return NULL;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
lpfc_printf_vlog(port_iterator, KERN_WARNING, LOG_VPORT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
}
vports[index++] = port_iterator;
}
spin_unlock_irq(&phba->hbalock);
return vports;
}
void
lpfc_destroy_vport_work_array(struct lpfc_vport **vports)
{
int i;
if (vports == NULL)
return;
for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
scsi_host_put(lpfc_shost_from_vport(vports[i]));
kfree(vports);
}
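
A minimal sketch of the typical calling pattern for the work-array helpers above (illustrative only; the wrapper function name is hypothetical and not part of this commit):

/* Hypothetical caller sketch: walk vports via the refcounted work array. */
static void example_walk_vports(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports == NULL)
		return;
	for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
		/* each entry holds a scsi_host reference until destroyed */
		lpfc_printf_vlog(vports[i], KERN_INFO, LOG_VPORT,
				 "vpi x%x walked\n", vports[i]->vpi);
	}
	lpfc_destroy_vport_work_array(vports);
}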

View file

@ -88,6 +88,8 @@ int lpfc_vport_create(struct fc_vport *, bool);
int lpfc_vport_delete(struct fc_vport *);
int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
void lpfc_destroy_vport_work_array(struct lpfc_vport **);
/*
* queuecommand VPORT-specific return codes. Specified in the host byte code.

View file

@ -2154,6 +2154,19 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
}
}
/* Get memory for cached NVRAM */
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
if (ha->nvram == NULL) {
/* error */
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - nvram cache\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
/* Done all allocations without any error. */
status = 0;
@ -2266,6 +2279,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
ha->fw_dump_reading = 0;
vfree(ha->optrom_buffer);
kfree(ha->nvram);
}
/*

View file

@ -193,7 +193,8 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
cpu_relax();
}
if (!loop_count)
printk(KERN_EMERG "qlogicpti: mbox_command loop timeout #1\n");
printk(KERN_EMERG "qlogicpti%d: mbox_command loop timeout #1\n",
qpti->qpti_id);
/* Write mailbox command registers. */
switch (mbox_param[param[0]] >> 4) {
@ -224,8 +225,8 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
(sbus_readw(qpti->qregs + HCCTRL) & HCCTRL_CRIRQ))
udelay(20);
if (!loop_count)
printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #2\n",
param[0]);
printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #2\n",
qpti->qpti_id, param[0]);
/* Wait for SBUS semaphore to get set. */
loop_count = DEFAULT_LOOP_COUNT;
@ -238,16 +239,16 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
break;
}
if (!loop_count)
printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #3\n",
param[0]);
printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #3\n",
qpti->qpti_id, param[0]);
/* Wait for MBOX busy condition to go away. */
loop_count = DEFAULT_LOOP_COUNT;
while (--loop_count && (sbus_readw(qpti->qregs + MBOX0) == 0x04))
udelay(20);
if (!loop_count)
printk(KERN_EMERG "qlogicpti: mbox_command[%04x] loop timeout #4\n",
param[0]);
printk(KERN_EMERG "qlogicpti%d: mbox_command[%04x] loop timeout #4\n",
qpti->qpti_id, param[0]);
/* Read back output parameters. */
switch (mbox_param[param[0]] & 0xf) {
@ -342,7 +343,8 @@ static int qlogicpti_reset_hardware(struct Scsi_Host *host)
while (--loop_count && ((sbus_readw(qpti->qregs + MBOX0) & 0xff) == 0x04))
udelay(20);
if (!loop_count)
printk(KERN_EMERG "qlogicpti: reset_hardware loop timeout\n");
printk(KERN_EMERG "qlogicpti%d: reset_hardware loop timeout\n",
qpti->qpti_id);
sbus_writew(HCCTRL_PAUSE, qpti->qregs + HCCTRL);
set_sbus_cfg1(qpti);
@ -721,12 +723,12 @@ static int __init qpti_register_irq(struct qlogicpti *qpti)
IRQF_SHARED, "Qlogic/PTI", qpti))
goto fail;
printk("qpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
return 0;
fail:
printk("qpti%d: Cannot acquire irq line\n", qpti->qpti_id);
printk("qlogicpti%d: Cannot acquire irq line\n", qpti->qpti_id);
return -1;
}
@ -1210,7 +1212,7 @@ static int qlogicpti_return_status(struct Status_Entry *sts, int id)
host_status = DID_OK;
break;
default:
printk(KERN_EMERG "qpti%d: unknown completion status 0x%04x\n",
printk(KERN_EMERG "qlogicpti%d: unknown completion status 0x%04x\n",
id, sts->completion_status);
host_status = DID_ERROR;
break;
@ -1329,8 +1331,8 @@ static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
u32 cmd_cookie;
int i;
printk(KERN_WARNING "qlogicpti : Aborting cmd for tgt[%d] lun[%d]\n",
(int)Cmnd->device->id, (int)Cmnd->device->lun);
printk(KERN_WARNING "qlogicpti%d: Aborting cmd for tgt[%d] lun[%d]\n",
qpti->qpti_id, (int)Cmnd->device->id, (int)Cmnd->device->lun);
qlogicpti_disable_irqs(qpti);
@ -1348,7 +1350,8 @@ static int qlogicpti_abort(struct scsi_cmnd *Cmnd)
param[3] = cmd_cookie & 0xffff;
if (qlogicpti_mbox_command(qpti, param, 0) ||
(param[0] != MBOX_COMMAND_COMPLETE)) {
printk(KERN_EMERG "qlogicpti : scsi abort failure: %x\n", param[0]);
printk(KERN_EMERG "qlogicpti%d: scsi abort failure: %x\n",
qpti->qpti_id, param[0]);
return_status = FAILED;
}
@ -1364,7 +1367,8 @@ static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
int return_status = SUCCESS;
printk(KERN_WARNING "qlogicpti : Resetting SCSI bus!\n");
printk(KERN_WARNING "qlogicpti%d: Resetting SCSI bus!\n",
qpti->qpti_id);
qlogicpti_disable_irqs(qpti);
@ -1372,7 +1376,8 @@ static int qlogicpti_reset(struct scsi_cmnd *Cmnd)
param[1] = qpti->host_param.bus_reset_delay;
if (qlogicpti_mbox_command(qpti, param, 0) ||
(param[0] != MBOX_COMMAND_COMPLETE)) {
printk(KERN_EMERG "qlogicisp : scsi bus reset failure: %x\n", param[0]);
printk(KERN_EMERG "qlogicisp%d: scsi bus reset failure: %x\n",
qpti->qpti_id, param[0]);
return_status = FAILED;
}
@ -1454,22 +1459,25 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi
if (qlogicpti_reset_hardware(host))
goto fail_unmap_queues;
if (scsi_add_host(host, &dev->dev))
goto fail_unmap_queues;
printk("(Firmware v%d.%d.%d)", qpti->fware_majrev,
qpti->fware_minrev, qpti->fware_micrev);
fcode = of_get_property(dp, "isp-fcode", NULL);
if (fcode && fcode[0])
printk("(Firmware %s)", fcode);
printk("(FCode %s)", fcode);
if (of_find_property(dp, "differential", NULL) != NULL)
qpti->differential = 1;
printk (" [%s Wide, using %s interface]\n",
printk("\nqlogicpti%d: [%s Wide, using %s interface]\n",
qpti->qpti_id,
(qpti->ultra ? "Ultra" : "Fast"),
(qpti->differential ? "differential" : "single ended"));
if (scsi_add_host(host, &dev->dev)) {
printk("qlogicpti%d: Failed scsi_add_host\n", qpti->qpti_id);
goto fail_unmap_queues;
}
dev_set_drvdata(&sdev->ofdev.dev, qpti);
qpti_chain_add(qpti);

View file

@ -1038,22 +1038,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
return BLKPREP_KILL;
}
static int scsi_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
struct scsi_device *sdev = q->queuedata;
struct scsi_driver *drv;
if (sdev->sdev_state != SDEV_RUNNING)
return -ENXIO;
drv = *(struct scsi_driver **) disk->private_data;
if (drv->issue_flush)
return drv->issue_flush(&sdev->sdev_gendev, error_sector);
return -EOPNOTSUPP;
}
static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
struct request *req)
{
@ -1596,7 +1580,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
return NULL;
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
return q;
}

View file

@ -241,7 +241,6 @@ static struct scsi_driver sd_template = {
},
.rescan = sd_rescan,
.init_command = sd_init_command,
.issue_flush = sd_issue_flush,
};
/*
@ -800,10 +799,17 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
return 0;
}
static int sd_issue_flush(struct device *dev, sector_t *error_sector)
static int sd_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
int ret = 0;
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
struct scsi_device *sdp = q->queuedata;
struct scsi_disk *sdkp;
if (sdp->sdev_state != SDEV_RUNNING)
return -ENXIO;
sdkp = scsi_disk_get_from_dev(&sdp->sdev_gendev);
if (!sdkp)
return -ENODEV;
@ -1663,6 +1669,8 @@ static int sd_probe(struct device *dev)
sd_revalidate_disk(gd);
blk_queue_issue_flush_fn(sdp->request_queue, sd_issue_flush);
gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_DRIVERFS;
if (sdp->removable)

View file

@ -1485,7 +1485,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
struct st_buffer *STbp;
char *name = tape_name(STp);
if (down_interruptible(&STp->lock))
if (mutex_lock_interruptible(&STp->lock))
return -ERESTARTSYS;
retval = rw_checks(STp, filp, count);
@ -1736,7 +1736,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (SRpnt != NULL)
st_release_request(SRpnt);
release_buffering(STp, 0);
up(&STp->lock);
mutex_unlock(&STp->lock);
return retval;
}
@ -1942,7 +1942,7 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
struct st_buffer *STbp = STp->buffer;
DEB( char *name = tape_name(STp); )
if (down_interruptible(&STp->lock))
if (mutex_lock_interruptible(&STp->lock))
return -ERESTARTSYS;
retval = rw_checks(STp, filp, count);
@ -2069,7 +2069,7 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
release_buffering(STp, 1);
STbp->buffer_bytes = 0;
}
up(&STp->lock);
mutex_unlock(&STp->lock);
return retval;
}
@ -3226,7 +3226,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
char *name = tape_name(STp);
void __user *p = (void __user *)arg;
if (down_interruptible(&STp->lock))
if (mutex_lock_interruptible(&STp->lock))
return -ERESTARTSYS;
DEB(
@ -3537,7 +3537,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
retval = (-EFAULT);
goto out;
}
up(&STp->lock);
mutex_unlock(&STp->lock);
switch (cmd_in) {
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
@ -3563,7 +3563,7 @@ static int st_ioctl(struct inode *inode, struct file *file,
return retval;
out:
up(&STp->lock);
mutex_unlock(&STp->lock);
return retval;
}
@ -4029,7 +4029,7 @@ static int st_probe(struct device *dev)
tpnt->density_changed = tpnt->compression_changed =
tpnt->blksize_changed = 0;
init_MUTEX(&tpnt->lock);
mutex_init(&tpnt->lock);
st_nr_dev++;
write_unlock(&st_dev_arr_lock);
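
The semaphore-to-mutex conversion above repeats one serialization pattern throughout st.c; a minimal sketch of that pattern (illustrative only, not taken from this commit, hypothetical function name):

/* Hypothetical sketch of the st.c locking pattern: an interruptible
 * mutex (struct scsi_tape.lock) guards each tape operation. */
static int example_serialized_op(struct scsi_tape *STp)
{
	/* Bail out with -ERESTARTSYS if a signal interrupts the wait. */
	if (mutex_lock_interruptible(&STp->lock))
		return -ERESTARTSYS;

	/* ... perform the serialized read/write/ioctl work here ... */

	mutex_unlock(&STp->lock);
	return 0;
}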

View file

@ -3,6 +3,7 @@
#define _ST_H
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <scsi/scsi_cmnd.h>
@ -98,7 +99,7 @@ struct st_partstat {
struct scsi_tape {
struct scsi_driver *driver;
struct scsi_device *device;
struct semaphore lock; /* For serialization */
struct mutex lock; /* For serialization */
struct completion wait; /* For SCSI commands */
struct st_buffer *buffer;

View file

@ -15,14 +15,18 @@ struct sg_io_v4 {
__u32 request_len; /* [i] in bytes */
__u64 request; /* [i], [*i] {SCSI: cdb} */
__u64 request_tag; /* [i] {SCSI: task tag (only if flagged)} */
__u32 request_attr; /* [i] {SCSI: task attribute} */
__u32 request_tag; /* [i] {SCSI: task tag (only if flagged)} */
__u32 request_priority; /* [i] {SCSI: task priority} */
__u32 request_extra; /* [i] {spare, for padding} */
__u32 max_response_len; /* [i] in bytes */
__u64 response; /* [i], [*o] {SCSI: (auto)sense data} */
/* "din_" for data in (from device); "dout_" for data out (to device) */
/* "dout_": data out (to device); "din_": data in (from device) */
__u32 dout_iovec_count; /* [i] 0 -> "flat" dout transfer else
dout_xfer points to array of iovec */
__u32 dout_xfer_len; /* [i] bytes to be transferred to device */
__u32 din_iovec_count; /* [i] 0 -> "flat" din transfer */
__u32 din_xfer_len; /* [i] bytes to be transferred from device */
__u64 dout_xferp; /* [i], [*i] */
__u64 din_xferp; /* [i], [*o] */
@ -39,8 +43,9 @@ struct sg_io_v4 {
__u32 info; /* [o] additional information */
__u32 duration; /* [o] time to complete, in milliseconds */
__u32 response_len; /* [o] bytes of response actually written */
__s32 din_resid; /* [o] actual_din_xfer_len - din_xfer_len */
__u32 generated_tag; /* [o] {SCSI: task tag that transport chose} */
__s32 din_resid; /* [o] din_xfer_len - actual_din_xfer_len */
__s32 dout_resid; /* [o] dout_xfer_len - actual_dout_xfer_len */
__u64 generated_tag; /* [o] {SCSI: transport generated task tag} */
__u32 spare_out; /* [o] */
__u32 padding;
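
For context, a hypothetical userspace sketch of filling the v4 header above for a simple data-in command; it assumes an already-opened /dev/bsg node, that <linux/bsg.h> is visible to userspace, and uses placeholder names and CDB values (none of this is part of the commit):

/* Hypothetical userspace sketch: one data-in command via a bsg node. */
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>      /* SG_IO */
#include <linux/bsg.h>    /* struct sg_io_v4, BSG_PROTOCOL_SCSI */

static int example_inquiry(int bsg_fd)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY */
	unsigned char data[96], sense[32];
	struct sg_io_v4 hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';                        /* marks a v4 header */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (unsigned long) cdb;
	hdr.din_xfer_len = sizeof(data);        /* data in from the device */
	hdr.din_xferp = (unsigned long) data;
	hdr.max_response_len = sizeof(sense);
	hdr.response = (unsigned long) sense;

	if (ioctl(bsg_fd, SG_IO, &hdr) < 0)
		return -1;

	/* din_resid = din_xfer_len - actual bytes transferred (see above) */
	return (int) hdr.din_xfer_len - hdr.din_resid;
}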

View file

@ -13,8 +13,6 @@ struct scsi_driver {
int (*init_command)(struct scsi_cmnd *);
void (*rescan)(struct device *);
int (*issue_flush)(struct device *, sector_t *);
int (*prepare_flush)(struct request_queue *, struct request *);
};
#define to_scsi_driver(drv) \
container_of((drv), struct scsi_driver, gendrv)

View file

@ -56,8 +56,6 @@ static int sd_suspend(struct device *dev, pm_message_t state);
static int sd_resume(struct device *dev);
static void sd_rescan(struct device *);
static int sd_init_command(struct scsi_cmnd *);
static int sd_issue_flush(struct device *, sector_t *);
static void sd_prepare_flush(struct request_queue *, struct request *);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct class_device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);