Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (276 commits)
  [SCSI] zfcp: Trigger logging in the FCP channel on qdio error conditions
  [SCSI] zfcp: Introduce experimental support for DIF/DIX
  [SCSI] zfcp: Enable data division support for FCP devices
  [SCSI] zfcp: Prevent access on uninitialized memory.
  [SCSI] zfcp: Post events through FC transport class
  [SCSI] zfcp: Cleanup QDIO attachment and improve processing.
  [SCSI] zfcp: Cleanup function parameters for sbal value.
  [SCSI] zfcp: Use correct width for timer_interval field
  [SCSI] zfcp: Remove SCSI device when removing unit
  [SCSI] zfcp: Use memdup_user and kstrdup
  [SCSI] zfcp: Fix retry after failed "open port" erp action
  [SCSI] zfcp: Fail erp after timeout
  [SCSI] zfcp: Use forced_reopen in terminate_rport_io callback
  [SCSI] zfcp: Register SCSI devices after successful fc_remote_port_add
  [SCSI] zfcp: Do not try "forced close" when port is already closed
  [SCSI] zfcp: Do not unblock rport from REOPEN_PORT_FORCED
  [SCSI] sd: add support for runtime PM
  [SCSI] implement runtime Power Management
  [SCSI] convert to the new PM framework
  [SCSI] Unify SAM_ and SAM_STAT_ macros
  ...
commit 03da309867
268 changed files with 19699 additions and 6601 deletions
107  Documentation/scsi/hpsa.txt  Normal file
@@ -0,0 +1,107 @@
HPSA - Hewlett Packard Smart Array driver
-----------------------------------------

This file describes the hpsa SCSI driver for HP Smart Array controllers.
The hpsa driver is intended to supplant the cciss driver for newer
Smart Array controllers. The hpsa driver is a SCSI driver, while the
cciss driver is a "block" driver. Actually cciss is both a block
driver (for logical drives) AND a SCSI driver (for tape drives). This
"split-brained" design of the cciss driver is a source of excess
complexity and eliminating that complexity is one of the reasons
for hpsa to exist.

Supported devices:
------------------

Smart Array P212
Smart Array P410
Smart Array P410i
Smart Array P411
Smart Array P812
Smart Array P712m
Smart Array P711m
StorageWorks P1210m

Additionally, older Smart Arrays may work with the hpsa driver if the kernel
boot parameter "hpsa_allow_any=1" is specified, however these are not tested
nor supported by HP with this driver. For older Smart Arrays, the cciss
driver should still be used.

HPSA specific entries in /sys
-----------------------------

In addition to the generic SCSI attributes available in /sys, hpsa supports
the following attributes:

HPSA specific host attributes:
------------------------------

/sys/class/scsi_host/host*/rescan
/sys/class/scsi_host/host*/firmware_revision

the host "rescan" attribute is a write only attribute. Writing to this
attribute will cause the driver to scan for new, changed, or removed devices
(e.g. hot-plugged tape drives, or newly configured or deleted logical drives,
etc.) and notify the SCSI midlayer of any changes detected. Normally this is
triggered automatically by HP's Array Configuration Utility (either the GUI or
command line variety) so for logical drive changes, the user should not
normally have to use this. It may be useful when hot plugging devices like
tape drives, or entire storage boxes containing pre-configured logical drives.

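A minimal user-space sketch (not part of the committed hpsa.txt): triggering a
rescan by writing to the attribute described above. The host number ("host4")
and the value written are arbitrary examples; any write starts the scan.

/* sketch only: poke the hpsa host "rescan" attribute from C */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/scsi_host/host4/rescan", "w"); /* host4 is illustrative */

	if (!f) {
		perror("rescan");
		return 1;
	}
	fputs("1\n", f);	/* any value works; the write itself triggers the scan */
	fclose(f);
	return 0;
}
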
The "firmware_revision" attribute contains the firmware version of the Smart Array.
For example:

root@host:/sys/class/scsi_host/host4# cat firmware_revision
7.14

HPSA specific disk attributes:
------------------------------

/sys/class/scsi_disk/c:b:t:l/device/unique_id
/sys/class/scsi_disk/c:b:t:l/device/raid_level
/sys/class/scsi_disk/c:b:t:l/device/lunid

(where c:b:t:l are the controller, bus, target and lun of the device)

For example:

root@host:/sys/class/scsi_disk/4:0:0:0/device# cat unique_id
600508B1001044395355323037570F77
root@host:/sys/class/scsi_disk/4:0:0:0/device# cat lunid
0x0000004000000000
root@host:/sys/class/scsi_disk/4:0:0:0/device# cat raid_level
RAID 0

HPSA specific ioctls:
---------------------

For compatibility with applications written for the cciss driver, many, but
not all of the ioctls supported by the cciss driver are also supported by the
hpsa driver. The data structures used by these are described in
include/linux/cciss_ioctl.h

CCISS_DEREGDISK
CCISS_REGNEWDISK
CCISS_REGNEWD

The above three ioctls all do exactly the same thing, which is to cause the driver
to rescan for new devices. This does exactly the same thing as writing to the
hpsa specific host "rescan" attribute.

CCISS_GETPCIINFO

Returns PCI domain, bus, device and function and "board ID" (PCI subsystem ID).

CCISS_GETDRIVVER

Returns driver version in three bytes encoded as:
(major_version << 16) | (minor_version << 8) | (subminor_version)

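A hedged sketch (not part of the committed documentation): decoding the value
returned by CCISS_GETDRIVVER with the encoding shown above. "/dev/sg1" is a
hypothetical node for a device bound to hpsa; the real name is system dependent.

/* sketch only: query and decode the hpsa driver version */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cciss_ioctl.h>

int main(void)
{
	unsigned int ver = 0;
	int fd = open("/dev/sg1", O_RDWR);	/* hypothetical device node */

	if (fd < 0 || ioctl(fd, CCISS_GETDRIVVER, &ver) < 0) {
		perror("CCISS_GETDRIVVER");
		return 1;
	}
	printf("driver version %u.%u.%u\n",
	       (ver >> 16) & 0xff, (ver >> 8) & 0xff, ver & 0xff);
	close(fd);
	return 0;
}
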
CCISS_PASSTHRU
CCISS_BIG_PASSTHRU

Allows "BMIC" and "CISS" commands to be passed through to the Smart Array.
These are used extensively by the HP Array Configuration Utility, SNMP storage
agents, etc. See cciss_vol_status at http://cciss.sf.net for some examples.

@@ -2625,6 +2625,14 @@ S: Maintained
F: Documentation/blockdev/cpqarray.txt
F: drivers/block/cpqarray.*

HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
M: Stephen M. Cameron <scameron@beardog.cce.hp.com>
L: iss_storagedev@hp.com
S: Supported
F: Documentation/scsi/hpsa.txt
F: drivers/scsi/hpsa*.[ch]
F: include/linux/cciss*.h

HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
M: Mike Miller <mike.miller@hp.com>
L: iss_storagedev@hp.com

@@ -84,6 +84,7 @@ struct qdr {

#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
#define QIB_RFLAGS_ENABLE_QEBSM 0x80
#define QIB_RFLAGS_ENABLE_DATA_DIV 0x02

/**
 * struct qib - queue information block (QIB)

@@ -284,6 +285,9 @@ struct slsb {
	u8 val[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(256)));

#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
#define CHSC_AC2_DATA_DIV_ENABLED 0x0002

struct qdio_ssqd_desc {
	u8 flags;
	u8:8;

@@ -332,6 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
 * @adapter_name: name for the adapter
 * @qib_param_field_format: format for qib_parm_field
 * @qib_param_field: pointer to 128 bytes or NULL, if no param field
 * @qib_rflags: rflags to set
 * @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
 * @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
 * @no_input_qs: number of input queues

@@ -348,6 +353,7 @@ struct qdio_initialize {
	unsigned char adapter_name[8];
	unsigned int qib_param_field_format;
	unsigned char *qib_param_field;
	unsigned char qib_rflags;
	unsigned long *input_slib_elements;
	unsigned long *output_slib_elements;
	unsigned int no_input_qs;

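A minimal sketch (not from the patch): how a qdio driver could opt in to the
data-division facility through the qib_rflags member added above, assuming the
header shown here is the s390 <asm/qdio.h>. The "div_supported" flag stands in
for whatever facility check the driver performs (e.g. the CHSC_AC2_* bits).

/* sketch only: request data division at queue setup time */
#include <asm/qdio.h>

static void example_request_data_div(struct qdio_initialize *init_data,
				     int div_supported)
{
	if (div_supported)
		init_data->qib_rflags |= QIB_RFLAGS_ENABLE_DATA_DIV;
}
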
@ -115,6 +115,7 @@ MODULE_PARM_DESC(mpt_fwfault_debug, "Enable detection of Firmware fault"
|
|||
" and halt Firmware on fault - (default=0)");
|
||||
|
||||
|
||||
static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
|
||||
|
||||
#ifdef MFCNT
|
||||
static int mfcounter = 0;
|
||||
|
@ -213,7 +214,7 @@ static int ProcessEventNotification(MPT_ADAPTER *ioc,
|
|||
static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
|
||||
static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
|
||||
static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
|
||||
static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info);
|
||||
static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info , u8 cb_idx);
|
||||
static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
|
||||
static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);
|
||||
|
||||
|
@ -490,7 +491,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
|
|||
else if (ioc->bus_type == SPI)
|
||||
mpt_spi_log_info(ioc, log_info);
|
||||
else if (ioc->bus_type == SAS)
|
||||
mpt_sas_log_info(ioc, log_info);
|
||||
mpt_sas_log_info(ioc, log_info, cb_idx);
|
||||
}
|
||||
|
||||
if (ioc_stat & MPI_IOCSTATUS_MASK)
|
||||
|
@ -644,7 +645,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
|
|||
* considered an error by the caller.
|
||||
*/
|
||||
u8
|
||||
mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass)
|
||||
mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
|
||||
{
|
||||
u8 cb_idx;
|
||||
last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;
|
||||
|
@ -659,6 +660,8 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass)
|
|||
MptDriverClass[cb_idx] = dclass;
|
||||
MptEvHandlers[cb_idx] = NULL;
|
||||
last_drv_idx = cb_idx;
|
||||
memcpy(MptCallbacksName[cb_idx], func_name,
|
||||
strlen(func_name) > 50 ? 50 : strlen(func_name));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -1632,6 +1635,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
|
|||
} else {
|
||||
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
|
||||
ioc->name, pci_name(pdev));
|
||||
pci_release_selected_regions(pdev, ioc->bars);
|
||||
return r;
|
||||
}
|
||||
} else {
|
||||
|
@ -1645,6 +1649,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
|
|||
} else {
|
||||
printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
|
||||
ioc->name, pci_name(pdev));
|
||||
pci_release_selected_regions(pdev, ioc->bars);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
@ -1675,6 +1680,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
|
|||
if (mem == NULL) {
|
||||
printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
|
||||
" memory!\n", ioc->name);
|
||||
pci_release_selected_regions(pdev, ioc->bars);
|
||||
return -EINVAL;
|
||||
}
|
||||
ioc->memmap = mem;
|
||||
|
@ -1770,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
|
||||
ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
|
||||
|
||||
ioc->pcidev = pdev;
|
||||
|
||||
spin_lock_init(&ioc->taskmgmt_lock);
|
||||
mutex_init(&ioc->internal_cmds.mutex);
|
||||
|
@ -1913,6 +1918,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
ioc->msi_enable = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
ioc->fw_events_off = 1;
|
||||
|
||||
if (ioc->errata_flag_1064)
|
||||
pci_disable_io_access(pdev);
|
||||
|
||||
|
@ -2051,7 +2059,6 @@ mpt_detach(struct pci_dev *pdev)
|
|||
|
||||
mpt_adapter_dispose(ioc);
|
||||
|
||||
pci_set_drvdata(pdev, NULL);
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
|
@ -5062,8 +5069,9 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
|
|||
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
|
||||
goto out;
|
||||
if (!timeleft) {
|
||||
printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
|
||||
ioc->name, __func__);
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"Issuing Reset from %s!!, doorbell=0x%08x\n",
|
||||
ioc->name, __func__, mpt_GetIocState(ioc, 0));
|
||||
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
|
||||
mpt_free_msg_frame(ioc, mf);
|
||||
}
|
||||
|
@ -6454,8 +6462,9 @@ out:
|
|||
mutex_unlock(&ioc->mptbase_cmds.mutex);
|
||||
if (issue_hard_reset) {
|
||||
issue_hard_reset = 0;
|
||||
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
|
||||
ioc->name, __func__);
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"Issuing Reset from %s!!, doorbell=0x%08x\n",
|
||||
ioc->name, __func__, mpt_GetIocState(ioc, 0));
|
||||
if (retry_count == 0) {
|
||||
if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
|
||||
retry_count++;
|
||||
|
@ -6971,6 +6980,7 @@ mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
|
|||
|
||||
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
|
||||
if (ioc->taskmgmt_in_progress) {
|
||||
ioc->ioc_reset_in_progress = 0;
|
||||
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
|
||||
return -1;
|
||||
}
|
||||
|
@ -7144,7 +7154,8 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
|
|||
rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
|
||||
if (rc != 0) {
|
||||
printk(KERN_WARNING MYNAM
|
||||
": WARNING - (%d) Cannot recover %s\n", rc, ioc->name);
|
||||
": WARNING - (%d) Cannot recover %s, doorbell=0x%08x\n",
|
||||
rc, ioc->name, mpt_GetIocState(ioc, 0));
|
||||
} else {
|
||||
if (ioc->hard_resets < -1)
|
||||
ioc->hard_resets++;
|
||||
|
@ -7997,7 +8008,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
|
|||
* Refer to lsi/mpi_log_sas.h.
|
||||
**/
|
||||
static void
|
||||
mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info)
|
||||
mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
|
||||
{
|
||||
union loginfo_type {
|
||||
u32 loginfo;
|
||||
|
@ -8051,21 +8062,22 @@ union loginfo_type {
|
|||
if (sub_code_desc != NULL)
|
||||
printk(MYIOC_s_INFO_FMT
|
||||
"LogInfo(0x%08x): Originator={%s}, Code={%s},"
|
||||
" SubCode={%s}\n",
|
||||
" SubCode={%s} cb_idx %s\n",
|
||||
ioc->name, log_info, originator_desc, code_desc,
|
||||
sub_code_desc);
|
||||
sub_code_desc, MptCallbacksName[cb_idx]);
|
||||
else if (code_desc != NULL)
|
||||
printk(MYIOC_s_INFO_FMT
|
||||
"LogInfo(0x%08x): Originator={%s}, Code={%s},"
|
||||
" SubCode(0x%04x)\n",
|
||||
" SubCode(0x%04x) cb_idx %s\n",
|
||||
ioc->name, log_info, originator_desc, code_desc,
|
||||
sas_loginfo.dw.subcode);
|
||||
sas_loginfo.dw.subcode, MptCallbacksName[cb_idx]);
|
||||
else
|
||||
printk(MYIOC_s_INFO_FMT
|
||||
"LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
|
||||
" SubCode(0x%04x)\n",
|
||||
" SubCode(0x%04x) cb_idx %s\n",
|
||||
ioc->name, log_info, originator_desc,
|
||||
sas_loginfo.dw.code, sas_loginfo.dw.subcode);
|
||||
sas_loginfo.dw.code, sas_loginfo.dw.subcode,
|
||||
MptCallbacksName[cb_idx]);
|
||||
}
|
||||
|
||||
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
|
||||
|
@ -8430,7 +8442,8 @@ fusion_init(void)
|
|||
/* Register ourselves (mptbase) in order to facilitate
|
||||
* EventNotification handling.
|
||||
*/
|
||||
mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER);
|
||||
mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER,
|
||||
"mptbase_reply");
|
||||
|
||||
/* Register for hard reset handling callbacks.
|
||||
*/
|
||||
|
|
|
@ -76,8 +76,8 @@
|
|||
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
|
||||
#endif
|
||||
|
||||
#define MPT_LINUX_VERSION_COMMON "3.04.15"
|
||||
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.15"
|
||||
#define MPT_LINUX_VERSION_COMMON "3.04.17"
|
||||
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17"
|
||||
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
|
||||
|
||||
#define show_mptmod_ver(s,ver) \
|
||||
|
@ -396,6 +396,8 @@ typedef struct _VirtTarget {
|
|||
u8 raidVolume; /* set, if RAID Volume */
|
||||
u8 type; /* byte 0 of Inquiry data */
|
||||
u8 deleted; /* target in process of being removed */
|
||||
u8 inDMD; /* currently in the device
|
||||
removal delay timer */
|
||||
u32 num_luns;
|
||||
} VirtTarget;
|
||||
|
||||
|
@ -580,6 +582,7 @@ struct mptfc_rport_info
|
|||
typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
|
||||
typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
|
||||
dma_addr_t dma_addr);
|
||||
typedef void (*MPT_SCHEDULE_TARGET_RESET)(void *ioc);
|
||||
|
||||
/*
|
||||
* Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
|
||||
|
@ -601,7 +604,7 @@ typedef struct _MPT_ADAPTER
|
|||
u16 nvdata_version_default;
|
||||
int debug_level;
|
||||
u8 io_missing_delay;
|
||||
u8 device_missing_delay;
|
||||
u16 device_missing_delay;
|
||||
SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */
|
||||
SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */
|
||||
u8 bus_type;
|
||||
|
@ -738,6 +741,7 @@ typedef struct _MPT_ADAPTER
|
|||
int taskmgmt_in_progress;
|
||||
u8 taskmgmt_quiesce_io;
|
||||
u8 ioc_reset_in_progress;
|
||||
MPT_SCHEDULE_TARGET_RESET schedule_target_reset;
|
||||
struct work_struct sas_persist_task;
|
||||
|
||||
struct work_struct fc_setup_reset_work;
|
||||
|
@ -922,7 +926,8 @@ extern void mpt_detach(struct pci_dev *pdev);
|
|||
extern int mpt_suspend(struct pci_dev *pdev, pm_message_t state);
|
||||
extern int mpt_resume(struct pci_dev *pdev);
|
||||
#endif
|
||||
extern u8 mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass);
|
||||
extern u8 mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass,
|
||||
char *func_name);
|
||||
extern void mpt_deregister(u8 cb_idx);
|
||||
extern int mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc);
|
||||
extern void mpt_event_deregister(u8 cb_idx);
|
||||
|
|
|
@ -261,10 +261,16 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
|
|||
/* We are done, issue wake up
|
||||
*/
|
||||
if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
|
||||
if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT)
|
||||
if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
|
||||
mpt_clear_taskmgmt_in_progress_flag(ioc);
|
||||
ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
|
||||
complete(&ioc->ioctl_cmds.done);
|
||||
ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
|
||||
complete(&ioc->ioctl_cmds.done);
|
||||
if (ioc->bus_type == SAS)
|
||||
ioc->schedule_target_reset(ioc);
|
||||
} else {
|
||||
ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
|
||||
complete(&ioc->ioctl_cmds.done);
|
||||
}
|
||||
}
|
||||
|
||||
out_continuation:
|
||||
|
@ -298,6 +304,8 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
|||
mpt_clear_taskmgmt_in_progress_flag(ioc);
|
||||
ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
|
||||
complete(&ioc->taskmgmt_cmds.done);
|
||||
if (ioc->bus_type == SAS)
|
||||
ioc->schedule_target_reset(ioc);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
|
@ -946,9 +954,12 @@ retry_wait:
|
|||
mpt_free_msg_frame(iocp, mf);
|
||||
goto fwdl_out;
|
||||
}
|
||||
if (!timeleft)
|
||||
if (!timeleft) {
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"FW download timeout, doorbell=0x%08x\n",
|
||||
iocp->name, mpt_GetIocState(iocp, 0));
|
||||
mptctl_timeout_expired(iocp, mf);
|
||||
else
|
||||
} else
|
||||
goto retry_wait;
|
||||
goto fwdl_out;
|
||||
}
|
||||
|
@ -2293,6 +2304,10 @@ retry_wait:
|
|||
goto done_free_mem;
|
||||
}
|
||||
if (!timeleft) {
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"mpt cmd timeout, doorbell=0x%08x"
|
||||
" function=0x%x\n",
|
||||
ioc->name, mpt_GetIocState(ioc, 0), function);
|
||||
if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
|
||||
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
|
||||
mptctl_timeout_expired(ioc, mf);
|
||||
|
@ -2600,9 +2615,12 @@ retry_wait:
|
|||
mpt_free_msg_frame(ioc, mf);
|
||||
goto out;
|
||||
}
|
||||
if (!timeleft)
|
||||
if (!timeleft) {
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"HOST INFO command timeout, doorbell=0x%08x\n",
|
||||
ioc->name, mpt_GetIocState(ioc, 0));
|
||||
mptctl_timeout_expired(ioc, mf);
|
||||
else
|
||||
} else
|
||||
goto retry_wait;
|
||||
goto out;
|
||||
}
|
||||
|
@ -3000,7 +3018,8 @@ static int __init mptctl_init(void)
|
|||
* Install our handler
|
||||
*/
|
||||
++where;
|
||||
mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER);
|
||||
mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER,
|
||||
"mptctl_reply");
|
||||
if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) {
|
||||
printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
|
||||
misc_deregister(&mptctl_miscdev);
|
||||
|
@ -3008,7 +3027,8 @@ static int __init mptctl_init(void)
|
|||
goto out_fail;
|
||||
}
|
||||
|
||||
mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
|
||||
mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER,
|
||||
"mptctl_taskmgmt_reply");
|
||||
if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
|
||||
printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
|
||||
mpt_deregister(mptctl_id);
|
||||
|
|
|
@ -1472,9 +1472,12 @@ mptfc_init(void)
|
|||
if (!mptfc_transport_template)
|
||||
return -ENODEV;
|
||||
|
||||
mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER);
|
||||
mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER);
|
||||
mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER);
|
||||
mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER,
|
||||
"mptscsih_scandv_complete");
|
||||
mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER,
|
||||
"mptscsih_scandv_complete");
|
||||
mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER,
|
||||
"mptscsih_scandv_complete");
|
||||
|
||||
mpt_event_register(mptfcDoneCtx, mptfc_event_process);
|
||||
mpt_reset_register(mptfcDoneCtx, mptfc_ioc_reset);
|
||||
|
|
|
@ -1452,7 +1452,9 @@ static int __init mpt_lan_init (void)
|
|||
{
|
||||
show_mptmod_ver(LANAME, LANVER);
|
||||
|
||||
if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
|
||||
LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
|
||||
"lan_reply");
|
||||
if (LanCtx <= 0) {
|
||||
printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
|
|
@ -57,6 +57,7 @@
|
|||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_transport_sas.h>
|
||||
#include <scsi/scsi_transport.h>
|
||||
#include <scsi/scsi_dbg.h>
|
||||
|
||||
#include "mptbase.h"
|
||||
|
@ -126,6 +127,7 @@ static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
|
|||
static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
|
||||
static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
|
||||
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
|
||||
void mptsas_schedule_target_reset(void *ioc);
|
||||
|
||||
static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
|
||||
MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
|
||||
|
@ -1138,6 +1140,44 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* mptsas_schedule_target_reset- send pending target reset
|
||||
* @iocp: per adapter object
|
||||
*
|
||||
* This function will delete scheduled target reset from the list and
|
||||
* try to send next target reset. This will be called from completion
|
||||
* context of any Task managment command.
|
||||
*/
|
||||
|
||||
void
|
||||
mptsas_schedule_target_reset(void *iocp)
|
||||
{
|
||||
MPT_ADAPTER *ioc = (MPT_ADAPTER *)(iocp);
|
||||
MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
|
||||
struct list_head *head = &hd->target_reset_list;
|
||||
struct mptsas_target_reset_event *target_reset_list;
|
||||
u8 id, channel;
|
||||
/*
|
||||
* issue target reset to next device in the queue
|
||||
*/
|
||||
|
||||
head = &hd->target_reset_list;
|
||||
if (list_empty(head))
|
||||
return;
|
||||
|
||||
target_reset_list = list_entry(head->next,
|
||||
struct mptsas_target_reset_event, list);
|
||||
|
||||
id = target_reset_list->sas_event_data.TargetID;
|
||||
channel = target_reset_list->sas_event_data.Bus;
|
||||
target_reset_list->time_count = jiffies;
|
||||
|
||||
if (mptsas_target_reset(ioc, channel, id))
|
||||
target_reset_list->target_reset_issued = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* mptsas_taskmgmt_complete - complete SAS task management function
|
||||
* @ioc: Pointer to MPT_ADAPTER structure
|
||||
|
@ -1222,28 +1262,12 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
|||
* enable work queue to remove device from upper layers
|
||||
*/
|
||||
list_del(&target_reset_list->list);
|
||||
if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off)
|
||||
if (!ioc->fw_events_off)
|
||||
mptsas_queue_device_delete(ioc,
|
||||
&target_reset_list->sas_event_data);
|
||||
|
||||
|
||||
/*
|
||||
* issue target reset to next device in the queue
|
||||
*/
|
||||
|
||||
head = &hd->target_reset_list;
|
||||
if (list_empty(head))
|
||||
return 1;
|
||||
|
||||
target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
|
||||
list);
|
||||
|
||||
id = target_reset_list->sas_event_data.TargetID;
|
||||
channel = target_reset_list->sas_event_data.Bus;
|
||||
target_reset_list->time_count = jiffies;
|
||||
|
||||
if (mptsas_target_reset(ioc, channel, id))
|
||||
target_reset_list->target_reset_issued = 1;
|
||||
ioc->schedule_target_reset(ioc);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -1889,6 +1913,48 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
|
|||
return mptscsih_qcmd(SCpnt,done);
|
||||
}
|
||||
|
||||
/**
|
||||
* mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout
|
||||
* if the device under question is currently in the
|
||||
* device removal delay.
|
||||
* @sc: scsi command that the midlayer is about to time out
|
||||
*
|
||||
**/
|
||||
static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc)
|
||||
{
|
||||
MPT_SCSI_HOST *hd;
|
||||
MPT_ADAPTER *ioc;
|
||||
VirtDevice *vdevice;
|
||||
enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
|
||||
|
||||
hd = shost_priv(sc->device->host);
|
||||
if (hd == NULL) {
|
||||
printk(KERN_ERR MYNAM ": %s: Can't locate host! (sc=%p)\n",
|
||||
__func__, sc);
|
||||
goto done;
|
||||
}
|
||||
|
||||
ioc = hd->ioc;
|
||||
if (ioc->bus_type != SAS) {
|
||||
printk(KERN_ERR MYNAM ": %s: Wrong bus type (sc=%p)\n",
|
||||
__func__, sc);
|
||||
goto done;
|
||||
}
|
||||
|
||||
vdevice = sc->device->hostdata;
|
||||
if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD
|
||||
|| vdevice->vtarget->deleted)) {
|
||||
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: target removed "
|
||||
"or in device removal delay (sc=%p)\n",
|
||||
ioc->name, __func__, sc));
|
||||
rc = BLK_EH_RESET_TIMER;
|
||||
goto done;
|
||||
}
|
||||
|
||||
done:
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
static struct scsi_host_template mptsas_driver_template = {
|
||||
.module = THIS_MODULE,
|
||||
|
@ -2364,7 +2430,7 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
|
|||
SasIOUnitPage1_t *buffer;
|
||||
dma_addr_t dma_handle;
|
||||
int error;
|
||||
u16 device_missing_delay;
|
||||
u8 device_missing_delay;
|
||||
|
||||
memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
|
||||
memset(&cfg, 0, sizeof(CONFIGPARMS));
|
||||
|
@ -2401,7 +2467,7 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
|
|||
|
||||
ioc->io_missing_delay =
|
||||
le16_to_cpu(buffer->IODeviceMissingDelay);
|
||||
device_missing_delay = le16_to_cpu(buffer->ReportDeviceMissingDelay);
|
||||
device_missing_delay = buffer->ReportDeviceMissingDelay;
|
||||
ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
|
||||
(device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
|
||||
device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
|
||||
|
@ -2549,6 +2615,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
|
|||
device_info->sas_address = le64_to_cpu(sas_address);
|
||||
device_info->device_info =
|
||||
le32_to_cpu(buffer->DeviceInfo);
|
||||
device_info->flags = le16_to_cpu(buffer->Flags);
|
||||
|
||||
out_free_consistent:
|
||||
pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
|
||||
|
@ -2960,6 +3027,7 @@ static int mptsas_probe_one_phy(struct device *dev,
|
|||
struct sas_phy *phy;
|
||||
struct sas_port *port;
|
||||
int error = 0;
|
||||
VirtTarget *vtarget;
|
||||
|
||||
if (!dev) {
|
||||
error = -ENODEV;
|
||||
|
@ -3182,6 +3250,16 @@ static int mptsas_probe_one_phy(struct device *dev,
|
|||
rphy_to_expander_device(rphy));
|
||||
}
|
||||
|
||||
/* If the device exists,verify it wasn't previously flagged
|
||||
as a missing device. If so, clear it */
|
||||
vtarget = mptsas_find_vtarget(ioc,
|
||||
phy_info->attached.channel,
|
||||
phy_info->attached.id);
|
||||
if (vtarget && vtarget->inDMD) {
|
||||
printk(KERN_INFO "Device returned, unsetting inDMD\n");
|
||||
vtarget->inDMD = 0;
|
||||
}
|
||||
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
@ -3635,9 +3713,42 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
|
|||
MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
|
||||
phy_info->phy->negotiated_linkrate =
|
||||
SAS_LINK_RATE_FAILED;
|
||||
else
|
||||
else {
|
||||
phy_info->phy->negotiated_linkrate =
|
||||
SAS_LINK_RATE_UNKNOWN;
|
||||
if (ioc->device_missing_delay &&
|
||||
mptsas_is_end_device(&phy_info->attached)) {
|
||||
struct scsi_device *sdev;
|
||||
VirtDevice *vdevice;
|
||||
u8 channel, id;
|
||||
id = phy_info->attached.id;
|
||||
channel = phy_info->attached.channel;
|
||||
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
|
||||
"Link down for fw_id %d:fw_channel %d\n",
|
||||
ioc->name, phy_info->attached.id,
|
||||
phy_info->attached.channel));
|
||||
|
||||
shost_for_each_device(sdev, ioc->sh) {
|
||||
vdevice = sdev->hostdata;
|
||||
if ((vdevice == NULL) ||
|
||||
(vdevice->vtarget == NULL))
|
||||
continue;
|
||||
if ((vdevice->vtarget->tflags &
|
||||
MPT_TARGET_FLAGS_RAID_COMPONENT ||
|
||||
vdevice->vtarget->raidVolume))
|
||||
continue;
|
||||
if (vdevice->vtarget->id == id &&
|
||||
vdevice->vtarget->channel ==
|
||||
channel)
|
||||
devtprintk(ioc,
|
||||
printk(MYIOC_s_DEBUG_FMT
|
||||
"SDEV OUTSTANDING CMDS"
|
||||
"%d\n", ioc->name,
|
||||
sdev->device_busy));
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
out:
|
||||
mptsas_free_fw_event(ioc, fw_event);
|
||||
|
@ -3840,6 +3951,13 @@ mptsas_probe_devices(MPT_ADAPTER *ioc)
|
|||
MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
|
||||
continue;
|
||||
|
||||
/* If there is no FW B_T mapping for this device then continue
|
||||
* */
|
||||
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|
||||
|| !(sas_device.flags &
|
||||
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
|
||||
continue;
|
||||
|
||||
phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
|
||||
if (!phy_info)
|
||||
continue;
|
||||
|
@ -4149,6 +4267,14 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
|
|||
phys_disk.PhysDiskID))
|
||||
continue;
|
||||
|
||||
/* If there is no FW B_T mapping for this device then continue
|
||||
* */
|
||||
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|
||||
|| !(sas_device.flags &
|
||||
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
|
||||
continue;
|
||||
|
||||
|
||||
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
|
||||
sas_device.sas_address);
|
||||
mptsas_add_end_device(ioc, phy_info);
|
||||
|
@ -4171,6 +4297,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
|
|||
struct mptsas_devinfo sas_device;
|
||||
VirtTarget *vtarget;
|
||||
int i;
|
||||
struct mptsas_portinfo *port_info;
|
||||
|
||||
switch (hot_plug_info->event_type) {
|
||||
|
||||
|
@ -4199,12 +4326,47 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
|
|||
(hot_plug_info->channel << 8) +
|
||||
hot_plug_info->id);
|
||||
|
||||
/* If there is no FW B_T mapping for this device then break
|
||||
* */
|
||||
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|
||||
|| !(sas_device.flags &
|
||||
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
|
||||
break;
|
||||
|
||||
if (!sas_device.handle)
|
||||
return;
|
||||
|
||||
phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
|
||||
if (!phy_info)
|
||||
/* Only For SATA Device ADD */
|
||||
if (!phy_info && (sas_device.device_info &
|
||||
MPI_SAS_DEVICE_INFO_SATA_DEVICE)) {
|
||||
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
|
||||
"%s %d SATA HOT PLUG: "
|
||||
"parent handle of device %x\n", ioc->name,
|
||||
__func__, __LINE__, sas_device.handle_parent));
|
||||
port_info = mptsas_find_portinfo_by_handle(ioc,
|
||||
sas_device.handle_parent);
|
||||
|
||||
if (port_info == ioc->hba_port_info)
|
||||
mptsas_probe_hba_phys(ioc);
|
||||
else if (port_info)
|
||||
mptsas_expander_refresh(ioc, port_info);
|
||||
else {
|
||||
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
|
||||
"%s %d port info is NULL\n",
|
||||
ioc->name, __func__, __LINE__));
|
||||
break;
|
||||
}
|
||||
phy_info = mptsas_refreshing_device_handles
|
||||
(ioc, &sas_device);
|
||||
}
|
||||
|
||||
if (!phy_info) {
|
||||
dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
|
||||
"%s %d phy info is NULL\n",
|
||||
ioc->name, __func__, __LINE__));
|
||||
break;
|
||||
}
|
||||
|
||||
if (mptsas_get_rphy(phy_info))
|
||||
break;
|
||||
|
@ -4241,6 +4403,13 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
|
|||
break;
|
||||
}
|
||||
|
||||
/* If there is no FW B_T mapping for this device then break
|
||||
* */
|
||||
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|
||||
|| !(sas_device.flags &
|
||||
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
|
||||
break;
|
||||
|
||||
phy_info = mptsas_find_phyinfo_by_sas_address(
|
||||
ioc, sas_device.sas_address);
|
||||
|
||||
|
@ -4294,6 +4463,13 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
|
|||
break;
|
||||
}
|
||||
|
||||
/* If there is no FW B_T mapping for this device then break
|
||||
* */
|
||||
if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
|
||||
|| !(sas_device.flags &
|
||||
MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
|
||||
break;
|
||||
|
||||
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
|
||||
sas_device.sas_address);
|
||||
if (!phy_info) {
|
||||
|
@ -4727,8 +4903,9 @@ mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
|
|||
mutex_unlock(&ioc->taskmgmt_cmds.mutex);
|
||||
|
||||
if (issue_reset) {
|
||||
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
|
||||
ioc->name, __func__);
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"Issuing Reset from %s!! doorbell=0x%08x\n",
|
||||
ioc->name, __func__, mpt_GetIocState(ioc, 0));
|
||||
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
|
||||
}
|
||||
mptsas_free_fw_event(ioc, fw_event);
|
||||
|
@ -4816,12 +4993,47 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
|
|||
{
|
||||
EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
|
||||
(EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
|
||||
u16 ioc_stat;
|
||||
ioc_stat = le16_to_cpu(reply->IOCStatus);
|
||||
|
||||
if (sas_event_data->ReasonCode ==
|
||||
MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
|
||||
mptsas_target_reset_queue(ioc, sas_event_data);
|
||||
return 0;
|
||||
}
|
||||
if (sas_event_data->ReasonCode ==
|
||||
MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
|
||||
ioc->device_missing_delay &&
|
||||
(ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
|
||||
VirtTarget *vtarget = NULL;
|
||||
u8 id, channel;
|
||||
u32 log_info = le32_to_cpu(reply->IOCLogInfo);
|
||||
|
||||
id = sas_event_data->TargetID;
|
||||
channel = sas_event_data->Bus;
|
||||
|
||||
vtarget = mptsas_find_vtarget(ioc, channel, id);
|
||||
if (vtarget) {
|
||||
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
|
||||
"LogInfo (0x%x) available for "
|
||||
"INTERNAL_DEVICE_RESET"
|
||||
"fw_id %d fw_channel %d\n", ioc->name,
|
||||
log_info, id, channel));
|
||||
if (vtarget->raidVolume) {
|
||||
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
|
||||
"Skipping Raid Volume for inDMD\n",
|
||||
ioc->name));
|
||||
} else {
|
||||
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
|
||||
"Setting device flag inDMD\n",
|
||||
ioc->name));
|
||||
vtarget->inDMD = 1;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
|
||||
|
@ -4924,7 +5136,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
ioc->DoneCtx = mptsasDoneCtx;
|
||||
ioc->TaskCtx = mptsasTaskCtx;
|
||||
ioc->InternalCtx = mptsasInternalCtx;
|
||||
|
||||
ioc->schedule_target_reset = &mptsas_schedule_target_reset;
|
||||
/* Added sanity check on readiness of the MPT adapter.
|
||||
*/
|
||||
if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
|
||||
|
@ -5154,14 +5366,20 @@ mptsas_init(void)
|
|||
sas_attach_transport(&mptsas_transport_functions);
|
||||
if (!mptsas_transport_template)
|
||||
return -ENODEV;
|
||||
mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out;
|
||||
|
||||
mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
|
||||
mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
|
||||
mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
|
||||
"mptscsih_io_done");
|
||||
mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER,
|
||||
"mptscsih_taskmgmt_complete");
|
||||
mptsasInternalCtx =
|
||||
mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
|
||||
mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
|
||||
mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER,
|
||||
"mptscsih_scandv_complete");
|
||||
mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER,
|
||||
"mptsas_mgmt_done");
|
||||
mptsasDeviceResetCtx =
|
||||
mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
|
||||
mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER,
|
||||
"mptsas_taskmgmt_complete");
|
||||
|
||||
mpt_event_register(mptsasDoneCtx, mptsas_event_process);
|
||||
mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
|
||||
|
|
|
@ -140,6 +140,7 @@ struct mptsas_devinfo {
|
|||
u64 sas_address; /* WWN of this device,
|
||||
SATA is assigned by HBA,expander */
|
||||
u32 device_info; /* bitfield detailed info about this device */
|
||||
u16 flags; /* sas device pg0 flags */
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -664,6 +664,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
|||
u32 log_info;
|
||||
|
||||
status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
|
||||
|
||||
scsi_state = pScsiReply->SCSIState;
|
||||
scsi_status = pScsiReply->SCSIStatus;
|
||||
xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
|
||||
|
@ -738,13 +739,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
|||
|
||||
case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
|
||||
if ( ioc->bus_type == SAS ) {
|
||||
u16 ioc_status = le16_to_cpu(pScsiReply->IOCStatus);
|
||||
if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
|
||||
if ((log_info & SAS_LOGINFO_MASK)
|
||||
== SAS_LOGINFO_NEXUS_LOSS) {
|
||||
sc->result = (DID_BUS_BUSY << 16);
|
||||
break;
|
||||
}
|
||||
u16 ioc_status =
|
||||
le16_to_cpu(pScsiReply->IOCStatus);
|
||||
if ((ioc_status &
|
||||
MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
|
||||
&&
|
||||
((log_info & SAS_LOGINFO_MASK) ==
|
||||
SAS_LOGINFO_NEXUS_LOSS)) {
|
||||
VirtDevice *vdevice =
|
||||
sc->device->hostdata;
|
||||
|
||||
/* flag the device as being in
|
||||
* device removal delay so we can
|
||||
* notify the midlayer to hold off
|
||||
* on timeout eh */
|
||||
if (vdevice && vdevice->
|
||||
vtarget &&
|
||||
vdevice->vtarget->
|
||||
raidVolume)
|
||||
printk(KERN_INFO
|
||||
"Skipping Raid Volume"
|
||||
"for inDMD\n");
|
||||
else if (vdevice &&
|
||||
vdevice->vtarget)
|
||||
vdevice->vtarget->
|
||||
inDMD = 1;
|
||||
|
||||
sc->result =
|
||||
(DID_TRANSPORT_DISRUPTED
|
||||
<< 16);
|
||||
break;
|
||||
}
|
||||
} else if (ioc->bus_type == FC) {
|
||||
/*
|
||||
|
@ -1704,8 +1728,9 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
|
|||
|
||||
CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
|
||||
if (issue_hard_reset) {
|
||||
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
|
||||
ioc->name, __func__);
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"Issuing Reset from %s!! doorbell=0x%08x\n",
|
||||
ioc->name, __func__, mpt_GetIocState(ioc, 0));
|
||||
retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
|
||||
mpt_free_msg_frame(ioc, mf);
|
||||
}
|
||||
|
@ -2132,6 +2157,8 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
|
|||
mpt_clear_taskmgmt_in_progress_flag(ioc);
|
||||
ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
|
||||
complete(&ioc->taskmgmt_cmds.done);
|
||||
if (ioc->bus_type == SAS)
|
||||
ioc->schedule_target_reset(ioc);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
|
@ -2459,6 +2486,8 @@ mptscsih_slave_configure(struct scsi_device *sdev)
|
|||
ioc->name,sdev->tagged_supported, sdev->simple_tags,
|
||||
sdev->ordered_tags));
|
||||
|
||||
blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3045,8 +3074,11 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
|
|||
goto out;
|
||||
}
|
||||
if (!timeleft) {
|
||||
printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
|
||||
ioc->name, __func__);
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"Issuing Reset from %s!! doorbell=0x%08xh"
|
||||
" cmd=0x%02x\n",
|
||||
ioc->name, __func__, mpt_GetIocState(ioc, 0),
|
||||
cmd);
|
||||
mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
|
||||
mpt_free_msg_frame(ioc, mf);
|
||||
}
|
||||
|
|
|
@ -1551,9 +1551,12 @@ mptspi_init(void)
|
|||
if (!mptspi_transport_template)
|
||||
return -ENODEV;
|
||||
|
||||
mptspiDoneCtx = mpt_register(mptscsih_io_done, MPTSPI_DRIVER);
|
||||
mptspiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSPI_DRIVER);
|
||||
mptspiInternalCtx = mpt_register(mptscsih_scandv_complete, MPTSPI_DRIVER);
|
||||
mptspiDoneCtx = mpt_register(mptscsih_io_done, MPTSPI_DRIVER,
|
||||
"mptscsih_io_done");
|
||||
mptspiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSPI_DRIVER,
|
||||
"mptscsih_taskmgmt_complete");
|
||||
mptspiInternalCtx = mpt_register(mptscsih_scandv_complete,
|
||||
MPTSPI_DRIVER, "mptscsih_scandv_complete");
|
||||
|
||||
mpt_event_register(mptspiDoneCtx, mptspi_event_process);
|
||||
mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset);
|
||||
|
|
|
@ -285,8 +285,11 @@ enclosure_component_register(struct enclosure_device *edev,
|
|||
cdev->groups = enclosure_groups;
|
||||
|
||||
err = device_register(cdev);
|
||||
if (err)
|
||||
ERR_PTR(err);
|
||||
if (err) {
|
||||
ecomp->number = -1;
|
||||
put_device(cdev);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
return ecomp;
|
||||
}
|
||||
|
|
|
@ -368,6 +368,8 @@ static void setup_qib(struct qdio_irq *irq_ptr,
|
|||
if (qebsm_possible())
|
||||
irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
|
||||
|
||||
irq_ptr->qib.rflags |= init_data->qib_rflags;
|
||||
|
||||
irq_ptr->qib.qfmt = init_data->q_format;
|
||||
if (init_data->no_input_qs)
|
||||
irq_ptr->qib.isliba =
|
||||
|
|
|
@ -98,13 +98,11 @@ static void __init zfcp_init_device_setup(char *devstr)
|
|||
u64 wwpn, lun;
|
||||
|
||||
/* duplicate devstr and keep the original for sysfs presentation*/
|
||||
str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
|
||||
str_saved = kstrdup(devstr, GFP_KERNEL);
|
||||
str = str_saved;
|
||||
if (!str)
|
||||
return;
|
||||
|
||||
strcpy(str, devstr);
|
||||
|
||||
token = strsep(&str, ",");
|
||||
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
|
||||
goto err_out;
|
||||
|
@ -314,7 +312,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
|
|||
}
|
||||
retval = -EINVAL;
|
||||
|
||||
INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
|
||||
INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
|
||||
|
||||
spin_lock_init(&unit->latencies.lock);
|
||||
unit->latencies.write.channel.min = 0xFFFFFFFF;
|
||||
|
@ -526,6 +524,10 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
|
|||
rwlock_init(&adapter->port_list_lock);
|
||||
INIT_LIST_HEAD(&adapter->port_list);
|
||||
|
||||
INIT_LIST_HEAD(&adapter->events.list);
|
||||
INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
|
||||
spin_lock_init(&adapter->events.list_lock);
|
||||
|
||||
init_waitqueue_head(&adapter->erp_ready_wq);
|
||||
init_waitqueue_head(&adapter->erp_done_wqh);
|
||||
|
||||
|
|
|
@ -189,18 +189,12 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
|
|||
if (!fsf_cfdc)
|
||||
return -ENOMEM;
|
||||
|
||||
data = kmalloc(sizeof(struct zfcp_cfdc_data), GFP_KERNEL);
|
||||
if (!data) {
|
||||
retval = -ENOMEM;
|
||||
data = memdup_user(data_user, sizeof(*data_user));
|
||||
if (IS_ERR(data)) {
|
||||
retval = PTR_ERR(data);
|
||||
goto no_mem_sense;
|
||||
}
|
||||
|
||||
retval = copy_from_user(data, data_user, sizeof(*data));
|
||||
if (retval) {
|
||||
retval = -EFAULT;
|
||||
goto free_buffer;
|
||||
}
|
||||
|
||||
if (data->signature != 0xCFDCACDF) {
|
||||
retval = -EINVAL;
|
||||
goto free_buffer;
|
||||
|
|
|
@ -155,6 +155,8 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
|
|||
if (scsi_cmnd) {
|
||||
response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
|
||||
response->u.fcp.serial = scsi_cmnd->serial_number;
|
||||
response->u.fcp.data_dir =
|
||||
qtcb->bottom.io.data_direction;
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -326,6 +328,7 @@ static void zfcp_dbf_hba_view_response(char **p,
|
|||
case FSF_QTCB_FCP_CMND:
|
||||
if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
|
||||
break;
|
||||
zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
|
||||
zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
|
||||
zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
|
||||
*p += sprintf(*p, "\n");
|
||||
|
@ -1005,7 +1008,7 @@ int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
|
|||
char dbf_name[DEBUG_MAX_NAME_LEN];
|
||||
struct zfcp_dbf *dbf;
|
||||
|
||||
dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
|
||||
dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
|
||||
if (!dbf)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -111,6 +111,7 @@ struct zfcp_dbf_hba_record_response {
|
|||
struct {
|
||||
u64 cmnd;
|
||||
u64 serial;
|
||||
u32 data_dir;
|
||||
} fcp;
|
||||
struct {
|
||||
u64 wwpn;
|
||||
|
|
|
@ -37,6 +37,7 @@
|
|||
#include <asm/ebcdic.h>
|
||||
#include <asm/sysinfo.h>
|
||||
#include "zfcp_fsf.h"
|
||||
#include "zfcp_fc.h"
|
||||
#include "zfcp_qdio.h"
|
||||
|
||||
struct zfcp_reqlist;
|
||||
|
@ -72,10 +73,12 @@ struct zfcp_reqlist;
|
|||
|
||||
/* adapter status */
|
||||
#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
|
||||
#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
|
||||
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
|
||||
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
|
||||
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
|
||||
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
|
||||
#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
|
||||
|
||||
/* remote port status */
|
||||
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
|
||||
|
@ -190,6 +193,7 @@ struct zfcp_adapter {
|
|||
struct service_level service_level;
|
||||
struct workqueue_struct *work_queue;
|
||||
struct device_dma_parameters dma_parms;
|
||||
struct zfcp_fc_events events;
|
||||
};
|
||||
|
||||
struct zfcp_port {
|
||||
|
@ -212,6 +216,7 @@ struct zfcp_port {
|
|||
struct work_struct test_link_work;
|
||||
struct work_struct rport_work;
|
||||
enum { RPORT_NONE, RPORT_ADD, RPORT_DEL } rport_task;
|
||||
unsigned int starget_id;
|
||||
};
|
||||
|
||||
struct zfcp_unit {
|
||||
|
|
|
@ -141,8 +141,12 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
|
|||
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
|
||||
need = ZFCP_ERP_ACTION_REOPEN_PORT;
|
||||
/* fall through */
|
||||
case ZFCP_ERP_ACTION_REOPEN_PORT:
|
||||
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
|
||||
p_status = atomic_read(&port->status);
|
||||
if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
|
||||
need = ZFCP_ERP_ACTION_REOPEN_PORT;
|
||||
/* fall through */
|
||||
case ZFCP_ERP_ACTION_REOPEN_PORT:
|
||||
p_status = atomic_read(&port->status);
|
||||
if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
|
||||
return 0;
|
||||
|
@ -893,8 +897,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
|
|||
}
|
||||
if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
|
||||
port->d_id = 0;
|
||||
_zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
|
||||
return ZFCP_ERP_EXIT;
|
||||
return ZFCP_ERP_FAILED;
|
||||
}
|
||||
/* fall through otherwise */
|
||||
}
|
||||
|
@ -1188,19 +1191,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
|
|||
|
||||
switch (act->action) {
|
||||
case ZFCP_ERP_ACTION_REOPEN_UNIT:
|
||||
if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
|
||||
get_device(&unit->dev);
|
||||
if (scsi_queue_work(unit->port->adapter->scsi_host,
|
||||
&unit->scsi_work) <= 0)
|
||||
put_device(&unit->dev);
|
||||
}
|
||||
put_device(&unit->dev);
|
||||
break;
|
||||
|
||||
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
|
||||
case ZFCP_ERP_ACTION_REOPEN_PORT:
|
||||
if (result == ZFCP_ERP_SUCCEEDED)
|
||||
zfcp_scsi_schedule_rport_register(port);
|
||||
/* fall through */
|
||||
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
|
||||
put_device(&port->dev);
|
||||
break;
|
||||
|
||||
|
@ -1247,6 +1245,11 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
|
|||
goto unlock;
|
||||
}
|
||||
|
||||
if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
|
||||
retval = ZFCP_ERP_FAILED;
|
||||
goto check_target;
|
||||
}
|
||||
|
||||
zfcp_erp_action_to_running(erp_action);
|
||||
|
||||
/* no lock to allow for blocking operations */
|
||||
|
@ -1279,6 +1282,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
|
|||
goto unlock;
|
||||
}
|
||||
|
||||
check_target:
|
||||
retval = zfcp_erp_strategy_check_target(erp_action, retval);
|
||||
zfcp_erp_action_dequeue(erp_action);
|
||||
retval = zfcp_erp_strategy_statechange(erp_action, retval);
|
||||
|
|
|
@ -96,6 +96,9 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
|
|||
extern void zfcp_erp_timeout_handler(unsigned long);
|
||||
|
||||
/* zfcp_fc.c */
|
||||
extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
|
||||
enum fc_host_event_code event_code, u32);
|
||||
extern void zfcp_fc_post_event(struct work_struct *);
|
||||
extern void zfcp_fc_scan_ports(struct work_struct *);
|
||||
extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
|
||||
extern void zfcp_fc_port_did_lookup(struct work_struct *);
|
||||
|
@ -146,9 +149,10 @@ extern void zfcp_qdio_destroy(struct zfcp_qdio *);
|
|||
extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
|
||||
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
|
||||
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
|
||||
struct scatterlist *, int);
|
||||
struct scatterlist *);
|
||||
extern int zfcp_qdio_open(struct zfcp_qdio *);
|
||||
extern void zfcp_qdio_close(struct zfcp_qdio *);
|
||||
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
|
||||
|
||||
/* zfcp_scsi.c */
|
||||
extern struct zfcp_data zfcp_data;
|
||||
|
@ -159,7 +163,10 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
|
|||
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
|
||||
extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
|
||||
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
|
||||
extern void zfcp_scsi_scan(struct work_struct *);
|
||||
extern void zfcp_scsi_scan(struct zfcp_unit *);
|
||||
extern void zfcp_scsi_scan_work(struct work_struct *);
|
||||
extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
|
||||
extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
|
||||
|
||||
/* zfcp_sysfs.c */
|
||||
extern struct attribute_group zfcp_sysfs_unit_attrs;
|
||||
|
|
|
@ -23,6 +23,58 @@ static u32 zfcp_fc_rscn_range_mask[] = {
|
|||
[ELS_ADDR_FMT_FAB] = 0x000000,
|
||||
};
|
||||
|
||||
/**
|
||||
* zfcp_fc_post_event - post event to userspace via fc_transport
|
||||
* @work: work struct with enqueued events
|
||||
*/
|
||||
void zfcp_fc_post_event(struct work_struct *work)
|
||||
{
|
||||
struct zfcp_fc_event *event = NULL, *tmp = NULL;
|
||||
LIST_HEAD(tmp_lh);
|
||||
struct zfcp_fc_events *events = container_of(work,
|
||||
struct zfcp_fc_events, work);
|
||||
struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
|
||||
events);
|
||||
|
||||
spin_lock_bh(&events->list_lock);
|
||||
list_splice_init(&events->list, &tmp_lh);
|
||||
spin_unlock_bh(&events->list_lock);
|
||||
|
||||
list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
|
||||
fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
|
||||
event->code, event->data);
|
||||
list_del(&event->list);
|
||||
kfree(event);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
|
||||
* @adapter: The adapter where to enqueue the event
|
||||
* @event_code: The event code (as defined in fc_host_event_code in
|
||||
* scsi_transport_fc.h)
|
||||
* @event_data: The event data (e.g. n_port page in case of els)
|
||||
*/
|
||||
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
|
||||
enum fc_host_event_code event_code, u32 event_data)
|
||||
{
|
||||
struct zfcp_fc_event *event;
|
||||
|
||||
event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
|
||||
if (!event)
|
||||
return;
|
||||
|
||||
event->code = event_code;
|
||||
event->data = event_data;
|
||||
|
||||
spin_lock(&adapter->events.list_lock);
|
||||
list_add_tail(&event->list, &adapter->events.list);
|
||||
spin_unlock(&adapter->events.list_lock);
|
||||
|
||||
queue_work(adapter->work_queue, &adapter->events.work);
|
||||
}
|
||||
|
||||
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
|
||||
{
|
||||
if (mutex_lock_interruptible(&wka_port->mutex))
|
||||
|
@ -148,6 +200,8 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
|
|||
afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
|
||||
_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
|
||||
page);
|
||||
zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
|
||||
*(u32 *)page);
|
||||
}
|
||||
queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
|
||||
}
|
||||
|
|
|
@ -29,6 +29,30 @@
|
|||
|
||||
#define ZFCP_FC_CTELS_TMO (2 * FC_DEF_R_A_TOV / 1000)
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_event - FC HBAAPI event for internal queueing from irq context
|
||||
* @code: Event code
|
||||
* @data: Event data
|
||||
* @list: list_head for zfcp_fc_events list
|
||||
*/
|
||||
struct zfcp_fc_event {
|
||||
enum fc_host_event_code code;
|
||||
u32 data;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_events - Infrastructure for posting FC events from irq context
|
||||
* @list: List for queueing of events from irq context to workqueue
|
||||
* @list_lock: Lock for event list
|
||||
* @work: work_struct for forwarding events in workqueue
|
||||
*/
|
||||
struct zfcp_fc_events {
|
||||
struct list_head list;
|
||||
spinlock_t list_lock;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
|
||||
* @ct_hdr: FC GS common transport header
|
||||
|
@ -196,6 +220,9 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
|
|||
memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
|
||||
|
||||
fcp->fc_dl = scsi_bufflen(scsi);
|
||||
|
||||
if (scsi_get_prot_type(scsi) == SCSI_PROT_DIF_TYPE1)
|
||||
fcp->fc_dl += fcp->fc_dl / scsi->device->sector_size * 8;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -21,6 +21,7 @@
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"fsrth_1", NULL);
}
@@ -274,6 +275,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
break;
case FSF_STATUS_READ_LINK_UP:
dev_info(&adapter->ccw_device->dev,
@@ -286,6 +288,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED,
"fssrh_2", req);
zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);

break;
case FSF_STATUS_READ_NOTIFICATION_LOST:
if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
@@ -323,6 +327,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
dev_err(&req->adapter->ccw_device->dev,
"The FCP adapter reported a problem "
"that cannot be recovered\n");
zfcp_qdio_siosl(req->adapter);
zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
break;
}
@@ -413,6 +418,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
dev_err(&adapter->ccw_device->dev,
"0x%x is not a valid transfer protocol status\n",
qtcb->prefix.prot_status);
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
}
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -495,7 +501,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;

adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);

@@ -523,6 +529,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
return -EIO;
}

zfcp_scsi_set_prot(adapter);

return 0;
}

@@ -732,7 +740,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)

zfcp_reqlist_add(adapter->req_list, req);

req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
req->issued = get_clock();
if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
@@ -959,8 +967,7 @@ static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,

static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp,
int max_sbals)
struct scatterlist *sg_resp)
{
struct zfcp_adapter *adapter = req->adapter;
u32 feat = adapter->adapter_features;
@@ -983,18 +990,19 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
return 0;
}

bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
sg_req, max_sbals);
bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req);
if (bytes <= 0)
return -EIO;
zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
req->qtcb->bottom.support.req_buf_length = bytes;
zfcp_qdio_skip_to_last_sbale(&req->qdio_req);

bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
sg_resp, max_sbals);
sg_resp);
req->qtcb->bottom.support.resp_buf_length = bytes;
if (bytes <= 0)
return -EIO;
zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);

return 0;
}

@@ -1002,11 +1010,11 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
struct scatterlist *sg_req,
struct scatterlist *sg_resp,
int max_sbals, unsigned int timeout)
unsigned int timeout)
{
int ret;

ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
if (ret)
return ret;

@@ -1046,8 +1054,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
}

req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
ZFCP_FSF_MAX_SBALS_PER_REQ, timeout);
ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
if (ret)
goto failed_send;

@@ -1143,7 +1150,10 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
}

req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2, timeout);

zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);

ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);

if (ret)
goto failed_send;
@@ -2025,7 +2035,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
blktrc.flags |= ZFCP_BLK_REQ_ERROR;
blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
blktrc.inb_usage = 0;
blktrc.outb_usage = req->qdio_req.qdio_outb_usage;

if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
@@ -2035,9 +2045,13 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
blktrc.fabric_lat = lat_in->fabric_lat * ticks;

switch (req->qtcb->bottom.io.data_direction) {
case FSF_DATADIR_DIF_READ_STRIP:
case FSF_DATADIR_DIF_READ_CONVERT:
case FSF_DATADIR_READ:
lat = &unit->latencies.read;
break;
case FSF_DATADIR_DIF_WRITE_INSERT:
case FSF_DATADIR_DIF_WRITE_CONVERT:
case FSF_DATADIR_WRITE:
lat = &unit->latencies.write;
break;
@@ -2078,6 +2092,21 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
goto skip_fsfstatus;
}

switch (req->qtcb->header.fsf_status) {
case FSF_INCONSISTENT_PROT_DATA:
case FSF_INVALID_PROT_PARM:
set_host_byte(scpnt, DID_ERROR);
goto skip_fsfstatus;
case FSF_BLOCK_GUARD_CHECK_FAILURE:
zfcp_scsi_dif_sense_error(scpnt, 0x1);
goto skip_fsfstatus;
case FSF_APP_TAG_CHECK_FAILURE:
zfcp_scsi_dif_sense_error(scpnt, 0x2);
goto skip_fsfstatus;
case FSF_REF_TAG_CHECK_FAILURE:
zfcp_scsi_dif_sense_error(scpnt, 0x3);
goto skip_fsfstatus;
}
fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);

@@ -2187,6 +2216,44 @@ skip_fsfstatus:
}
}

static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
{
switch (scsi_get_prot_op(scsi_cmnd)) {
case SCSI_PROT_NORMAL:
switch (scsi_cmnd->sc_data_direction) {
case DMA_NONE:
*data_dir = FSF_DATADIR_CMND;
break;
case DMA_FROM_DEVICE:
*data_dir = FSF_DATADIR_READ;
break;
case DMA_TO_DEVICE:
*data_dir = FSF_DATADIR_WRITE;
break;
case DMA_BIDIRECTIONAL:
return -EINVAL;
}
break;

case SCSI_PROT_READ_STRIP:
*data_dir = FSF_DATADIR_DIF_READ_STRIP;
break;
case SCSI_PROT_WRITE_INSERT:
*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
break;
case SCSI_PROT_READ_PASS:
*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
break;
case SCSI_PROT_WRITE_PASS:
*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
break;
default:
return -EINVAL;
}

return 0;
}

/**
* zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
* @unit: unit where command is sent to
@@ -2198,16 +2265,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
struct zfcp_fsf_req *req;
struct fcp_cmnd *fcp_cmnd;
unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
int real_bytes, retval = -EIO;
int real_bytes, retval = -EIO, dix_bytes = 0;
struct zfcp_adapter *adapter = unit->port->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
struct fsf_qtcb_bottom_io *io;

if (unlikely(!(atomic_read(&unit->status) &
ZFCP_STATUS_COMMON_UNBLOCKED)))
return -EBUSY;

spin_lock(&qdio->req_q_lock);
if (atomic_read(&qdio->req_q.count) <= 0) {
if (atomic_read(&qdio->req_q_free) <= 0) {
atomic_inc(&qdio->req_q_full);
goto out;
}
@@ -2223,57 +2291,46 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
}

scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

io = &req->qtcb->bottom.io;
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
req->qtcb->header.lun_handle = unit->handle;
req->qtcb->header.port_handle = unit->port->handle;
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
io->service_class = FSF_CLASS_3;
io->fcp_cmnd_length = FCP_CMND_LEN;

scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

/*
* set depending on data direction:
* data direction bits in SBALE (SB Type)
* data direction bits in QTCB
*/
switch (scsi_cmnd->sc_data_direction) {
case DMA_NONE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
break;
case DMA_FROM_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
break;
case DMA_TO_DEVICE:
req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
break;
case DMA_BIDIRECTIONAL:
goto failed_scsi_cmnd;
if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
io->data_block_length = scsi_cmnd->device->sector_size;
io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
}

zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);

get_device(&unit->dev);

fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);

real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_sglist(scsi_cmnd),
ZFCP_FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
if (req->qdio_req.sbal_number >= ZFCP_FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
(unsigned long long)unit->fcp_lun,
(unsigned long long)unit->port->wwpn);
zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
retval = -EINVAL;
}
goto failed_scsi_cmnd;
if (scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_prot_sglist(scsi_cmnd));
io->prot_data_length = dix_bytes;
}

real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
scsi_sglist(scsi_cmnd));

if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
goto failed_scsi_cmnd;

zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);

retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
goto failed_scsi_cmnd;
@@ -2391,13 +2448,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;

bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
fsf_cfdc->sg,
ZFCP_FSF_MAX_SBALS_PER_REQ);
bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);

if (bytes != ZFCP_CFDC_MAX_SIZE) {
zfcp_fsf_req_free(req);
goto out;
}
zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);

zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
@@ -2419,7 +2476,7 @@ out:
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
struct zfcp_adapter *adapter = qdio->adapter;
struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
unsigned long req_id;
@@ -2431,17 +2488,17 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
req_id = (unsigned long) sbale->addr;
fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);

if (!fsf_req)
if (!fsf_req) {
/*
* Unknown request means that we have potentially memory
* corruption and must stop the machine immediately.
*/
zfcp_qdio_siosl(adapter);
panic("error: unknown req_id (%lx) on adapter %s.\n",
req_id, dev_name(&adapter->ccw_device->dev));
}

fsf_req->qdio_req.sbal_response = sbal_idx;
fsf_req->qdio_req.qdio_inb_usage =
atomic_read(&qdio->resp_q.count);
zfcp_fsf_req_complete(fsf_req);

if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
@@ -80,11 +80,15 @@
#define FSF_REQUEST_SIZE_TOO_LARGE		0x00000061
#define FSF_RESPONSE_SIZE_TOO_LARGE		0x00000062
#define FSF_SBAL_MISMATCH			0x00000063
#define FSF_INCONSISTENT_PROT_DATA		0x00000070
#define FSF_INVALID_PROT_PARM			0x00000071
#define FSF_BLOCK_GUARD_CHECK_FAILURE		0x00000081
#define FSF_APP_TAG_CHECK_FAILURE		0x00000082
#define FSF_REF_TAG_CHECK_FAILURE		0x00000083
#define FSF_ADAPTER_STATUS_AVAILABLE		0x000000AD
#define FSF_UNKNOWN_COMMAND			0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE			0x000000E3
#define FSF_INVALID_COMMAND_OPTION		0x000000E5
/* #define FSF_ERROR				0x000000FF  */

#define FSF_PROT_STATUS_QUAL_SIZE		16
#define FSF_STATUS_QUALIFIER_SIZE		16
@@ -147,18 +151,17 @@
#define FSF_DATADIR_WRITE		0x00000001
#define FSF_DATADIR_READ		0x00000002
#define FSF_DATADIR_CMND		0x00000004
#define FSF_DATADIR_DIF_WRITE_INSERT	0x00000009
#define FSF_DATADIR_DIF_READ_STRIP	0x0000000a
#define FSF_DATADIR_DIF_WRITE_CONVERT	0x0000000b
#define FSF_DATADIR_DIF_READ_CONVERT	0X0000000c

/* data protection control flags */
#define FSF_APP_TAG_CHECK_ENABLE	0x10

/* fc service class */
#define FSF_CLASS_3			0x00000003

/* SBAL chaining */
#define ZFCP_FSF_MAX_SBALS_PER_REQ	36

/* max. number of (data buffer) SBALEs in largest SBAL chain
 * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
#define ZFCP_FSF_MAX_SBALES_PER_REQ	\
	(ZFCP_FSF_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)

/* logging space behind QTCB */
#define FSF_QTCB_LOG_SIZE		1024

@@ -170,6 +173,8 @@
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS	0x00000020
#define FSF_FEATURE_UPDATE_ALERT		0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA		0x00000200
#define FSF_FEATURE_DIF_PROT_TYPE1		0x00010000
#define FSF_FEATURE_DIX_PROT_TCPIP		0x00020000

/* host connection features */
#define FSF_FEATURE_NPIV_MODE			0x00000001
@@ -324,9 +329,14 @@ struct fsf_qtcb_header {
struct fsf_qtcb_bottom_io {
u32 data_direction;
u32 service_class;
u8 res1[8];
u8 res1;
u8 data_prot_flags;
u16 app_tag_value;
u32 ref_tag_value;
u32 fcp_cmnd_length;
u8 res2[12];
u32 data_block_length;
u32 prot_data_length;
u8 res2[4];
u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
u8 fcp_rsp[FSF_FCP_RSP_SIZE];
u8 res3[64];
@@ -352,6 +362,8 @@ struct fsf_qtcb_bottom_support {
u8 els[256];
} __attribute__ ((packed));

#define ZFCP_FSF_TIMER_INT_MASK	0x3FFF

struct fsf_qtcb_bottom_config {
u32 lic_version;
u32 feature_selection;
@@ -30,12 +30,15 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
return 0;
}

static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
unsigned int qdio_err)
{
struct zfcp_adapter *adapter = qdio->adapter;

dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

if (qdio_err & QDIO_ERROR_SLSB_STATE)
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_reopen(adapter,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
@@ -55,72 +58,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
unsigned long long now, span;
int free, used;
int used;

spin_lock(&qdio->stat_lock);
now = get_clock_monotonic();
span = (now - qdio->req_q_time) >> 12;
free = atomic_read(&qdio->req_q.count);
used = QDIO_MAX_BUFFERS_PER_Q - free;
used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
qdio->req_q_util += used * span;
qdio->req_q_time = now;
spin_unlock(&qdio->stat_lock);
}

static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
struct zfcp_qdio_queue *queue = &qdio->req_q;

if (unlikely(qdio_err)) {
zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
count);
zfcp_qdio_handler_error(qdio, "qdireq1");
zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
return;
}

/* cleanup all SBALs being program-owned now */
zfcp_qdio_zero_sbals(queue->sbal, first, count);
zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

zfcp_qdio_account(qdio);
atomic_add(count, &queue->count);
atomic_add(count, &qdio->req_q_free);
wake_up(&qdio->req_q_wq);
}

static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
{
struct zfcp_qdio_queue *queue = &qdio->resp_q;
struct ccw_device *cdev = qdio->adapter->ccw_device;
u8 count, start = queue->first;
unsigned int retval;

count = atomic_read(&queue->count) + processed;

retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);

if (unlikely(retval)) {
atomic_set(&queue->count, count);
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
} else {
queue->first += count;
queue->first %= QDIO_MAX_BUFFERS_PER_Q;
atomic_set(&queue->count, 0);
}
}

static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
int queue_no, int idx, int count,
unsigned long parm)
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
int sbal_idx, sbal_no;

if (unlikely(qdio_err)) {
zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
count);
zfcp_qdio_handler_error(qdio, "qdires1");
zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
return;
}

@@ -129,25 +107,16 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 * returned by QDIO layer
 */
for (sbal_no = 0; sbal_no < count; sbal_no++) {
sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
/* go through all SBALEs of SBAL */
zfcp_fsf_reqid_check(qdio, sbal_idx);
}

/*
 * put range of SBALs back to response queue
 * (including SBALs which have already been free before)
 * put SBALs back to response queue
 */
zfcp_qdio_resp_put_back(qdio, count);
}

static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req, int max_sbals)
{
int count = atomic_read(&qdio->req_q.count);
count = min(count, max_sbals);
q_req->sbal_limit = (q_req->sbal_first + count - 1)
% QDIO_MAX_BUFFERS_PER_Q;
if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL);
}

static struct qdio_buffer_element *
@@ -173,6 +142,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)

/* keep this requests number of SBALs up-to-date */
q_req->sbal_number++;
BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

/* start at first SBALE of new SBAL */
q_req->sbale_curr = 0;
@@ -193,17 +163,6 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
return zfcp_qdio_sbale_curr(qdio, q_req);
}

static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req)
{
struct qdio_buffer **sbal = qdio->req_q.sbal;
int first = q_req->sbal_first;
int last = q_req->sbal_last;
int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
QDIO_MAX_BUFFERS_PER_Q + 1;
zfcp_qdio_zero_sbals(sbal, first, count);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
@@ -213,14 +172,11 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
 * Returns: number of bytes, or error (negativ)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
struct scatterlist *sg, int max_sbals)
struct scatterlist *sg)
{
struct qdio_buffer_element *sbale;
int bytes = 0;

/* figure out last allowed SBAL */
zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);

/* set storage-block type for this request */
sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->flags |= q_req->sbtype;
@@ -229,7 +185,8 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
sbale = zfcp_qdio_sbale_next(qdio, q_req);
if (!sbale) {
atomic_inc(&qdio->req_q_full);
zfcp_qdio_undo_sbals(qdio, q_req);
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
q_req->sbal_number);
return -EINVAL;
}

@@ -239,19 +196,13 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
bytes += sg->length;
}

/* assume that no other SBALEs are to follow in the same SBAL */
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

return bytes;
}

static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
struct zfcp_qdio_queue *req_q = &qdio->req_q;

spin_lock_bh(&qdio->req_q_lock);
if (atomic_read(&req_q->count) ||
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
spin_unlock_bh(&qdio->req_q_lock);
@@ -300,25 +251,25 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct zfcp_qdio_queue *req_q = &qdio->req_q;
int first = q_req->sbal_first;
int count = q_req->sbal_number;
int retval;
unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
u8 sbal_number = q_req->sbal_number;

zfcp_qdio_account(qdio);

retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
count);
retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
q_req->sbal_first, sbal_number);

if (unlikely(retval)) {
zfcp_qdio_zero_sbals(req_q->sbal, first, count);
zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
sbal_number);
return retval;
}

/* account for transferred buffers */
atomic_sub(count, &req_q->count);
req_q->first += count;
req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
atomic_sub(sbal_number, &qdio->req_q_free);
qdio->req_q_idx += sbal_number;
qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

return 0;
}

@@ -331,6 +282,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
id->q_format = QDIO_ZFCP_QFMT;
memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
ASCEBC(id->adapter_name, 8);
id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
id->qib_param_field_format = 0;
id->qib_param_field = NULL;
id->input_slib_elements = NULL;
@@ -340,10 +292,10 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
id->input_handler = zfcp_qdio_int_resp;
id->output_handler = zfcp_qdio_int_req;
id->int_parm = (unsigned long) qdio;
id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);

id->input_sbal_addr_array = (void **) (qdio->res_q);
id->output_sbal_addr_array = (void **) (qdio->req_q);
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @adapter: pointer to struct zfcp_adapter
@@ -354,8 +306,8 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
struct qdio_initialize init_data;

if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
zfcp_qdio_buffers_enqueue(qdio->res_q))
return -ENOMEM;

zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -369,34 +321,30 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
struct zfcp_qdio_queue *req_q;
int first, count;
struct zfcp_adapter *adapter = qdio->adapter;
int idx, count;

if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return;

/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
req_q = &qdio->req_q;
spin_lock_bh(&qdio->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_bh(&qdio->req_q_lock);

wake_up(&qdio->req_q_wq);

qdio_shutdown(qdio->adapter->ccw_device,
QDIO_FLAG_CLEANUP_USING_CLEAR);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

/* cleanup used outbound sbals */
count = atomic_read(&req_q->count);
count = atomic_read(&qdio->req_q_free);
if (count < QDIO_MAX_BUFFERS_PER_Q) {
first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
count = QDIO_MAX_BUFFERS_PER_Q - count;
zfcp_qdio_zero_sbals(req_q->sbal, first, count);
zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
}
req_q->first = 0;
atomic_set(&req_q->count, 0);
qdio->resp_q.first = 0;
atomic_set(&qdio->resp_q.count, 0);
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, 0);
}

/**
@@ -408,34 +356,45 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
struct qdio_buffer_element *sbale;
struct qdio_initialize init_data;
struct ccw_device *cdev = qdio->adapter->ccw_device;
struct zfcp_adapter *adapter = qdio->adapter;
struct ccw_device *cdev = adapter->ccw_device;
struct qdio_ssqd_desc ssqd;
int cc;

if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO;

atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&qdio->adapter->status);

zfcp_qdio_setup_init_data(&init_data, qdio);

if (qdio_establish(&init_data))
goto failed_establish;

if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
goto failed_qdio;

if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
&qdio->adapter->status);

if (qdio_activate(cdev))
goto failed_qdio;

for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
sbale = &(qdio->resp_q.sbal[cc]->element[0]);
sbale = &(qdio->res_q[cc]->element[0]);
sbale->length = 0;
sbale->flags = SBAL_FLAGS_LAST_ENTRY;
sbale->addr = NULL;
}

if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
QDIO_MAX_BUFFERS_PER_Q))
if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
goto failed_qdio;

/* set index of first avalable SBALS / number of available SBALS */
qdio->req_q.first = 0;
atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);

return 0;

@@ -449,7 +408,6 @@ failed_establish:

void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
struct qdio_buffer **sbal_req, **sbal_resp;
int p;

if (!qdio)
@@ -458,12 +416,9 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);

sbal_req = qdio->req_q.sbal;
sbal_resp = qdio->resp_q.sbal;

for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
free_page((unsigned long) sbal_req[p]);
free_page((unsigned long) sbal_resp[p]);
free_page((unsigned long) qdio->req_q[p]);
free_page((unsigned long) qdio->res_q[p]);
}

kfree(qdio);
@@ -491,3 +446,26 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging. This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
int rc;

if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
return;

rc = ccw_device_siosl(adapter->ccw_device);
if (!rc)
atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&adapter->status);
}
@@ -19,22 +19,20 @@
/* index of last SBALE (with respect to DMQ bug workaround) */
#define ZFCP_QDIO_LAST_SBALE_PER_SBAL	(ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)

/**
 * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
 * @sbal: qdio buffers
 * @first: index of next free buffer in queue
 * @count: number of free buffers in queue
 */
struct zfcp_qdio_queue {
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
u8 first;
atomic_t count;
};
/* Max SBALS for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36

/* max. number of (data buffer) SBALEs in largest SBAL chain
 * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
#define ZFCP_QDIO_MAX_SBALES_PER_REQ	\
	(ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)

/**
 * struct zfcp_qdio - basic qdio data structure
 * @resp_q: response queue
 * @res_q: response queue
 * @req_q: request queue
 * @req_q_idx: index of next free buffer
 * @req_q_free: number of free buffers in queue
 * @stat_lock: lock to protect req_q_util and req_q_time
 * @req_q_lock: lock to serialize access to request queue
 * @req_q_time: time of last fill level change
@@ -44,8 +42,10 @@ struct zfcp_qdio_queue {
 * @adapter: adapter used in conjunction with this qdio structure
 */
struct zfcp_qdio {
struct zfcp_qdio_queue resp_q;
struct zfcp_qdio_queue req_q;
struct qdio_buffer *res_q[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_buffer *req_q[QDIO_MAX_BUFFERS_PER_Q];
u8 req_q_idx;
atomic_t req_q_free;
spinlock_t stat_lock;
spinlock_t req_q_lock;
unsigned long long req_q_time;
@@ -65,7 +65,6 @@ struct zfcp_qdio {
 * @sbale_curr: current sbale at creation of this request
 * @sbal_response: sbal used in interrupt
 * @qdio_outb_usage: usage of outbound queue
 * @qdio_inb_usage: usage of inbound queue
 */
struct zfcp_qdio_req {
u32 sbtype;
@@ -76,21 +75,8 @@ struct zfcp_qdio_req {
u8 sbale_curr;
u8 sbal_response;
u16 qdio_outb_usage;
u16 qdio_inb_usage;
};

/**
 * zfcp_qdio_sbale - return pointer to sbale in qdio queue
 * @q: queue where to find sbal
 * @sbal_idx: sbal index in queue
 * @sbale_idx: sbale index in sbal
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
{
return &q->sbal[sbal_idx]->element[sbale_idx];
}

/**
 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
@@ -100,7 +86,7 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
return &qdio->req_q[q_req->sbal_last]->element[0];
}

/**
@@ -112,8 +98,7 @@ zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
q_req->sbale_curr);
return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}

/**
@@ -134,21 +119,25 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long req_id, u32 sbtype, void *data, u32 len)
{
struct qdio_buffer_element *sbale;
int count = min(atomic_read(&qdio->req_q_free),
ZFCP_QDIO_MAX_SBALS_PER_REQ);

q_req->sbal_first = q_req->sbal_last = qdio->req_q.first;
q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
q_req->sbal_number = 1;
q_req->sbtype = sbtype;
q_req->sbale_curr = 1;
q_req->sbal_limit = (q_req->sbal_first + count - 1)
% QDIO_MAX_BUFFERS_PER_Q;

sbale = zfcp_qdio_sbale_req(qdio, q_req);
sbale->addr = (void *) req_id;
sbale->flags |= SBAL_FLAGS0_COMMAND;
sbale->flags |= sbtype;
sbale->flags = SBAL_FLAGS0_COMMAND | sbtype;

q_req->sbale_curr = 1;
if (unlikely(!data))
return;
sbale++;
sbale->addr = data;
if (likely(data))
sbale->length = len;
sbale->length = len;
}

/**
@@ -210,4 +199,36 @@ void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
}

/**
 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @max_sbals: maximum number of SBALs allowed
 */
static inline
void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req, int max_sbals)
{
int count = min(atomic_read(&qdio->req_q_free), max_sbals);

q_req->sbal_limit = (q_req->sbal_first + count - 1) %
QDIO_MAX_BUFFERS_PER_Q;
}

/**
 * zfcp_qdio_set_data_div - set data division count
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @count: The data division count
 */
static inline
void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
struct zfcp_qdio_req *q_req, u32 count)
{
struct qdio_buffer_element *sbale;

sbale = &qdio->req_q[q_req->sbal_first]->element[0];
sbale->length = count;
}

#endif /* ZFCP_QDIO_H */
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_eh.h>
#include <asm/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
@@ -22,6 +23,13 @@ static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");

static bool enable_dif;

#ifdef CONFIG_ZFCP_DIF
module_param_named(dif, enable_dif, bool, 0600);
MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
#endif

static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
int reason)
{
@@ -506,8 +514,10 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 * @rport: The FC rport where to teminate I/O
 *
 * Abort all pending SCSI commands for a port by closing the
 * port. Using a reopen avoiding a conflict with a shutdown
 * overwriting a reopen.
 * port. Using a reopen avoids a conflict with a shutdown
 * overwriting a reopen. The "forced" ensures that a disappeared port
 * is not opened again as valid due to the cached plogi data in
 * non-NPIV mode.
 */
static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
{
@@ -519,11 +529,25 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);

if (port) {
zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL);
put_device(&port->dev);
}
}

static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
{
struct zfcp_unit *unit;

read_lock_irq(&port->unit_list_lock);
list_for_each_entry(unit, &port->unit_list, list) {
get_device(&unit->dev);
if (scsi_queue_work(port->adapter->scsi_host,
&unit->scsi_work) <= 0)
put_device(&unit->dev);
}
read_unlock_irq(&port->unit_list_lock);
}

static void zfcp_scsi_rport_register(struct zfcp_port *port)
{
struct fc_rport_identifiers ids;
@@ -548,6 +572,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
rport->maxframe_size = port->maxframe_size;
rport->supported_classes = port->supported_classes;
port->rport = rport;
port->starget_id = rport->scsi_target_id;

zfcp_scsi_queue_unit_register(port);
}

static void zfcp_scsi_rport_block(struct zfcp_port *port)
@@ -610,24 +637,74 @@ void zfcp_scsi_rport_work(struct work_struct *work)
put_device(&port->dev);
}


void zfcp_scsi_scan(struct work_struct *work)
/**
 * zfcp_scsi_scan - Register LUN with SCSI midlayer
 * @unit: The LUN/unit to register
 */
void zfcp_scsi_scan(struct zfcp_unit *unit)
{
struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
scsi_work);
struct fc_rport *rport;

flush_work(&unit->port->rport_work);
rport = unit->port->rport;
struct fc_rport *rport = unit->port->rport;

if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
scsilun_to_int((struct scsi_lun *)
&unit->fcp_lun), 0);
}

void zfcp_scsi_scan_work(struct work_struct *work)
{
struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
scsi_work);

zfcp_scsi_scan(unit);
put_device(&unit->dev);
}

/**
 * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
 * @adapter: The adapter where to configure DIF/DIX for the SCSI host
 */
void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
{
unsigned int mask = 0;
unsigned int data_div;
struct Scsi_Host *shost = adapter->scsi_host;

data_div = atomic_read(&adapter->status) &
ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED;

if (enable_dif &&
adapter->adapter_features & FSF_FEATURE_DIF_PROT_TYPE1)
mask |= SHOST_DIF_TYPE1_PROTECTION;

if (enable_dif && data_div &&
adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
mask |= SHOST_DIX_TYPE1_PROTECTION;
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
}

scsi_host_set_prot(shost, mask);
}

/**
 * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
 * @scmd: The SCSI command to report the error for
 * @ascq: The ASCQ to put in the sense buffer
 *
 * See the error handling in sd_done for the sense codes used here.
 * Set DID_SOFT_ERROR to retry the request, if possible.
 */
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
scsi_build_sense_buffer(1, scmd->sense_buffer,
ILLEGAL_REQUEST, 0x10, ascq);
set_driver_byte(scmd, DRIVER_SENSE);
scmd->result |= SAM_STAT_CHECK_CONDITION;
set_host_byte(scmd, DID_SOFT_ERROR);
}

struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
@@ -677,11 +754,11 @@ struct zfcp_data zfcp_data = {
.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
.can_queue		 = 4096,
.this_id		 = -1,
.sg_tablesize		 = ZFCP_FSF_MAX_SBALES_PER_REQ,
.sg_tablesize		 = ZFCP_QDIO_MAX_SBALES_PER_REQ,
.cmd_per_lun		 = 1,
.use_clustering		 = 1,
.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
.max_sectors		 = (ZFCP_FSF_MAX_SBALES_PER_REQ * 8),
.max_sectors		 = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
.shost_attrs		 = zfcp_sysfs_shost_attrs,
},
@@ -275,7 +275,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,

zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
zfcp_erp_wait(unit->port->adapter);
flush_work(&unit->scsi_work);
zfcp_scsi_scan(unit);
out:
put_device(&port->dev);
return retval ? retval : (ssize_t) count;
@@ -290,6 +290,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
struct scsi_device *sdev;

if (!(port && get_device(&port->dev)))
return -EBUSY;
@@ -303,8 +304,13 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
else
retval = 0;

/* wait for possible timeout during SCSI probe */
flush_work(&unit->scsi_work);
sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
port->starget_id,
scsilun_to_int((struct scsi_lun *)&fcp_lun));
if (sdev) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
}

write_lock_irq(&port->unit_list_lock);
list_del(&unit->list);
@@ -1847,6 +1847,10 @@ config ZFCP
	  called zfcp. If you want to compile it as a module, say M here
	  and read <file:Documentation/kbuild/modules.txt>.

config ZFCP_DIF
	tristate "T10 DIF/DIX support for the zfcp driver (EXPERIMENTAL)"
	depends on ZFCP && EXPERIMENTAL

config SCSI_PMCRAID
	tristate "PMC SIERRA Linux MaxRAID adapter support"
	depends on PCI && SCSI
@@ -163,6 +163,7 @@ scsi_mod-$(CONFIG_SCSI_NETLINK)	+= scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL)	+= scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS)	+= scsi_proc.o
scsi_mod-y			+= scsi_trace.o
scsi_mod-$(CONFIG_PM_OPS)	+= scsi_pm.o

scsi_tgt-y			+= scsi_tgt_lib.o scsi_tgt_if.o
@@ -1091,6 +1091,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
struct list_head *insert = &aac_devices;
int error = -ENODEV;
int unique_id = 0;
u64 dmamask;

list_for_each_entry(aac, &aac_devices, entry) {
if (aac->id > unique_id)
@@ -1104,17 +1105,18 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
goto out;
error = -ENODEV;

if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
goto out_disable_pdev;
/*
 * If the quirk31 bit is set, the adapter needs adapter
 * to driver communication memory to be allocated below 2gig
 */
if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(31)) ||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(31)))
goto out_disable_pdev;
dmamask = DMA_BIT_MASK(31);
else
dmamask = DMA_BIT_MASK(32);

if (pci_set_dma_mask(pdev, dmamask) ||
pci_set_consistent_dma_mask(pdev, dmamask))
goto out_disable_pdev;

pci_set_master(pdev);

@@ -170,7 +170,7 @@ aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
case 15:
break;
default:
printf("aic7770_config: invalid irq setting %d\n", intdef);
printk("aic7770_config: invalid irq setting %d\n", intdef);
return (ENXIO);
}

@@ -221,7 +221,7 @@ aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
break;
}
if (have_seeprom == 0) {
free(ahc->seep_config, M_DEVBUF);
kfree(ahc->seep_config);
ahc->seep_config = NULL;
}

@@ -293,7 +293,7 @@ aha2840_load_seeprom(struct ahc_softc *ahc)
sc = ahc->seep_config;

if (bootverbose)
printf("%s: Reading SEEPROM...", ahc_name(ahc));
printk("%s: Reading SEEPROM...", ahc_name(ahc));
have_seeprom = ahc_read_seeprom(&sd, (uint16_t *)sc,
/*start_addr*/0, sizeof(*sc)/2);

@@ -301,16 +301,16 @@ aha2840_load_seeprom(struct ahc_softc *ahc)

if (ahc_verify_cksum(sc) == 0) {
if(bootverbose)
printf ("checksum error\n");
printk ("checksum error\n");
have_seeprom = 0;
} else if (bootverbose) {
printf("done.\n");
printk("done.\n");
}
}

if (!have_seeprom) {
if (bootverbose)
printf("%s: No SEEPROM available\n", ahc_name(ahc));
printk("%s: No SEEPROM available\n", ahc_name(ahc));
ahc->flags |= AHC_USEDEFAULTS;
} else {
/*
@@ -85,7 +85,7 @@ aic7770_probe(struct device *dev)
int error;

sprintf(buf, "ahc_eisa:%d", eisaBase >> 12);
name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
if (name == NULL)
return (ENOMEM);
strcpy(name, buf);
File diff suppressed because it is too large
@@ -674,7 +674,7 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
struct ahd_linux_device *dev;

if (bootverbose)
printf("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);
printk("%s: Slave Alloc %d\n", ahd_name(ahd), sdev->id);

dev = scsi_transport_device_data(sdev);
memset(dev, 0, sizeof(*dev));
@@ -798,10 +798,10 @@ ahd_linux_dev_reset(struct scsi_cmnd *cmd)
scmd_printk(KERN_INFO, cmd,
"Attempting to queue a TARGET RESET message:");

printf("CDB:");
printk("CDB:");
for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
printf(" 0x%x", cmd->cmnd[cdb_byte]);
printf("\n");
printk(" 0x%x", cmd->cmnd[cdb_byte]);
printk("\n");

/*
 * Determine if we currently own this command.
@@ -857,16 +857,16 @@ ahd_linux_dev_reset(struct scsi_cmnd *cmd)
ahd->platform_data->eh_done = &done;
ahd_unlock(ahd, &flags);

printf("%s: Device reset code sleeping\n", ahd_name(ahd));
printk("%s: Device reset code sleeping\n", ahd_name(ahd));
if (!wait_for_completion_timeout(&done, 5 * HZ)) {
ahd_lock(ahd, &flags);
ahd->platform_data->eh_done = NULL;
ahd_unlock(ahd, &flags);
printf("%s: Device reset timer expired (active %d)\n",
printk("%s: Device reset timer expired (active %d)\n",
ahd_name(ahd), dev->active);
retval = FAILED;
}
printf("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
printk("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);

return (retval);
}
@@ -884,7 +884,7 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
printf("%s: Bus reset called for cmd %p\n",
printk("%s: Bus reset called for cmd %p\n",
ahd_name(ahd), cmd);
#endif
ahd_lock(ahd, &flags);
@@ -894,7 +894,7 @@ ahd_linux_bus_reset(struct scsi_cmnd *cmd)
ahd_unlock(ahd, &flags);

if (bootverbose)
printf("%s: SCSI bus reset delivered. "
printk("%s: SCSI bus reset delivered. "
"%d SCBs aborted.\n", ahd_name(ahd), found);

return (SUCCESS);
@@ -935,7 +935,7 @@ ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
{
bus_dma_tag_t dmat;

dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
if (dmat == NULL)
return (ENOMEM);

@@ -956,7 +956,7 @@ ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
void
ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
{
free(dmat, M_DEVBUF);
kfree(dmat);
}

int
@@ -1019,7 +1019,7 @@ ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
iocell_info = (uint8_t*)&aic79xx_iocell_info[instance];
iocell_info[index] = value & 0xFFFF;
if (bootverbose)
printf("iocell[%d:%ld] = %d\n", instance, index, value);
printk("iocell[%d:%ld] = %d\n", instance, index, value);
}
}

@@ -1029,7 +1029,7 @@ ahd_linux_setup_tag_info_global(char *p)
int tags, i, j;

tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
printf("Setting Global Tags= %d\n", tags);
printk("Setting Global Tags= %d\n", tags);

for (i = 0; i < ARRAY_SIZE(aic79xx_tag_info); i++) {
for (j = 0; j < AHD_NUM_TARGETS; j++) {
@@ -1047,7 +1047,7 @@ ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
&& (targ < AHD_NUM_TARGETS)) {
aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
if (bootverbose)
printf("tag_info[%d:%d] = %d\n", instance, targ, value);
printk("tag_info[%d:%d] = %d\n", instance, targ, value);
}
}

@@ -1088,7 +1088,7 @@ ahd_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
if (targ == -1)
targ = 0;
} else {
printf("Malformed Option %s\n",
printk("Malformed Option %s\n",
opt_name);
done = TRUE;
}
@@ -1246,7 +1246,7 @@ ahd_linux_register_host(struct ahd_softc *ahd, struct scsi_host_template *templa
ahd_set_unit(ahd, ahd_linux_unit++);
ahd_unlock(ahd, &s);
sprintf(buf, "scsi%d", host->host_no);
new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
if (new_name != NULL) {
strcpy(new_name, buf);
ahd_set_name(ahd, new_name);
@@ -1322,7 +1322,7 @@ int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
ahd->platform_data =
malloc(sizeof(struct ahd_platform_data), M_DEVBUF, M_NOWAIT);
kmalloc(sizeof(struct ahd_platform_data), GFP_ATOMIC);
if (ahd->platform_data == NULL)
return (ENOMEM);
memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
@@ -1364,7 +1364,7 @@ ahd_platform_free(struct ahd_softc *ahd)
if (ahd->platform_data->host)
scsi_host_put(ahd->platform_data->host);

free(ahd->platform_data, M_DEVBUF);
kfree(ahd->platform_data);
}
}

@@ -1502,7 +1502,7 @@ ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
if (ahd->unit >= ARRAY_SIZE(aic79xx_tag_info)) {

if (warned_user == 0) {
printf(KERN_WARNING
printk(KERN_WARNING
"aic79xx: WARNING: Insufficient tag_info instances\n"
"aic79xx: for installed controllers.  Using defaults\n"
"aic79xx: Please update the aic79xx_tag_info array in\n"
@@ -1544,7 +1544,7 @@ ahd_linux_device_queue_depth(struct scsi_device *sdev)
ahd_send_async(ahd, devinfo.channel, devinfo.target,
devinfo.lun, AC_TRANSFER_NEG);
ahd_print_devinfo(ahd, &devinfo);
printf("Tagged Queuing enabled.  Depth %d\n", tags);
printk("Tagged Queuing enabled.  Depth %d\n", tags);
} else {
ahd_platform_set_tags(ahd, sdev, &devinfo, AHD_QUEUE_NONE);
ahd_send_async(ahd, devinfo.channel, devinfo.target,
@@ -1794,7 +1794,7 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
struct ahd_linux_device *dev;

if ((scb->flags & SCB_ACTIVE) == 0) {
printf("SCB %d done'd twice\n", SCB_GET_TAG(scb));
printk("SCB %d done'd twice\n", SCB_GET_TAG(scb));
ahd_dump_card_state(ahd);
panic("Stopping for safety");
}
@@ -1825,7 +1825,7 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MISC) != 0) {
ahd_print_path(ahd, scb);
printf("Set CAM_UNCOR_PARITY\n");
printk("Set CAM_UNCOR_PARITY\n");
}
#endif
ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
@@ -1843,12 +1843,12 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
u_int i;

ahd_print_path(ahd, scb);
printf("CDB:");
printk("CDB:");
for (i = 0; i < scb->io_ctx->cmd_len; i++)
printf(" 0x%x", scb->io_ctx->cmnd[i]);
printf("\n");
printk(" 0x%x", scb->io_ctx->cmnd[i]);
printk("\n");
ahd_print_path(ahd, scb);
printf("Saw underflow (%ld of %ld bytes). "
printk("Saw underflow (%ld of %ld bytes). "
"Treated as error\n",
ahd_get_residual(scb),
ahd_get_transfer_length(scb));
@@ -1881,7 +1881,7 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
dev->commands_since_idle_or_otag = 0;

if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
printf("Recovery SCB completes\n");
printk("Recovery SCB completes\n");
if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
|| ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
@@ -1963,14 +1963,14 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
if (ahd_debug & AHD_SHOW_SENSE) {
int i;

printf("Copied %d bytes of sense data at %d:",
printk("Copied %d bytes of sense data at %d:",
sense_size, sense_offset);
for (i = 0; i < sense_size; i++) {
if ((i & 0xF) == 0)
printf("\n");
printf("0x%x ", cmd->sense_buffer[i]);
printk("\n");
printk("0x%x ", cmd->sense_buffer[i]);
}
printf("\n");
printk("\n");
}
#endif
}
@@ -1995,7 +1995,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
ahd_print_path(ahd, scb);
printf("Dropping tag count to %d\n",
printk("Dropping tag count to %d\n",
dev->active);
}
#endif
@@ -2014,7 +2014,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
== AHD_LOCK_TAGS_COUNT) {
dev->maxtags = dev->active;
ahd_print_path(ahd, scb);
printf("Locking max tag count at %d\n",
printk("Locking max tag count at %d\n",
dev->active);
}
} else {
@@ -2138,7 +2138,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
}

if (do_fallback) {
printf("%s: device overrun (status %x) on %d:%d:%d\n",
printk("%s: device overrun (status %x) on %d:%d:%d\n",
ahd_name(ahd), status, cmd->device->channel,
cmd->device->id, cmd->device->lun);
}
@@ -2187,10 +2187,10 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
scmd_printk(KERN_INFO, cmd,
"Attempting to queue an ABORT message:");

printf("CDB:");
printk("CDB:");
for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
printf(" 0x%x", cmd->cmnd[cdb_byte]);
printf("\n");
printk(" 0x%x", cmd->cmnd[cdb_byte]);
printk("\n");

ahd_lock(ahd, &flags);

@@ -2249,7 +2249,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
goto no_cmd;
}

printf("%s: At time of recovery, card was %spaused\n",
printk("%s: At time of recovery, card was %spaused\n",
ahd_name(ahd), was_paused ? "" : "not ");
ahd_dump_card_state(ahd);

@@ -2260,7 +2260,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
pending_scb->hscb->tag,
ROLE_INITIATOR, CAM_REQ_ABORTED,
SEARCH_COMPLETE) > 0) {
printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
ahd_name(ahd), cmd->device->channel,
cmd->device->id, cmd->device->lun);
retval = SUCCESS;
@@ -2355,7 +2355,7 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
ahd_qinfifo_requeue_tail(ahd, pending_scb);
ahd_set_scbptr(ahd, saved_scbptr);
ahd_print_path(ahd, pending_scb);
printf("Device is disconnected, re-queuing SCB\n");
printk("Device is disconnected, re-queuing SCB\n");
wait = TRUE;
} else {
scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
@@ -2380,21 +2380,21 @@ done:
ahd->platform_data->eh_done = &done;
||||
ahd_unlock(ahd, &flags);
|
||||
|
||||
printf("%s: Recovery code sleeping\n", ahd_name(ahd));
|
||||
printk("%s: Recovery code sleeping\n", ahd_name(ahd));
|
||||
if (!wait_for_completion_timeout(&done, 5 * HZ)) {
|
||||
ahd_lock(ahd, &flags);
|
||||
ahd->platform_data->eh_done = NULL;
|
||||
ahd_unlock(ahd, &flags);
|
||||
printf("%s: Timer Expired (active %d)\n",
|
||||
printk("%s: Timer Expired (active %d)\n",
|
||||
ahd_name(ahd), dev->active);
|
||||
retval = FAILED;
|
||||
}
|
||||
printf("Recovery code awake\n");
|
||||
printk("Recovery code awake\n");
|
||||
} else
|
||||
ahd_unlock(ahd, &flags);
|
||||
|
||||
if (retval != SUCCESS)
|
||||
printf("%s: Command abort returning 0x%x\n",
|
||||
printk("%s: Command abort returning 0x%x\n",
|
||||
ahd_name(ahd), retval);
|
||||
|
||||
return retval;
|
||||
|
@ -2431,7 +2431,7 @@ static void ahd_linux_set_period(struct scsi_target *starget, int period)
|
|||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: set period to %d\n", ahd_name(ahd), period);
|
||||
printk("%s: set period to %d\n", ahd_name(ahd), period);
|
||||
#endif
|
||||
if (offset == 0)
|
||||
offset = MAX_OFFSET;
|
||||
|
@ -2484,7 +2484,7 @@ static void ahd_linux_set_offset(struct scsi_target *starget, int offset)
|
|||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: set offset to %d\n", ahd_name(ahd), offset);
|
||||
printk("%s: set offset to %d\n", ahd_name(ahd), offset);
|
||||
#endif
|
||||
|
||||
ahd_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
|
||||
|
@ -2520,7 +2520,7 @@ static void ahd_linux_set_dt(struct scsi_target *starget, int dt)
|
|||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: %s DT\n", ahd_name(ahd),
|
||||
printk("%s: %s DT\n", ahd_name(ahd),
|
||||
dt ? "enabling" : "disabling");
|
||||
#endif
|
||||
if (dt && spi_max_width(starget)) {
|
||||
|
@ -2562,7 +2562,7 @@ static void ahd_linux_set_qas(struct scsi_target *starget, int qas)
|
|||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: %s QAS\n", ahd_name(ahd),
|
||||
printk("%s: %s QAS\n", ahd_name(ahd),
|
||||
qas ? "enabling" : "disabling");
|
||||
#endif
|
||||
|
||||
|
@ -2601,7 +2601,7 @@ static void ahd_linux_set_iu(struct scsi_target *starget, int iu)
|
|||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: %s IU\n", ahd_name(ahd),
|
||||
printk("%s: %s IU\n", ahd_name(ahd),
|
||||
iu ? "enabling" : "disabling");
|
||||
#endif
|
||||
|
||||
|
@ -2641,7 +2641,7 @@ static void ahd_linux_set_rd_strm(struct scsi_target *starget, int rdstrm)
|
|||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: %s Read Streaming\n", ahd_name(ahd),
|
||||
printk("%s: %s Read Streaming\n", ahd_name(ahd),
|
||||
rdstrm ? "enabling" : "disabling");
|
||||
#endif
|
||||
|
||||
|
@ -2677,7 +2677,7 @@ static void ahd_linux_set_wr_flow(struct scsi_target *starget, int wrflow)
|
|||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: %s Write Flow Control\n", ahd_name(ahd),
|
||||
printk("%s: %s Write Flow Control\n", ahd_name(ahd),
|
||||
wrflow ? "enabling" : "disabling");
|
||||
#endif
|
||||
|
||||
|
@ -2714,14 +2714,14 @@ static void ahd_linux_set_rti(struct scsi_target *starget, int rti)
|
|||
if ((ahd->features & AHD_RTI) == 0) {
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: RTI not available\n", ahd_name(ahd));
|
||||
printk("%s: RTI not available\n", ahd_name(ahd));
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: %s RTI\n", ahd_name(ahd),
|
||||
printk("%s: %s RTI\n", ahd_name(ahd),
|
||||
rti ? "enabling" : "disabling");
|
||||
#endif
|
||||
|
||||
|
@ -2757,7 +2757,7 @@ static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
|
|||
|
||||
#ifdef AHD_DEBUG
|
||||
if ((ahd_debug & AHD_SHOW_DV) != 0)
|
||||
printf("%s: %s Precompensation\n", ahd_name(ahd),
|
||||
printk("%s: %s Precompensation\n", ahd_name(ahd),
|
||||
pcomp ? "Enable" : "Disable");
|
||||
#endif
|
||||
|
||||
|
|
|
@@ -363,13 +363,6 @@ struct ahd_platform_data
- /************************** OS Utility Wrappers *******************************/
- #define printf printk
- #define M_NOWAIT GFP_ATOMIC
- #define M_WAITOK 0
- #define malloc(size, type, flags) kmalloc(size, flags)
- #define free(ptr, type) kfree(ptr)

@@ -178,7 +178,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
- name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
+ name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
@@ -333,7 +333,7 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
- printf("aic79xx: PCI Device %d:%d:%d "
+ printk("aic79xx: PCI Device %d:%d:%d "
@@ -346,7 +346,7 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
- printf("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
+ printk("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
@@ -365,7 +365,7 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
- printf("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
+ printk("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "

@@ -338,7 +338,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
- printf("%s: Enabling 39Bit Addressing\n",
+ printk("%s: Enabling 39Bit Addressing\n",
@@ -528,7 +528,7 @@ ahd_check_extport(struct ahd_softc *ahd)
- printf("%s: Reading VPD from SEEPROM...",
+ printk("%s: Reading VPD from SEEPROM...",
@@ -541,12 +541,12 @@ ahd_check_extport(struct ahd_softc *ahd)
- printf("%s: VPD parsing %s\n",
+ printk("%s: VPD parsing %s\n",
- printf("%s: Reading SEEPROM...", ahd_name(ahd));
+ printk("%s: Reading SEEPROM...", ahd_name(ahd));
@@ -556,16 +556,16 @@ ahd_check_extport(struct ahd_softc *ahd)
- printf("Unable to read SEEPROM\n");
+ printk("Unable to read SEEPROM\n");
- printf ("checksum error\n");
+ printk ("checksum error\n");
- printf ("done.\n");
+ printk ("done.\n");
@@ -615,21 +615,21 @@ ahd_check_extport(struct ahd_softc *ahd)
- printf("%s: Seeprom Contents:", ahd_name(ahd));
+ printk("%s: Seeprom Contents:", ahd_name(ahd));
- printf("\n\t0x%.4x", sc_data[i]);
- printf("\n");
+ printk("\n\t0x%.4x", sc_data[i]);
+ printk("\n");
- printf("%s: No SEEPROM available.\n", ahd_name(ahd));
+ printk("%s: No SEEPROM available.\n", ahd_name(ahd));
- free(ahd->seep_config, M_DEVBUF);
+ kfree(ahd->seep_config);
@@ -656,7 +656,7 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
- printf("%s: STPWLEVEL is %s\n",
+ printk("%s: STPWLEVEL is %s\n",
@@ -671,7 +671,7 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
- printf("%s: Manual Primary Termination\n",
+ printk("%s: Manual Primary Termination\n",
@@ -679,14 +679,14 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
- printf("%s: Primary Auto-Term Sensing failed! "
+ printk("%s: Primary Auto-Term Sensing failed! "
- printf("%s: Manual Secondary Termination\n",
+ printk("%s: Manual Secondary Termination\n",
@@ -694,7 +694,7 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
- printf("%s: Secondary Auto-Term Sensing failed! "
+ printk("%s: Secondary Auto-Term Sensing failed! "
@@ -714,22 +714,22 @@ ahd_configure_termination(struct ahd_softc *ahd, u_int adapter_control)
- printf("%s: Unable to set termination settings!\n",
+ printk("%s: Unable to set termination settings!\n",
- printf("%s: Primary High byte termination %sabled\n",
+ printk("%s: Primary High byte termination %sabled\n",
- printf("%s: Primary Low byte termination %sabled\n",
+ printk("%s: Primary Low byte termination %sabled\n",
- printf("%s: Secondary High byte termination %sabled\n",
+ printk("%s: Secondary High byte termination %sabled\n",
- printf("%s: Secondary Low byte termination %sabled\n",
+ printk("%s: Secondary Low byte termination %sabled\n",
@@ -805,7 +805,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
- printf("%s: PCI error Interrupt\n", ahd_name(ahd));
+ printk("%s: PCI error Interrupt\n", ahd_name(ahd));
@@ -832,7 +832,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
- printf(s, ahd_name(ahd), pci_status_source[i]);
+ printk(s, ahd_name(ahd), pci_status_source[i]);
@@ -862,7 +862,7 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
- printf("%s: PCI Split Interrupt - PCI-X status = 0x%x\n",
+ printk("%s: PCI Split Interrupt - PCI-X status = 0x%x\n",
@@ -891,7 +891,7 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
- printf(s, ahd_name(ahd),
+ printk(s, ahd_name(ahd),
@@ -902,7 +902,7 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
- printf(s, ahd_name(ahd), "SG");
+ printk(s, ahd_name(ahd), "SG");
@@ -950,7 +950,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
- printf("%s: Unable to attach to unsupported chip revision %d\n",
+ printk("%s: Unable to attach to unsupported chip revision %d\n",
@@ -272,33 +272,32 @@ ahd_proc_write_seeprom(struct ahd_softc *ahd, char *buffer, int length)
- printf("ahd_proc_write_seeprom: incorrect buffer size\n");
+ printk("ahd_proc_write_seeprom: incorrect buffer size\n");
- printf("ahd_proc_write_seeprom: cksum verification failed\n");
+ printk("ahd_proc_write_seeprom: cksum verification failed\n");
- printf("ahd_proc_write_seeprom: No Serial EEPROM\n");
+ printk("ahd_proc_write_seeprom: No Serial EEPROM\n");
- ahd->seep_config = malloc(sizeof(*ahd->seep_config),
- M_DEVBUF, M_NOWAIT);
+ ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
- printf("aic79xx: Unable to allocate serial "
+ printk("aic79xx: Unable to allocate serial "
- printf("aic79xx: Writing Serial EEPROM\n");
+ printk("aic79xx: Writing Serial EEPROM\n");

@@ -207,14 +207,14 @@ ahc_read_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
- printf("\nSerial EEPROM:\n\t");
+ printk("\nSerial EEPROM:\n\t");
- printf ("\n\t");
+ printk(KERN_CONT "\n\t");
- printf (" 0x%x", buf[k]);
+ printk(KERN_CONT " 0x%x", buf[k]);
- printf ("\n");
+ printk(KERN_CONT "\n");
@@ -240,7 +240,7 @@ ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
- printf("ahc_write_seeprom: unsupported seeprom type %d\n",
+ printk("ahc_write_seeprom: unsupported seeprom type %d\n",

File diff suppressed because it is too large
@@ -653,7 +653,7 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
- printf("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id);
+ printk("%s: Slave Alloc %d\n", ahc_name(ahc), sdev->id);
@@ -755,7 +755,7 @@ ahc_linux_abort(struct scsi_cmnd *cmd)
- printf("aic7xxx_abort returns 0x%x\n", error);
+ printk("aic7xxx_abort returns 0x%x\n", error);
@@ -769,7 +769,7 @@ ahc_linux_dev_reset(struct scsi_cmnd *cmd)
- printf("aic7xxx_dev_reset returns 0x%x\n", error);
+ printk("aic7xxx_dev_reset returns 0x%x\n", error);
@@ -791,7 +791,7 @@ ahc_linux_bus_reset(struct scsi_cmnd *cmd)
- printf("%s: SCSI bus reset delivered. "
+ printk("%s: SCSI bus reset delivered. "
@@ -840,7 +840,7 @@ ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
- dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
+ dmat = kmalloc(sizeof(*dmat), GFP_ATOMIC);
@@ -861,7 +861,7 @@ ahc_dma_tag_create(struct ahc_softc *ahc, bus_dma_tag_t parent,
- free(dmat, M_DEVBUF);
+ kfree(dmat);
@@ -918,7 +918,7 @@ ahc_linux_setup_tag_info_global(char *p)
- printf("Setting Global Tags= %d\n", tags);
+ printk("Setting Global Tags= %d\n", tags);
@@ -936,7 +936,7 @@ ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
- printf("tag_info[%d:%d] = %d\n", instance, targ, value);
+ printk("tag_info[%d:%d] = %d\n", instance, targ, value);
@@ -977,7 +977,7 @@ ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
- printf("Malformed Option %s\n",
+ printk("Malformed Option %s\n",
@@ -1120,7 +1120,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
- new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
+ new_name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
@@ -1220,7 +1220,7 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
- malloc(sizeof(struct ahc_platform_data), M_DEVBUF, M_NOWAIT);
+ kmalloc(sizeof(struct ahc_platform_data), GFP_ATOMIC);
@@ -1264,7 +1264,7 @@ ahc_platform_free(struct ahc_softc *ahc)
- free(ahc->platform_data, M_DEVBUF);
+ kfree(ahc->platform_data);
@@ -1378,7 +1378,7 @@ ahc_linux_user_tagdepth(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
- printf(KERN_WARNING
+ printk(KERN_WARNING
@@ -1421,7 +1421,7 @@ ahc_linux_device_queue_depth(struct scsi_device *sdev)
- printf("Tagged Queuing enabled. Depth %d\n", tags);
+ printk("Tagged Queuing enabled. Depth %d\n", tags);
@@ -1735,7 +1735,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
- printf("SCB %d done'd twice\n", scb->hscb->tag);
+ printk("SCB %d done'd twice\n", scb->hscb->tag);
@@ -1765,7 +1765,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
- printf("Set CAM_UNCOR_PARITY\n");
+ printk("Set CAM_UNCOR_PARITY\n");
@@ -1783,12 +1783,12 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
- printf("CDB:");
+ printk("CDB:");
- printf(" 0x%x", scb->io_ctx->cmnd[i]);
- printf("\n");
+ printk(" 0x%x", scb->io_ctx->cmnd[i]);
+ printk("\n");
- printf("Saw underflow (%ld of %ld bytes). "
+ printk("Saw underflow (%ld of %ld bytes). "
@@ -1821,7 +1821,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
- printf("Recovery SCB completes\n");
+ printk("Recovery SCB completes\n");
@@ -1886,14 +1886,14 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
- printf("Copied %d bytes of sense data:",
+ printk("Copied %d bytes of sense data:",
- printf("\n");
- printf("0x%x ", cmd->sense_buffer[i]);
+ printk("\n");
+ printk("0x%x ", cmd->sense_buffer[i]);
- printf("\n");
+ printk("\n");
@@ -1918,7 +1918,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
- printf("Dropping tag count to %d\n", dev->active);
+ printk("Dropping tag count to %d\n", dev->active);
@@ -1935,7 +1935,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
- printf("Locking max tag count at %d\n",
+ printk("Locking max tag count at %d\n",
@@ -2100,10 +2100,10 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
- printf("CDB:");
+ printk("CDB:");
- printf(" 0x%x", cmd->cmnd[cdb_byte]);
- printf("\n");
+ printk(" 0x%x", cmd->cmnd[cdb_byte]);
+ printk("\n");
@@ -2121,7 +2121,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
- printf("%s:%d:%d:%d: Is not an active device\n",
+ printk("%s:%d:%d:%d: Is not an active device\n",
@@ -2133,7 +2133,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
- printf("%s:%d:%d:%d: Command found on untagged queue\n",
+ printk("%s:%d:%d:%d: Command found on untagged queue\n",
@@ -2187,7 +2187,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
- printf("%s: At time of recovery, card was %spaused\n",
+ printk("%s: At time of recovery, card was %spaused\n",
@@ -2199,7 +2199,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
- printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
+ printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
@@ -2313,7 +2313,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
- printf("Device is disconnected, re-queuing SCB\n");
+ printk("Device is disconnected, re-queuing SCB\n");
@@ -2338,16 +2338,16 @@ done:
- printf("Recovery code sleeping\n");
+ printk("Recovery code sleeping\n");
- printf("Timer Expired\n");
+ printk("Timer Expired\n");
- printf("Recovery code awake\n");
+ printk("Recovery code awake\n");
@@ -368,13 +368,6 @@ struct ahc_platform_data
- /************************** OS Utility Wrappers *******************************/
- #define printf printk
- #define M_NOWAIT GFP_ATOMIC
- #define M_WAITOK 0
- #define malloc(size, type, flags) kmalloc(size, flags)
- #define free(ptr, type) kfree(ptr)

@@ -225,7 +225,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
- name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
+ name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
@@ -412,7 +412,7 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
- printf("aic7xxx: PCI Device %d:%d:%d "
+ printk("aic7xxx: PCI Device %d:%d:%d "
@@ -425,7 +425,7 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
- printf("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
+ printk("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
@@ -444,7 +444,7 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
- printf("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
+ printk("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "

@@ -752,7 +752,7 @@ ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
- printf("%s: Enabling 39Bit Addressing\n",
+ printk("%s: Enabling 39Bit Addressing\n",
@@ -896,7 +896,7 @@ ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
- printf("%s: Using left over BIOS settings\n",
+ printk("%s: Using left over BIOS settings\n",
@@ -1155,7 +1155,7 @@ done:
- printf("%s: External SRAM, %s access%s, %dbytes/SCB\n",
+ printk("%s: External SRAM, %s access%s, %dbytes/SCB\n",
@@ -1292,7 +1292,7 @@ check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
- printf("%s: Reading SEEPROM...", ahc_name(ahc));
+ printk("%s: Reading SEEPROM...", ahc_name(ahc));
@@ -1309,9 +1309,9 @@ check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
- printf ("checksum error\n");
+ printk ("checksum error\n");
- printf ("done.\n");
+ printk ("done.\n");
@@ -1362,9 +1362,9 @@ check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
- printf("%s: No SEEPROM available.\n", ahc_name(ahc));
+ printk("%s: No SEEPROM available.\n", ahc_name(ahc));
- free(ahc->seep_config, M_DEVBUF);
+ kfree(ahc->seep_config);
@@ -1399,7 +1399,7 @@ check_extport(struct ahc_softc *ahc, u_int *sxfrctl1)
- printf("%s: Low byte termination %sabled\n",
+ printk("%s: Low byte termination %sabled\n",
@@ -1569,7 +1569,7 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: Manual SE Termination\n",
+ printk("%s: Manual SE Termination\n",
@@ -1577,7 +1577,7 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: Manual LVD Termination\n",
+ printk("%s: Manual LVD Termination\n",
@@ -1604,19 +1604,19 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: internal 50 cable %s present",
+ printk("%s: internal 50 cable %s present",
- printf(", internal 68 cable %s present",
+ printk(", internal 68 cable %s present",
- printf("\n%s: external cable %s present\n",
+ printk("\n%s: external cable %s present\n",
- printf("%s: BIOS eeprom %s present\n",
+ printk("%s: BIOS eeprom %s present\n",
@@ -1642,7 +1642,7 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: Illegal cable configuration!!. "
+ printk("%s: Illegal cable configuration!!. "
@@ -1664,10 +1664,10 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: 68 pin termination "
+ printk("%s: 68 pin termination "
- printf("%s: %sHigh byte termination "
+ printk("%s: %sHigh byte termination "
@@ -1683,10 +1683,10 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: 50 pin termination "
+ printk("%s: 50 pin termination "
- printf("%s: %sLow byte termination "
+ printk("%s: %sLow byte termination "
@@ -1696,7 +1696,7 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: Primary Low Byte termination "
+ printk("%s: Primary Low Byte termination "
@@ -1709,7 +1709,7 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: Primary High Byte "
+ printk("%s: Primary High Byte "
@@ -1721,7 +1721,7 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: %sLow byte termination Enabled\n",
+ printk("%s: %sLow byte termination Enabled\n",
@@ -1731,7 +1731,7 @@ configure_termination(struct ahc_softc *ahc,
- printf("%s: %sHigh byte termination Enabled\n",
+ printk("%s: %sHigh byte termination Enabled\n",
@@ -1937,29 +1937,29 @@ ahc_pci_intr(struct ahc_softc *ahc)
- printf("%s: PCI error Interrupt at seqaddr = 0x%x\n",
+ printk("%s: PCI error Interrupt at seqaddr = 0x%x\n",
- printf("%s: Data Parity Error Detected during address "
+ printk("%s: Data Parity Error Detected during address "
- printf("%s: Signal System Error Detected\n", ahc_name(ahc));
+ printk("%s: Signal System Error Detected\n", ahc_name(ahc));
- printf("%s: Received a Master Abort\n", ahc_name(ahc));
+ printk("%s: Received a Master Abort\n", ahc_name(ahc));
- printf("%s: Received a Target Abort\n", ahc_name(ahc));
+ printk("%s: Received a Target Abort\n", ahc_name(ahc));
- printf("%s: Signaled a Target Abort\n", ahc_name(ahc));
+ printk("%s: Signaled a Target Abort\n", ahc_name(ahc));
- printf("%s: Data Parity Error has been reported via PERR#\n",
+ printk("%s: Data Parity Error has been reported via PERR#\n",
@@ -1968,14 +1968,14 @@ ahc_pci_intr(struct ahc_softc *ahc)
- printf("%s: Latched PCIERR interrupt with "
+ printk("%s: Latched PCIERR interrupt with "
- printf(
+ printk(
@@ -2386,7 +2386,7 @@ ahc_aha29160C_setup(struct ahc_softc *ahc)
- printf("RAID functionality unsupported\n");
+ printk("RAID functionality unsupported\n");
@@ -2404,7 +2404,7 @@ ahc_aha394XX_setup(struct ahc_softc *ahc)
- printf("adapter at unexpected slot %d\n"
+ printk("adapter at unexpected slot %d\n"
@@ -2429,7 +2429,7 @@ ahc_aha398XX_setup(struct ahc_softc *ahc)
- printf("adapter at unexpected slot %d\n"
+ printk("adapter at unexpected slot %d\n"
@@ -2459,7 +2459,7 @@ ahc_aha494XX_setup(struct ahc_softc *ahc)
- printf("adapter at unexpected slot %d\n"
+ printk("adapter at unexpected slot %d\n"
@@ -248,13 +248,13 @@ ahc_proc_write_seeprom(struct ahc_softc *ahc, char *buffer, int length)
- printf("ahc_proc_write_seeprom: incorrect buffer size\n");
+ printk("ahc_proc_write_seeprom: incorrect buffer size\n");
- printf("ahc_proc_write_seeprom: cksum verification failed\n");
+ printk("ahc_proc_write_seeprom: cksum verification failed\n");
@@ -290,26 +290,25 @@ ahc_proc_write_seeprom(struct ahc_softc *ahc, char *buffer, int length)
- printf("ahc_proc_write_seeprom: unsupported adapter type\n");
+ printk("ahc_proc_write_seeprom: unsupported adapter type\n");
- printf("ahc_proc_write_seeprom: No Serial EEPROM\n");
+ printk("ahc_proc_write_seeprom: No Serial EEPROM\n");
- ahc->seep_config = malloc(sizeof(*ahc->seep_config),
- M_DEVBUF, M_NOWAIT);
+ ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
- printf("aic7xxx: Unable to allocate serial "
+ printk("aic7xxx: Unable to allocate serial "
- printf("aic7xxx: Writing Serial EEPROM\n");
+ printk("aic7xxx: Writing Serial EEPROM\n");

@@ -223,7 +223,7 @@ Again:
- ts->stat = SAM_GOOD;
+ ts->stat = SAM_STAT_GOOD;
@ -43,21 +43,27 @@
|
|||
*******************************************************************************
|
||||
*/
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
struct device_attribute;
|
||||
/*The limit of outstanding scsi command that firmware can handle*/
|
||||
#define ARCMSR_MAX_OUTSTANDING_CMD 256
|
||||
#define ARCMSR_MAX_FREECCB_NUM 320
|
||||
#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03"
|
||||
#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/02/02"
|
||||
#define ARCMSR_SCSI_INITIATOR_ID 255
|
||||
#define ARCMSR_MAX_XFER_SECTORS 512
|
||||
#define ARCMSR_MAX_XFER_SECTORS_B 4096
|
||||
#define ARCMSR_MAX_XFER_SECTORS_C 304
|
||||
#define ARCMSR_MAX_TARGETID 17
|
||||
#define ARCMSR_MAX_TARGETLUN 8
|
||||
#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
|
||||
#define ARCMSR_MAX_QBUFFER 4096
|
||||
#define ARCMSR_MAX_SG_ENTRIES 38
|
||||
#define ARCMSR_DEFAULT_SG_ENTRIES 38
|
||||
#define ARCMSR_MAX_HBB_POSTQUEUE 264
|
||||
#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
|
||||
#define ARCMSR_CDB_SG_PAGE_LENGTH 256
|
||||
#define SCSI_CMD_ARECA_SPECIFIC 0xE1
|
||||
#ifndef PCI_DEVICE_ID_ARECA_1880
|
||||
#define PCI_DEVICE_ID_ARECA_1880 0x1880
|
||||
#endif
|
||||
/*
|
||||
**********************************************************************************
|
||||
**
|
||||
|
@ -132,35 +138,28 @@ struct CMD_MESSAGE_FIELD
|
|||
#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \
|
||||
ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE
|
||||
/* ARECA IOCTL ReturnCode */
|
||||
#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
|
||||
#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
|
||||
#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
|
||||
#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
|
||||
#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
|
||||
#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
|
||||
#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088
|
||||
/*
|
||||
*************************************************************
|
||||
** structure for holding DMA address data
|
||||
*************************************************************
|
||||
*/
|
||||
#define IS_DMA64 (sizeof(dma_addr_t) == 8)
|
||||
#define IS_SG64_ADDR 0x01000000 /* bit24 */
|
||||
struct SG32ENTRY
|
||||
{
|
||||
__le32 length;
|
||||
__le32 address;
|
||||
};
|
||||
}__attribute__ ((packed));
|
||||
struct SG64ENTRY
|
||||
{
|
||||
__le32 length;
|
||||
__le32 address;
|
||||
__le32 addresshigh;
|
||||
};
|
||||
struct SGENTRY_UNION
|
||||
{
|
||||
union
|
||||
{
|
||||
struct SG32ENTRY sg32entry;
|
||||
struct SG64ENTRY sg64entry;
|
||||
}u;
|
||||
};
|
||||
}__attribute__ ((packed));
|
||||
/*
|
||||
********************************************************************
|
||||
** Q Buffer of IOP Message Transfer
|
||||
|
@ -187,6 +186,9 @@ struct FIRMWARE_INFO
|
|||
char model[8]; /*15, 60-67*/
|
||||
char firmware_ver[16]; /*17, 68-83*/
|
||||
char device_map[16]; /*21, 84-99*/
|
||||
uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
|
||||
uint8_t cfgSerial[16]; /*26,104-119*/
|
||||
uint32_t cfgPicStatus; /*30,120-123*/
|
||||
};
|
||||
/* signature of set and get firmware config */
|
||||
#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
|
||||
|
@ -210,9 +212,15 @@ struct FIRMWARE_INFO
|
|||
#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
|
||||
#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
|
||||
#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
|
||||
#define ARCMSR_CCBREPLY_FLAG_ERROR 0x10000000
|
||||
#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
|
||||
#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
|
||||
/* outbound firmware ok */
|
||||
#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
|
||||
/* ARC-1680 Bus Reset*/
|
||||
#define ARCMSR_ARC1680_BUS_RESET 0x00000003
|
||||
/* ARC-1880 Bus Reset*/
|
||||
#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
|
||||
#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
|
||||
|
||||
/*
|
||||
************************************************************************
|
||||
|
@ -264,11 +272,66 @@ struct FIRMWARE_INFO
|
|||
|
||||
/* data tunnel buffer between user space program and its firmware */
|
||||
/* user space data to iop 128bytes */
|
||||
#define ARCMSR_IOCTL_WBUFFER 0x0000fe00
|
||||
#define ARCMSR_MESSAGE_WBUFFER 0x0000fe00
|
||||
/* iop data to user space 128bytes */
|
||||
#define ARCMSR_IOCTL_RBUFFER 0x0000ff00
|
||||
#define ARCMSR_MESSAGE_RBUFFER 0x0000ff00
|
||||
/* iop message_rwbuffer for message command */
|
||||
#define ARCMSR_MSGCODE_RWBUFFER 0x0000fa00
|
||||
#define ARCMSR_MESSAGE_RWBUFFER 0x0000fa00
|
||||
/*
|
||||
************************************************************************
|
||||
** SPEC. for Areca HBC adapter
|
||||
************************************************************************
|
||||
*/
|
||||
#define ARCMSR_HBC_ISR_THROTTLING_LEVEL 12
|
||||
#define ARCMSR_HBC_ISR_MAX_DONE_QUEUE 20
|
||||
/* Host Interrupt Mask */
|
||||
#define ARCMSR_HBCMU_UTILITY_A_ISR_MASK 0x00000001 /* When clear, the Utility_A interrupt routes to the host.*/
|
||||
#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK 0x00000004 /* When clear, the General Outbound Doorbell interrupt routes to the host.*/
|
||||
#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
|
||||
#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
|
||||
/* Host Interrupt Status */
|
||||
#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
|
||||
/*
|
||||
** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
|
||||
** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
|
||||
*/
|
||||
#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
|
||||
/*
|
||||
** Set if Outbound Doorbell register bits 30:1 have a non-zero
|
||||
** value. This bit clears only when Outbound Doorbell bits
|
||||
** 30:1 are ALL clear. Only a write to the Outbound Doorbell
|
||||
** Clear register clears bits in the Outbound Doorbell register.
|
||||
*/
|
||||
#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
|
||||
/*
|
||||
** Set whenever the Outbound Post List Producer/Consumer
|
||||
** Register (FIFO) is not empty. It clears when the Outbound
|
||||
** Post List FIFO is empty.
|
||||
*/
|
||||
#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
|
||||
/*
|
||||
** This bit indicates a SAS interrupt from a source external to
|
||||
** the PCIe core. This bit is not maskable.
|
||||
*/
|
||||
/* DoorBell*/
|
||||
#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
|
||||
#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
|
||||
/*inbound message 0 ready*/
|
||||
#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
|
||||
/*more than 12 request completed in a time*/
|
||||
#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
|
||||
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
|
||||
/*outbound DATA WRITE isr door bell clear*/
|
||||
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
|
||||
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
|
||||
/*outbound DATA READ isr door bell clear*/
|
||||
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
|
||||
/*outbound message 0 ready*/
|
||||
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
|
||||
/*outbound message cmd isr door bell clear*/
|
||||
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
|
||||
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
|
||||
#define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK 0x80000000
|
||||
/*
|
||||
*******************************************************************************
|
||||
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
|
||||
|
@ -290,7 +353,7 @@ struct ARCMSR_CDB
|
|||
#define ARCMSR_CDB_FLAG_HEADQ 0x08
|
||||
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
|
||||
|
||||
uint8_t Reserved1;
|
||||
uint8_t msgPages;
|
||||
uint32_t Context;
|
||||
uint32_t DataLength;
|
||||
uint8_t Cdb[16];
|
||||
|
@ -303,8 +366,8 @@ struct ARCMSR_CDB
|
|||
uint8_t SenseData[15];
|
||||
union
|
||||
{
|
||||
struct SG32ENTRY sg32entry[ARCMSR_MAX_SG_ENTRIES];
|
||||
struct SG64ENTRY sg64entry[ARCMSR_MAX_SG_ENTRIES];
|
||||
struct SG32ENTRY sg32entry[1];
|
||||
struct SG64ENTRY sg64entry[1];
|
||||
} u;
|
||||
};
|
||||
/*
|
||||
|
@ -344,15 +407,89 @@ struct MessageUnit_B
|
|||
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
|
||||
uint32_t postq_index;
|
||||
uint32_t doneq_index;
|
||||
uint32_t __iomem *drv2iop_doorbell_reg;
|
||||
uint32_t __iomem *drv2iop_doorbell_mask_reg;
|
||||
uint32_t __iomem *iop2drv_doorbell_reg;
|
||||
uint32_t __iomem *iop2drv_doorbell_mask_reg;
|
||||
uint32_t __iomem *msgcode_rwbuffer_reg;
|
||||
uint32_t __iomem *ioctl_wbuffer_reg;
|
||||
uint32_t __iomem *ioctl_rbuffer_reg;
|
||||
uint32_t __iomem *drv2iop_doorbell;
|
||||
uint32_t __iomem *drv2iop_doorbell_mask;
|
||||
uint32_t __iomem *iop2drv_doorbell;
|
||||
uint32_t __iomem *iop2drv_doorbell_mask;
|
||||
uint32_t __iomem *message_rwbuffer;
|
||||
uint32_t __iomem *message_wbuffer;
|
||||
uint32_t __iomem *message_rbuffer;
|
||||
};
|
||||
/*
|
||||
*********************************************************************
|
||||
** LSI
|
||||
*********************************************************************
|
||||
*/
|
struct MessageUnit_C{
uint32_t message_unit_status; /*0000 0003*/
uint32_t slave_error_attribute; /*0004 0007*/
uint32_t slave_error_address; /*0008 000B*/
uint32_t posted_outbound_doorbell; /*000C 000F*/
uint32_t master_error_attribute; /*0010 0013*/
uint32_t master_error_address_low; /*0014 0017*/
uint32_t master_error_address_high; /*0018 001B*/
uint32_t hcb_size; /*001C 001F*/
uint32_t inbound_doorbell; /*0020 0023*/
uint32_t diagnostic_rw_data; /*0024 0027*/
uint32_t diagnostic_rw_address_low; /*0028 002B*/
uint32_t diagnostic_rw_address_high; /*002C 002F*/
uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_mask; /*0034 0037*/
uint32_t dcr_data; /*0038 003B*/
uint32_t dcr_address; /*003C 003F*/
uint32_t inbound_queueport; /*0040 0043*/
uint32_t outbound_queueport; /*0044 0047*/
uint32_t hcb_pci_address_low; /*0048 004B*/
uint32_t hcb_pci_address_high; /*004C 004F*/
uint32_t iop_int_status; /*0050 0053*/
uint32_t iop_int_mask; /*0054 0057*/
uint32_t iop_inbound_queue_port; /*0058 005B*/
uint32_t iop_outbound_queue_port; /*005C 005F*/
uint32_t inbound_free_list_index; /*0060 0063*/
uint32_t inbound_post_list_index; /*0064 0067*/
uint32_t outbound_free_list_index; /*0068 006B*/
uint32_t outbound_post_list_index; /*006C 006F*/
uint32_t inbound_doorbell_clear; /*0070 0073*/
uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t last_used_message_source_address_low; /*0078 007B*/
uint32_t last_used_message_source_address_high; /*007C 007F*/
uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
uint32_t message_dest_address_index; /*0090 0093*/
uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
uint32_t utility_A_int_counter_timer; /*0098 009B*/
uint32_t outbound_doorbell; /*009C 009F*/
uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t message_source_address_index; /*00A4 00A7*/
uint32_t message_done_queue_index; /*00A8 00AB*/
uint32_t reserved0; /*00AC 00AF*/
uint32_t inbound_msgaddr0; /*00B0 00B3*/
uint32_t inbound_msgaddr1; /*00B4 00B7*/
uint32_t outbound_msgaddr0; /*00B8 00BB*/
uint32_t outbound_msgaddr1; /*00BC 00BF*/
uint32_t inbound_queueport_low; /*00C0 00C3*/
uint32_t inbound_queueport_high; /*00C4 00C7*/
uint32_t outbound_queueport_low; /*00C8 00CB*/
uint32_t outbound_queueport_high; /*00CC 00CF*/
uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
uint32_t message_dest_queue_port_low; /*00E0 00E3*/
uint32_t message_dest_queue_port_high; /*00E4 00E7*/
uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t write_sequence; /*00FC 00FF*/
uint32_t reserved1[34]; /*0100 0187*/
uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t message_wbuffer[32]; /*2000 207F*/
uint32_t reserved3[32]; /*2080 20FF*/
uint32_t message_rbuffer[32]; /*2100 217F*/
uint32_t reserved4[32]; /*2180 21FF*/
uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
};

||||
/*
|
||||
*******************************************************************************
|
||||
** Adapter Control Block
|
||||
|
@ -370,14 +507,20 @@ struct AdapterControlBlock
|
|||
unsigned long vir2phy_offset;
|
||||
/* Offset is used in making arc cdb physical to virtual calculations */
|
||||
uint32_t outbound_int_enable;
|
||||
|
||||
uint32_t cdb_phyaddr_hi32;
|
||||
uint32_t reg_mu_acc_handle0;
|
||||
spinlock_t eh_lock;
|
||||
spinlock_t ccblist_lock;
|
||||
union {
|
||||
struct MessageUnit_A __iomem * pmuA;
|
||||
struct MessageUnit_B * pmuB;
|
||||
struct MessageUnit_A __iomem *pmuA;
|
||||
struct MessageUnit_B *pmuB;
|
||||
struct MessageUnit_C __iomem *pmuC;
|
||||
};
|
||||
/* message unit ATU inbound base address0 */
|
||||
|
||||
void __iomem *mem_base0;
|
||||
void __iomem *mem_base1;
|
||||
uint32_t acb_flags;
|
||||
u16 dev_id;
|
||||
uint8_t adapter_index;
|
||||
#define ACB_F_SCSISTOPADAPTER 0x0001
|
||||
#define ACB_F_MSG_STOP_BGRB 0x0002
|
||||
|
@ -392,8 +535,11 @@ struct AdapterControlBlock
|
|||
/* message clear rqbuffer */
|
||||
#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
|
||||
#define ACB_F_BUS_RESET 0x0080
|
||||
#define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
|
||||
|
||||
#define ACB_F_IOP_INITED 0x0100
|
||||
/* iop init */
|
||||
#define ACB_F_ABORT 0x0200
|
||||
#define ACB_F_FIRMWARE_TRAP 0x0400
|
||||
struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
|
||||
/* used for memory free */
|
||||
|
@ -408,7 +554,8 @@ struct AdapterControlBlock
|
|||
/* dma_coherent used for memory free */
|
||||
dma_addr_t dma_coherent_handle;
|
||||
/* dma_coherent_handle used for memory free */
|
||||
|
||||
dma_addr_t dma_coherent_handle_hbb_mu;
|
||||
unsigned int uncache_size;
|
||||
uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
|
||||
/* data collection buffer for read from 80331 */
|
||||
int32_t rqbuf_firstindex;
|
||||
|
@ -432,14 +579,18 @@ struct AdapterControlBlock
|
|||
uint32_t firm_numbers_queue;
|
||||
uint32_t firm_sdram_size;
|
||||
uint32_t firm_hd_channels;
|
||||
char firm_model[12];
|
||||
char firm_version[20];
|
||||
uint32_t firm_cfg_version;
|
||||
char firm_model[12];
|
||||
char firm_version[20];
|
||||
char device_map[20]; /*21,84-99*/
|
||||
struct work_struct arcmsr_do_message_isr_bh;
|
||||
struct timer_list eternal_timer;
|
||||
unsigned short fw_state;
|
||||
unsigned short fw_flag;
|
||||
#define FW_NORMAL 0x0000
|
||||
#define FW_BOG 0x0001
|
||||
#define FW_DEADLOCK 0x0010
|
||||
atomic_t rq_map_token;
|
||||
int ante_token_value;
|
||||
atomic_t ante_token_value;
|
||||
};/* HW_DEVICE_EXTENSION */
|
||||
/*
|
||||
*******************************************************************************
|
||||
|
@ -447,67 +598,33 @@ struct AdapterControlBlock
|
|||
** this CCB length must be 32 bytes boundary
|
||||
*******************************************************************************
|
||||
*/
|
||||
struct CommandControlBlock
|
||||
{
|
||||
struct ARCMSR_CDB arcmsr_cdb;
|
||||
/*
|
||||
** 0-503 (size of CDB = 504):
|
||||
** arcmsr messenger scsi command descriptor size 504 bytes
|
||||
*/
|
||||
uint32_t cdb_shifted_phyaddr;
|
||||
/* 504-507 */
|
||||
uint32_t reserved1;
|
||||
/* 508-511 */
|
||||
#if BITS_PER_LONG == 64
|
||||
struct CommandControlBlock{
|
||||
/*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
|
||||
struct list_head list; /*x32: 8byte, x64: 16byte*/
|
||||
struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
|
||||
struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
|
||||
uint32_t cdb_phyaddr_pattern; /*x32: 4byte, x64: 4byte*/
|
||||
uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
|
||||
uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
|
||||
#define CCB_FLAG_READ 0x0000
|
||||
#define CCB_FLAG_WRITE 0x0001
|
||||
#define CCB_FLAG_ERROR 0x0002
|
||||
#define CCB_FLAG_FLUSHCACHE 0x0004
|
||||
#define CCB_FLAG_MASTER_ABORTED 0x0008
|
||||
uint16_t startdone; /*x32:2byte,x32:2byte*/
|
||||
#define ARCMSR_CCB_DONE 0x0000
|
||||
#define ARCMSR_CCB_START 0x55AA
|
||||
#define ARCMSR_CCB_ABORTED 0xAA55
|
||||
#define ARCMSR_CCB_ILLEGAL 0xFFFF
|
||||
#if BITS_PER_LONG == 64
|
||||
/* ======================512+64 bytes======================== */
|
||||
struct list_head list;
|
||||
/* 512-527 16 bytes next/prev ptrs for ccb lists */
|
||||
struct scsi_cmnd * pcmd;
|
||||
/* 528-535 8 bytes pointer of linux scsi command */
|
||||
struct AdapterControlBlock * acb;
|
||||
/* 536-543 8 bytes pointer of acb */
|
||||
|
||||
uint16_t ccb_flags;
|
||||
/* 544-545 */
|
||||
#define CCB_FLAG_READ 0x0000
|
||||
#define CCB_FLAG_WRITE 0x0001
|
||||
#define CCB_FLAG_ERROR 0x0002
|
||||
#define CCB_FLAG_FLUSHCACHE 0x0004
|
||||
#define CCB_FLAG_MASTER_ABORTED 0x0008
|
||||
uint16_t startdone;
|
||||
/* 546-547 */
|
||||
#define ARCMSR_CCB_DONE 0x0000
|
||||
#define ARCMSR_CCB_START 0x55AA
|
||||
#define ARCMSR_CCB_ABORTED 0xAA55
|
||||
#define ARCMSR_CCB_ILLEGAL 0xFFFF
|
||||
uint32_t reserved2[7];
|
||||
/* 548-551 552-555 556-559 560-563 564-567 568-571 572-575 */
|
||||
#else
|
||||
uint32_t reserved[5]; /*24 byte*/
|
||||
#else
|
||||
/* ======================512+32 bytes======================== */
|
||||
struct list_head list;
|
||||
/* 512-519 8 bytes next/prev ptrs for ccb lists */
|
||||
struct scsi_cmnd * pcmd;
|
||||
/* 520-523 4 bytes pointer of linux scsi command */
|
||||
struct AdapterControlBlock * acb;
|
||||
/* 524-527 4 bytes pointer of acb */
|
||||
|
||||
uint16_t ccb_flags;
|
||||
/* 528-529 */
|
||||
#define CCB_FLAG_READ 0x0000
|
||||
#define CCB_FLAG_WRITE 0x0001
|
||||
#define CCB_FLAG_ERROR 0x0002
|
||||
#define CCB_FLAG_FLUSHCACHE 0x0004
|
||||
#define CCB_FLAG_MASTER_ABORTED 0x0008
|
||||
uint16_t startdone;
|
||||
/* 530-531 */
|
||||
#define ARCMSR_CCB_DONE 0x0000
|
||||
#define ARCMSR_CCB_START 0x55AA
|
||||
#define ARCMSR_CCB_ABORTED 0xAA55
|
||||
#define ARCMSR_CCB_ILLEGAL 0xFFFF
|
||||
uint32_t reserved2[3];
|
||||
/* 532-535 536-539 540-543 */
|
||||
#endif
|
||||
/* ========================================================== */
|
||||
uint32_t reserved; /*8 byte*/
|
||||
#endif
|
||||
/* ======================================================= */
|
||||
struct ARCMSR_CDB arcmsr_cdb;
|
||||
};
|
||||
/*
|
||||
*******************************************************************************
|
||||
|
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
config BE2ISCSI
tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
depends on PCI && SCSI
depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS

help

@ -128,8 +128,8 @@ struct be_ctrl_info {
|
|||
#define mcc_timeout 120000 /* 5s timeout */
|
||||
|
||||
/* Returns number of pages spanned by the data starting at the given addr */
|
||||
#define PAGES_4K_SPANNED(_address, size) \
|
||||
((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
|
||||
#define PAGES_4K_SPANNED(_address, size) \
|
||||
((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
|
||||
(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
|
||||
|
||||
/* Byte offset into the page corresponding to given address */
|
||||
|
@ -137,7 +137,7 @@ struct be_ctrl_info {
|
|||
((size_t)(addr) & (PAGE_SIZE_4K-1))
|
||||
|
||||
/* Returns bit offset within a DWORD of a bitfield */
|
||||
#define AMAP_BIT_OFFSET(_struct, field) \
|
||||
#define AMAP_BIT_OFFSET(_struct, field) \
|
||||
(((size_t)&(((_struct *)0)->field))%32)
|
||||
|
||||
/* Returns the bit mask of the field that is NOT shifted into location. */
|
||||
|
|
|
@ -19,6 +19,86 @@
|
|||
#include "be_mgmt.h"
|
||||
#include "be_main.h"
|
||||
|
||||
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
|
||||
{
|
||||
u32 sreset;
|
||||
u8 *pci_reset_offset = 0;
|
||||
u8 *pci_online0_offset = 0;
|
||||
u8 *pci_online1_offset = 0;
|
||||
u32 pconline0 = 0;
|
||||
u32 pconline1 = 0;
|
||||
u32 i;
|
||||
|
||||
pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
|
||||
pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
|
||||
pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
|
||||
sreset = readl((void *)pci_reset_offset);
|
||||
sreset |= BE2_SET_RESET;
|
||||
writel(sreset, (void *)pci_reset_offset);
|
||||
|
||||
i = 0;
|
||||
while (sreset & BE2_SET_RESET) {
|
||||
if (i > 64)
|
||||
break;
|
||||
msleep(100);
|
||||
sreset = readl((void *)pci_reset_offset);
|
||||
i++;
|
||||
}
|
||||
|
||||
if (sreset & BE2_SET_RESET) {
|
||||
printk(KERN_ERR "Soft Reset did not deassert\n");
|
||||
return -EIO;
|
||||
}
|
||||
pconline1 = BE2_MPU_IRAM_ONLINE;
|
||||
writel(pconline0, (void *)pci_online0_offset);
|
||||
writel(pconline1, (void *)pci_online1_offset);
|
||||
|
||||
sreset = BE2_SET_RESET;
|
||||
writel(sreset, (void *)pci_reset_offset);
|
||||
|
||||
i = 0;
|
||||
while (sreset & BE2_SET_RESET) {
|
||||
if (i > 64)
|
||||
break;
|
||||
msleep(1);
|
||||
sreset = readl((void *)pci_reset_offset);
|
||||
i++;
|
||||
}
|
||||
if (sreset & BE2_SET_RESET) {
|
||||
printk(KERN_ERR "MPU Online Soft Reset did not deassert\n");
|
||||
return -EIO;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int be_chk_reset_complete(struct beiscsi_hba *phba)
|
||||
{
|
||||
unsigned int num_loop;
|
||||
u8 *mpu_sem = 0;
|
||||
u32 status;
|
||||
|
||||
num_loop = 1000;
|
||||
mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
|
||||
msleep(5000);
|
||||
|
||||
while (num_loop) {
|
||||
status = readl((void *)mpu_sem);
|
||||
|
||||
if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
|
||||
break;
|
||||
msleep(60);
|
||||
num_loop--;
|
||||
}
|
||||
|
||||
if ((status & 0x80000000) || (!num_loop)) {
|
||||
printk(KERN_ERR "Failed in be_chk_reset_complete"
|
||||
"status = 0x%x\n", status);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void be_mcc_notify(struct beiscsi_hba *phba)
|
||||
{
|
||||
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
|
||||
|
@ -98,7 +178,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
|
|||
dev_err(&ctrl->pdev->dev,
|
||||
"error in cmd completion: status(compl/extd)=%d/%d\n",
|
||||
compl_status, extd_status);
|
||||
return -1;
|
||||
return -EBUSY;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -151,20 +231,20 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
|
|||
{
|
||||
switch (evt->port_link_status) {
|
||||
case ASYNC_EVENT_LINK_DOWN:
|
||||
SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
|
||||
evt->physical_port);
|
||||
SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
|
||||
evt->physical_port);
|
||||
phba->state |= BE_ADAPTER_LINK_DOWN;
|
||||
iscsi_host_for_each_session(phba->shost,
|
||||
be2iscsi_fail_session);
|
||||
break;
|
||||
case ASYNC_EVENT_LINK_UP:
|
||||
phba->state = BE_ADAPTER_UP;
|
||||
SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
|
||||
SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
|
||||
evt->physical_port);
|
||||
break;
|
||||
default:
|
||||
SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
|
||||
"Physical Port %d \n",
|
||||
"Physical Port %d\n",
|
||||
evt->port_link_status,
|
||||
evt->physical_port);
|
||||
}
|
||||
|
@ -199,7 +279,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
|
|||
else
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
" Unsupported Async Event, flags"
|
||||
" = 0x%08x \n", compl->flags);
|
||||
" = 0x%08x\n", compl->flags);
|
||||
|
||||
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
|
||||
status = be_mcc_compl_process(ctrl, compl);
|
||||
|
@ -231,7 +311,7 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
|
|||
}
|
||||
if (i == mcc_timeout) {
|
||||
dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
|
||||
return -1;
|
||||
return -EBUSY;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -257,7 +337,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
|
|||
|
||||
if (cnt > 6000000) {
|
||||
dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
|
||||
return -1;
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (cnt > 50) {
|
||||
|
@ -286,7 +366,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
|
|||
|
||||
status = be_mbox_db_ready_wait(ctrl);
|
||||
if (status != 0) {
|
||||
SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n");
|
||||
SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
|
||||
return status;
|
||||
}
|
||||
val = 0;
|
||||
|
@ -297,19 +377,19 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
|
|||
|
||||
status = be_mbox_db_ready_wait(ctrl);
|
||||
if (status != 0) {
|
||||
SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n");
|
||||
SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
|
||||
return status;
|
||||
}
|
||||
if (be_mcc_compl_is_new(compl)) {
|
||||
status = be_mcc_compl_process(ctrl, &mbox->compl);
|
||||
be_mcc_compl_use(compl);
|
||||
if (status) {
|
||||
SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n");
|
||||
SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n");
|
||||
return status;
|
||||
}
|
||||
} else {
|
||||
dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
|
||||
return -1;
|
||||
return -EBUSY;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -355,7 +435,7 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
|
|||
return status;
|
||||
} else {
|
||||
dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
|
||||
return -1;
|
||||
return -EBUSY;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -500,7 +580,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
|
|||
|
||||
status = be_mbox_notify(ctrl);
|
||||
if (status)
|
||||
SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n");
|
||||
SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n");
|
||||
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
return status;
|
||||
|
@ -517,7 +597,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
|
|||
void *ctxt = &req->context;
|
||||
int status;
|
||||
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n");
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
|
||||
spin_lock(&ctrl->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
|
||||
|
@ -550,7 +630,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
|
|||
cq->id = le16_to_cpu(resp->cq_id);
|
||||
cq->created = true;
|
||||
} else
|
||||
SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x \n",
|
||||
SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x\n",
|
||||
status);
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
|
||||
|
@ -619,7 +699,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
|
|||
u8 subsys = 0, opcode = 0;
|
||||
int status;
|
||||
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n");
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
|
||||
spin_lock(&ctrl->mbox_lock);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
|
@ -652,7 +732,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
|
|||
default:
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
BUG();
|
||||
return -1;
|
||||
return -ENXIO;
|
||||
}
|
||||
be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
|
||||
if (queue_type != QTYPE_SGL)
|
||||
|
|
|
@ -47,8 +47,8 @@ struct be_mcc_wrb {
|
|||
|
||||
#define CQE_FLAGS_VALID_MASK (1 << 31)
|
||||
#define CQE_FLAGS_ASYNC_MASK (1 << 30)
|
||||
#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
|
||||
#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
|
||||
#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
|
||||
#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
|
||||
|
||||
/* Completion Status */
|
||||
#define MCC_STATUS_SUCCESS 0x0
|
||||
|
@ -56,7 +56,7 @@ struct be_mcc_wrb {
|
|||
#define CQE_STATUS_COMPL_MASK 0xFFFF
|
||||
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
|
||||
#define CQE_STATUS_EXTD_MASK 0xFFFF
|
||||
#define CQE_STATUS_EXTD_SHIFT 0 /* bits 0 - 15 */
|
||||
#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */
|
||||
|
||||
struct be_mcc_compl {
|
||||
u32 status; /* dword 0 */
|
||||
|
@ -143,14 +143,14 @@ struct be_mcc_mailbox {
|
|||
*/
|
||||
#define OPCODE_COMMON_CQ_CREATE 12
|
||||
#define OPCODE_COMMON_EQ_CREATE 13
|
||||
#define OPCODE_COMMON_MCC_CREATE 21
|
||||
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
|
||||
#define OPCODE_COMMON_MCC_CREATE 21
|
||||
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
|
||||
#define OPCODE_COMMON_GET_FW_VERSION 35
|
||||
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
|
||||
#define OPCODE_COMMON_FIRMWARE_CONFIG 42
|
||||
#define OPCODE_COMMON_MCC_DESTROY 53
|
||||
#define OPCODE_COMMON_CQ_DESTROY 54
|
||||
#define OPCODE_COMMON_EQ_DESTROY 55
|
||||
#define OPCODE_COMMON_MCC_DESTROY 53
|
||||
#define OPCODE_COMMON_CQ_DESTROY 54
|
||||
#define OPCODE_COMMON_EQ_DESTROY 55
|
||||
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
|
||||
#define OPCODE_COMMON_FUNCTION_RESET 61
|
||||
|
||||
|
@ -164,9 +164,9 @@ struct be_mcc_mailbox {
|
|||
#define OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG 7
|
||||
#define OPCODE_COMMON_ISCSI_SET_FRAGNUM_BITS_FOR_SGL_CRA 61
|
||||
#define OPCODE_COMMON_ISCSI_DEFQ_CREATE 64
|
||||
#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
|
||||
#define OPCODE_COMMON_ISCSI_DEFQ_DESTROY 65
|
||||
#define OPCODE_COMMON_ISCSI_WRBQ_CREATE 66
|
||||
#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67
|
||||
#define OPCODE_COMMON_ISCSI_WRBQ_DESTROY 67
|
||||
|
||||
struct be_cmd_req_hdr {
|
||||
u8 opcode; /* dword 0 */
|
||||
|
@ -423,7 +423,7 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
|
|||
struct be_queue_info *cq);
|
||||
|
||||
int be_poll_mcc(struct be_ctrl_info *ctrl);
|
||||
unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
|
||||
int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
|
||||
struct beiscsi_hba *phba);
|
||||
unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
|
||||
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
|
||||
|
@ -875,7 +875,7 @@ struct be_fw_cfg {
|
|||
*/
|
||||
#define UNSOL_HDR_NOTIFY 28 /* Unsolicited header notify.*/
|
||||
#define UNSOL_DATA_NOTIFY 29 /* Unsolicited data notify.*/
|
||||
#define UNSOL_DATA_DIGEST_ERROR_NOTIFY 30 /* Unsolicited data digest
|
||||
#define UNSOL_DATA_DIGEST_ERROR_NOTIFY 30 /* Unsolicited data digest
|
||||
* error notify.
|
||||
*/
|
||||
#define DRIVERMSG_NOTIFY 31 /* TCP acknowledge based
|
||||
|
@ -901,6 +901,9 @@ struct be_fw_cfg {
|
|||
* the cxn
|
||||
*/
|
||||
|
||||
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba);
|
||||
int be_chk_reset_complete(struct beiscsi_hba *phba);
|
||||
|
||||
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
|
||||
bool embedded, u8 sge_cnt);
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
|
|||
SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
|
||||
|
||||
if (!ep) {
|
||||
SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep \n");
|
||||
SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep\n");
|
||||
return NULL;
|
||||
}
|
||||
beiscsi_ep = ep->dd_data;
|
||||
|
@ -157,7 +157,7 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
|
|||
"Connection table already occupied. Detected clash\n");
|
||||
return -EINVAL;
|
||||
} else {
|
||||
SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn) \n",
|
||||
SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn)\n",
|
||||
cid, beiscsi_conn);
|
||||
phba->conn_table[cid] = beiscsi_conn;
|
||||
}
|
||||
|
@ -196,7 +196,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
|
|||
|
||||
if (beiscsi_ep->phba != phba) {
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"beiscsi_ep->hba=%p not equal to phba=%p \n",
|
||||
"beiscsi_ep->hba=%p not equal to phba=%p\n",
|
||||
beiscsi_ep->phba, phba);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
@ -204,7 +204,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
|
|||
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
|
||||
beiscsi_conn->ep = beiscsi_ep;
|
||||
beiscsi_ep->conn = beiscsi_conn;
|
||||
SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d \n",
|
||||
SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d\n",
|
||||
beiscsi_conn, conn, beiscsi_ep->ep_cid);
|
||||
return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
|
||||
}
|
||||
|
@ -230,7 +230,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
|
|||
if (!beiscsi_ep) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"In beiscsi_conn_get_param , no beiscsi_ep\n");
|
||||
return -1;
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
switch (param) {
|
||||
|
@ -277,6 +277,10 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
|
|||
if (session->max_burst > 262144)
|
||||
session->max_burst = 262144;
|
||||
break;
|
||||
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
|
||||
if ((conn->max_xmit_dlength > 65536) ||
|
||||
(conn->max_xmit_dlength == 0))
|
||||
conn->max_xmit_dlength = 65536;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
@ -308,8 +312,8 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
|
|||
case ISCSI_HOST_PARAM_HWADDRESS:
|
||||
tag = be_cmd_get_mac_addr(phba);
|
||||
if (!tag) {
|
||||
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed \n");
|
||||
return -1;
|
||||
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
|
||||
return -EAGAIN;
|
||||
} else
|
||||
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
|
||||
phba->ctrl.mcc_numtag[tag]);
|
||||
|
@ -319,10 +323,10 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
|
|||
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
|
||||
if (status || extd_status) {
|
||||
SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
|
||||
" status = %d extd_status = %d \n",
|
||||
" status = %d extd_status = %d\n",
|
||||
status, extd_status);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
return -1;
|
||||
return -EAGAIN;
|
||||
} else {
|
||||
wrb = queue_get_wrb(mccq, wrb_num);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
|
@ -441,75 +445,6 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
|
|||
return cid;
|
||||
}
|
||||
|
||||
/**
|
||||
* beiscsi_open_conn - Ask FW to open a TCP connection
|
||||
* @ep: endpoint to be used
|
||||
* @src_addr: The source IP address
|
||||
* @dst_addr: The Destination IP address
|
||||
*
|
||||
* Asks the FW to open a TCP connection
|
||||
*/
|
||||
static int beiscsi_open_conn(struct iscsi_endpoint *ep,
|
||||
struct sockaddr *src_addr,
|
||||
struct sockaddr *dst_addr, int non_blocking)
|
||||
{
|
||||
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
|
||||
struct beiscsi_hba *phba = beiscsi_ep->phba;
|
||||
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct tcp_connect_and_offload_out *ptcpcnct_out;
|
||||
unsigned short status, extd_status;
|
||||
unsigned int tag, wrb_num;
|
||||
int ret = -1;
|
||||
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
|
||||
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
|
||||
if (beiscsi_ep->ep_cid == 0xFFFF) {
|
||||
SE_DEBUG(DBG_LVL_1, "No free cid available\n");
|
||||
return ret;
|
||||
}
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
|
||||
beiscsi_ep->ep_cid);
|
||||
phba->ep_array[beiscsi_ep->ep_cid -
|
||||
phba->fw_config.iscsi_cid_start] = ep;
|
||||
if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
|
||||
phba->params.cxns_per_ctrl * 2)) {
|
||||
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
beiscsi_ep->cid_vld = 0;
|
||||
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
|
||||
if (!tag) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"mgmt_open_connection Failed for cid=%d \n",
|
||||
beiscsi_ep->ep_cid);
|
||||
} else {
|
||||
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
|
||||
phba->ctrl.mcc_numtag[tag]);
|
||||
}
|
||||
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
|
||||
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
|
||||
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
|
||||
if (status || extd_status) {
|
||||
SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
|
||||
" status = %d extd_status = %d \n",
|
||||
status, extd_status);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
return -1;
|
||||
} else {
|
||||
wrb = queue_get_wrb(mccq, wrb_num);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
|
||||
ptcpcnct_out = embedded_payload(wrb);
|
||||
beiscsi_ep = ep->dd_data;
|
||||
beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
|
||||
beiscsi_ep->cid_vld = 1;
|
||||
SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* beiscsi_put_cid - Free the cid
|
||||
* @phba: The phba for which the cid is being freed
|
||||
|
@ -535,6 +470,103 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
|
|||
beiscsi_ep->phba = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* beiscsi_open_conn - Ask FW to open a TCP connection
|
||||
* @ep: endpoint to be used
|
||||
* @src_addr: The source IP address
|
||||
* @dst_addr: The Destination IP address
|
||||
*
|
||||
* Asks the FW to open a TCP connection
|
||||
*/
|
||||
static int beiscsi_open_conn(struct iscsi_endpoint *ep,
|
||||
struct sockaddr *src_addr,
|
||||
struct sockaddr *dst_addr, int non_blocking)
|
||||
{
|
||||
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
|
||||
struct beiscsi_hba *phba = beiscsi_ep->phba;
|
||||
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct tcp_connect_and_offload_out *ptcpcnct_out;
|
||||
unsigned short status, extd_status;
|
||||
struct be_dma_mem nonemb_cmd;
|
||||
unsigned int tag, wrb_num;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
|
||||
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
|
||||
if (beiscsi_ep->ep_cid == 0xFFFF) {
|
||||
SE_DEBUG(DBG_LVL_1, "No free cid available\n");
|
||||
return ret;
|
||||
}
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d\n",
|
||||
beiscsi_ep->ep_cid);
|
||||
phba->ep_array[beiscsi_ep->ep_cid -
|
||||
phba->fw_config.iscsi_cid_start] = ep;
|
||||
if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
|
||||
phba->params.cxns_per_ctrl * 2)) {
|
||||
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
|
||||
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
|
||||
goto free_ep;
|
||||
}
|
||||
|
||||
beiscsi_ep->cid_vld = 0;
|
||||
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
|
||||
sizeof(struct tcp_connect_and_offload_in),
|
||||
&nonemb_cmd.dma);
|
||||
if (nonemb_cmd.va == NULL) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"Failed to allocate memory for mgmt_open_connection"
|
||||
"\n");
|
||||
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
|
||||
return -ENOMEM;
|
||||
}
|
||||
nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
|
||||
memset(nonemb_cmd.va, 0, nonemb_cmd.size);
|
||||
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
|
||||
if (!tag) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"mgmt_open_connection Failed for cid=%d\n",
|
||||
beiscsi_ep->ep_cid);
|
||||
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
|
||||
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
|
||||
nonemb_cmd.va, nonemb_cmd.dma);
|
||||
return -EAGAIN;
|
||||
} else {
|
||||
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
|
||||
phba->ctrl.mcc_numtag[tag]);
|
||||
}
|
||||
wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
|
||||
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
|
||||
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
|
||||
if (status || extd_status) {
|
||||
SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
|
||||
" status = %d extd_status = %d\n",
|
||||
status, extd_status);
|
||||
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
|
||||
nonemb_cmd.va, nonemb_cmd.dma);
|
||||
goto free_ep;
|
||||
} else {
|
||||
wrb = queue_get_wrb(mccq, wrb_num);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
|
||||
ptcpcnct_out = embedded_payload(wrb);
|
||||
beiscsi_ep = ep->dd_data;
|
||||
beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
|
||||
beiscsi_ep->cid_vld = 1;
|
||||
SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
|
||||
}
|
||||
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
|
||||
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
|
||||
nonemb_cmd.va, nonemb_cmd.dma);
|
||||
return 0;
|
||||
|
||||
free_ep:
|
||||
beiscsi_free_ep(beiscsi_ep);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/**
|
||||
* beiscsi_ep_connect - Ask chip to create TCP Conn
|
||||
* @scsi_host: Pointer to scsi_host structure
|
||||
|
@ -552,18 +584,18 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
|
|||
struct iscsi_endpoint *ep;
|
||||
int ret;
|
||||
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect \n");
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect\n");
|
||||
if (shost)
|
||||
phba = iscsi_host_priv(shost);
|
||||
else {
|
||||
ret = -ENXIO;
|
||||
SE_DEBUG(DBG_LVL_1, "shost is NULL \n");
|
||||
SE_DEBUG(DBG_LVL_1, "shost is NULL\n");
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
if (phba->state != BE_ADAPTER_UP) {
|
||||
ret = -EBUSY;
|
||||
SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP \n");
|
||||
SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP\n");
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
@ -576,16 +608,16 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
|
|||
beiscsi_ep = ep->dd_data;
|
||||
beiscsi_ep->phba = phba;
|
||||
beiscsi_ep->openiscsi_ep = ep;
|
||||
if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
|
||||
SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn \n");
|
||||
ret = -ENOMEM;
|
||||
ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking);
|
||||
if (ret) {
|
||||
SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn\n");
|
||||
goto free_ep;
|
||||
}
|
||||
|
||||
return ep;
|
||||
|
||||
free_ep:
|
||||
beiscsi_free_ep(beiscsi_ep);
|
||||
iscsi_destroy_endpoint(ep);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
@ -620,9 +652,9 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
|
|||
|
||||
tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
|
||||
if (!tag) {
|
||||
SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
|
||||
SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x\n",
|
||||
beiscsi_ep->ep_cid);
|
||||
ret = -1;
|
||||
ret = -EAGAIN;
|
||||
} else {
|
||||
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
|
||||
phba->ctrl.mcc_numtag[tag]);
|
||||
|
@ -631,30 +663,6 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* beiscsi_ep_disconnect - Tears down the TCP connection
|
||||
* @ep: endpoint to be used
|
||||
*
|
||||
* Tears down the TCP connection
|
||||
*/
|
||||
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
|
||||
{
|
||||
struct beiscsi_conn *beiscsi_conn;
|
||||
struct beiscsi_endpoint *beiscsi_ep;
|
||||
struct beiscsi_hba *phba;
|
||||
|
||||
beiscsi_ep = ep->dd_data;
|
||||
phba = beiscsi_ep->phba;
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
|
||||
beiscsi_ep->ep_cid);
|
||||
|
||||
if (beiscsi_ep->conn) {
|
||||
beiscsi_conn = beiscsi_ep->conn;
|
||||
iscsi_suspend_queue(beiscsi_conn->conn);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
|
||||
* @phba: The phba instance
|
||||
|
@ -666,50 +674,57 @@ static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
|
|||
if (phba->conn_table[cid])
|
||||
phba->conn_table[cid] = NULL;
|
||||
else {
|
||||
SE_DEBUG(DBG_LVL_8, "Connection table Not occupied. \n");
|
||||
SE_DEBUG(DBG_LVL_8, "Connection table Not occupied.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* beiscsi_conn_stop - Invalidate and stop the connection
|
||||
* @cls_conn: pointer to get iscsi_conn
|
||||
* @flag: The type of connection closure
|
||||
* beiscsi_ep_disconnect - Tears down the TCP connection
|
||||
* @ep: endpoint to be used
|
||||
*
|
||||
* Tears down the TCP connection
|
||||
*/
|
||||
void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
|
||||
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
|
||||
{
|
||||
struct iscsi_conn *conn = cls_conn->dd_data;
|
||||
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
|
||||
struct beiscsi_conn *beiscsi_conn;
|
||||
struct beiscsi_endpoint *beiscsi_ep;
|
||||
struct iscsi_session *session = conn->session;
|
||||
struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
|
||||
struct beiscsi_hba *phba = iscsi_host_priv(shost);
|
||||
struct beiscsi_hba *phba;
|
||||
unsigned int tag;
|
||||
unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
|
||||
|
||||
beiscsi_ep = beiscsi_conn->ep;
|
||||
if (!beiscsi_ep) {
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
|
||||
beiscsi_ep = ep->dd_data;
|
||||
phba = beiscsi_ep->phba;
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
|
||||
beiscsi_ep->ep_cid);
|
||||
|
||||
if (!beiscsi_ep->conn) {
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect, no "
|
||||
"beiscsi_ep\n");
|
||||
return;
|
||||
}
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop ep_cid = %d\n",
|
||||
beiscsi_ep->ep_cid);
|
||||
beiscsi_conn = beiscsi_ep->conn;
|
||||
iscsi_suspend_queue(beiscsi_conn->conn);
|
||||
|
||||
SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect ep_cid = %d\n",
|
||||
beiscsi_ep->ep_cid);
|
||||
|
||||
tag = mgmt_invalidate_connection(phba, beiscsi_ep,
|
||||
beiscsi_ep->ep_cid, 1,
|
||||
savecfg_flag);
|
||||
if (!tag) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"mgmt_invalidate_connection Failed for cid=%d \n",
|
||||
"mgmt_invalidate_connection Failed for cid=%d\n",
|
||||
beiscsi_ep->ep_cid);
|
||||
} else {
|
||||
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
|
||||
phba->ctrl.mcc_numtag[tag]);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
}
|
||||
|
||||
beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
|
||||
beiscsi_free_ep(beiscsi_ep);
|
||||
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
|
||||
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
|
||||
iscsi_conn_stop(cls_conn, flag);
|
||||
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
|
||||
}
|
||||
|
|
|
@ -59,8 +59,6 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
|
|||
|
||||
int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn);
|
||||
|
||||
void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag);
|
||||
|
||||
struct iscsi_endpoint *beiscsi_ep_connect(struct Scsi_Host *shost,
|
||||
struct sockaddr *dst_addr,
|
||||
int non_blocking);
|
||||
|
|
|
@ -41,6 +41,8 @@
|
|||
static unsigned int be_iopoll_budget = 10;
|
||||
static unsigned int be_max_phys_size = 64;
|
||||
static unsigned int enable_msix = 1;
|
||||
static unsigned int gcrashmode = 0;
|
||||
static unsigned int num_hba = 0;
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
|
||||
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
|
||||
|
@ -69,6 +71,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
|
|||
struct beiscsi_hba *phba;
|
||||
struct iscsi_session *session;
|
||||
struct invalidate_command_table *inv_tbl;
|
||||
struct be_dma_mem nonemb_cmd;
|
||||
unsigned int cid, tag, num_invalidate;
|
||||
|
||||
cls_session = starget_to_session(scsi_target(sc->device));
|
||||
|
@ -99,18 +102,34 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
|
|||
inv_tbl->cid = cid;
|
||||
inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
|
||||
num_invalidate = 1;
|
||||
tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
|
||||
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
|
||||
sizeof(struct invalidate_commands_params_in),
|
||||
&nonemb_cmd.dma);
|
||||
if (nonemb_cmd.va == NULL) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"Failed to allocate memory for"
|
||||
"mgmt_invalidate_icds\n");
|
||||
return FAILED;
|
||||
}
|
||||
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
|
||||
|
||||
tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
|
||||
cid, &nonemb_cmd);
|
||||
if (!tag) {
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"mgmt_invalidate_icds could not be"
|
||||
" submitted\n");
|
||||
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
|
||||
nonemb_cmd.va, nonemb_cmd.dma);
|
||||
|
||||
return FAILED;
|
||||
} else {
|
||||
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
|
||||
phba->ctrl.mcc_numtag[tag]);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
}
|
||||
|
||||
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
|
||||
nonemb_cmd.va, nonemb_cmd.dma);
|
||||
return iscsi_eh_abort(sc);
|
||||
}
|
||||
|
||||
|
@ -124,6 +143,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
|
|||
struct iscsi_session *session;
|
||||
struct iscsi_cls_session *cls_session;
|
||||
struct invalidate_command_table *inv_tbl;
|
||||
struct be_dma_mem nonemb_cmd;
|
||||
unsigned int cid, tag, i, num_invalidate;
|
||||
int rc = FAILED;
|
||||
|
||||
|
@ -158,18 +178,33 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
|
|||
spin_unlock_bh(&session->lock);
|
||||
inv_tbl = phba->inv_tbl;
|
||||
|
||||
tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
|
||||
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
|
||||
sizeof(struct invalidate_commands_params_in),
|
||||
&nonemb_cmd.dma);
|
||||
if (nonemb_cmd.va == NULL) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"Failed to allocate memory for"
|
||||
"mgmt_invalidate_icds\n");
|
||||
return FAILED;
|
||||
}
|
||||
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
|
||||
memset(nonemb_cmd.va, 0, nonemb_cmd.size);
|
||||
tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
|
||||
cid, &nonemb_cmd);
|
||||
if (!tag) {
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"mgmt_invalidate_icds could not be"
|
||||
" submitted\n");
|
||||
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
|
||||
nonemb_cmd.va, nonemb_cmd.dma);
|
||||
return FAILED;
|
||||
} else {
|
||||
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
|
||||
phba->ctrl.mcc_numtag[tag]);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
}
|
||||
|
||||
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
|
||||
nonemb_cmd.va, nonemb_cmd.dma);
|
||||
return iscsi_eh_device_reset(sc);
|
||||
unlock:
|
||||
spin_unlock_bh(&session->lock);
|
||||
|
@ -216,7 +251,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
|
|||
shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
|
||||
if (!shost) {
|
||||
dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
|
||||
"iscsi_host_alloc failed \n");
|
||||
"iscsi_host_alloc failed\n");
|
||||
return NULL;
|
||||
}
|
||||
shost->dma_boundary = pcidev->dma_mask;
|
||||
|
@ -371,7 +406,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
|
|||
+ BE2_TMFS) / 512) + 1) * 512;
|
||||
phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
|
||||
? 1024 : phba->params.num_eq_entries;
|
||||
SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
|
||||
SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
|
||||
phba->params.num_eq_entries);
|
||||
phba->params.num_cq_entries =
|
||||
(((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
|
||||
|
@ -616,7 +651,7 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
|
|||
struct pci_dev *pcidev = phba->pcidev;
|
||||
struct hwi_controller *phwi_ctrlr;
|
||||
struct hwi_context_memory *phwi_context;
|
||||
int ret, msix_vec, i = 0;
|
||||
int ret, msix_vec, i, j;
|
||||
char desc[32];
|
||||
|
||||
phwi_ctrlr = phba->phwi_ctrlr;
|
||||
|
@ -628,10 +663,25 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
|
|||
msix_vec = phba->msix_entries[i].vector;
|
||||
ret = request_irq(msix_vec, be_isr_msix, 0, desc,
|
||||
&phwi_context->be_eq[i]);
|
||||
if (ret) {
|
||||
shost_printk(KERN_ERR, phba->shost,
|
||||
"beiscsi_init_irqs-Failed to"
|
||||
"register msix for i = %d\n", i);
|
||||
if (!i)
|
||||
return ret;
|
||||
goto free_msix_irqs;
|
||||
}
|
||||
}
|
||||
msix_vec = phba->msix_entries[i].vector;
|
||||
ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
|
||||
&phwi_context->be_eq[i]);
|
||||
if (ret) {
|
||||
shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
|
||||
"Failed to register beiscsi_msix_mcc\n");
|
||||
i++;
|
||||
goto free_msix_irqs;
|
||||
}
|
||||
|
||||
} else {
|
||||
ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
|
||||
"beiscsi", phba);
|
||||
|
@ -642,6 +692,10 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
|
|||
}
|
||||
}
|
||||
return 0;
|
||||
free_msix_irqs:
|
||||
for (j = i - 1; j == 0; j++)
|
||||
free_irq(msix_vec, &phwi_context->be_eq[j]);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
|
||||
|
@ -692,7 +746,7 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
|
|||
break;
|
||||
default:
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"Unrecognized opcode 0x%x in async msg \n",
|
||||
"Unrecognized opcode 0x%x in async msg\n",
|
||||
(ppdu->
|
||||
dw[offsetof(struct amap_pdu_base, opcode) / 32]
|
||||
& PDUBASE_OPCODE_MASK));
|
||||
|
@ -711,7 +765,7 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
|
|||
|
||||
if (phba->io_sgl_hndl_avbl) {
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
|
||||
"In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
|
||||
phba->io_sgl_alloc_index);
|
||||
psgl_handle = phba->io_sgl_hndl_base[phba->
|
||||
io_sgl_alloc_index];
|
||||
|
@ -730,7 +784,7 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
|
|||
static void
|
||||
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
|
||||
{
|
||||
SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n",
|
||||
SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
|
||||
phba->io_sgl_free_index);
|
||||
if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
|
||||
/*
|
||||
|
@ -739,7 +793,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
|
|||
*/
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"Double Free in IO SGL io_sgl_free_index=%d,"
|
||||
"value there=%p \n", phba->io_sgl_free_index,
|
||||
"value there=%p\n", phba->io_sgl_free_index,
|
||||
phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
|
||||
return;
|
||||
}
|
||||
|
@ -804,7 +858,7 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
|
|||
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"FREE WRB: pwrb_handle=%p free_index=0x%x"
|
||||
"wrb_handles_available=%d \n",
|
||||
"wrb_handles_available=%d\n",
|
||||
pwrb_handle, pwrb_context->free_index,
|
||||
pwrb_context->wrb_handles_available);
|
||||
}
|
||||
|
@ -816,7 +870,7 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
|
|||
if (phba->eh_sgl_hndl_avbl) {
|
||||
psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
|
||||
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
|
||||
SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
|
||||
SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
|
||||
phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
|
||||
phba->eh_sgl_hndl_avbl--;
|
||||
if (phba->eh_sgl_alloc_index ==
|
||||
|
@ -834,7 +888,7 @@ void
|
|||
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
|
||||
{
|
||||
|
||||
SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d \n",
|
||||
SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
|
||||
phba->eh_sgl_free_index);
|
||||
if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
|
||||
/*
|
||||
|
@ -842,7 +896,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
|
|||
* failed in xmit_task or alloc_pdu.
|
||||
*/
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"Double Free in eh SGL ,eh_sgl_free_index=%d \n",
|
||||
"Double Free in eh SGL ,eh_sgl_free_index=%d\n",
|
||||
phba->eh_sgl_free_index);
|
||||
return;
|
||||
}
|
||||
|
@ -1081,7 +1135,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
|
|||
case HWH_TYPE_LOGIN:
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
|
||||
"- Solicited path \n");
|
||||
"- Solicited path\n");
|
||||
break;
|
||||
|
||||
case HWH_TYPE_NOP:
|
||||
|
@ -1164,7 +1218,7 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
|
|||
default:
|
||||
pbusy_list = NULL;
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"Unexpected code=%d \n",
|
||||
"Unexpected code=%d\n",
|
||||
pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
|
||||
code) / 32] & PDUCQE_CODE_MASK);
|
||||
return NULL;
|
||||
|
@ -1552,7 +1606,7 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
|
|||
else
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
" Unsupported Async Event, flags"
|
||||
" = 0x%08x \n", mcc_compl->flags);
|
||||
" = 0x%08x\n", mcc_compl->flags);
|
||||
} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
|
||||
be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
|
||||
atomic_dec(&phba->ctrl.mcc_obj.q.used);
|
||||
|
@ -1611,7 +1665,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
|
|||
hwi_complete_cmd(beiscsi_conn, phba, sol);
|
||||
break;
|
||||
case DRIVERMSG_NOTIFY:
|
||||
SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
|
||||
SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
|
||||
dmsg = (struct dmsg_cqe *)sol;
|
||||
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
|
||||
break;
|
||||
|
@ -1782,9 +1836,9 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
|
|||
sg_len = sg_dma_len(sg);
|
||||
addr = (u64) sg_dma_address(sg);
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
|
||||
(addr & 0xFFFFFFFF));
|
||||
((u32)(addr & 0xFFFFFFFF)));
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
|
||||
(addr >> 32));
|
||||
((u32)(addr >> 32)));
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
|
||||
sg_len);
|
||||
sge_len = sg_len;
|
||||
|
@ -1794,9 +1848,9 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
|
|||
sg_len = sg_dma_len(sg);
|
||||
addr = (u64) sg_dma_address(sg);
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
|
||||
(addr & 0xFFFFFFFF));
|
||||
((u32)(addr & 0xFFFFFFFF)));
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
|
||||
(addr >> 32));
|
||||
((u32)(addr >> 32)));
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
|
||||
sg_len);
|
||||
}
|
||||
|
@ -1872,9 +1926,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
|
|||
addr = 0;
|
||||
}
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
|
||||
(addr & 0xFFFFFFFF));
|
||||
((u32)(addr & 0xFFFFFFFF)));
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
|
||||
(addr >> 32));
|
||||
((u32)(addr >> 32)));
|
||||
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
|
||||
task->data_count);
|
||||
|
||||
|
@ -1904,9 +1958,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
|
|||
psgl++;
|
||||
if (task->data) {
|
||||
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
|
||||
(addr & 0xFFFFFFFF));
|
||||
((u32)(addr & 0xFFFFFFFF)));
|
||||
AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
|
||||
(addr >> 32));
|
||||
((u32)(addr >> 32)));
|
||||
}
|
||||
AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
|
||||
}
|
||||
|
@ -2054,7 +2108,8 @@ free_mem:
|
|||
mem_descr->mem_array[j - 1].size,
|
||||
mem_descr->mem_array[j - 1].
|
||||
virtual_address,
|
||||
mem_descr->mem_array[j - 1].
|
||||
(unsigned long)mem_descr->
|
||||
mem_array[j - 1].
|
||||
bus_address.u.a64.address);
|
||||
}
|
||||
if (i) {
|
||||
|
@ -2223,10 +2278,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
|
|||
if (mem_descr->mem_array[0].virtual_address) {
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
|
||||
"va=%p \n", mem_descr->mem_array[0].virtual_address);
|
||||
"va=%p\n", mem_descr->mem_array[0].virtual_address);
|
||||
} else
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"No Virtual address \n");
|
||||
"No Virtual address\n");
|
||||
|
||||
pasync_ctx->async_header.va_base =
|
||||
mem_descr->mem_array[0].virtual_address;
|
||||
|
@ -2239,10 +2294,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
|
|||
if (mem_descr->mem_array[0].virtual_address) {
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
|
||||
"va=%p \n", mem_descr->mem_array[0].virtual_address);
|
||||
"va=%p\n", mem_descr->mem_array[0].virtual_address);
|
||||
} else
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"No Virtual address \n");
|
||||
"No Virtual address\n");
|
||||
pasync_ctx->async_header.ring_base =
|
||||
mem_descr->mem_array[0].virtual_address;
|
||||
|
||||
|
@ -2251,10 +2306,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
|
|||
if (mem_descr->mem_array[0].virtual_address) {
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
|
||||
"va=%p \n", mem_descr->mem_array[0].virtual_address);
|
||||
"va=%p\n", mem_descr->mem_array[0].virtual_address);
|
||||
} else
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"No Virtual address \n");
|
||||
"No Virtual address\n");
|
||||
|
||||
pasync_ctx->async_header.handle_base =
|
||||
mem_descr->mem_array[0].virtual_address;
|
||||
|
@ -2266,10 +2321,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
|
|||
if (mem_descr->mem_array[0].virtual_address) {
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
|
||||
"va=%p \n", mem_descr->mem_array[0].virtual_address);
|
||||
"va=%p\n", mem_descr->mem_array[0].virtual_address);
|
||||
} else
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"No Virtual address \n");
|
||||
"No Virtual address\n");
|
||||
pasync_ctx->async_data.va_base =
|
||||
mem_descr->mem_array[0].virtual_address;
|
||||
pasync_ctx->async_data.pa_base.u.a64.address =
|
||||
|
@ -2280,10 +2335,10 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
|
|||
if (mem_descr->mem_array[0].virtual_address) {
|
||||
SE_DEBUG(DBG_LVL_8,
|
||||
"hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
|
||||
"va=%p \n", mem_descr->mem_array[0].virtual_address);
|
||||
"va=%p\n", mem_descr->mem_array[0].virtual_address);
|
||||
} else
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"No Virtual address \n");
|
||||
"No Virtual address\n");
|
||||
|
||||
pasync_ctx->async_data.ring_base =
|
||||
mem_descr->mem_array[0].virtual_address;
|
||||
|
@ -2292,7 +2347,7 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
|
|||
mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
|
||||
if (!mem_descr->mem_array[0].virtual_address)
|
||||
shost_printk(KERN_WARNING, phba->shost,
|
||||
"No Virtual address \n");
|
||||
"No Virtual address\n");
|
||||
|
||||
pasync_ctx->async_data.handle_base =
|
||||
mem_descr->mem_array[0].virtual_address;
|
||||
|
@ -2364,7 +2419,7 @@ be_sgl_create_contiguous(void *virtual_address,
|
|||
WARN_ON(!sgl);
|
||||
|
||||
sgl->va = virtual_address;
|
||||
sgl->dma = physical_address;
|
||||
sgl->dma = (unsigned long)physical_address;
|
||||
sgl->size = length;
|
||||
|
||||
return 0;
|
||||
|
@ -2447,7 +2502,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
|
|||
sizeof(struct be_eq_entry), eq_vaddress);
|
||||
if (ret) {
|
||||
shost_printk(KERN_ERR, phba->shost,
|
||||
"be_fill_queue Failed for EQ \n");
|
||||
"be_fill_queue Failed for EQ\n");
|
||||
goto create_eq_error;
|
||||
}
|
||||
|
||||
|
@ -2457,7 +2512,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
|
|||
if (ret) {
|
||||
shost_printk(KERN_ERR, phba->shost,
|
||||
"beiscsi_cmd_eq_create"
|
||||
"Failedfor EQ \n");
|
||||
"Failedfor EQ\n");
|
||||
goto create_eq_error;
|
||||
}
|
||||
SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
|
||||
|
@ -2505,7 +2560,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
|
|||
sizeof(struct sol_cqe), cq_vaddress);
|
||||
if (ret) {
|
||||
shost_printk(KERN_ERR, phba->shost,
|
||||
"be_fill_queue Failed for ISCSI CQ \n");
|
||||
"be_fill_queue Failed for ISCSI CQ\n");
|
||||
goto create_cq_error;
|
||||
}
|
||||
|
||||
|
@ -2515,7 +2570,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
|
|||
if (ret) {
|
||||
shost_printk(KERN_ERR, phba->shost,
|
||||
"beiscsi_cmd_eq_create"
|
||||
"Failed for ISCSI CQ \n");
|
||||
"Failed for ISCSI CQ\n");
|
||||
goto create_cq_error;
|
||||
}
|
||||
SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
|
||||
|
@ -2565,7 +2620,8 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
|
|||
"be_fill_queue Failed for DEF PDU HDR\n");
|
||||
return ret;
|
||||
}
|
||||
mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
|
||||
mem->dma = (unsigned long)mem_descr->mem_array[idx].
|
||||
bus_address.u.a64.address;
|
||||
ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
|
||||
def_pdu_ring_sz,
|
||||
phba->params.defpdu_hdr_sz);
|
||||
|
@ -2609,7 +2665,8 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
|
|||
"be_fill_queue Failed for DEF PDU DATA\n");
|
||||
return ret;
|
||||
}
|
||||
mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
|
||||
mem->dma = (unsigned long)mem_descr->mem_array[idx].
|
||||
bus_address.u.a64.address;
|
||||
ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
|
||||
def_pdu_ring_sz,
|
||||
phba->params.defpdu_data_sz);
|
||||
|
@ -2623,7 +2680,7 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
|
|||
SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
|
||||
phwi_context->be_def_dataq.id);
|
||||
hwi_post_async_buffers(phba, 0);
|
||||
SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
|
||||
SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -2655,7 +2712,7 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
 		}
 		pm_arr++;
 	}
-	SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
+	SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
 	return 0;
 }

@@ -2678,7 +2735,7 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
 	mem->size = len * entry_size;
 	mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
 	if (!mem->va)
-		return -1;
+		return -ENOMEM;
 	memset(mem->va, 0, mem->size);
 	return 0;
 }
@@ -2750,6 +2807,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
 		if (status != 0) {
 			shost_printk(KERN_ERR, phba->shost,
 				     "wrbq create failed.");
+			kfree(pwrb_arr);
 			return status;
 		}
 		phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
@@ -2873,7 +2931,7 @@ mcc_cq_destroy:
 mcc_cq_free:
 	be_queue_free(phba, cq);
 err:
-	return -1;
+	return -ENOMEM;
 }

 static int find_num_cpus(void)
@@ -2884,7 +2942,7 @@ static int find_num_cpus(void)
 	if (num_cpus >= MAX_CPUS)
 		num_cpus = MAX_CPUS - 1;

-	SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
+	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
 	return num_cpus;
 }

@@ -2907,7 +2965,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)

 	status = beiscsi_create_eqs(phba, phwi_context);
 	if (status != 0) {
-		shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
+		shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
 		goto error;
 	}

@@ -2918,7 +2976,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 	status = mgmt_check_supported_fw(ctrl, phba);
 	if (status != 0) {
 		shost_printk(KERN_ERR, phba->shost,
-			     "Unsupported fw version \n");
+			     "Unsupported fw version\n");
 		goto error;
 	}

@@ -2974,7 +3032,7 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
 	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
 		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
 		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
-		SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
+		SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
			 phwi_ctrlr->phwi_ctxt);
 	} else {
 		shost_printk(KERN_ERR, phba->shost,
@@ -3007,8 +3065,8 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
 			pci_free_consistent(phba->pcidev,
 			  mem_descr->mem_array[j - 1].size,
 			  mem_descr->mem_array[j - 1].virtual_address,
-			  mem_descr->mem_array[j - 1].bus_address.
-			  u.a64.address);
+			  (unsigned long)mem_descr->mem_array[j - 1].
+			  bus_address.u.a64.address);
 		}
 		kfree(mem_descr->mem_array);
 		mem_descr++;
@@ -3024,7 +3082,7 @@ static int beiscsi_init_controller(struct beiscsi_hba *phba)
 	ret = beiscsi_get_memory(phba);
 	if (ret < 0) {
 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
-			     "Failed in beiscsi_alloc_memory \n");
+			     "Failed in beiscsi_alloc_memory\n");
 		return ret;
 	}

@@ -3101,12 +3159,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 	}
 	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
-		 "phba->eh_sgl_hndl_avbl=%d \n",
+		 "phba->eh_sgl_hndl_avbl=%d\n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
 	mem_descr_sg = phba->init_mem;
 	mem_descr_sg += HWI_MEM_SGE;
-	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
+	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
		 mem_descr_sg->num_elements);
 	arr_index = 0;
 	idx = 0;
@@ -3155,7 +3213,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 	if (!phba->ep_array) {
 		shost_printk(KERN_ERR, phba->shost,
			     "Failed to allocate memory in "
-			     "hba_setup_cid_tbls \n");
+			     "hba_setup_cid_tbls\n");
 		kfree(phba->cid_array);
 		return -ENOMEM;
 	}
@@ -3168,7 +3226,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 	return 0;
 }

-static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
+static void hwi_enable_intr(struct beiscsi_hba *phba)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct hwi_controller *phwi_ctrlr;
@@ -3184,26 +3242,25 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
 	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
 	reg = ioread32(addr);
-	SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
+	SE_DEBUG(DBG_LVL_8, "reg =x%08x\n", reg);

 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 	if (!enabled) {
 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
-		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
+		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
 		iowrite32(reg, addr);
 		if (!phba->msix_enabled) {
 			eq = &phwi_context->be_eq[0].q;
-			SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
+			SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
 			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
 		} else {
 			for (i = 0; i <= phba->num_cpus; i++) {
 				eq = &phwi_context->be_eq[i].q;
-				SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
+				SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
 				hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
 			}
 		}
 	}
-	return true;
 }

 static void hwi_disable_intr(struct beiscsi_hba *phba)
@@ -3219,7 +3276,7 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
 		iowrite32(reg, addr);
 	} else
 		shost_printk(KERN_WARNING, phba->shost,
-			     "In hwi_disable_intr, Already Disabled \n");
+			     "In hwi_disable_intr, Already Disabled\n");
 }

 static int beiscsi_init_port(struct beiscsi_hba *phba)
@@ -3230,14 +3287,14 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
 	if (ret < 0) {
 		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_dev_probe - Failed in"
-			     "beiscsi_init_controller \n");
+			     "beiscsi_init_controller\n");
 		return ret;
 	}
 	ret = beiscsi_init_sgl_handle(phba);
 	if (ret < 0) {
 		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_dev_probe - Failed in"
-			     "beiscsi_init_sgl_handle \n");
+			     "beiscsi_init_sgl_handle\n");
 		goto do_cleanup_ctrlr;
 	}

@@ -3291,12 +3348,12 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)

 static void beiscsi_clean_port(struct beiscsi_hba *phba)
 {
-	unsigned char mgmt_status;
+	int mgmt_status;

 	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
 	if (mgmt_status)
 		shost_printk(KERN_WARNING, phba->shost,
-			     "mgmt_epfw_cleanup FAILED \n");
+			     "mgmt_epfw_cleanup FAILED\n");

 	hwi_purge_eq(phba);
 	hwi_cleanup(phba);
@ -3428,14 +3485,12 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
|
|||
return -ENOMEM;
|
||||
io_task->bhs_pa.u.a64.address = paddr;
|
||||
io_task->libiscsi_itt = (itt_t)task->itt;
|
||||
io_task->pwrb_handle = alloc_wrb_handle(phba,
|
||||
beiscsi_conn->beiscsi_conn_cid -
|
||||
phba->fw_config.iscsi_cid_start
|
||||
);
|
||||
io_task->conn = beiscsi_conn;
|
||||
|
||||
task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
|
||||
task->hdr_max = sizeof(struct be_cmd_bhs);
|
||||
io_task->psgl_handle = NULL;
|
||||
io_task->psgl_handle = NULL;
|
||||
|
||||
if (task->sc) {
|
||||
spin_lock(&phba->io_sgl_lock);
|
||||
|
@ -3443,6 +3498,11 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
|
|||
spin_unlock(&phba->io_sgl_lock);
|
||||
if (!io_task->psgl_handle)
|
||||
goto free_hndls;
|
||||
io_task->pwrb_handle = alloc_wrb_handle(phba,
|
||||
beiscsi_conn->beiscsi_conn_cid -
|
||||
phba->fw_config.iscsi_cid_start);
|
||||
if (!io_task->pwrb_handle)
|
||||
goto free_io_hndls;
|
||||
} else {
|
||||
io_task->scsi_cmnd = NULL;
|
||||
if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
|
||||
|
@ -3457,9 +3517,20 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
|
|||
beiscsi_conn->login_in_progress = 1;
|
||||
beiscsi_conn->plogin_sgl_handle =
|
||||
io_task->psgl_handle;
|
||||
io_task->pwrb_handle =
|
||||
alloc_wrb_handle(phba,
|
||||
beiscsi_conn->beiscsi_conn_cid -
|
||||
phba->fw_config.iscsi_cid_start);
|
||||
if (!io_task->pwrb_handle)
|
||||
goto free_io_hndls;
|
||||
beiscsi_conn->plogin_wrb_handle =
|
||||
io_task->pwrb_handle;
|
||||
|
||||
} else {
|
||||
io_task->psgl_handle =
|
||||
beiscsi_conn->plogin_sgl_handle;
|
||||
io_task->pwrb_handle =
|
||||
beiscsi_conn->plogin_wrb_handle;
|
||||
}
|
||||
} else {
|
||||
spin_lock(&phba->mgmt_sgl_lock);
|
||||
|
@ -3467,6 +3538,13 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
|
|||
spin_unlock(&phba->mgmt_sgl_lock);
|
||||
if (!io_task->psgl_handle)
|
||||
goto free_hndls;
|
||||
io_task->pwrb_handle =
|
||||
alloc_wrb_handle(phba,
|
||||
beiscsi_conn->beiscsi_conn_cid -
|
||||
phba->fw_config.iscsi_cid_start);
|
||||
if (!io_task->pwrb_handle)
|
||||
goto free_mgmt_hndls;
|
||||
|
||||
}
|
||||
}
|
||||
itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
|
||||
|
@ -3477,16 +3555,26 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
|
|||
io_task->cmd_bhs->iscsi_hdr.itt = itt;
|
||||
return 0;
|
||||
|
||||
free_io_hndls:
|
||||
spin_lock(&phba->io_sgl_lock);
|
||||
free_io_sgl_handle(phba, io_task->psgl_handle);
|
||||
spin_unlock(&phba->io_sgl_lock);
|
||||
goto free_hndls;
|
||||
free_mgmt_hndls:
|
||||
spin_lock(&phba->mgmt_sgl_lock);
|
||||
free_mgmt_sgl_handle(phba, io_task->psgl_handle);
|
||||
spin_unlock(&phba->mgmt_sgl_lock);
|
||||
free_hndls:
|
||||
phwi_ctrlr = phba->phwi_ctrlr;
|
||||
pwrb_context = &phwi_ctrlr->wrb_context[
|
||||
beiscsi_conn->beiscsi_conn_cid -
|
||||
phba->fw_config.iscsi_cid_start];
|
||||
free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
|
||||
if (io_task->pwrb_handle)
|
||||
free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
|
||||
io_task->pwrb_handle = NULL;
|
||||
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
|
||||
io_task->bhs_pa.u.a64.address);
|
||||
SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
|
||||
SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@@ -3653,7 +3741,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
 		break;

 	default:
-		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
+		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
 		return -EINVAL;
 	}
@ -3689,13 +3777,11 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
|
|||
SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
|
||||
return num_sg;
|
||||
}
|
||||
SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
|
||||
(scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
|
||||
xferlen = scsi_bufflen(sc);
|
||||
sg = scsi_sglist(sc);
|
||||
if (sc->sc_data_direction == DMA_TO_DEVICE) {
|
||||
writedir = 1;
|
||||
SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
|
||||
SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
|
||||
task->imm_count);
|
||||
} else
|
||||
writedir = 0;
|
||||
|
@ -3709,10 +3795,12 @@ static void beiscsi_remove(struct pci_dev *pcidev)
|
|||
struct hwi_context_memory *phwi_context;
|
||||
struct be_eq_obj *pbe_eq;
|
||||
unsigned int i, msix_vec;
|
||||
u8 *real_offset = 0;
|
||||
u32 value = 0;
|
||||
|
||||
phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
|
||||
if (!phba) {
|
||||
dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
|
||||
dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -3737,6 +3825,14 @@ static void beiscsi_remove(struct pci_dev *pcidev)
|
|||
|
||||
beiscsi_clean_port(phba);
|
||||
beiscsi_free_mem(phba);
|
||||
real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
|
||||
|
||||
value = readl((void *)real_offset);
|
||||
|
||||
if (value & 0x00010000) {
|
||||
value &= 0xfffeffff;
|
||||
writel(value, (void *)real_offset);
|
||||
}
|
||||
beiscsi_unmap_pci_function(phba);
|
||||
pci_free_consistent(phba->pcidev,
|
||||
phba->ctrl.mbox_mem_alloced.size,
|
||||
|
@ -3769,19 +3865,21 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
|
|||
struct hwi_controller *phwi_ctrlr;
|
||||
struct hwi_context_memory *phwi_context;
|
||||
struct be_eq_obj *pbe_eq;
|
||||
int ret, msix_vec, num_cpus, i;
|
||||
int ret, num_cpus, i;
|
||||
u8 *real_offset = 0;
|
||||
u32 value = 0;
|
||||
|
||||
ret = beiscsi_enable_pci(pcidev);
|
||||
if (ret < 0) {
|
||||
shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
|
||||
"Failed to enable pci device \n");
|
||||
dev_err(&pcidev->dev, "beiscsi_dev_probe-"
|
||||
" Failed to enable pci device\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
phba = beiscsi_hba_alloc(pcidev);
|
||||
if (!phba) {
|
||||
dev_err(&pcidev->dev, "beiscsi_dev_probe-"
|
||||
" Failed in beiscsi_hba_alloc \n");
|
||||
" Failed in beiscsi_hba_alloc\n");
|
||||
goto disable_pci;
|
||||
}
|
||||
|
||||
|
@ -3804,7 +3902,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
|
|||
else
|
||||
num_cpus = 1;
|
||||
phba->num_cpus = num_cpus;
|
||||
SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
|
||||
SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
|
||||
|
||||
if (enable_msix)
|
||||
beiscsi_msix_enable(phba);
|
||||
|
@ -3815,6 +3913,33 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
|
|||
goto hba_free;
|
||||
}
|
||||
|
||||
if (!num_hba) {
|
||||
real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
|
||||
value = readl((void *)real_offset);
|
||||
if (value & 0x00010000) {
|
||||
gcrashmode++;
|
||||
shost_printk(KERN_ERR, phba->shost,
|
||||
"Loading Driver in crashdump mode\n");
|
||||
ret = beiscsi_pci_soft_reset(phba);
|
||||
if (ret) {
|
||||
shost_printk(KERN_ERR, phba->shost,
|
||||
"Reset Failed. Aborting Crashdump\n");
|
||||
goto hba_free;
|
||||
}
|
||||
ret = be_chk_reset_complete(phba);
|
||||
if (ret) {
|
||||
shost_printk(KERN_ERR, phba->shost,
|
||||
"Failed to get out of reset."
|
||||
"Aborting Crashdump\n");
|
||||
goto hba_free;
|
||||
}
|
||||
} else {
|
||||
value |= 0x00010000;
|
||||
writel(value, (void *)real_offset);
|
||||
num_hba++;
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_init(&phba->io_sgl_lock);
|
||||
spin_lock_init(&phba->mgmt_sgl_lock);
|
||||
spin_lock_init(&phba->isr_lock);
|
||||
|
@ -3870,25 +3995,10 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
|
|||
"Failed to beiscsi_init_irqs\n");
|
||||
goto free_blkenbld;
|
||||
}
|
||||
ret = hwi_enable_intr(phba);
|
||||
if (ret < 0) {
|
||||
shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
|
||||
"Failed to hwi_enable_intr\n");
|
||||
goto free_ctrlr;
|
||||
}
|
||||
SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
|
||||
hwi_enable_intr(phba);
|
||||
SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
|
||||
return 0;
|
||||
|
||||
free_ctrlr:
|
||||
if (phba->msix_enabled) {
|
||||
for (i = 0; i <= phba->num_cpus; i++) {
|
||||
msix_vec = phba->msix_entries[i].vector;
|
||||
free_irq(msix_vec, &phwi_context->be_eq[i]);
|
||||
}
|
||||
} else
|
||||
if (phba->pcidev->irq)
|
||||
free_irq(phba->pcidev->irq, phba);
|
||||
pci_disable_msix(phba->pcidev);
|
||||
free_blkenbld:
|
||||
destroy_workqueue(phba->wq);
|
||||
if (blk_iopoll_enabled)
|
||||
|
@ -3900,12 +4010,23 @@ free_twq:
|
|||
beiscsi_clean_port(phba);
|
||||
beiscsi_free_mem(phba);
|
||||
free_port:
|
||||
real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
|
||||
|
||||
value = readl((void *)real_offset);
|
||||
|
||||
if (value & 0x00010000) {
|
||||
value &= 0xfffeffff;
|
||||
writel(value, (void *)real_offset);
|
||||
}
|
||||
|
||||
pci_free_consistent(phba->pcidev,
|
||||
phba->ctrl.mbox_mem_alloced.size,
|
||||
phba->ctrl.mbox_mem_alloced.va,
|
||||
phba->ctrl.mbox_mem_alloced.dma);
|
||||
beiscsi_unmap_pci_function(phba);
|
||||
hba_free:
|
||||
if (phba->msix_enabled)
|
||||
pci_disable_msix(phba->pcidev);
|
||||
iscsi_host_remove(phba->shost);
|
||||
pci_dev_put(phba->pcidev);
|
||||
iscsi_host_free(phba->shost);
|
||||
|
@ -3955,7 +4076,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
|
|||
.get_session_param = iscsi_session_get_param,
|
||||
.get_host_param = beiscsi_get_host_param,
|
||||
.start_conn = beiscsi_conn_start,
|
||||
.stop_conn = beiscsi_conn_stop,
|
||||
.stop_conn = iscsi_conn_stop,
|
||||
.send_pdu = iscsi_conn_send_pdu,
|
||||
.xmit_task = beiscsi_task_xmit,
|
||||
.cleanup_task = beiscsi_cleanup_task,
|
||||
|
@@ -3988,7 +4109,7 @@ static int __init beiscsi_module_init(void)
			"transport.\n");
 		return -ENOMEM;
 	}
-	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
+	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
		 &beiscsi_iscsi_transport);

 	ret = pci_register_driver(&beiscsi_pci_driver);
@ -23,6 +23,7 @@
|
|||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/in.h>
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
|
@ -39,7 +40,7 @@
|
|||
"Linux iSCSI Driver version" BUILD_STR
|
||||
#define DRV_DESC BE_NAME " " "Driver"
|
||||
|
||||
#define BE_VENDOR_ID 0x19A2
|
||||
#define BE_VENDOR_ID 0x19A2
|
||||
/* DEVICE ID's for BE2 */
|
||||
#define BE_DEVICE_ID1 0x212
|
||||
#define OC_DEVICE_ID1 0x702
|
||||
|
@ -68,8 +69,15 @@
|
|||
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
|
||||
#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
|
||||
#define BEISCSI_MAX_FRAGS_INIT 192
|
||||
#define BE_NUM_MSIX_ENTRIES 1
|
||||
#define MPU_EP_SEMAPHORE 0xac
|
||||
#define BE_NUM_MSIX_ENTRIES 1
|
||||
|
||||
#define MPU_EP_CONTROL 0
|
||||
#define MPU_EP_SEMAPHORE 0xac
|
||||
#define BE2_SOFT_RESET 0x5c
|
||||
#define BE2_PCI_ONLINE0 0xb0
|
||||
#define BE2_PCI_ONLINE1 0xb4
|
||||
#define BE2_SET_RESET 0x80
|
||||
#define BE2_MPU_IRAM_ONLINE 0x00000080
|
||||
|
||||
#define BE_SENSE_INFO_SIZE 258
|
||||
#define BE_ISCSI_PDU_HEADER_SIZE 64
|
||||
|
@ -105,7 +113,7 @@ do { \
|
|||
#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx)
|
||||
|
||||
/********* Memory BAR register ************/
|
||||
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
|
||||
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
|
||||
/**
|
||||
* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
|
||||
* Disable" may still globally block interrupts in addition to individual
|
||||
|
@ -116,7 +124,7 @@ do { \
|
|||
#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
|
||||
|
||||
/********* ISR0 Register offset **********/
|
||||
#define CEV_ISR0_OFFSET 0xC18
|
||||
#define CEV_ISR0_OFFSET 0xC18
|
||||
#define CEV_ISR_SIZE 4
|
||||
|
||||
/**
|
||||
|
@ -139,12 +147,12 @@ do { \
|
|||
#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
|
||||
|
||||
/********* Compl Q door bell *************/
|
||||
#define DB_CQ_OFFSET 0x120
|
||||
#define DB_CQ_OFFSET 0x120
|
||||
#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
|
||||
/* Number of event entries processed */
|
||||
#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
|
||||
#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
|
||||
/* Rearm bit */
|
||||
#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
|
||||
#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
|
||||
|
||||
#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
|
||||
#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
|
||||
|
@ -161,12 +169,12 @@ enum be_mem_enum {
|
|||
HWI_MEM_WRBH,
|
||||
HWI_MEM_SGLH,
|
||||
HWI_MEM_SGE,
|
||||
HWI_MEM_ASYNC_HEADER_BUF, /* 5 */
|
||||
HWI_MEM_ASYNC_HEADER_BUF, /* 5 */
|
||||
HWI_MEM_ASYNC_DATA_BUF,
|
||||
HWI_MEM_ASYNC_HEADER_RING,
|
||||
HWI_MEM_ASYNC_DATA_RING,
|
||||
HWI_MEM_ASYNC_HEADER_HANDLE,
|
||||
HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */
|
||||
HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */
|
||||
HWI_MEM_ASYNC_PDU_CONTEXT,
|
||||
ISCSI_MEM_GLOBAL_HEADER,
|
||||
SE_MEM_MAX
|
||||
|
@ -352,6 +360,7 @@ struct beiscsi_conn {
|
|||
u32 beiscsi_conn_cid;
|
||||
struct beiscsi_endpoint *ep;
|
||||
unsigned short login_in_progress;
|
||||
struct wrb_handle *plogin_wrb_handle;
|
||||
struct sgl_handle *plogin_sgl_handle;
|
||||
struct beiscsi_session *beiscsi_sess;
|
||||
struct iscsi_task *task;
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
#include "be_mgmt.h"
|
||||
#include "be_iscsi.h"
|
||||
|
||||
unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
|
||||
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
|
||||
struct beiscsi_hba *phba)
|
||||
{
|
||||
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
|
||||
|
@@ -50,7 +50,7 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
					pfw_cfg->ulp[0].sq_count;
 	if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
 		SE_DEBUG(DBG_LVL_8,
-			 "FW reported MAX CXNS as %d \t"
+			 "FW reported MAX CXNS as %d\t"
			 "Max Supported = %d.\n",
			 phba->fw_config.iscsi_cid_count,
			 BE2_MAX_SESSIONS);
@@ -58,14 +58,14 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
 		}
 	} else {
 		shost_printk(KERN_WARNING, phba->shost,
-			     "Failed in mgmt_get_fw_config \n");
+			     "Failed in mgmt_get_fw_config\n");
 	}

 	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }

-unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
+int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
				      struct beiscsi_hba *phba)
 {
 	struct be_dma_mem nonemb_cmd;
@ -81,7 +81,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
|
|||
SE_DEBUG(DBG_LVL_1,
|
||||
"Failed to allocate memory for mgmt_check_supported_fw"
|
||||
"\n");
|
||||
return -1;
|
||||
return -ENOMEM;
|
||||
}
|
||||
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
|
||||
req = nonemb_cmd.va;
|
||||
|
@ -117,8 +117,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
|
|||
return status;
|
||||
}
|
||||
|
||||
|
||||
unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
|
||||
int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
|
||||
{
|
||||
struct be_ctrl_info *ctrl = &phba->ctrl;
|
||||
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
|
||||
|
@ -144,11 +143,12 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
|
|||
return status;
|
||||
}
|
||||
|
||||
unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
|
||||
unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
|
||||
struct invalidate_command_table *inv_tbl,
|
||||
unsigned int num_invalidate, unsigned int cid)
|
||||
unsigned int num_invalidate, unsigned int cid,
|
||||
struct be_dma_mem *nonemb_cmd)
|
||||
|
||||
{
|
||||
struct be_dma_mem nonemb_cmd;
|
||||
struct be_ctrl_info *ctrl = &phba->ctrl;
|
||||
struct be_mcc_wrb *wrb;
|
||||
struct be_sge *sge;
|
||||
|
@ -162,17 +162,7 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
|
|||
return tag;
|
||||
}
|
||||
|
||||
nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
|
||||
sizeof(struct invalidate_commands_params_in),
|
||||
&nonemb_cmd.dma);
|
||||
if (nonemb_cmd.va == NULL) {
|
||||
SE_DEBUG(DBG_LVL_1,
|
||||
"Failed to allocate memory for mgmt_invalidate_icds\n");
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
return 0;
|
||||
}
|
||||
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
|
||||
req = nonemb_cmd.va;
|
||||
req = nonemb_cmd->va;
|
||||
memset(req, 0, sizeof(*req));
|
||||
wrb = wrb_from_mccq(phba);
|
||||
sge = nonembedded_sgl(wrb);
|
||||
|
@ -190,19 +180,16 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
|
|||
req->icd_count++;
|
||||
inv_tbl++;
|
||||
}
|
||||
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
|
||||
sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
|
||||
sge->len = cpu_to_le32(nonemb_cmd.size);
|
||||
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
|
||||
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
|
||||
sge->len = cpu_to_le32(nonemb_cmd->size);
|
||||
|
||||
be_mcc_notify(phba);
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
if (nonemb_cmd.va)
|
||||
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
|
||||
nonemb_cmd.va, nonemb_cmd.dma);
|
||||
return tag;
|
||||
}
|
||||
|
||||
unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
|
||||
unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
|
||||
struct beiscsi_endpoint *beiscsi_ep,
|
||||
unsigned short cid,
|
||||
unsigned short issue_reset,
|
||||
|
@ -239,7 +226,7 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
|
|||
return tag;
|
||||
}
|
||||
|
||||
unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
|
||||
unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
|
||||
unsigned short cid, unsigned int upload_flag)
|
||||
{
|
||||
struct be_ctrl_info *ctrl = &phba->ctrl;
|
||||
|
@ -269,7 +256,9 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
|
|||
|
||||
int mgmt_open_connection(struct beiscsi_hba *phba,
|
||||
struct sockaddr *dst_addr,
|
||||
struct beiscsi_endpoint *beiscsi_ep)
|
||||
struct beiscsi_endpoint *beiscsi_ep,
|
||||
struct be_dma_mem *nonemb_cmd)
|
||||
|
||||
{
|
||||
struct hwi_controller *phwi_ctrlr;
|
||||
struct hwi_context_memory *phwi_context;
|
||||
|
@ -285,6 +274,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
|
|||
unsigned int tag = 0;
|
||||
unsigned int i;
|
||||
unsigned short cid = beiscsi_ep->ep_cid;
|
||||
struct be_sge *sge;
|
||||
|
||||
phwi_ctrlr = phba->phwi_ctrlr;
|
||||
phwi_context = phwi_ctrlr->phwi_ctxt;
|
||||
|
@ -300,10 +290,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
|
|||
return tag;
|
||||
}
|
||||
wrb = wrb_from_mccq(phba);
|
||||
req = embedded_payload(wrb);
|
||||
memset(wrb, 0, sizeof(*wrb));
|
||||
sge = nonembedded_sgl(wrb);
|
||||
|
||||
req = nonemb_cmd->va;
|
||||
memset(req, 0, sizeof(*req));
|
||||
wrb->tag0 |= tag;
|
||||
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
|
||||
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1);
|
||||
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
|
||||
OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
|
||||
sizeof(*req));
|
||||
|
@ -331,6 +325,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
|
|||
shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n",
|
||||
dst_addr->sa_family);
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
free_mcc_tag(&phba->ctrl, tag);
|
||||
return -EINVAL;
|
||||
|
||||
}
|
||||
|
@ -339,13 +334,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
|
|||
if (phba->nxt_cqid == phba->num_cpus)
|
||||
phba->nxt_cqid = 0;
|
||||
req->cq_id = phwi_context->be_cq[i].id;
|
||||
SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d \n", i, req->cq_id);
|
||||
SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d\n", i, req->cq_id);
|
||||
req->defq_id = def_hdr_id;
|
||||
req->hdr_ring_id = def_hdr_id;
|
||||
req->data_ring_id = def_data_id;
|
||||
req->do_offload = 1;
|
||||
req->dataout_template_pa.lo = ptemplate_address->lo;
|
||||
req->dataout_template_pa.hi = ptemplate_address->hi;
|
||||
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
|
||||
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
|
||||
sge->len = cpu_to_le32(nonemb_cmd->size);
|
||||
be_mcc_notify(phba);
|
||||
spin_unlock(&ctrl->mbox_lock);
|
||||
return tag;
|
||||
|
|
|
@ -86,16 +86,19 @@ struct mcc_wrb {
|
|||
struct mcc_wrb_payload payload;
|
||||
};
|
||||
|
||||
unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
|
||||
int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr,
|
||||
struct beiscsi_endpoint *beiscsi_ep);
|
||||
int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
|
||||
int mgmt_open_connection(struct beiscsi_hba *phba,
|
||||
struct sockaddr *dst_addr,
|
||||
struct beiscsi_endpoint *beiscsi_ep,
|
||||
struct be_dma_mem *nonemb_cmd);
|
||||
|
||||
unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
|
||||
unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
|
||||
unsigned short cid,
|
||||
unsigned int upload_flag);
|
||||
unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
|
||||
unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
|
||||
struct invalidate_command_table *inv_tbl,
|
||||
unsigned int num_invalidate, unsigned int cid);
|
||||
unsigned int num_invalidate, unsigned int cid,
|
||||
struct be_dma_mem *nonemb_cmd);
|
||||
|
||||
struct iscsi_invalidate_connection_params_in {
|
||||
struct be_cmd_req_hdr hdr;
|
||||
|
@ -237,10 +240,10 @@ struct beiscsi_endpoint {
|
|||
u16 cid_vld;
|
||||
};
|
||||
|
||||
unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
|
||||
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
|
||||
struct beiscsi_hba *phba);
|
||||
|
||||
unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
|
||||
unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
|
||||
struct beiscsi_endpoint *beiscsi_ep,
|
||||
unsigned short cid,
|
||||
unsigned short issue_reset,
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
|
||||
|
||||
bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
|
||||
|
||||
bfa-y += bfad_debugfs.o
|
||||
bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
|
||||
bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
|
||||
bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
|
||||
|
|
|
@ -171,6 +171,11 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
|
|||
return cmnd->cmd_len;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Assign queue to be used for the I/O request. This value depends on whether
|
||||
* the driver wants to use the queues via any specific algorithm. Currently,
|
||||
* this is not supported.
|
||||
*/
|
||||
#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
|
||||
|
||||
#endif /* __BFA_HCB_IOIM_MACROS_H__ */
|
||||
|
|
|
@ -333,6 +333,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
|
|||
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
|
||||
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
|
||||
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
|
||||
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
|
||||
};
|
||||
|
||||
*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
|
||||
|
|
|
@ -79,11 +79,6 @@ bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
bfa_ioim_attach(fcpim, meminfo);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcpim_initdone(struct bfa_s *bfa)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcpim_detach(struct bfa_s *bfa)
|
||||
{
|
||||
|
@ -172,4 +167,28 @@ bfa_fcpim_qdepth_get(struct bfa_s *bfa)
|
|||
return fcpim->q_depth;
|
||||
}
|
||||
|
||||
void
|
||||
bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
|
||||
{
|
||||
bfa_boolean_t ioredirect;
|
||||
|
||||
/*
|
||||
* IO redirection is turned off when QoS is enabled and vice versa
|
||||
*/
|
||||
ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
|
||||
|
||||
/*
|
||||
* Notify the bfad module of a possible state change in
|
||||
* IO redirection capability, due to a QoS state change. bfad will
|
||||
* check on the support for io redirection and update the
|
||||
* fcpim's ioredirect state accordingly.
|
||||
*/
|
||||
bfa_cb_ioredirect_state_change((void *)(bfa->bfad), ioredirect);
|
||||
}
|
||||
|
||||
void
|
||||
bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
|
||||
{
|
||||
struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
|
||||
fcpim->ioredirect = state;
|
||||
}
|
||||
|
|
|
@ -49,7 +49,8 @@ struct bfa_fcpim_mod_s {
|
|||
int num_tskim_reqs;
|
||||
u32 path_tov;
|
||||
u16 q_depth;
|
||||
u16 rsvd;
|
||||
u8 reqq; /* Request queue to be used */
|
||||
u8 rsvd;
|
||||
struct list_head itnim_q; /* queue of active itnim */
|
||||
struct list_head ioim_free_q; /* free IO resources */
|
||||
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
|
||||
|
@ -58,6 +59,7 @@ struct bfa_fcpim_mod_s {
|
|||
u32 ios_active; /* current active IOs */
|
||||
u32 delay_comp;
|
||||
struct bfa_fcpim_stats_s stats;
|
||||
bfa_boolean_t ioredirect;
|
||||
};
|
||||
|
||||
struct bfa_ioim_s;
|
||||
|
@ -82,6 +84,7 @@ struct bfa_ioim_s {
|
|||
struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */
|
||||
bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
|
||||
struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
|
||||
u8 reqq; /* Request queue for I/O */
|
||||
};
|
||||
|
||||
struct bfa_ioim_sp_s {
|
||||
|
@ -141,6 +144,7 @@ struct bfa_itnim_s {
|
|||
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
|
||||
struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
|
||||
struct bfa_itnim_hal_stats_s stats;
|
||||
struct bfa_itnim_latency_s io_latency;
|
||||
};
|
||||
|
||||
#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <bfa.h>
|
||||
#include <bfa_svc.h>
|
||||
#include <bfi/bfi_pport.h>
|
||||
#include <bfi/bfi_pbc.h>
|
||||
#include <cs/bfa_debug.h>
|
||||
#include <aen/bfa_aen.h>
|
||||
#include <cs/bfa_plog.h>
|
||||
|
@ -310,10 +311,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
|
|||
|
||||
if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
|
||||
|
||||
bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled);
|
||||
bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed);
|
||||
bfa_trc(fcport->bfa,
|
||||
pevent->link_state.vc_fcf.fcf.fipenabled);
|
||||
bfa_trc(fcport->bfa,
|
||||
pevent->link_state.vc_fcf.fcf.fipfailed);
|
||||
|
||||
if (pevent->link_state.fcf.fipfailed)
|
||||
if (pevent->link_state.vc_fcf.fcf.fipfailed)
|
||||
bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
|
||||
BFA_PL_EID_FIP_FCF_DISC, 0,
|
||||
"FIP FCF Discovery Failed");
|
||||
|
@ -888,6 +891,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
|
||||
struct bfa_fcport_ln_s *ln = &fcport->ln;
|
||||
struct bfa_timeval_s tv;
|
||||
|
||||
bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
|
||||
fcport->bfa = bfa;
|
||||
|
@ -898,6 +902,12 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
|
||||
bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
|
||||
|
||||
/**
|
||||
* initialize time stamp for stats reset
|
||||
*/
|
||||
bfa_os_gettimeofday(&tv);
|
||||
fcport->stats_reset_time = tv.tv_sec;
|
||||
|
||||
/**
|
||||
* initialize and set default configuration
|
||||
*/
|
||||
|
@ -911,25 +921,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcport_initdone(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
|
||||
/**
|
||||
* Initialize port attributes from IOC hardware data.
|
||||
*/
|
||||
bfa_fcport_set_wwns(fcport);
|
||||
if (fcport->cfg.maxfrsize == 0)
|
||||
fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
|
||||
fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
|
||||
fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
|
||||
|
||||
bfa_assert(fcport->cfg.maxfrsize);
|
||||
bfa_assert(fcport->cfg.rx_bbcredit);
|
||||
bfa_assert(fcport->speed_sup);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcport_detach(struct bfa_s *bfa)
|
||||
{
|
||||
|
@ -971,14 +962,15 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
|
|||
fcport->topology = pevent->link_state.topology;
|
||||
|
||||
if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
|
||||
fcport->myalpa =
|
||||
pevent->link_state.tl.loop_info.myalpa;
|
||||
fcport->myalpa = 0;
|
||||
|
||||
/*
|
||||
* QoS Details
|
||||
*/
|
||||
bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
|
||||
bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr);
|
||||
bfa_os_assign(fcport->qos_vc_attr,
|
||||
pevent->link_state.vc_fcf.qos_vc_attr);
|
||||
|
||||
|
||||
bfa_trc(fcport->bfa, fcport->speed);
|
||||
bfa_trc(fcport->bfa, fcport->topology);
|
||||
|
@ -1145,16 +1137,22 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
|
|||
|
||||
if (complete) {
|
||||
if (fcport->stats_status == BFA_STATUS_OK) {
|
||||
struct bfa_timeval_s tv;
|
||||
|
||||
/* Swap FC QoS or FCoE stats */
|
||||
if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
|
||||
if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
|
||||
bfa_fcport_qos_stats_swap(
|
||||
&fcport->stats_ret->fcqos,
|
||||
&fcport->stats->fcqos);
|
||||
else
|
||||
} else {
|
||||
bfa_fcport_fcoe_stats_swap(
|
||||
&fcport->stats_ret->fcoe,
|
||||
&fcport->stats->fcoe);
|
||||
|
||||
bfa_os_gettimeofday(&tv);
|
||||
fcport->stats_ret->fcoe.secs_reset =
|
||||
tv.tv_sec - fcport->stats_reset_time;
|
||||
}
|
||||
}
|
||||
fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
|
||||
} else {
|
||||
|
@ -1210,6 +1208,14 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
|
|||
struct bfa_fcport_s *fcport = cbarg;
|
||||
|
||||
if (complete) {
|
||||
struct bfa_timeval_s tv;
|
||||
|
||||
/**
|
||||
* re-initialize time stamp for stats reset
|
||||
*/
|
||||
bfa_os_gettimeofday(&tv);
|
||||
fcport->stats_reset_time = tv.tv_sec;
|
||||
|
||||
fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
|
||||
} else {
|
||||
fcport->stats_busy = BFA_FALSE;
|
||||
|
@ -1262,6 +1268,29 @@ bfa_fcport_send_stats_clear(void *cbarg)
|
|||
* bfa_pport_public
|
||||
*/
|
||||
|
||||
/**
|
||||
* Called to initialize port attributes
|
||||
*/
|
||||
void
|
||||
bfa_fcport_init(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
|
||||
/**
|
||||
* Initialize port attributes from IOC hardware data.
|
||||
*/
|
||||
bfa_fcport_set_wwns(fcport);
|
||||
if (fcport->cfg.maxfrsize == 0)
|
||||
fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
|
||||
fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
|
||||
fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
|
||||
|
||||
bfa_assert(fcport->cfg.maxfrsize);
|
||||
bfa_assert(fcport->cfg.rx_bbcredit);
|
||||
bfa_assert(fcport->speed_sup);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Firmware message handler.
|
||||
*/
|
||||
|
@ -1355,6 +1384,17 @@ bfa_status_t
|
|||
bfa_fcport_enable(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
|
||||
|
||||
/* if port is PBC disabled, return error */
|
||||
if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
|
||||
bfa_trc(bfa, fcport->pwwn);
|
||||
return BFA_STATUS_PBC;
|
||||
}
|
||||
|
||||
if (bfa_ioc_is_disabled(&bfa->ioc))
|
||||
return BFA_STATUS_IOC_DISABLED;
|
||||
|
||||
if (fcport->diag_busy)
|
||||
return BFA_STATUS_DIAG_BUSY;
|
||||
|
@ -1369,6 +1409,16 @@ bfa_fcport_enable(struct bfa_s *bfa)
|
|||
bfa_status_t
|
||||
bfa_fcport_disable(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
|
||||
|
||||
/* if port is PBC disabled, return error */
|
||||
if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
|
||||
bfa_trc(bfa, fcport->pwwn);
|
||||
return BFA_STATUS_PBC;
|
||||
}
|
||||
|
||||
bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
|
||||
return BFA_STATUS_OK;
|
||||
}
|
||||
|
@ -1559,12 +1609,17 @@ void
|
|||
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
|
||||
|
||||
bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
|
||||
|
||||
attr->nwwn = fcport->nwwn;
|
||||
attr->pwwn = fcport->pwwn;
|
||||
|
||||
attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
|
||||
attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);
|
||||
|
||||
bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
|
||||
sizeof(struct bfa_pport_cfg_s));
|
||||
/*
|
||||
|
@ -1590,11 +1645,18 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
|
|||
|
||||
attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
|
||||
attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
|
||||
attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm);
|
||||
if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
|
||||
attr->port_state = BFA_PPORT_ST_IOCDIS;
|
||||
else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
|
||||
attr->port_state = BFA_PPORT_ST_FWMISMATCH;
|
||||
|
||||
/* PBC Disabled State */
|
||||
if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED)
|
||||
attr->port_state = BFA_PPORT_ST_PREBOOT_DISABLED;
|
||||
else {
|
||||
attr->port_state = bfa_sm_to_state(
|
||||
hal_pport_sm_table, fcport->sm);
|
||||
if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
|
||||
attr->port_state = BFA_PPORT_ST_IOCDIS;
|
||||
else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
|
||||
attr->port_state = BFA_PPORT_ST_FWMISMATCH;
|
||||
}
|
||||
}
|
||||
|
||||
#define BFA_FCPORT_STATS_TOV 1000
|
||||
|
@ -1801,8 +1863,13 @@ bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
|
|||
|
||||
bfa_trc(bfa, ioc_type);
|
||||
|
||||
if (ioc_type == BFA_IOC_TYPE_FC)
|
||||
if (ioc_type == BFA_IOC_TYPE_FC) {
|
||||
fcport->cfg.qos_enabled = on_off;
|
||||
/**
|
||||
* Notify fcpim of the change in QoS state
|
||||
*/
|
||||
bfa_fcpim_update_ioredirect(bfa);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -1886,4 +1953,10 @@ bfa_fcport_is_linkup(struct bfa_s *bfa)
|
|||
return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
|
||||
}
|
||||
|
||||
bfa_boolean_t
|
||||
bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
|
||||
{
|
||||
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
|
||||
|
||||
return fcport->cfg.qos_enabled;
|
||||
}
|
||||
|
|
|
@ -99,14 +99,22 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
|
|||
void
|
||||
bfa_fcs_init(struct bfa_fcs_s *fcs)
|
||||
{
|
||||
int i;
|
||||
int i, npbc_vports;
|
||||
struct bfa_fcs_mod_s *mod;
|
||||
struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
|
||||
|
||||
for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
|
||||
mod = &fcs_modules[i];
|
||||
if (mod->modinit)
|
||||
mod->modinit(fcs);
|
||||
}
|
||||
/* Initialize pbc vports */
|
||||
if (!fcs->min_cfg) {
|
||||
npbc_vports =
|
||||
bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
|
||||
for (i = 0; i < npbc_vports; i++)
|
||||
bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -135,6 +135,9 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
 		bfa_fcs_port_deleted(port);
 		break;

+	case BFA_FCS_PORT_SM_OFFLINE:
+		break;
+
 	default:
 		bfa_sm_fault(port->fcs, event);
 	}
|
|
@ -148,11 +148,6 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
claim_fcxps_mem(mod, meminfo);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcxp_initdone(struct bfa_s *bfa)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_fcxp_detach(struct bfa_s *bfa)
|
||||
{
|
||||
|
@@ -225,7 +220,7 @@ bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
 {
-	/**discarded fcxp completion */
+	/* discarded fcxp completion */
 }

 static void
@ -527,11 +522,8 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
|
|||
if (nreq_sgles > BFI_SGE_INLINE) {
|
||||
nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
|
||||
|
||||
if (bfa_sgpg_malloc
|
||||
(bfa, &fcxp->req_sgpg_q, nreq_sgpg)
|
||||
if (bfa_sgpg_malloc(bfa, &fcxp->req_sgpg_q, nreq_sgpg)
|
||||
!= BFA_STATUS_OK) {
|
||||
/* bfa_sgpg_wait(bfa, &fcxp->req_sgpg_wqe,
|
||||
nreq_sgpg); */
|
||||
/*
|
||||
* TODO
|
||||
*/
|
||||
|
@ -685,7 +677,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
|
|||
fcxp->send_cbarg = cbarg;
|
||||
|
||||
/**
|
||||
* If no room in CPE queue, wait for
|
||||
* If no room in CPE queue, wait for space in request queue
|
||||
*/
|
||||
send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
|
||||
if (!send_req) {
|
||||
|
|
|
@ -21,11 +21,24 @@
|
|||
#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
|
||||
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
|
||||
|
||||
extern u32 *bfi_image_ct_get_chunk(u32 off);
|
||||
extern u32 bfi_image_ct_size;
|
||||
extern u32 *bfi_image_cb_get_chunk(u32 off);
|
||||
extern u32 bfi_image_cb_size;
|
||||
extern u32 *bfi_image_cb;
|
||||
extern u32 *bfi_image_ct;
|
||||
/**
|
||||
* BFI FW image type
|
||||
*/
|
||||
enum {
|
||||
BFI_IMAGE_CB_FC,
|
||||
BFI_IMAGE_CT_FC,
|
||||
BFI_IMAGE_CT_CNA,
|
||||
BFI_IMAGE_MAX,
|
||||
};
|
||||
|
||||
extern u32 *bfi_image_get_chunk(int type, uint32_t off);
|
||||
extern u32 bfi_image_get_size(int type);
|
||||
extern u32 bfi_image_ct_fc_size;
|
||||
extern u32 bfi_image_ct_cna_size;
|
||||
extern u32 bfi_image_cb_fc_size;
|
||||
extern u32 *bfi_image_ct_fc;
|
||||
extern u32 *bfi_image_ct_cna;
|
||||
extern u32 *bfi_image_cb_fc;
|
||||
|
||||
|
||||
#endif /* __BFA_FWIMG_PRIV_H__ */
|
||||
|
|
|
@ -152,4 +152,9 @@ bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
|
|||
bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
|
||||
{
|
||||
*start = BFA_MSIX_RME_Q0;
|
||||
*end = BFA_MSIX_RME_Q7;
|
||||
}
|
||||
|
|
|
@ -168,4 +168,9 @@ bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
|
|||
bfa_ioc_isr_mode_set(&bfa->ioc, msix);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
|
||||
{
|
||||
*start = BFA_MSIX_RME_Q0;
|
||||
*end = BFA_MSIX_RME_Q3;
|
||||
}
|
||||
|
|
|
@@ -134,6 +134,7 @@ bfa_isr_enable(struct bfa_s *bfa)

 	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
 	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
+	bfa->iocfc.intr_mask = ~intr_unmask;
 	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
 }

|
|
@ -59,22 +59,18 @@ BFA_TRC_FILE(CNA, IOC);
|
|||
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
|
||||
#define bfa_ioc_firmware_unlock(__ioc) \
|
||||
((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
|
||||
#define bfa_ioc_fwimg_get_chunk(__ioc, __off) \
|
||||
((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off))
|
||||
#define bfa_ioc_fwimg_get_size(__ioc) \
|
||||
((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc))
|
||||
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
|
||||
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
|
||||
#define bfa_ioc_notify_hbfail(__ioc) \
|
||||
((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
|
||||
#define bfa_ioc_is_optrom(__ioc) \
|
||||
(bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
|
||||
|
||||
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
|
||||
|
||||
/*
|
||||
* forward declarations
|
||||
*/
|
||||
static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
|
||||
enum bfa_ioc_aen_event event);
|
||||
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
|
||||
|
@ -88,6 +84,7 @@ static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
|
|||
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
|
||||
|
||||
|
@ -433,6 +430,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
|
|||
switch (event) {
|
||||
case IOC_E_FWRSP_GETATTR:
|
||||
bfa_ioc_timer_stop(ioc);
|
||||
bfa_ioc_check_attr_wwns(ioc);
|
||||
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
|
||||
break;
|
||||
|
||||
|
@ -879,8 +877,8 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
|
|||
struct bfi_ioc_image_hdr_s *drv_fwhdr;
|
||||
int i;
|
||||
|
||||
drv_fwhdr =
|
||||
(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);
|
||||
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
|
||||
bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
|
||||
|
||||
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
|
||||
if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
|
||||
|
@ -907,12 +905,13 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
|
|||
/**
|
||||
* If bios/efi boot (flash based) -- return true
|
||||
*/
|
||||
if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
|
||||
if (bfa_ioc_is_optrom(ioc))
|
||||
return BFA_TRUE;
|
||||
|
||||
bfa_ioc_fwver_get(ioc, &fwhdr);
|
||||
drv_fwhdr =
|
||||
(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);
|
||||
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
|
||||
bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
|
||||
|
||||
|
||||
if (fwhdr.signature != drv_fwhdr->signature) {
|
||||
bfa_trc(ioc, fwhdr.signature);
|
||||
|
@ -980,8 +979,13 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
|
|||
/**
|
||||
* If IOC function is disabled and firmware version is same,
|
||||
* just re-enable IOC.
|
||||
*
|
||||
* If option rom, IOC must not be in operational state. With
|
||||
* convergence, IOC will be in operational state when 2nd driver
|
||||
* is loaded.
|
||||
*/
|
||||
if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
|
||||
if (ioc_fwstate == BFI_IOC_DISABLED ||
|
||||
(!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
|
||||
bfa_trc(ioc, ioc_fwstate);
|
||||
|
||||
/**
|
||||
|
@ -1125,21 +1129,22 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
|
|||
/**
|
||||
* Flash based firmware boot
|
||||
*/
|
||||
bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc));
|
||||
if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
|
||||
bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
|
||||
if (bfa_ioc_is_optrom(ioc))
|
||||
boot_type = BFI_BOOT_TYPE_FLASH;
|
||||
fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
|
||||
fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
|
||||
|
||||
|
||||
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
|
||||
pgoff = bfa_ioc_smem_pgoff(ioc, loff);
|
||||
|
||||
bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
|
||||
|
||||
for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {
|
||||
for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
|
||||
|
||||
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
|
||||
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
|
||||
fwimg = bfa_ioc_fwimg_get_chunk(ioc,
|
||||
fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
|
||||
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
|
||||
}
|
||||
|
||||
|
@ -1188,6 +1193,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
|
|||
struct bfi_ioc_attr_s *attr = ioc->attr;
|
||||
|
||||
attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
|
||||
attr->card_type = bfa_os_ntohl(attr->card_type);
|
||||
attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);
|
||||
|
||||
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
|
||||
|
@ -1282,6 +1288,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
|
|||
bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
|
||||
}
|
||||
|
||||
bfa_ioc_msgflush(ioc);
|
||||
bfa_ioc_download_fw(ioc, boot_type, boot_param);
|
||||
|
||||
/**
|
||||
|
@ -1416,7 +1423,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
|
|||
{
|
||||
ioc->ioc_mc = mc;
|
||||
ioc->pcidev = *pcidev;
|
||||
ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
|
||||
ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
|
||||
ioc->cna = ioc->ctdev && !ioc->fcmode;
|
||||
|
||||
/**
|
||||
|
@ -1607,6 +1614,13 @@ bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
|
|||
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
|
||||
}
|
||||
|
||||
void
|
||||
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
ioc->fcmode = BFA_TRUE;
|
||||
ioc->port_id = bfa_ioc_pcifn(ioc);
|
||||
}
|
||||
|
||||
#ifndef BFA_BIOS_BUILD
|
||||
|
||||
/**
|
||||
|
@ -1696,6 +1710,9 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
|
|||
/* For now, model descr uses same model string */
|
||||
bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
|
||||
|
||||
ad_attr->card_type = ioc_attr->card_type;
|
||||
ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
|
||||
|
||||
if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
|
||||
ad_attr->prototype = 1;
|
||||
else
|
||||
|
@ -1779,28 +1796,17 @@ void
|
|||
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
|
||||
{
|
||||
struct bfi_ioc_attr_s *ioc_attr;
|
||||
u8 nports;
|
||||
u8 max_speed;
|
||||
|
||||
bfa_assert(model);
|
||||
bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
|
||||
|
||||
ioc_attr = ioc->attr;
|
||||
|
||||
nports = bfa_ioc_get_nports(ioc);
|
||||
max_speed = bfa_ioc_speed_sup(ioc);
|
||||
|
||||
/**
|
||||
* model name
|
||||
*/
|
||||
if (max_speed == 10) {
|
||||
strcpy(model, "BR-10?0");
|
||||
model[5] = '0' + nports;
|
||||
} else {
|
||||
strcpy(model, "Brocade-??5");
|
||||
model[8] = '0' + max_speed;
|
||||
model[9] = '0' + nports;
|
||||
}
|
||||
snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
|
||||
BFA_MFG_NAME, ioc_attr->card_type);
|
||||
}
|
||||
|
||||
enum bfa_ioc_state
|
||||
|
@ -1827,78 +1833,54 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
|
|||
}
|
||||
|
||||
/**
|
||||
* hal_wwn_public
|
||||
* bfa_wwn_public
|
||||
*/
|
||||
wwn_t
|
||||
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
union {
|
||||
wwn_t wwn;
|
||||
u8 byte[sizeof(wwn_t)];
|
||||
}
|
||||
w;
|
||||
|
||||
w.wwn = ioc->attr->mfg_wwn;
|
||||
|
||||
if (bfa_ioc_portid(ioc) == 1)
|
||||
w.byte[7]++;
|
||||
|
||||
return w.wwn;
|
||||
return ioc->attr->pwwn;
|
||||
}
|
||||
|
||||
wwn_t
|
||||
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
union {
|
||||
wwn_t wwn;
|
||||
u8 byte[sizeof(wwn_t)];
|
||||
}
|
||||
w;
|
||||
|
||||
w.wwn = ioc->attr->mfg_wwn;
|
||||
|
||||
if (bfa_ioc_portid(ioc) == 1)
|
||||
w.byte[7]++;
|
||||
|
||||
w.byte[0] = 0x20;
|
||||
|
||||
return w.wwn;
|
||||
}
|
||||
|
||||
wwn_t
|
||||
bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
|
||||
{
|
||||
union {
|
||||
wwn_t wwn;
|
||||
u8 byte[sizeof(wwn_t)];
|
||||
}
|
||||
w , w5;
|
||||
|
||||
bfa_trc(ioc, inst);
|
||||
|
||||
w.wwn = ioc->attr->mfg_wwn;
|
||||
w5.byte[0] = 0x50 | w.byte[2] >> 4;
|
||||
w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
|
||||
w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
|
||||
w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
|
||||
w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
|
||||
w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
|
||||
w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
|
||||
w5.byte[7] = (inst & 0xff);
|
||||
|
||||
return w5.wwn;
|
||||
return ioc->attr->nwwn;
|
||||
}
|
||||
|
||||
u64
|
||||
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
return ioc->attr->mfg_wwn;
|
||||
return ioc->attr->mfg_pwwn;
|
||||
}
|
||||
|
||||
mac_t
|
||||
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
mac_t mac;
|
||||
/*
|
||||
* Currently mfg mac is used as FCoE enode mac (not configured by PBC)
|
||||
*/
|
||||
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
|
||||
return bfa_ioc_get_mfg_mac(ioc);
|
||||
else
|
||||
return ioc->attr->mac;
|
||||
}
|
||||
|
||||
wwn_t
|
||||
bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
return ioc->attr->mfg_pwwn;
|
||||
}
|
||||
|
||||
wwn_t
|
||||
bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
return ioc->attr->mfg_nwwn;
|
||||
}
|
||||
|
||||
mac_t
|
||||
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
mac_t mac;
|
||||
|
||||
mac = ioc->attr->mfg_mac;
|
||||
mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
|
||||
|
@ -1906,23 +1888,16 @@ bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
|
|||
return mac;
|
||||
}
|
||||
|
||||
void
|
||||
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
ioc->fcmode = BFA_TRUE;
|
||||
ioc->port_id = bfa_ioc_pcifn(ioc);
|
||||
}
|
||||
|
||||
bfa_boolean_t
|
||||
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
|
||||
return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send AEN notification
|
||||
*/
|
||||
static void
|
||||
void
|
||||
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
|
||||
{
|
||||
union bfa_aen_data_u aen_data;
|
||||
|
@ -2070,19 +2045,16 @@ bfa_ioc_recover(struct bfa_ioc_s *ioc)
|
|||
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static void
|
||||
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
|
||||
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
}
|
||||
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
|
||||
return;
|
||||
|
||||
static void
|
||||
bfa_ioc_recover(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
bfa_assert(0);
|
||||
if (ioc->attr->nwwn == 0)
|
||||
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
|
||||
if (ioc->attr->pwwn == 0)
|
||||
bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
|
|
@ -186,9 +186,6 @@ struct bfa_ioc_hwif_s {
|
|||
bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc);
|
||||
bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
|
||||
void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
|
||||
u32 * (*ioc_fwimg_get_chunk) (struct bfa_ioc_s *ioc,
|
||||
u32 off);
|
||||
u32 (*ioc_fwimg_get_size) (struct bfa_ioc_s *ioc);
|
||||
void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
|
||||
void (*ioc_map_port) (struct bfa_ioc_s *ioc);
|
||||
void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
|
||||
|
@ -214,6 +211,10 @@ struct bfa_ioc_hwif_s {
|
|||
|
||||
#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
|
||||
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
|
||||
#define BFA_IOC_FWIMG_TYPE(__ioc) \
|
||||
(((__ioc)->ctdev) ? \
|
||||
(((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
|
||||
BFI_IMAGE_CB_FC)
|
||||
|
||||
#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
|
||||
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
|
||||
|
@ -296,14 +297,17 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
|
|||
struct bfi_ioc_image_hdr_s *fwhdr);
|
||||
bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
|
||||
struct bfi_ioc_image_hdr_s *fwhdr);
|
||||
void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
|
||||
|
||||
/*
|
||||
* bfa mfg wwn API functions
|
||||
*/
|
||||
wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc);
|
||||
wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc);
|
||||
wwn_t bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst);
|
||||
mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
|
||||
wwn_t bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc);
|
||||
wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
|
||||
mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
|
||||
u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
|
||||
|
||||
#endif /* __BFA_IOC_H__ */
|
||||
|
|
|
@ -33,26 +33,13 @@ BFA_TRC_FILE(CNA, IOC_CB);
|
|||
static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
|
||||
static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
|
||||
static u32 *bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off);
|
||||
static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
|
||||
static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
|
||||
|
||||
struct bfa_ioc_hwif_s hwif_cb = {
|
||||
bfa_ioc_cb_pll_init,
|
||||
bfa_ioc_cb_firmware_lock,
|
||||
bfa_ioc_cb_firmware_unlock,
|
||||
bfa_ioc_cb_fwimg_get_chunk,
|
||||
bfa_ioc_cb_fwimg_get_size,
|
||||
bfa_ioc_cb_reg_init,
|
||||
bfa_ioc_cb_map_port,
|
||||
bfa_ioc_cb_isr_mode_set,
|
||||
bfa_ioc_cb_notify_hbfail,
|
||||
bfa_ioc_cb_ownership_reset,
|
||||
};
|
||||
struct bfa_ioc_hwif_s hwif_cb;
|
||||
|
||||
/**
|
||||
* Called from bfa_ioc_attach() to map asic specific calls.
|
||||
|
@ -60,21 +47,18 @@ struct bfa_ioc_hwif_s hwif_cb = {
|
|||
void
|
||||
bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
hwif_cb.ioc_pll_init = bfa_ioc_cb_pll_init;
|
||||
hwif_cb.ioc_firmware_lock = bfa_ioc_cb_firmware_lock;
|
||||
hwif_cb.ioc_firmware_unlock = bfa_ioc_cb_firmware_unlock;
|
||||
hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
|
||||
hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
|
||||
hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
|
||||
hwif_cb.ioc_notify_hbfail = bfa_ioc_cb_notify_hbfail;
|
||||
hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
|
||||
|
||||
ioc->ioc_hwif = &hwif_cb;
|
||||
}
|
||||
|
||||
static u32 *
|
||||
bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
|
||||
{
|
||||
return bfi_image_cb_get_chunk(off);
|
||||
}
|
||||
|
||||
static u32
|
||||
bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
return bfi_image_cb_size;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if firmware of current driver matches the running firmware.
|
||||
*/
|
||||
|
|
|
@ -33,27 +33,13 @@ BFA_TRC_FILE(CNA, IOC_CT);
|
|||
static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
|
||||
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
|
||||
static u32* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc,
|
||||
u32 off);
|
||||
static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
|
||||
static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
|
||||
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
|
||||
|
||||
struct bfa_ioc_hwif_s hwif_ct = {
|
||||
bfa_ioc_ct_pll_init,
|
||||
bfa_ioc_ct_firmware_lock,
|
||||
bfa_ioc_ct_firmware_unlock,
|
||||
bfa_ioc_ct_fwimg_get_chunk,
|
||||
bfa_ioc_ct_fwimg_get_size,
|
||||
bfa_ioc_ct_reg_init,
|
||||
bfa_ioc_ct_map_port,
|
||||
bfa_ioc_ct_isr_mode_set,
|
||||
bfa_ioc_ct_notify_hbfail,
|
||||
bfa_ioc_ct_ownership_reset,
|
||||
};
|
||||
struct bfa_ioc_hwif_s hwif_ct;
|
||||
|
||||
/**
|
||||
* Called from bfa_ioc_attach() to map asic specific calls.
|
||||
|
@ -61,21 +47,18 @@ struct bfa_ioc_hwif_s hwif_ct = {
|
|||
void
|
||||
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
|
||||
hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
|
||||
hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
|
||||
hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
|
||||
hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
|
||||
hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
|
||||
hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
|
||||
hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
|
||||
|
||||
ioc->ioc_hwif = &hwif_ct;
|
||||
}
|
||||
|
||||
static u32*
|
||||
bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
|
||||
{
|
||||
return bfi_image_ct_get_chunk(off);
|
||||
}
|
||||
|
||||
static u32
|
||||
bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc)
|
||||
{
|
||||
return bfi_image_ct_size;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if firmware of current driver matches the running firmware.
|
||||
*/
|
||||
|
@ -95,7 +78,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
|
|||
/**
|
||||
* If bios boot (flash based) -- do not increment usage count
|
||||
*/
|
||||
if (bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
|
||||
if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
|
||||
return BFA_TRUE;
|
||||
|
||||
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
|
||||
|
@ -146,9 +129,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
|
|||
|
||||
/**
|
||||
* Firmware lock is relevant only for CNA.
|
||||
*/
|
||||
if (!ioc->cna)
|
||||
return;
|
||||
|
||||
/**
|
||||
* If bios boot (flash based) -- do not decrement usage count
|
||||
*/
|
||||
if (!ioc->cna || bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
|
||||
if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
|
||||
return;
|
||||
|
||||
/**
|
||||
|
@ -388,10 +376,35 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
|
|||
bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
|
||||
__APP_PLL_425_ENABLE);
|
||||
|
||||
/**
|
||||
* PSS memory reset is asserted at power-on-reset. Need to clear
|
||||
* this before running EDRAM BISTR
|
||||
*/
|
||||
if (ioc->cna) {
|
||||
bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
|
||||
bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
|
||||
}
|
||||
|
||||
r32 = bfa_reg_read((rb + PSS_CTL_REG));
|
||||
r32 &= ~__PSS_LMEM_RESET;
|
||||
bfa_reg_write((rb + PSS_CTL_REG), r32);
|
||||
bfa_os_udelay(1000);
|
||||
|
||||
if (ioc->cna) {
|
||||
bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
|
||||
bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
|
||||
}
|
||||
|
||||
bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
|
||||
bfa_os_udelay(1000);
|
||||
r32 = bfa_reg_read((rb + MBIST_STAT_REG));
|
||||
bfa_trc(ioc, r32);
|
||||
|
||||
/**
|
||||
* Clear BISTR
|
||||
*/
|
||||
bfa_reg_write((rb + MBIST_CTL_REG), 0);
|
||||
|
||||
/*
|
||||
* release semaphore.
|
||||
*/
|
||||
|
|
|
@ -113,7 +113,6 @@ bfa_iocfc_send_cfg(void *bfa_arg)
|
|||
bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
|
||||
bfa_trc(bfa, cfg->fwcfg.num_cqs);
|
||||
|
||||
iocfc->cfgdone = BFA_FALSE;
|
||||
bfa_iocfc_reset_queues(bfa);
|
||||
|
||||
/**
|
||||
|
@ -144,6 +143,15 @@ bfa_iocfc_send_cfg(void *bfa_arg)
|
|||
bfa_os_htons(cfg->drvcfg.num_rspq_elems);
|
||||
}
|
||||
|
||||
/**
|
||||
* Enable interrupt coalescing if it is driver init path
|
||||
* and not ioc disable/enable path.
|
||||
*/
|
||||
if (!iocfc->cfgdone)
|
||||
cfg_info->intr_attr.coalesce = BFA_TRUE;
|
||||
|
||||
iocfc->cfgdone = BFA_FALSE;
|
||||
|
||||
/**
|
||||
* dma map IOC configuration itself
|
||||
*/
|
||||
|
@ -170,7 +178,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
/**
|
||||
* Initialize chip specific handlers.
|
||||
*/
|
||||
if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) {
|
||||
if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
|
||||
iocfc->hwif.hw_reginit = bfa_hwct_reginit;
|
||||
iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
|
||||
iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
|
||||
|
@ -179,6 +187,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
|
||||
iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
|
||||
iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
|
||||
iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
|
||||
} else {
|
||||
iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
|
||||
iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
|
||||
|
@ -188,6 +197,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
|
||||
iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
|
||||
iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
|
||||
iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
|
||||
}
|
||||
|
||||
iocfc->hwif.hw_reginit(bfa);
|
||||
|
@ -290,18 +300,6 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* BFA submodules initialization completion notification.
|
||||
*/
|
||||
static void
|
||||
bfa_iocfc_initdone_submod(struct bfa_s *bfa)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; hal_mods[i]; i++)
|
||||
hal_mods[i]->initdone(bfa);
|
||||
}
|
||||
|
||||
/**
|
||||
* Start BFA submodules.
|
||||
*/
|
||||
|
@ -376,7 +374,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
|
|||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
|
||||
struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
|
||||
struct bfi_iocfc_cfg_s *cfginfo = iocfc->cfginfo;
|
||||
|
||||
fwcfg->num_cqs = fwcfg->num_cqs;
|
||||
fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
|
||||
|
@ -385,15 +382,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
|
|||
fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
|
||||
fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
|
||||
|
||||
cfginfo->intr_attr.coalesce = cfgrsp->intr_attr.coalesce;
|
||||
cfginfo->intr_attr.delay = bfa_os_ntohs(cfgrsp->intr_attr.delay);
|
||||
cfginfo->intr_attr.latency = bfa_os_ntohs(cfgrsp->intr_attr.latency);
|
||||
|
||||
iocfc->cfgdone = BFA_TRUE;
|
||||
|
||||
/**
|
||||
* Configuration is complete - initialize/start submodules
|
||||
*/
|
||||
bfa_fcport_init(bfa);
|
||||
|
||||
if (iocfc->action == BFA_IOCFC_ACT_INIT)
|
||||
bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
|
||||
else
|
||||
|
@ -531,7 +526,6 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
|
|||
return;
|
||||
}
|
||||
|
||||
bfa_iocfc_initdone_submod(bfa);
|
||||
bfa_iocfc_send_cfg(bfa);
|
||||
}
|
||||
|
||||
|
@ -625,9 +619,9 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
bfa->trcmod, bfa->aen, bfa->logm);
|
||||
|
||||
/**
|
||||
* Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode.
|
||||
* Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
|
||||
*/
|
||||
if (0)
|
||||
if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
|
||||
bfa_ioc_set_fcmode(&bfa->ioc);
|
||||
|
||||
bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
|
||||
|
@ -748,10 +742,20 @@ bfa_adapter_get_id(struct bfa_s *bfa)
|
|||
void
|
||||
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
|
||||
{
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
|
||||
attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
|
||||
|
||||
attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
|
||||
bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
|
||||
bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
|
||||
|
||||
attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
|
||||
bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
|
||||
bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
|
||||
|
||||
attr->config = iocfc->cfg;
|
||||
|
||||
attr->intr_attr = iocfc->cfginfo->intr_attr;
|
||||
attr->config = iocfc->cfg;
|
||||
}
|
||||
|
||||
bfa_status_t
|
||||
|
@ -760,7 +764,10 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
|
|||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_set_intr_req_s *m;
|
||||
|
||||
iocfc->cfginfo->intr_attr = *attr;
|
||||
iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
|
||||
iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
|
||||
iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
|
||||
|
||||
if (!bfa_iocfc_is_operational(bfa))
|
||||
return BFA_STATUS_OK;
|
||||
|
||||
|
@ -770,9 +777,10 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
|
|||
|
||||
bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
|
||||
bfa_lpuid(bfa));
|
||||
m->coalesce = attr->coalesce;
|
||||
m->delay = bfa_os_htons(attr->delay);
|
||||
m->latency = bfa_os_htons(attr->latency);
|
||||
m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
|
||||
m->delay = iocfc->cfginfo->intr_attr.delay;
|
||||
m->latency = iocfc->cfginfo->intr_attr.latency;
|
||||
|
||||
|
||||
bfa_trc(bfa, attr->delay);
|
||||
bfa_trc(bfa, attr->latency);
|
||||
|
@ -872,15 +880,48 @@ bfa_iocfc_is_operational(struct bfa_s *bfa)
|
|||
* Return boot target port wwns -- read from boot information in flash.
|
||||
*/
|
||||
void
|
||||
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns)
|
||||
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
|
||||
{
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
|
||||
int i;
|
||||
|
||||
if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
|
||||
bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
|
||||
*nwwns = cfgrsp->pbc_cfg.nbluns;
|
||||
for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
|
||||
wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
*nwwns = cfgrsp->bootwwns.nwwns;
|
||||
*wwns = cfgrsp->bootwwns.wwn;
|
||||
memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
|
||||
}
|
||||
|
||||
void
|
||||
bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
|
||||
{
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
|
||||
|
||||
pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
|
||||
pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
|
||||
pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
|
||||
memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
|
||||
}
|
||||
|
||||
int
|
||||
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
|
||||
{
|
||||
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
|
||||
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
|
||||
|
||||
memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
|
||||
return cfgrsp->pbc_cfg.nvports;
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <bfa_ioc.h>
|
||||
#include <bfa.h>
|
||||
#include <bfi/bfi_iocfc.h>
|
||||
#include <bfi/bfi_pbc.h>
|
||||
#include <bfa_callback_priv.h>
|
||||
|
||||
#define BFA_REQQ_NELEMS_MIN (4)
|
||||
|
@ -62,6 +63,8 @@ struct bfa_hwif_s {
|
|||
void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
|
||||
void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
|
||||
u32 *nvecs, u32 *maxvec);
|
||||
void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
|
||||
u32 *end);
|
||||
};
|
||||
typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
|
||||
|
||||
|
@ -103,7 +106,8 @@ struct bfa_iocfc_s {
|
|||
struct bfa_hwif_s hwif;
|
||||
|
||||
bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
|
||||
void *updateq_cbarg; /* bios callback arg */
|
||||
void *updateq_cbarg; /* bios callback arg */
|
||||
u32 intr_mask;
|
||||
};
|
||||
|
||||
#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc)
|
||||
|
@ -116,7 +120,10 @@ struct bfa_iocfc_s {
|
|||
#define bfa_isr_mode_set(__bfa, __msix) \
|
||||
((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
|
||||
#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
|
||||
(__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)
|
||||
((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
|
||||
__nvecs, __maxvec))
|
||||
#define bfa_msix_get_rme_range(__bfa, __start, __end) \
|
||||
((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
|
||||
|
||||
/*
|
||||
* FC specific IOC functions.
|
||||
|
@ -152,6 +159,7 @@ void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
|
|||
void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
|
||||
void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
|
||||
u32 *nvecs, u32 *maxvec);
|
||||
void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
|
||||
void bfa_hwct_reginit(struct bfa_s *bfa);
|
||||
void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
|
||||
void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
|
||||
|
@ -161,11 +169,16 @@ void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
|
|||
void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
|
||||
void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
|
||||
u32 *nvecs, u32 *maxvec);
|
||||
void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
|
||||
|
||||
void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len);
|
||||
void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi,
|
||||
bfa_boolean_t mincfg);
|
||||
void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns);
|
||||
void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
|
||||
void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
|
||||
struct bfa_boot_pbc_s *pbcfg);
|
||||
int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
|
||||
struct bfi_pbc_vport_s *pbc_vport);
|
||||
|
||||
#endif /* __BFA_IOCFC_H__ */
|
||||
|
||||
|
|
|
@ -133,6 +133,8 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
|
||||
case BFA_IOIM_SM_IOTOV:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
|
||||
__bfa_cb_ioim_pathtov, ioim);
|
||||
break;
|
||||
|
@ -182,6 +184,8 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
case BFA_IOIM_SM_ABORT:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -189,6 +193,8 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
case BFA_IOIM_SM_HWFAIL:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -210,18 +216,24 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
switch (event) {
|
||||
case BFA_IOIM_SM_COMP_GOOD:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
|
||||
__bfa_cb_ioim_good_comp, ioim);
|
||||
break;
|
||||
|
||||
case BFA_IOIM_SM_COMP:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
|
||||
ioim);
|
||||
break;
|
||||
|
||||
case BFA_IOIM_SM_DONE:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -234,8 +246,8 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
|
||||
else {
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
|
||||
bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
|
||||
&ioim->iosp->reqq_wait);
|
||||
bfa_reqq_wait(ioim->bfa, ioim->reqq,
|
||||
&ioim->iosp->reqq_wait);
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -247,13 +259,15 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
|
||||
else {
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
|
||||
bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
|
||||
&ioim->iosp->reqq_wait);
|
||||
bfa_reqq_wait(ioim->bfa, ioim->reqq,
|
||||
&ioim->iosp->reqq_wait);
|
||||
}
|
||||
break;
|
||||
|
||||
case BFA_IOIM_SM_HWFAIL:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -287,12 +301,16 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
|
||||
case BFA_IOIM_SM_ABORT_COMP:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
|
||||
ioim);
|
||||
break;
|
||||
|
||||
case BFA_IOIM_SM_COMP_UTAG:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -305,13 +323,15 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
|
||||
else {
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
|
||||
bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
|
||||
bfa_reqq_wait(ioim->bfa, ioim->reqq,
|
||||
&ioim->iosp->reqq_wait);
|
||||
}
|
||||
break;
|
||||
|
||||
case BFA_IOIM_SM_HWFAIL:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -365,6 +385,8 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
|
||||
case BFA_IOIM_SM_HWFAIL:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -399,6 +421,8 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
case BFA_IOIM_SM_ABORT:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -414,6 +438,8 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
case BFA_IOIM_SM_HWFAIL:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -448,6 +474,8 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
case BFA_IOIM_SM_COMP:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -455,6 +483,8 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
case BFA_IOIM_SM_DONE:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
|
||||
bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -462,6 +492,8 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
case BFA_IOIM_SM_HWFAIL:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -511,6 +543,8 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
|
|||
case BFA_IOIM_SM_HWFAIL:
|
||||
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
|
||||
bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
|
||||
list_del(&ioim->qe);
|
||||
list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
|
||||
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
|
||||
ioim);
|
||||
break;
|
||||
|
@ -738,9 +772,9 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
|
|||
/**
|
||||
* check for room in queue to send request now
|
||||
*/
|
||||
m = bfa_reqq_next(ioim->bfa, itnim->reqq);
|
||||
m = bfa_reqq_next(ioim->bfa, ioim->reqq);
|
||||
if (!m) {
|
||||
bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
|
||||
bfa_reqq_wait(ioim->bfa, ioim->reqq,
|
||||
&ioim->iosp->reqq_wait);
|
||||
return BFA_FALSE;
|
||||
}
|
||||
|
@ -832,7 +866,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
|
|||
/**
|
||||
* queue I/O message to firmware
|
||||
*/
|
||||
bfa_reqq_produce(ioim->bfa, itnim->reqq);
|
||||
bfa_reqq_produce(ioim->bfa, ioim->reqq);
|
||||
return BFA_TRUE;
|
||||
}
|
||||
|
||||
|
@ -930,14 +964,13 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
|
|||
static bfa_boolean_t
|
||||
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
struct bfa_itnim_s *itnim = ioim->itnim;
|
||||
struct bfi_ioim_abort_req_s *m;
|
||||
enum bfi_ioim_h2i msgop;
|
||||
|
||||
/**
|
||||
* check for room in queue to send request now
|
||||
*/
|
||||
m = bfa_reqq_next(ioim->bfa, itnim->reqq);
|
||||
m = bfa_reqq_next(ioim->bfa, ioim->reqq);
|
||||
if (!m)
|
||||
return BFA_FALSE;
|
||||
|
||||
|
@ -956,7 +989,7 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
|
|||
/**
|
||||
* queue I/O message to firmware
|
||||
*/
|
||||
bfa_reqq_produce(ioim->bfa, itnim->reqq);
|
||||
bfa_reqq_produce(ioim->bfa, ioim->reqq);
|
||||
return BFA_TRUE;
|
||||
}
|
||||
|
||||
|
@ -1306,6 +1339,14 @@ void
|
|||
bfa_ioim_start(struct bfa_ioim_s *ioim)
|
||||
{
|
||||
bfa_trc_fp(ioim->bfa, ioim->iotag);
|
||||
|
||||
/**
|
||||
* Obtain the queue over which this request has to be issued
|
||||
*/
|
||||
ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
|
||||
bfa_cb_ioim_get_reqq(ioim->dio) :
|
||||
bfa_itnim_get_reqq(ioim);
|
||||
|
||||
bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
|
||||
}
|
||||
|
||||
|
|
|
@ -110,6 +110,27 @@ struct bfa_log_msgdef_s bfa_log_msg_array[] = {
|
|||
"Running firmware version is incompatible with the driver version.",
|
||||
(0), 0},
|
||||
|
||||
{BFA_AEN_IOC_FWCFG_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWCFG_ERROR",
|
||||
"Link initialization failed due to firmware configuration read error:"
|
||||
" WWN = %s.",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_AEN_IOC_INVALID_VENDOR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_VENDOR",
|
||||
"Unsupported switch vendor. Link initialization failed: WWN = %s.",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_AEN_IOC_INVALID_NWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_NWWN",
|
||||
"Invalid NWWN. Link initialization failed: NWWN = 00:00:00:00:00:00:00:00.",
|
||||
(0), 0},
|
||||
|
||||
{BFA_AEN_IOC_INVALID_PWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_PWWN",
|
||||
"Invalid PWWN. Link initialization failed: PWWN = 00:00:00:00:00:00:00:00.",
|
||||
(0), 0},
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -347,6 +368,22 @@ struct bfa_log_msgdef_s bfa_log_msg_array[] = {
|
|||
((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
|
||||
(BFA_LOG_D << BFA_LOG_ARG2) | 0), 3},
|
||||
|
||||
{BFA_LOG_HAL_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_INFO, "HAL_DRIVER_ERROR",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_HAL_DRIVER_CONFIG_ERROR,
|
||||
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
|
||||
"HAL_DRIVER_CONFIG_ERROR",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_HAL_MBOX_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_INFO, "HAL_MBOX_ERROR",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -412,6 +449,55 @@ struct bfa_log_msgdef_s bfa_log_msg_array[] = {
|
|||
((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
|
||||
(BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
|
||||
|
||||
{BFA_LOG_LINUX_DRIVER_CONFIG_ERROR,
|
||||
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
|
||||
"LINUX_DRIVER_CONFIG_ERROR",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_LINUX_BNA_STATE_MACHINE,
|
||||
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
|
||||
"LINUX_BNA_STATE_MACHINE",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_LINUX_IOC_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_INFO, "LINUX_IOC_ERROR",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR,
|
||||
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
|
||||
"LINUX_RESOURCE_ALLOC_ERROR",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_LINUX_RING_BUFFER_ERROR,
|
||||
BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
|
||||
"LINUX_RING_BUFFER_ERROR",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_LINUX_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_ERROR, "LINUX_DRIVER_ERROR",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_LINUX_DRIVER_INFO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_INFO, "LINUX_DRIVER_INFO",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_LINUX_DRIVER_DIAG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_INFO, "LINUX_DRIVER_DIAG",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
{BFA_LOG_LINUX_DRIVER_AEN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
|
||||
BFA_LOG_INFO, "LINUX_DRIVER_AEN",
|
||||
"%s",
|
||||
((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -41,7 +41,6 @@ static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
|
|||
struct bfa_iocfc_cfg_s *cfg,
|
||||
struct bfa_meminfo_s *meminfo,
|
||||
struct bfa_pcidev_s *pcidev);
|
||||
static void bfa_lps_initdone(struct bfa_s *bfa);
|
||||
static void bfa_lps_detach(struct bfa_s *bfa);
|
||||
static void bfa_lps_start(struct bfa_s *bfa);
|
||||
static void bfa_lps_stop(struct bfa_s *bfa);
|
||||
|
@ -346,11 +345,6 @@ bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_lps_initdone(struct bfa_s *bfa)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_lps_detach(struct bfa_s *bfa)
|
||||
{
|
||||
|
|
|
@ -102,9 +102,14 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
|
|||
port->stats_busy = BFA_FALSE;
|
||||
|
||||
if (status == BFA_STATUS_OK) {
|
||||
struct bfa_timeval_s tv;
|
||||
|
||||
memcpy(port->stats, port->stats_dma.kva,
|
||||
sizeof(union bfa_pport_stats_u));
|
||||
bfa_port_stats_swap(port, port->stats);
|
||||
|
||||
bfa_os_gettimeofday(&tv);
|
||||
port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
|
||||
}
|
||||
|
||||
if (port->stats_cbfn) {
|
||||
|
@ -125,9 +130,17 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
|
|||
static void
|
||||
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
|
||||
{
|
||||
struct bfa_timeval_s tv;
|
||||
|
||||
port->stats_status = status;
|
||||
port->stats_busy = BFA_FALSE;
|
||||
|
||||
/**
|
||||
* re-initialize time stamp for stats reset
|
||||
*/
|
||||
bfa_os_gettimeofday(&tv);
|
||||
port->stats_reset_time = tv.tv_sec;
|
||||
|
||||
if (port->stats_cbfn) {
|
||||
port->stats_cbfn(port->stats_cbarg, status);
|
||||
port->stats_cbfn = NULL;
|
||||
|
@ -394,7 +407,7 @@ bfa_port_hbfail(void *arg)
|
|||
*/
|
||||
if (port->stats_busy) {
|
||||
if (port->stats_cbfn)
|
||||
port->stats_cbfn(port->dev, BFA_STATUS_FAILED);
|
||||
port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
|
||||
port->stats_cbfn = NULL;
|
||||
port->stats_busy = BFA_FALSE;
|
||||
}
|
||||
|
@ -404,7 +417,7 @@ bfa_port_hbfail(void *arg)
|
|||
*/
|
||||
if (port->endis_pending) {
|
||||
if (port->endis_cbfn)
|
||||
port->endis_cbfn(port->dev, BFA_STATUS_FAILED);
|
||||
port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
|
||||
port->endis_cbfn = NULL;
|
||||
port->endis_pending = BFA_FALSE;
|
||||
}
|
||||
|
@ -428,6 +441,8 @@ void
|
|||
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev,
|
||||
struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
|
||||
{
|
||||
struct bfa_timeval_s tv;
|
||||
|
||||
bfa_assert(port);
|
||||
|
||||
port->dev = dev;
|
||||
|
@ -435,13 +450,21 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev,
|
|||
port->trcmod = trcmod;
|
||||
port->logmod = logmod;
|
||||
|
||||
port->stats_busy = port->endis_pending = BFA_FALSE;
|
||||
port->stats_cbfn = port->endis_cbfn = NULL;
|
||||
port->stats_busy = BFA_FALSE;
|
||||
port->endis_pending = BFA_FALSE;
|
||||
port->stats_cbfn = NULL;
|
||||
port->endis_cbfn = NULL;
|
||||
|
||||
bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
|
||||
bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
|
||||
bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
|
||||
|
||||
/**
|
||||
* initialize time stamp for stats reset
|
||||
*/
|
||||
bfa_os_gettimeofday(&tv);
|
||||
port->stats_reset_time = tv.tv_sec;
|
||||
|
||||
bfa_trc(port, 0);
|
||||
}
|
||||
|
||||
|
|
|
@ -75,8 +75,9 @@ struct bfa_fcport_s {
|
|||
bfa_status_t stats_status; /* stats/statsclr status */
|
||||
bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
|
||||
bfa_boolean_t stats_qfull;
|
||||
u32 stats_reset_time; /* stats reset time stamp */
|
||||
bfa_cb_pport_t stats_cbfn; /* driver callback function */
|
||||
void *stats_cbarg; /* *!< user callback arg */
|
||||
void *stats_cbarg; /* user callback arg */
|
||||
bfa_boolean_t diag_busy; /* diag busy status */
|
||||
bfa_boolean_t beacon; /* port beacon status */
|
||||
bfa_boolean_t link_e2e_beacon; /* link beacon status */
|
||||
|
@ -87,5 +88,7 @@ struct bfa_fcport_s {
|
|||
/*
|
||||
* public functions
|
||||
*/
|
||||
void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
|
||||
void bfa_fcport_init(struct bfa_s *bfa);
|
||||
void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
|
||||
|
||||
#endif /* __BFA_PORT_PRIV_H__ */
|
||||
|
|
|
@ -37,7 +37,6 @@
|
|||
void *bfad, struct bfa_iocfc_cfg_s *cfg, \
|
||||
struct bfa_meminfo_s *meminfo, \
|
||||
struct bfa_pcidev_s *pcidev); \
|
||||
static void bfa_ ## __mod ## _initdone(struct bfa_s *bfa); \
|
||||
static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
|
||||
static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \
|
||||
static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \
|
||||
|
@ -47,7 +46,6 @@
|
|||
struct bfa_module_s hal_mod_ ## __mod = { \
|
||||
bfa_ ## __mod ## _meminfo, \
|
||||
bfa_ ## __mod ## _attach, \
|
||||
bfa_ ## __mod ## _initdone, \
|
||||
bfa_ ## __mod ## _detach, \
|
||||
bfa_ ## __mod ## _start, \
|
||||
bfa_ ## __mod ## _stop, \
|
||||
|
@ -69,7 +67,6 @@ struct bfa_module_s {
|
|||
struct bfa_iocfc_cfg_s *cfg,
|
||||
struct bfa_meminfo_s *meminfo,
|
||||
struct bfa_pcidev_s *pcidev);
|
||||
void (*initdone) (struct bfa_s *bfa);
|
||||
void (*detach) (struct bfa_s *bfa);
|
||||
void (*start) (struct bfa_s *bfa);
|
||||
void (*stop) (struct bfa_s *bfa);
|
||||
|
|
|
@ -635,11 +635,6 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
bfa_meminfo_kva(meminfo) = (u8 *) rp;
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_rport_initdone(struct bfa_s *bfa)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_rport_detach(struct bfa_s *bfa)
|
||||
{
|
||||
|
|
|
@ -93,11 +93,6 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_sgpg_initdone(struct bfa_s *bfa)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_sgpg_detach(struct bfa_s *bfa)
|
||||
{
|
||||
|
|
|
@ -169,11 +169,6 @@ bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
|
|||
uf_mem_claim(ufm, meminfo);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_uf_initdone(struct bfa_s *bfa)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_uf_detach(struct bfa_s *bfa)
|
||||
{
|
||||
|
@ -256,7 +251,10 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
|
|||
(struct fchs_s *) buf, pld_w0);
|
||||
}
|
||||
|
||||
bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
|
||||
if (bfa->fcs)
|
||||
__bfa_cb_uf_recv(uf, BFA_TRUE);
|
||||
else
|
||||
bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
|
@ -54,31 +54,62 @@ static int bfa_io_max_sge = BFAD_IO_MAX_SGE;
|
|||
static int log_level = BFA_LOG_WARNING;
|
||||
static int ioc_auto_recover = BFA_TRUE;
|
||||
static int ipfc_enable = BFA_FALSE;
|
||||
static int ipfc_mtu = -1;
|
||||
static int fdmi_enable = BFA_TRUE;
|
||||
int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
|
||||
int bfa_linkup_delay = -1;
|
||||
int bfa_debugfs_enable = 1;
|
||||
|
||||
module_param(os_name, charp, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
|
||||
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
|
||||
module_param(host_name, charp, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
|
||||
module_param(num_rports, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port"
|
||||
" (physical/logical), default=1024");
|
||||
module_param(num_ios, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
|
||||
module_param(num_tms, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
|
||||
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
|
||||
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame buffers,"
|
||||
" default=64");
|
||||
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements,"
|
||||
" default=256");
|
||||
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements,"
|
||||
" default=64");
|
||||
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
|
||||
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs,"
|
||||
" Range[>0]");
|
||||
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32,"
|
||||
" Range[>0]");
|
||||
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
|
||||
module_param(log_level, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(log_level, "Driver log level, default=3,"
|
||||
" Range[Critical:1|Error:2|Warning:3|Info:4]");
|
||||
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1,"
|
||||
" Range[off:0|on:1]");
|
||||
module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
|
||||
module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR);
|
||||
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(ipfc_enable, "Enable IPoFC, default=0, Range[off:0|on:1]");
|
||||
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for boot"
|
||||
" port. Otherwise Range[>0]");
|
||||
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1,"
|
||||
" Range[false:0|true:1]");
|
||||
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
|
||||
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
|
||||
" Range[false:0|true:1]");
|
||||
|
||||
/*
|
||||
* Stores the module parm num_sgpgs value;
|
||||
|
@ -322,7 +353,31 @@ ext:
|
|||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief
|
||||
* FCS PBC VPORT Create
|
||||
*/
|
||||
void
|
||||
bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
|
||||
{
|
||||
|
||||
struct bfad_pcfg_s *pcfg;
|
||||
|
||||
pcfg = kzalloc(sizeof(struct bfad_pcfg_s), GFP_ATOMIC);
|
||||
if (!pcfg) {
|
||||
bfa_trc(bfad, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
pcfg->port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
|
||||
pcfg->port_cfg.pwwn = pbc_vport.vp_pwwn;
|
||||
pcfg->port_cfg.nwwn = pbc_vport.vp_nwwn;
|
||||
pcfg->port_cfg.preboot_vp = BFA_TRUE;
|
||||
|
||||
list_add_tail(&pcfg->list_entry, &bfad->pbc_pcfg_list);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
bfad_hal_mem_release(struct bfad_s *bfad)
|
||||
|
@ -481,10 +536,10 @@ ext:
|
|||
*/
|
||||
bfa_status_t
|
||||
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
|
||||
struct bfa_port_cfg_s *port_cfg, struct device *dev)
|
||||
struct bfa_port_cfg_s *port_cfg, struct device *dev)
|
||||
{
|
||||
struct bfad_vport_s *vport;
|
||||
int rc = BFA_STATUS_OK;
|
||||
int rc = BFA_STATUS_OK;
|
||||
unsigned long flags;
|
||||
struct completion fcomp;
|
||||
|
||||
|
@ -496,8 +551,12 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
|
|||
|
||||
vport->drv_port.bfad = bfad;
|
||||
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
||||
rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
|
||||
port_cfg, vport);
|
||||
if (port_cfg->preboot_vp == BFA_TRUE)
|
||||
rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport,
|
||||
&bfad->bfa_fcs, vf_id, port_cfg, vport);
|
||||
else
|
||||
rc = bfa_fcs_vport_create(&vport->fcs_vport,
|
||||
&bfad->bfa_fcs, vf_id, port_cfg, vport);
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
|
||||
if (rc != BFA_STATUS_OK)
|
||||
|
@ -848,6 +907,10 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role)
|
|||
bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM;
|
||||
}
|
||||
|
||||
/* Setup the debugfs node for this scsi_host */
|
||||
if (bfa_debugfs_enable)
|
||||
bfad_debugfs_init(&bfad->pport);
|
||||
|
||||
bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
|
||||
|
||||
out:
|
||||
|
@ -857,6 +920,10 @@ out:
|
|||
void
|
||||
bfad_uncfg_pport(struct bfad_s *bfad)
|
||||
{
|
||||
/* Remove the debugfs node for this scsi_host */
|
||||
kfree(bfad->regdata);
|
||||
bfad_debugfs_exit(&bfad->pport);
|
||||
|
||||
if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) {
|
||||
bfad_ipfc_port_delete(bfad, &bfad->pport);
|
||||
bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC;
|
||||
|
@ -884,6 +951,7 @@ bfa_status_t
|
|||
bfad_start_ops(struct bfad_s *bfad)
|
||||
{
|
||||
int retval;
|
||||
struct bfad_pcfg_s *pcfg, *pcfg_new;
|
||||
|
||||
/* PPORT FCS config */
|
||||
bfad_fcs_port_cfg(bfad);
|
||||
|
@ -901,6 +969,27 @@ bfad_start_ops(struct bfad_s *bfad)
|
|||
|
||||
bfad_drv_start(bfad);
|
||||
|
||||
/* pbc vport creation */
|
||||
list_for_each_entry_safe(pcfg, pcfg_new, &bfad->pbc_pcfg_list,
|
||||
list_entry) {
|
||||
struct fc_vport_identifiers vid;
|
||||
struct fc_vport *fc_vport;
|
||||
|
||||
memset(&vid, 0, sizeof(vid));
|
||||
vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
|
||||
vid.vport_type = FC_PORTTYPE_NPIV;
|
||||
vid.disable = false;
|
||||
vid.node_name = wwn_to_u64((u8 *)&pcfg->port_cfg.nwwn);
|
||||
vid.port_name = wwn_to_u64((u8 *)&pcfg->port_cfg.pwwn);
|
||||
fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
|
||||
if (!fc_vport)
|
||||
printk(KERN_WARNING "bfad%d: failed to create pbc vport"
|
||||
" %llx\n", bfad->inst_no, vid.port_name);
|
||||
list_del(&pcfg->list_entry);
|
||||
kfree(pcfg);
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
* If bfa_linkup_delay is set to -1 default; try to retrive the
|
||||
* value using the bfad_os_get_linkup_delay(); else use the
|
||||
|
@ -928,7 +1017,7 @@ out_cfg_pport_failure:
|
|||
}
|
||||
|
||||
int
|
||||
bfad_worker (void *ptr)
|
||||
bfad_worker(void *ptr)
|
||||
{
|
||||
struct bfad_s *bfad;
|
||||
unsigned long flags;
|
||||
|
@ -1031,6 +1120,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|||
|
||||
bfad->ref_count = 0;
|
||||
bfad->pport.bfad = bfad;
|
||||
INIT_LIST_HEAD(&bfad->pbc_pcfg_list);
|
||||
|
||||
bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
|
||||
"bfad_worker");
|
||||
|
@ -1172,6 +1262,14 @@ static struct pci_device_id bfad_id_table[] = {
|
|||
.class = (PCI_CLASS_SERIAL_FIBER << 8),
|
||||
.class_mask = ~0,
|
||||
},
|
||||
{
|
||||
.vendor = BFA_PCI_VENDOR_ID_BROCADE,
|
||||
.device = BFA_PCI_DEVICE_ID_CT_FC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.class = (PCI_CLASS_SERIAL_FIBER << 8),
|
||||
.class_mask = ~0,
|
||||
},
|
||||
|
||||
{0, 0},
|
||||
};
|
||||
|
|
|
@ -373,47 +373,53 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
|
|||
(struct bfad_im_port_s *) shost->hostdata[0];
|
||||
struct bfad_s *bfad = im_port->bfad;
|
||||
struct bfa_port_cfg_s port_cfg;
|
||||
struct bfad_pcfg_s *pcfg;
|
||||
int status = 0, rc;
|
||||
unsigned long flags;
|
||||
|
||||
memset(&port_cfg, 0, sizeof(port_cfg));
|
||||
|
||||
port_cfg.pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
|
||||
port_cfg.nwwn = wwn_to_u64((u8 *) &fc_vport->node_name);
|
||||
|
||||
u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn);
|
||||
u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
|
||||
if (strlen(vname) > 0)
|
||||
strcpy((char *)&port_cfg.sym_name, vname);
|
||||
|
||||
port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
|
||||
rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
|
||||
|
||||
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
||||
list_for_each_entry(pcfg, &bfad->pbc_pcfg_list, list_entry) {
|
||||
if (port_cfg.pwwn == pcfg->port_cfg.pwwn) {
|
||||
port_cfg.preboot_vp = pcfg->port_cfg.preboot_vp;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
|
||||
rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev);
|
||||
if (rc == BFA_STATUS_OK) {
|
||||
struct bfad_vport_s *vport;
|
||||
struct bfad_vport_s *vport;
|
||||
struct bfa_fcs_vport_s *fcs_vport;
|
||||
struct Scsi_Host *vshost;
|
||||
|
||||
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
||||
fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0,
|
||||
port_cfg.pwwn);
|
||||
if (fcs_vport == NULL) {
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
if (fcs_vport == NULL)
|
||||
return VPCERR_BAD_WWN;
|
||||
}
|
||||
|
||||
fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
|
||||
if (disable) {
|
||||
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
||||
bfa_fcs_vport_stop(fcs_vport);
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
|
||||
}
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
|
||||
vport = fcs_vport->vport_drv;
|
||||
vshost = vport->drv_port.im_port->shost;
|
||||
fc_host_node_name(vshost) = wwn_to_u64((u8 *) &port_cfg.nwwn);
|
||||
fc_host_port_name(vshost) = wwn_to_u64((u8 *) &port_cfg.pwwn);
|
||||
fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
|
||||
fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
|
||||
fc_vport->dd_data = vport;
|
||||
vport->drv_port.im_port->fc_vport = fc_vport;
|
||||
|
||||
} else if (rc == BFA_STATUS_INVALID_WWN)
|
||||
return VPCERR_BAD_WWN;
|
||||
else if (rc == BFA_STATUS_VPORT_EXISTS)
|
||||
|
@ -422,7 +428,7 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
|
|||
return VPCERR_NO_FABRIC_SUPP;
|
||||
else if (rc == BFA_STATUS_VPORT_WWN_BP)
|
||||
return VPCERR_BAD_WWN;
|
||||
else
|
||||
else
|
||||
return FC_VPORT_FAILED;
|
||||
|
||||
return status;
|
||||
|
@ -449,7 +455,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
|
|||
port = im_port->port;
|
||||
|
||||
vshost = vport->drv_port.im_port->shost;
|
||||
pwwn = wwn_to_u64((u8 *) &fc_host_port_name(vshost));
|
||||
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
|
||||
|
||||
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
||||
fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
|
||||
|
@ -467,6 +473,12 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
|
|||
rc = bfa_fcs_vport_delete(&vport->fcs_vport);
|
||||
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
|
||||
|
||||
if (rc == BFA_STATUS_PBC) {
|
||||
vport->drv_port.flags &= ~BFAD_PORT_DELETE;
|
||||
vport->comp_del = NULL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
wait_for_completion(vport->comp_del);
|
||||
|
||||
free_scsi_host:
|
||||
|
@ -490,7 +502,7 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
|
|||
vport = (struct bfad_vport_s *)fc_vport->dd_data;
|
||||
bfad = vport->drv_port.bfad;
|
||||
vshost = vport->drv_port.im_port->shost;
|
||||
pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);
|
||||
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
|
||||
|
||||
spin_lock_irqsave(&bfad->bfad_lock, flags);
|
||||
fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
|
||||
|
|
547
drivers/scsi/bfa/bfad_debugfs.c
Normal file
547
drivers/scsi/bfa/bfad_debugfs.c
Normal file
|
@ -0,0 +1,547 @@
|
|||
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/debugfs.h>

#include <bfad_drv.h>
#include <bfad_im.h>

/*
 * BFA debufs interface
 *
 * To access the interface, debugfs file system should be mounted
 * if not already mounted using:
 * mount -t debugfs none /sys/kernel/debug
 *
 * BFA Hierarchy:
 *	- bfa/host#
 * where the host number corresponds to the one under /sys/class/scsi_host/host#
 *
 * Debugging service available per host:
 * fwtrc:  To collect current firmware trace.
 * drvtrc: To collect current driver trace
 * fwsave: To collect last saved fw trace as a result of firmware crash.
 * regwr:  To write one word to chip register
 * regrd:  To read one or more words from chip register.
 */

struct bfad_debug_info {
	char *debug_buffer;
	void *i_private;
	int buffer_len;
};

static int
bfad_debugfs_open_drvtrc(struct inode *inode, struct file *file)
{
	struct bfad_port_s *port = inode->i_private;
	struct bfad_s *bfad = port->bfad;
	struct bfad_debug_info *debug;

	debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
	if (!debug)
		return -ENOMEM;

	debug->debug_buffer = (void *) bfad->trcmod;
	debug->buffer_len = sizeof(struct bfa_trc_mod_s);

	file->private_data = debug;

	return 0;
}

static int
bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
{
	struct bfad_port_s *port = inode->i_private;
	struct bfad_s *bfad = port->bfad;
	struct bfad_debug_info *fw_debug;
	unsigned long flags;
	int rc;

	fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
	if (!fw_debug)
		return -ENOMEM;

	fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);

	fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
	if (!fw_debug->debug_buffer) {
		kfree(fw_debug);
		printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
				bfad->inst_no);
		return -ENOMEM;
	}

	memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_debug_fwtrc(&bfad->bfa,
			fw_debug->debug_buffer,
			&fw_debug->buffer_len);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (rc != BFA_STATUS_OK) {
		vfree(fw_debug->debug_buffer);
		fw_debug->debug_buffer = NULL;
		kfree(fw_debug);
		printk(KERN_INFO "bfad[%d]: Failed to collect fwtrc\n",
				bfad->inst_no);
		return -ENOMEM;
	}

	file->private_data = fw_debug;

	return 0;
}

static int
bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
{
	struct bfad_port_s *port = inode->i_private;
	struct bfad_s *bfad = port->bfad;
	struct bfad_debug_info *fw_debug;
	unsigned long flags;
	int rc;

	fw_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
	if (!fw_debug)
		return -ENOMEM;

	fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);

	fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
	if (!fw_debug->debug_buffer) {
		kfree(fw_debug);
		printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
				bfad->inst_no);
		return -ENOMEM;
	}

	memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_debug_fwsave(&bfad->bfa,
			fw_debug->debug_buffer,
			&fw_debug->buffer_len);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (rc != BFA_STATUS_OK) {
		vfree(fw_debug->debug_buffer);
		fw_debug->debug_buffer = NULL;
		kfree(fw_debug);
		printk(KERN_INFO "bfad[%d]: Failed to collect fwsave\n",
				bfad->inst_no);
		return -ENOMEM;
	}

	file->private_data = fw_debug;

	return 0;
}

static int
bfad_debugfs_open_reg(struct inode *inode, struct file *file)
{
	struct bfad_debug_info *reg_debug;

	reg_debug = kzalloc(sizeof(struct bfad_debug_info), GFP_KERNEL);
	if (!reg_debug)
		return -ENOMEM;

	reg_debug->i_private = inode->i_private;

	file->private_data = reg_debug;

	return 0;
}

/* Changes the current file position */
static loff_t
bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
{
	struct bfad_debug_info *debug;
	loff_t pos = file->f_pos;

	debug = file->private_data;

	switch (orig) {
	case 0:
		file->f_pos = offset;
		break;
	case 1:
		file->f_pos += offset;
		break;
	case 2:
		file->f_pos = debug->buffer_len - offset;
		break;
	default:
		return -EINVAL;
	}

	if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
		file->f_pos = pos;
		return -EINVAL;
	}

	return file->f_pos;
}

static ssize_t
bfad_debugfs_read(struct file *file, char __user *buf,
			size_t nbytes, loff_t *pos)
{
	struct bfad_debug_info *debug = file->private_data;

	if (!debug || !debug->debug_buffer)
		return 0;

	return memory_read_from_buffer(buf, nbytes, pos,
				debug->debug_buffer, debug->buffer_len);
}

#define BFA_REG_CT_ADDRSZ	(0x40000)
#define BFA_REG_CB_ADDRSZ	(0x20000)
#define BFA_REG_ADDRSZ(__bfa)	\
	((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ?	\
		BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
#define BFA_REG_ADDRMSK(__bfa)	((uint32_t)(BFA_REG_ADDRSZ(__bfa) - 1))

static bfa_status_t
bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
{
	u8 area;

	/* check [16:15] */
	area = (offset >> 15) & 0x7;
	if (area == 0) {
		/* PCIe core register */
		if ((offset + (len<<2)) > 0x8000)	/* 8k dwords or 32KB */
			return BFA_STATUS_EINVAL;
	} else if (area == 0x1) {
		/* CB 32 KB memory page */
		if ((offset + (len<<2)) > 0x10000)	/* 8k dwords or 32KB */
			return BFA_STATUS_EINVAL;
	} else {
		/* CB register space 64KB */
		if ((offset + (len<<2)) > BFA_REG_ADDRMSK(bfa))
			return BFA_STATUS_EINVAL;
	}
	return BFA_STATUS_OK;
}

static ssize_t
bfad_debugfs_read_regrd(struct file *file, char __user *buf,
		size_t nbytes, loff_t *pos)
{
	struct bfad_debug_info *regrd_debug = file->private_data;
	struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
	struct bfad_s *bfad = port->bfad;
	ssize_t rc;

	if (!bfad->regdata)
		return 0;

	rc = memory_read_from_buffer(buf, nbytes, pos,
			bfad->regdata, bfad->reglen);

	if ((*pos + nbytes) >= bfad->reglen) {
		kfree(bfad->regdata);
		bfad->regdata = NULL;
		bfad->reglen = 0;
	}

	return rc;
}

static ssize_t
bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct bfad_debug_info *regrd_debug = file->private_data;
	struct bfad_port_s *port = (struct bfad_port_s *)regrd_debug->i_private;
	struct bfad_s *bfad = port->bfad;
	struct bfa_s *bfa = &bfad->bfa;
	struct bfa_ioc_s *ioc = &bfa->ioc;
	int addr, len, rc, i;
	u32 *regbuf;
	void __iomem *rb, *reg_addr;
	unsigned long flags;

	rc = sscanf(buf, "%x:%x", &addr, &len);
	if (rc < 2) {
		printk(KERN_INFO
			"bfad[%d]: %s failed to read user buf\n",
			bfad->inst_no, __func__);
		return -EINVAL;
	}

	kfree(bfad->regdata);
	bfad->regdata = NULL;
	bfad->reglen = 0;

	bfad->regdata = kzalloc(len << 2, GFP_KERNEL);
	if (!bfad->regdata) {
		printk(KERN_INFO "bfad[%d]: Failed to allocate regrd buffer\n",
			bfad->inst_no);
		return -ENOMEM;
	}

	bfad->reglen = len << 2;
	rb = bfa_ioc_bar0(ioc);
	addr &= BFA_REG_ADDRMSK(bfa);

	/* offset and len sanity check */
	rc = bfad_reg_offset_check(bfa, addr, len);
	if (rc) {
		printk(KERN_INFO "bfad[%d]: Failed reg offset check\n",
			bfad->inst_no);
		kfree(bfad->regdata);
		bfad->regdata = NULL;
		bfad->reglen = 0;
		return -EINVAL;
	}

	reg_addr = rb + addr;
	regbuf = (u32 *)bfad->regdata;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	for (i = 0; i < len; i++) {
		*regbuf = bfa_reg_read(reg_addr);
		regbuf++;
		reg_addr += sizeof(u32);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return nbytes;
}

static ssize_t
bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
		size_t nbytes, loff_t *ppos)
{
	struct bfad_debug_info *debug = file->private_data;
	struct bfad_port_s *port = (struct bfad_port_s *)debug->i_private;
	struct bfad_s *bfad = port->bfad;
	struct bfa_s *bfa = &bfad->bfa;
	struct bfa_ioc_s *ioc = &bfa->ioc;
	int addr, val, rc;
	void __iomem *reg_addr;
	unsigned long flags;

	rc = sscanf(buf, "%x:%x", &addr, &val);
	if (rc < 2) {
		printk(KERN_INFO
			"bfad[%d]: %s failed to read user buf\n",
			bfad->inst_no, __func__);
		return -EINVAL;
	}

	addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */

	/* offset and len sanity check */
	rc = bfad_reg_offset_check(bfa, addr, 1);
	if (rc) {
		printk(KERN_INFO
			"bfad[%d]: Failed reg offset check\n",
			bfad->inst_no);
		return -EINVAL;
	}

	reg_addr = (uint32_t *) ((uint8_t *) bfa_ioc_bar0(ioc) + addr);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_reg_write(reg_addr, val);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return nbytes;
}

static int
bfad_debugfs_release(struct inode *inode, struct file *file)
{
	struct bfad_debug_info *debug = file->private_data;

	if (!debug)
		return 0;

	file->private_data = NULL;
	kfree(debug);
	return 0;
}

static int
bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
{
	struct bfad_debug_info *fw_debug = file->private_data;

	if (!fw_debug)
		return 0;

	if (fw_debug->debug_buffer)
		vfree(fw_debug->debug_buffer);

	file->private_data = NULL;
	kfree(fw_debug);
	return 0;
}

static const struct file_operations bfad_debugfs_op_drvtrc = {
	.owner		=	THIS_MODULE,
	.open		=	bfad_debugfs_open_drvtrc,
	.llseek		=	bfad_debugfs_lseek,
	.read		=	bfad_debugfs_read,
	.release	=	bfad_debugfs_release,
};

static const struct file_operations bfad_debugfs_op_fwtrc = {
	.owner		=	THIS_MODULE,
	.open		=	bfad_debugfs_open_fwtrc,
	.llseek		=	bfad_debugfs_lseek,
	.read		=	bfad_debugfs_read,
	.release	=	bfad_debugfs_release_fwtrc,
};

static const struct file_operations bfad_debugfs_op_fwsave = {
	.owner		=	THIS_MODULE,
	.open		=	bfad_debugfs_open_fwsave,
	.llseek		=	bfad_debugfs_lseek,
	.read		=	bfad_debugfs_read,
	.release	=	bfad_debugfs_release_fwtrc,
};

static const struct file_operations bfad_debugfs_op_regrd = {
	.owner		=	THIS_MODULE,
	.open		=	bfad_debugfs_open_reg,
	.llseek		=	bfad_debugfs_lseek,
	.read		=	bfad_debugfs_read_regrd,
	.write		=	bfad_debugfs_write_regrd,
	.release	=	bfad_debugfs_release,
};

static const struct file_operations bfad_debugfs_op_regwr = {
	.owner		=	THIS_MODULE,
	.open		=	bfad_debugfs_open_reg,
	.llseek		=	bfad_debugfs_lseek,
	.write		=	bfad_debugfs_write_regwr,
	.release	=	bfad_debugfs_release,
};

struct bfad_debugfs_entry {
	const char *name;
	mode_t	mode;
	const struct file_operations *fops;
};

static const struct bfad_debugfs_entry bfad_debugfs_files[] = {
	{ "drvtrc", S_IFREG|S_IRUGO, &bfad_debugfs_op_drvtrc, },
	{ "fwtrc",  S_IFREG|S_IRUGO, &bfad_debugfs_op_fwtrc, },
	{ "fwsave", S_IFREG|S_IRUGO, &bfad_debugfs_op_fwsave, },
	{ "regrd",  S_IFREG|S_IRUGO|S_IWUSR, &bfad_debugfs_op_regrd, },
	{ "regwr",  S_IFREG|S_IWUSR, &bfad_debugfs_op_regwr, },
};

static struct dentry *bfa_debugfs_root;
static atomic_t bfa_debugfs_port_count;

inline void
bfad_debugfs_init(struct bfad_port_s *port)
{
	struct bfad_im_port_s *im_port = port->im_port;
	struct bfad_s *bfad = im_port->bfad;
	struct Scsi_Host *shost = im_port->shost;
	const struct bfad_debugfs_entry *file;
	char name[16];
	int i;

	if (!bfa_debugfs_enable)
		return;

	/* Setup the BFA debugfs root directory*/
	if (!bfa_debugfs_root) {
		bfa_debugfs_root = debugfs_create_dir("bfa", NULL);
		atomic_set(&bfa_debugfs_port_count, 0);
		if (!bfa_debugfs_root) {
			printk(KERN_WARNING
				"BFA debugfs root dir creation failed\n");
			goto err;
		}
	}

	/*
	 * Setup the host# directory for the port,
	 * corresponds to the scsi_host num of this port.
	 */
	snprintf(name, sizeof(name), "host%d", shost->host_no);
	if (!port->port_debugfs_root) {
		port->port_debugfs_root =
			debugfs_create_dir(name, bfa_debugfs_root);
		if (!port->port_debugfs_root) {
			printk(KERN_WARNING
				"BFA host root dir creation failed\n");
			goto err;
		}

		atomic_inc(&bfa_debugfs_port_count);

		for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
			file = &bfad_debugfs_files[i];
			bfad->bfad_dentry_files[i] =
					debugfs_create_file(file->name,
							file->mode,
							port->port_debugfs_root,
							port,
							file->fops);
			if (!bfad->bfad_dentry_files[i]) {
				printk(KERN_WARNING
					"BFA host%d: create %s entry failed\n",
					shost->host_no, file->name);
				goto err;
			}
		}
	}

err:
	return;
}

inline void
bfad_debugfs_exit(struct bfad_port_s *port)
{
	struct bfad_im_port_s *im_port = port->im_port;
	struct bfad_s *bfad = im_port->bfad;
	int i;

	for (i = 0; i < ARRAY_SIZE(bfad_debugfs_files); i++) {
		if (bfad->bfad_dentry_files[i]) {
			debugfs_remove(bfad->bfad_dentry_files[i]);
			bfad->bfad_dentry_files[i] = NULL;
		}
	}

	/*
	 * Remove the host# directory for the port,
	 * corresponds to the scsi_host num of this port.
	 */
	if (port->port_debugfs_root) {
		debugfs_remove(port->port_debugfs_root);
		port->port_debugfs_root = NULL;
		atomic_dec(&bfa_debugfs_port_count);
	}

	/* Remove the BFA debugfs root directory */
	if (atomic_read(&bfa_debugfs_port_count) == 0) {
		debugfs_remove(bfa_debugfs_root);
		bfa_debugfs_root = NULL;
	}
}
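For context, a minimal user-space sketch of how the regrd file above would be exercised (not part of this diff): a request is written as hex "addr:len", and the captured words are then read back as raw 32-bit values. The debugfs mount point, the host number, and the register offset used here are assumptions for illustration only.

/*
 * Hypothetical user-space sketch for the regrd debugfs file described above.
 * Assumes debugfs is mounted at /sys/kernel/debug and the port is host0;
 * the register offset 0x18000 is an arbitrary example.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/bfa/host0/regrd";
	char req[32];
	uint32_t words[8];
	ssize_t n;
	int fd, i;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Request is "addr:len" in hex, as parsed by bfad_debugfs_write_regrd() */
	snprintf(req, sizeof(req), "%x:%x", 0x18000, 8);
	if (write(fd, req, strlen(req)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}

	/* Rewind, then read back the captured words as binary u32 data */
	lseek(fd, 0, SEEK_SET);
	n = read(fd, words, sizeof(words));
	if (n > 0) {
		for (i = 0; i < n / 4; i++)
			printf("word[%d] = 0x%08x\n", i, words[i]);
	}

	close(fd);
	return 0;
}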
@@ -46,7 +46,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
#else
#define BFAD_DRIVER_VERSION    "2.1.2.1"
#define BFAD_DRIVER_VERSION    "2.2.2.1"
#endif

@@ -111,6 +111,9 @@ struct bfad_port_s {
	struct bfad_im_port_s *im_port;	/* IM specific data */
	struct bfad_tm_port_s *tm_port;	/* TM specific data */
	struct bfad_ipfc_port_s *ipfc_port;	/* IPFC specific data */

	/* port debugfs specific data */
	struct dentry *port_debugfs_root;
};

/*

@@ -120,6 +123,8 @@ struct bfad_vport_s {
	struct bfad_port_s	drv_port;
	struct bfa_fcs_vport_s	fcs_vport;
	struct completion	*comp_del;
	struct list_head	list_entry;
	struct bfa_port_cfg_s	port_cfg;
};

/*

@@ -139,18 +144,6 @@ struct bfad_cfg_param_s {
	u32	binding_method;
};

union bfad_tmp_buf {
	/* From struct bfa_adapter_attr_s */
	char	manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
	char	serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
	char	model[BFA_ADAPTER_MODEL_NAME_LEN];
	char	fw_ver[BFA_VERSION_LEN];
	char	optrom_ver[BFA_VERSION_LEN];

	/* From struct bfa_ioc_pci_attr_s */
	u8	chip_rev[BFA_IOC_CHIP_REV_LEN];	/* chip revision */
};

/*
 * BFAD (PCI function) data structure
 */

@@ -193,8 +186,18 @@ struct bfad_s {
	struct bfa_plog_s	plog_buf;
	int			ref_count;
	bfa_boolean_t		ipfc_enabled;
	union bfad_tmp_buf	tmp_buf;
	struct fc_host_statistics	link_stats;
	struct list_head	pbc_pcfg_list;
	atomic_t		wq_reqcnt;
	/* debugfs specific data */
	char *regdata;
	u32 reglen;
	struct dentry *bfad_dentry_files[5];
};

struct bfad_pcfg_s {
	struct list_head	list_entry;
	struct bfa_port_cfg_s	port_cfg;
};

/*

@@ -280,7 +283,9 @@ void bfad_drv_uninit(struct bfad_s *bfad);
void	bfad_drv_log_level_set(struct bfad_s *bfad);
bfa_status_t	bfad_fc4_module_init(void);
void	bfad_fc4_module_exit(void);
int bfad_worker (void *ptr);
int bfad_worker(void *ptr);
void bfad_debugfs_init(struct bfad_port_s *port);
void bfad_debugfs_exit(struct bfad_port_s *port);

void bfad_pci_remove(struct pci_dev *pdev);
int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);

@@ -293,6 +298,7 @@ extern struct list_head bfad_list;
extern int	bfa_lun_queue_depth;
extern int	bfad_supported_fc4s;
extern int	bfa_linkup_delay;
extern int	bfa_debugfs_enable;
extern struct mutex	bfad_mutex;

#endif /* __BFAD_DRV_H__ */
@@ -33,16 +33,20 @@
#include <bfa_fwimg_priv.h>
#include <bfa.h>

u32 bfi_image_ct_size;
u32 bfi_image_cb_size;
u32 *bfi_image_ct;
u32 *bfi_image_cb;
u32 bfi_image_ct_fc_size;
u32 bfi_image_ct_cna_size;
u32 bfi_image_cb_fc_size;
u32 *bfi_image_ct_fc;
u32 *bfi_image_ct_cna;
u32 *bfi_image_cb_fc;

#define BFAD_FW_FILE_CT	"ctfw.bin"
#define BFAD_FW_FILE_CB	"cbfw.bin"
MODULE_FIRMWARE(BFAD_FW_FILE_CT);
MODULE_FIRMWARE(BFAD_FW_FILE_CB);
#define BFAD_FW_FILE_CT_FC	"ctfw_fc.bin"
#define BFAD_FW_FILE_CT_CNA	"ctfw_cna.bin"
#define BFAD_FW_FILE_CB_FC	"cbfw_fc.bin"
MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);

u32 *
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,

@@ -74,24 +78,54 @@ error:
u32 *
bfad_get_firmware_buf(struct pci_dev *pdev)
{
	if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
		if (bfi_image_ct_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct,
				&bfi_image_ct_size, BFAD_FW_FILE_CT);
		return bfi_image_ct;
	if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
		if (bfi_image_ct_fc_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct_fc,
				&bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
		return bfi_image_ct_fc;
	} else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
		if (bfi_image_ct_cna_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct_cna,
				&bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
		return bfi_image_ct_cna;
	} else {
		if (bfi_image_cb_size == 0)
			bfad_read_firmware(pdev, &bfi_image_cb,
				&bfi_image_cb_size, BFAD_FW_FILE_CB);
		return bfi_image_cb;
		if (bfi_image_cb_fc_size == 0)
			bfad_read_firmware(pdev, &bfi_image_cb_fc,
				&bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
		return bfi_image_cb_fc;
	}
}

u32 *
bfi_image_ct_get_chunk(u32 off)
{	return (u32 *)(bfi_image_ct + off); }
bfi_image_ct_fc_get_chunk(u32 off)
{	return (u32 *)(bfi_image_ct_fc + off); }

u32 *
bfi_image_cb_get_chunk(u32 off)
{	return (u32 *)(bfi_image_cb + off); }
bfi_image_ct_cna_get_chunk(u32 off)
{	return (u32 *)(bfi_image_ct_cna + off); }

u32 *
bfi_image_cb_fc_get_chunk(u32 off)
{	return (u32 *)(bfi_image_cb_fc + off); }

uint32_t *
bfi_image_get_chunk(int type, uint32_t off)
{
	switch (type) {
	case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_get_chunk(off);	break;
	case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_get_chunk(off);	break;
	case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_get_chunk(off);	break;
	default: return 0; break;
	}
}

uint32_t
bfi_image_get_size(int type)
{
	switch (type) {
	case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_size;	break;
	case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_size;	break;
	case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_size;	break;
	default: return 0; break;
	}
}
@@ -554,7 +554,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
		im_port->shost->transportt =
				bfad_im_scsi_vport_transport_template;

	error = scsi_add_host(im_port->shost, dev);
	error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
	if (error) {
		printk(KERN_WARNING "scsi_add_host failure %d\n", error);
		goto out_fc_rel;

@@ -567,6 +567,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,

out_fc_rel:
	scsi_host_put(im_port->shost);
	im_port->shost = NULL;
out_free_idr:
	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);

@@ -597,10 +598,12 @@ bfad_im_port_delete_handler(struct work_struct *work)
{
	struct bfad_im_port_s *im_port =
		container_of(work, struct bfad_im_port_s, port_delete_work);
	struct bfad_s *bfad = im_port->bfad;

	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
		im_port->flags |= BFAD_PORT_DELETE;
		fc_vport_terminate(im_port->fc_vport);
		atomic_dec(&bfad->wq_reqcnt);
	}

}

@@ -633,8 +636,11 @@ bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
{
	struct bfad_im_port_s *im_port = port->im_port;

	queue_work(bfad->im->drv_workq,
	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
		atomic_inc(&bfad->wq_reqcnt);
		queue_work(bfad->im->drv_workq,
			&im_port->port_delete_work);
	}
}

void

@@ -695,12 +701,27 @@ void
bfad_im_probe_undo(struct bfad_s *bfad)
{
	if (bfad->im) {
		while (atomic_read(&bfad->wq_reqcnt)) {
			printk(KERN_INFO "bfa %s: waiting workq processing,"
				" wq_reqcnt:%x\n", bfad->pci_name,
				atomic_read(&bfad->wq_reqcnt));
			schedule_timeout_uninterruptible(HZ);
		}
		bfad_os_destroy_workq(bfad->im);
		kfree(bfad->im);
		bfad->im = NULL;
	}
}

/**
 * Call back function to handle IO redirection state change
 */
void
bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect)
{
	/* Do nothing */
}

struct Scsi_Host *
bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{

@@ -1204,9 +1225,9 @@ int
bfad_os_get_linkup_delay(struct bfad_s *bfad)
{

	u8	nwwns = 0;
	wwn_t	*wwns;
	int	ldelay;
	u8	nwwns = 0;
	wwn_t	wwns[BFA_PREBOOT_BOOTLUN_MAX];
	int	ldelay;

	/*
	 * Querying for the boot target port wwns

@@ -1215,7 +1236,7 @@ bfad_os_get_linkup_delay(struct bfad_s *bfad)
	 * else => local boot machine set bfa_linkup_delay = 10
	 */

	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, &wwns);
	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);

	if (nwwns > 0) {
		/* If boot over SAN; linkup_delay = 30sec */
@@ -18,9 +18,6 @@
#ifndef __BFAD_IM_COMPAT_H__
#define __BFAD_IM_COMPAT_H__

extern u32 *bfi_image_buf;
extern u32 bfi_image_size;

extern struct device_attribute *bfad_im_host_attrs[];
extern struct device_attribute *bfad_im_vport_attrs[];

@@ -37,10 +34,12 @@ bfad_load_fwimg(struct pci_dev *pdev)
static inline void
bfad_free_fwimg(void)
{
	if (bfi_image_ct_size && bfi_image_ct)
		vfree(bfi_image_ct);
	if (bfi_image_cb_size && bfi_image_cb)
		vfree(bfi_image_cb);
	if (bfi_image_ct_fc_size && bfi_image_ct_fc)
		vfree(bfi_image_ct_fc);
	if (bfi_image_ct_cna_size && bfi_image_ct_cna)
		vfree(bfi_image_ct_cna);
	if (bfi_image_cb_fc_size && bfi_image_cb_fc)
		vfree(bfi_image_cb_fc);
}

#endif
@@ -26,7 +26,11 @@ BFA_TRC_FILE(LDRV, INTR);
static int msix_disable_cb;
static int msix_disable_ct;
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable MSIX for Brocade-415/425/815/825"
		" cards, default=0, Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable MSIX for Brocade-1010/1020/804"
		" cards, default=0, Range[false:0|true:1]");
/**
 * Line based interrupt handler.
 */

@@ -151,8 +155,8 @@ bfad_setup_intr(struct bfad_s *bfad)
	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if ((pdev->device == BFA_PCI_DEVICE_ID_CT && !msix_disable_ct) ||
	    (pdev->device != BFA_PCI_DEVICE_ID_CT && !msix_disable_cb)) {
	if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
	    (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {

		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
@@ -789,7 +789,7 @@ bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)

	list_for_each_safe(qe, qen, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *)qe;
		bfa_fcs_vport_delete(vport);
		bfa_fcs_vport_fcs_delete(vport);
	}

	bfa_fcs_port_delete(&fabric->bport);

@@ -1027,6 +1027,32 @@ bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
	return fabric->num_vports;
}

/*
 * Get OUI of the attached switch.
 *
 * Note : Use of this function should be avoided as much as possible.
 *        This function should be used only if there is any requirement
 *        to check for FOS version below 6.3.
 *        To check if the attached fabric is a brocade fabric, use
 *        bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
 *        or above only.
 */
u16
bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
{
	wwn_t fab_nwwn;
	u8 *tmp;
	u16 oui;

	fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);

	tmp = (uint8_t *)&fab_nwwn;
	oui = (tmp[3] << 8) | tmp[4];

	return oui;
}

/**
 * Unsolicited frame receive handling.
 */

@@ -1270,6 +1296,22 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,

}

/**
 *
 * @param[in] fabric - fabric
 * @param[in] node_symname -
 *            Caller allocated buffer to receive the symbolic name
 *
 * @return - none
 */
void
bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname)
{
	bfa_os_memcpy(node_symname,
			fcs->fabric.bport.port_cfg.sym_name.symname,
			BFA_SYMNAME_MAXLEN);
}

/**
 * Not used by FCS.
 */
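A standalone sketch (not from this diff) of the byte extraction done by bfa_fcs_fabric_get_switch_oui() above: bytes 3 and 4 of the fabric node WWN form the OUI value that the new BFA_FCS_BRCD_SWITCH_OUI constant below (0x051e) is checked against. The WWN value used here is an assumed example.

/* Illustration only; the sample WWN is hypothetical */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed fabric node WWN 10:00:00:05:1e:01:02:03, stored big-endian */
	uint8_t nwwn[8] = { 0x10, 0x00, 0x00, 0x05, 0x1e, 0x01, 0x02, 0x03 };
	uint16_t oui = (uint16_t)((nwwn[3] << 8) | nwwn[4]);

	/* Prints 0x051e for a Brocade-style WWN */
	printf("switch oui = 0x%04x\n", oui);
	return 0;
}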
@@ -110,6 +110,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
	switch (event) {
	case BFA_FCS_ITNIM_SM_ONLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		itnim->prli_retries = 0;
		bfa_fcs_itnim_send_prli(itnim, NULL);
		break;

@@ -174,8 +175,12 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,

	switch (event) {
	case BFA_FCS_ITNIM_SM_RSP_OK:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
		bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
		if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) {
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		} else {
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
			bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
		}
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:

@@ -193,9 +198,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		/*
		 * dont discard fcxp. accept will reach same state
		 */
		bfa_fcxp_discard(itnim->fcxp);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:

@@ -218,8 +221,16 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,

	switch (event) {
	case BFA_FCS_ITNIM_SM_TIMEOUT:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		bfa_fcs_itnim_send_prli(itnim, NULL);
		if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) {
			itnim->prli_retries++;
			bfa_trc(itnim->fcs, itnim->prli_retries);
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
			bfa_fcs_itnim_send_prli(itnim, NULL);
		} else {
			/* invoke target offline */
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
			bfa_fcs_rport_logo_imp(itnim->rport);
		}
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:

@@ -422,7 +433,7 @@ bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
	bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
			BFA_FALSE, FC_CLASS_3, len, &fchs,
			bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ,
			FC_RA_TOV);
			FC_ELS_TOV);

	itnim->stats.prli_sent++;
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);

@@ -467,7 +478,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
					BFA_RPORT_INITIATOR;
			itnim->stats.prli_rsp_acc++;
			bfa_sm_send_event(itnim,
					BFA_FCS_ITNIM_SM_INITIATOR);
					BFA_FCS_ITNIM_SM_RSP_OK);
			return;
		}

@@ -738,6 +749,7 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
	attr->rec_support = itnim->rec_support;
	attr->conf_comp = itnim->conf_comp;
	attr->task_retry_id = itnim->task_retry_id;
	bfa_os_memset(&attr->io_latency, 0, sizeof(struct bfa_itnim_latency_s));

	return BFA_STATUS_OK;
}

@@ -793,7 +805,7 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,

	switch (els_cmd->els_code) {
	case FC_ELS_PRLO:
		/* bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_PRLO); */
		bfa_fcs_rport_prlo(itnim->rport, fchs->ox_id);
		break;

	default:
@@ -26,6 +26,8 @@
#include <fcs/bfa_fcs_vport.h>
#include <fcs/bfa_fcs_lport.h>

#define BFA_FCS_BRCD_SWITCH_OUI  0x051e

/*
 * fcs friend functions: only between fcs modules
 */

@@ -60,4 +62,7 @@ void bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric,

void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
			wwn_t fabric_name);
u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname);

#endif /* __FCS_FABRIC_H__ */
@@ -24,6 +24,8 @@

#include <fcs/bfa_fcs_rport.h>

#define BFA_FCS_RPORT_MAX_RETRIES (5)

void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
					u16 len);
void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);

@@ -41,6 +43,7 @@ void bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port,
void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
			struct fc_logi_s *plogi);
void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id);
void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
void bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport);
Some files were not shown because too many files have changed in this diff.