SCSI misc on 20120724
The most important feature of this patch set is the new async infrastructure
that makes sure async_synchronize_full() synchronizes all domains and allows
us to remove all the hacks (like having scsi_complete_async_scans() in the
device base code), and means that the async infrastructure will "just work"
in future.  The rest is assorted driver updates (aacraid, bnx2fc, virtio-scsi,
megaraid, bfa, lpfc, qla2xxx, qla4xxx) plus a lot of infrastructure work in
SAS and FC.

Signed-off-by: James Bottomley <JBottomley@Parallels.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.18 (GNU/Linux)

iQEcBAABAgAGBQJQDjDCAAoJEDeqqVYsXL0M/sMH/jVgBfF1mjR+DQuTscKyD21w
0BQLn5OmvDZDqo44iqQzNRObw7CxkBkUtHoozsknLijw+KggER653ZOAtUdIHfI/
/uo7iJQ3J3D/Ezm99HYSpZiF2juZwsBRtFBoKkGqOpMlzFUx5o4hUbH5OcINxnHR
VmvJU5K1kg8D77Q6zK+Atl14/Rfibc2IoufFmbYdplUAM/tV0BpBSSHJAJvqua76
NGMl4KJcPZnXe/4LXcxZia5A2efdFFEzaQ2mM9rUVEAgHDAxc0Zg9IoDhGd08FX4
G55NK+6+bKb9s7bgyva0T/iy817TRCzjteeYNFrb8nBRe7aQbAivaBHQFXIyvdQ=
=y2sh
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley.

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (97 commits)
  [SCSI] Revert "[SCSI] fix async probe regression"
  [SCSI] cleanup usages of scsi_complete_async_scans
  [SCSI] queue async scan work to an async_schedule domain
  [SCSI] async: make async_synchronize_full() flush all work regardless of domain
  [SCSI] async: introduce 'async_domain' type
  [SCSI] bfa: Fix to set correct return error codes and misc cleanup.
  [SCSI] aacraid: Series 7 Async. (performance) mode support
  [SCSI] aha152x: Allow use on 64bit systems
  [SCSI] virtio-scsi: Add vdrv->scan for post VIRTIO_CONFIG_S_DRIVER_OK LUN scanning
  [SCSI] bfa: squelch lockdep complaint with a spin_lock_init
  [SCSI] qla2xxx: remove unnecessary reads of PCI_CAP_ID_EXP
  [SCSI] qla4xxx: remove unnecessary read of PCI_CAP_ID_EXP
  [SCSI] ufs: fix incorrect return value about SUCCESS and FAILED
  [SCSI] ufs: reverse the ufshcd_is_device_present logic
  [SCSI] ufs: use module_pci_driver
  [SCSI] usb-storage: update usb devices for write cache quirk in quirk list.
  [SCSI] usb-storage: add support for write cache quirk
  [SCSI] set to WCE if usb cache quirk is present.
  [SCSI] virtio-scsi: hotplug support for virtio-scsi
  [SCSI] virtio-scsi: split scatterlist per target
  ...
This commit is contained in: commit bdc0077af5
114 changed files with 2513 additions and 1022 deletions
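The async_domain rework called out above (the "async: introduce 'async_domain' type" and async_synchronize_full() commits) is what lets wait_for_device_probe() drop its scsi_complete_async_scans() call in the driver-core hunk further down. As a rough illustration only — a hedged sketch assembled from the commit titles and the regulator/driver-core hunks, not code from this merge, with made-up names (my_scan_domain, my_slow_scan) — a driver can now keep its slow scanning work in its own domain and still have a plain async_synchronize_full() wait for it:

	#include <linux/async.h>
	#include <linux/init.h>
	#include <linux/module.h>

	/*
	 * A named domain for this driver's slow scan work.  ASYNC_DOMAIN()
	 * registers the domain, so async_synchronize_full() now flushes it
	 * too; ASYNC_DOMAIN_EXCLUSIVE() keeps a domain private, flushed only
	 * on explicit request.
	 */
	static ASYNC_DOMAIN(my_scan_domain);

	static void my_slow_scan(void *data, async_cookie_t cookie)
	{
		/* long-running discovery work, run off the probe path */
	}

	static int __init my_driver_init(void)
	{
		/* queue the scan into our own domain, not the global one */
		async_schedule_domain(my_slow_scan, NULL, &my_scan_domain);
		return 0;
	}
	module_init(my_driver_init);

	static void __exit my_driver_exit(void)
	{
		/* wait only for work queued on this particular domain */
		async_synchronize_full_domain(&my_scan_domain);
	}
	module_exit(my_driver_exit);

	MODULE_LICENSE("GPL");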
@@ -2936,6 +2936,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 					initial READ(10) command);
 				o = CAPACITY_OK (accept the capacity
 					reported by the device);
+				p = WRITE_CACHE (the device cache is ON
+					by default);
 				r = IGNORE_RESIDUE (the device reports
 					bogus residue values);
 				s = SINGLE_LUN (the device has only one
@@ -43,6 +43,9 @@ static void blk_end_sync_rq(struct request *rq, int error)
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
  *    for execution.  Don't wait for completion.
+ *
+ * Note:
+ *    This function will invoke @done directly if the queue is dead.
  */
 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 			   struct request *rq, int at_head,
@@ -51,18 +54,20 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	WARN_ON(irqs_disabled());
-	spin_lock_irq(q->queue_lock);
-
-	if (unlikely(blk_queue_dead(q))) {
-		spin_unlock_irq(q->queue_lock);
-		rq->errors = -ENXIO;
-		if (rq->end_io)
-			rq->end_io(rq, rq->errors);
-		return;
-	}
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
+
+	spin_lock_irq(q->queue_lock);
+
+	if (unlikely(blk_queue_dead(q))) {
+		rq->errors = -ENXIO;
+		if (rq->end_io)
+			rq->end_io(rq, rq->errors);
+		spin_unlock_irq(q->queue_lock);
+		return;
+	}
+
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
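The hunk above moves the rq->rq_disk/rq->end_io assignment ahead of the dead-queue check, so a request submitted to a dead queue now fails through the caller's completion callback instead of being silently dropped (hence the new kerneldoc note). To make the contract concrete, here is a hedged sketch of a typical caller; the request setup (blk_get_request(), REQ_TYPE_SPECIAL, the my_* names) is an illustrative assumption about the block API of this period, not something taken from the diff:

	#include <linux/blkdev.h>
	#include <linux/completion.h>

	static void my_end_io(struct request *rq, int error)
	{
		/* runs on normal completion, or directly from
		 * blk_execute_rq_nowait() when the queue is already dead
		 * (rq->errors is then -ENXIO) */
		complete(rq->end_io_data);
	}

	static int my_submit(struct request_queue *q)
	{
		DECLARE_COMPLETION_ONSTACK(wait);
		struct request *rq;
		int err;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;

		rq->cmd_type = REQ_TYPE_SPECIAL;	/* internal, non-fs request */
		rq->end_io_data = &wait;

		/* queue at the tail and return without waiting */
		blk_execute_rq_nowait(q, NULL, rq, 0, my_end_io);

		wait_for_completion(&wait);
		err = rq->errors ? -EIO : 0;
		blk_put_request(rq);
		return err;
	}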
@@ -80,6 +80,8 @@ const struct ata_port_operations ata_base_port_ops = {
 	.prereset		= ata_std_prereset,
 	.postreset		= ata_std_postreset,
 	.error_handler		= ata_std_error_handler,
+	.sched_eh		= ata_std_sched_eh,
+	.end_eh			= ata_std_end_eh,
 };
 
 const struct ata_port_operations sata_port_ops = {
@@ -6642,6 +6644,8 @@ struct ata_port_operations ata_dummy_port_ops = {
 	.qc_prep		= ata_noop_qc_prep,
 	.qc_issue		= ata_dummy_qc_issue,
 	.error_handler		= ata_dummy_error_handler,
+	.sched_eh		= ata_std_sched_eh,
+	.end_eh			= ata_std_end_eh,
 };
 
 const struct ata_port_info ata_dummy_port_info = {
@@ -793,12 +793,12 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
 	ata_for_each_link(link, ap, HOST_FIRST)
 		memset(&link->eh_info, 0, sizeof(link->eh_info));
 
-	/* Clear host_eh_scheduled while holding ap->lock such
-	 * that if exception occurs after this point but
-	 * before EH completion, SCSI midlayer will
+	/* end eh (clear host_eh_scheduled) while holding
+	 * ap->lock such that if exception occurs after this
+	 * point but before EH completion, SCSI midlayer will
 	 * re-initiate EH.
 	 */
-	host->host_eh_scheduled = 0;
+	ap->ops->end_eh(ap);
 
 	spin_unlock_irqrestore(ap->lock, flags);
 	ata_eh_release(ap);
@@ -985,6 +985,48 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
+/**
+ *	ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
+ *	@ap: ATA port to schedule EH for
+ *
+ *	LOCKING: inherited from ata_port_schedule_eh
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_std_sched_eh(struct ata_port *ap)
+{
+	WARN_ON(!ap->ops->error_handler);
+
+	if (ap->pflags & ATA_PFLAG_INITIALIZING)
+		return;
+
+	ata_eh_set_pending(ap, 1);
+	scsi_schedule_eh(ap->scsi_host);
+
+	DPRINTK("port EH scheduled\n");
+}
+EXPORT_SYMBOL_GPL(ata_std_sched_eh);
+
+/**
+ *	ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
+ *	@ap: ATA port to end EH for
+ *
+ *	In the libata object model there is a 1:1 mapping of ata_port to
+ *	shost, so host fields can be directly manipulated under ap->lock, in
+ *	the libsas case we need to hold a lock at the ha->level to coordinate
+ *	these events.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host lock)
+ */
+void ata_std_end_eh(struct ata_port *ap)
+{
+	struct Scsi_Host *host = ap->scsi_host;
+
+	host->host_eh_scheduled = 0;
+}
+EXPORT_SYMBOL(ata_std_end_eh);
+
+
 /**
  *	ata_port_schedule_eh - schedule error handling without a qc
  *	@ap: ATA port to schedule EH for
@@ -997,15 +1039,8 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
  */
 void ata_port_schedule_eh(struct ata_port *ap)
 {
-	WARN_ON(!ap->ops->error_handler);
-
-	if (ap->pflags & ATA_PFLAG_INITIALIZING)
-		return;
-
-	ata_eh_set_pending(ap, 1);
-	scsi_schedule_eh(ap->scsi_host);
-
-	DPRINTK("port EH scheduled\n");
+	/* see: ata_std_sched_eh, unless you know better */
+	ap->ops->sched_eh(ap);
 }
 
 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
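The libata hunks above route EH scheduling and completion through the new .sched_eh/.end_eh port operations (defaulting to ata_std_sched_eh()/ata_std_end_eh()), so a driver whose ports do not map 1:1 onto a Scsi_Host — the libsas case the comments mention — can coordinate EH its own way. A hedged sketch of what such an override might look like; my_ata_sched_eh/my_ata_end_eh and the ops table are hypothetical, not taken from this series:

	#include <linux/libata.h>

	/* hypothetical overrides for a driver that cannot rely on the
	 * 1:1 ata_port <-> Scsi_Host assumption behind the std hooks */
	static void my_ata_sched_eh(struct ata_port *ap)
	{
		/* kick the driver's own error-handling machinery instead of
		 * scsi_schedule_eh() on a host shared by several ports */
	}

	static void my_ata_end_eh(struct ata_port *ap)
	{
		/* clear the driver's EH-pending state under its own lock */
	}

	static struct ata_port_operations my_port_ops = {
		.inherits	= &sata_port_ops,
		.sched_eh	= my_ata_sched_eh,
		.end_eh		= my_ata_end_eh,
	};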
@@ -24,7 +24,6 @@
 #include <linux/wait.h>
 #include <linux/async.h>
 #include <linux/pm_runtime.h>
-#include <scsi/scsi_scan.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -333,7 +332,6 @@ void wait_for_device_probe(void)
 	/* wait for the known devices to complete their probing */
 	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
 	async_synchronize_full();
-	scsi_complete_async_scans();
 }
 EXPORT_SYMBOL_GPL(wait_for_device_probe);
@@ -2826,7 +2826,7 @@ static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
 int regulator_bulk_enable(int num_consumers,
 			  struct regulator_bulk_data *consumers)
 {
-	LIST_HEAD(async_domain);
+	ASYNC_DOMAIN_EXCLUSIVE(async_domain);
 	int i;
 	int ret = 0;
 
@@ -263,23 +263,6 @@ config SCSI_SCAN_ASYNC
 	  You can override this choice by specifying "scsi_mod.scan=sync"
 	  or async on the kernel's command line.
 
-config SCSI_WAIT_SCAN
-	tristate  # No prompt here, this is an invisible symbol.
-	default m
-	depends on SCSI
-	depends on MODULES
-# scsi_wait_scan is a loadable module which waits until all the async scans are
-# complete.  The idea is to use it in initrd/ initramfs scripts.  You modprobe
-# it after all the modprobes of the root SCSI drivers and it will wait until
-# they have all finished scanning their buses before allowing the boot to
-# proceed.  (This method is not applicable if targets boot independently in
-# parallel with the initiator, or with transports with non-deterministic target
-# discovery schemes, or if a transport driver does not support scsi_wait_scan.)
-#
-# This symbol is not exposed as a prompt because little is to be gained by
-# disabling it, whereas people who accidentally switch it off may wonder why
-# their mkinitrd gets into trouble.
-
 menu "SCSI Transports"
 	depends on SCSI
 
@@ -461,7 +444,7 @@ config SCSI_ACARD
 
 config SCSI_AHA152X
 	tristate "Adaptec AHA152X/2825 support"
-	depends on ISA && SCSI && !64BIT
+	depends on ISA && SCSI
 	select SCSI_SPI_ATTRS
 	select CHECK_SIGNATURE
 	---help---
@@ -159,8 +159,6 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR)	+= osd/
 # This goes last, so that "real" scsi devices probe earlier
 obj-$(CONFIG_SCSI_DEBUG)	+= scsi_debug.o
 
-obj-$(CONFIG_SCSI_WAIT_SCAN)	+= scsi_wait_scan.o
-
 scsi_mod-y			+= scsi.o hosts.o scsi_ioctl.o constants.o \
 				   scsicam.o scsi_error.o scsi_lib.o
 scsi_mod-$(CONFIG_SCSI_DMA)	+= scsi_lib_dma.o
aacraid driver update (Series 7 Async/performance mode support; AAC_DRIVER_BUILD
28900 -> 29800).  The excerpted hunks cover:

 * New aac_build_sgraw2()/aac_convert_sgraw2() helpers and an aac_convert_sgl
   module parameter ("Convert non-conformable s/g list", default on) alongside
   the existing aac_sync_mode parameter.
 * aac_read_raw_io()/aac_write_raw_io(): on AAC_COMM_MESSAGE_TYPE2 adapters
   that are not in sync mode, build a struct aac_raw_io2 (blockLow/blockHigh,
   byteCount, cid, RIO2_* flags, IEEE1212 s/g list from aac_build_sgraw2())
   and send ContainerRawIo2; otherwise keep the struct aac_raw_io path (flags
   now spelled RIO_TYPE_READ/RIO_TYPE_WRITE/RIO_SUREWRITE) and send
   ContainerRawIo.
 * aac_build_sgraw2() trims the final element so the byte count matches
   scsi_bufflen() exactly and either marks the list RIO2_SGL_CONFORMANT or
   re-chunks it into equal-sized elements via aac_convert_sgraw2().
 * aac_srb_callback()/aac_response_normal()/aac_intr_normal(): fast responses
   set FIB_CONTEXT_FLAG_FASTRESP and fake SRB_STATUS_SUCCESS/SAM_STAT_GOOD
   instead of reading the (absent) reply data.
 * Header changes: new struct sge_ieee1212 and struct aac_raw_io2;
   FIB_MAGIC2/FIB_MAGIC2_64 header types; ContainerRawIo2 (503);
   ADAPTER_INIT_STRUCT_REVISION_7 (Denali); INITFLAGS_NEW_COMM_TYPE1_SUPPORTED
   corrected from 0x41 to 0x40 plus new FAST_JBOD and NEW_COMM_TYPE2 flags;
   a reworked struct aac_fibhdr (timestamp union, 64-bit sender address, MSGU
   Handle); struct aac_dev gains resource_size_t base_start/dbg_base/base_size
   and AAC_COMM_MESSAGE_TYPE2; RIO_*/RIO2_* flag sets replace the old
   IO_TYPE_* defines.
 * Init/comm paths treat AAC_COMM_MESSAGE_TYPE2 like TYPE1 (host RRQ setup,
   HostRRQ_Addr*, revision 7 init struct, MiniPortRevision = number of MSI-X);
   unsupported TYPE3/TYPE4 adapters fall back to sync mode; raw SRB FIBs are
   marked not FastResponseCapable; aac_fib_init() zeroes the whole FIB header;
   the 3-minute adapter wait uses a jiffies timeout with cpu_relax() instead
   of counting udelay(5) iterations; StructType checks accept the new magics.
 * Probe/teardown: aac->fibs is allocated with kzalloc(); __aac_shutdown()
   wakes FIBs still waiting for responses before kthread_stop(); the mapped
   BAR base moves from scsi_host_ptr->base to dev->base_start for the
   nark/rkt/rx/sa/src ioremap and debug-base paths; aac_src_intr_message()
   byte-swaps host_rrq entries and drains all pending responses in a loop;
   aac_src_deliver_message() gains a TYPE2 fast path (FIB_MAGIC2 header, no
   transport header) and returns -EMSGSIZE for oversized FIBs;
   aac_srcv_init() now requires AAC_COMM_MESSAGE_TYPE2.
arcmsr:
 * arcmsr_hardware_reset(): rework the ARC1880 diagnostic-write-enable poll to
   test (readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0
   rather than OR-ing the flag into the value just read.

bfa:
 * bfa_fcs_vport_free(): call bfad_im_port_delete() unconditionally (the
   vport_drv NULL check is dropped).
 * bfa_ioc_mbox_isr(): reject mc >= BFI_MC_MAX rather than only mc > BFI_MC_MAX.
 * bfad_pci_probe(): initialise bfad_aen_spinlock with spin_lock_init() (the
   lockdep fix from the shortlog).
 * bfad_im_bsg_els_ct_request(): fail with -ENOMEM when the bsg_fcpt
   allocation fails, and free it with -EIO when copy_from_user() fails.
 * bfad_im_probe(): drop the goto/rc error handling and return
   BFA_STATUS_ENOMEM / BFA_STATUS_FAILED / BFA_STATUS_OK directly.
@@ -1,3 +1,4 @@
 obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
 
-bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o \
+	    bnx2fc_debug.o
@@ -11,6 +11,8 @@
  * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
@@ -57,13 +59,12 @@
 #include <scsi/fc/fc_fcp.h>
 
 #include "57xx_hsi_bnx2fc.h"
-#include "bnx2fc_debug.h"
 #include "../../net/ethernet/broadcom/cnic_if.h"
 #include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"1.0.11"
+#define BNX2FC_VERSION		"1.0.12"
 
 #define PFX			"bnx2fc: "
 
@@ -278,6 +279,7 @@ struct bnx2fc_rport {
 #define BNX2FC_FLAG_CTX_ALLOC_FAILURE	0x6
 #define BNX2FC_FLAG_UPLD_REQ_COMPL	0x7
 #define BNX2FC_FLAG_EXPL_LOGO		0x8
+#define BNX2FC_FLAG_DISABLE_FAILED	0x9
 
 	u8 src_addr[ETH_ALEN];
 	u32 max_sqes;
@@ -558,4 +560,7 @@ void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 				enum fc_rctl r_ctl);
 
+
+#include "bnx2fc_debug.h"
+
 #endif
drivers/scsi/bnx2fc/bnx2fc_debug.c (new file, 70 lines)
@ -0,0 +1,70 @@
|
|||
#include "bnx2fc.h"
|
||||
|
||||
void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
if (likely(!(bnx2fc_debug_level & LOG_IO)))
|
||||
return;
|
||||
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
if (io_req && io_req->port && io_req->port->lport &&
|
||||
io_req->port->lport->host)
|
||||
shost_printk(KERN_INFO, io_req->port->lport->host,
|
||||
PFX "xid:0x%x %pV",
|
||||
io_req->xid, &vaf);
|
||||
else
|
||||
pr_info("NULL %pV", &vaf);
|
||||
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
if (likely(!(bnx2fc_debug_level & LOG_TGT)))
|
||||
return;
|
||||
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host &&
|
||||
tgt->rport)
|
||||
shost_printk(KERN_INFO, tgt->port->lport->host,
|
||||
PFX "port:%x %pV",
|
||||
tgt->rport->port_id, &vaf);
|
||||
else
|
||||
pr_info("NULL %pV", &vaf);
|
||||
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...)
|
||||
{
|
||||
struct va_format vaf;
|
||||
va_list args;
|
||||
|
||||
if (likely(!(bnx2fc_debug_level & LOG_HBA)))
|
||||
return;
|
||||
|
||||
va_start(args, fmt);
|
||||
|
||||
vaf.fmt = fmt;
|
||||
vaf.va = &args;
|
||||
|
||||
if (lport && lport->host)
|
||||
shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf);
|
||||
else
|
||||
pr_info("NULL %pV", &vaf);
|
||||
|
||||
va_end(args);
|
||||
}
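The new bnx2fc_debug.c above moves the logging helpers out of line and forwards the caller's format through struct va_format and the %pV printk extension. A rough user-space analogue of the same shape, using vfprintf in place of shost_printk()/%pV and an assumed debug_level module parameter; only the gating and format-forwarding pattern is the point here:

#include <stdarg.h>
#include <stdio.h>

#define LOG_IO  0x01
#define LOG_TGT 0x02

static unsigned int debug_level = LOG_IO;   /* assumed module parameter */

/* Out-of-line debug helper: the fast path is a single bitmask test,
 * and the format string is type-checked at the declaration. */
__attribute__((format(printf, 2, 3)))
static void io_dbg(unsigned int xid, const char *fmt, ...)
{
        va_list args;

        if (!(debug_level & LOG_IO))
                return;

        va_start(args, fmt);
        fprintf(stderr, "bnx2fc: xid:0x%x ", xid);
        vfprintf(stderr, fmt, args);
        va_end(args);
}

int main(void)
{
        io_dbg(0x1a, "cleanup of %d requests\n", 3);
        return 0;
}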
@ -11,60 +11,23 @@
|
|||
|
||||
extern unsigned int bnx2fc_debug_level;
|
||||
|
||||
#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \
|
||||
do { \
|
||||
if (unlikely(bnx2fc_debug_level & LEVEL)) \
|
||||
do { \
|
||||
CMD; \
|
||||
} while (0); \
|
||||
} while (0)
|
||||
#define BNX2FC_ELS_DBG(fmt, ...) \
|
||||
do { \
|
||||
if (unlikely(bnx2fc_debug_level & LOG_ELS)) \
|
||||
pr_info(fmt, ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define BNX2FC_ELS_DBG(fmt, arg...) \
|
||||
BNX2FC_CHK_LOGGING(LOG_ELS, \
|
||||
printk(KERN_INFO PFX fmt, ##arg))
|
||||
#define BNX2FC_MISC_DBG(fmt, ...) \
|
||||
do { \
|
||||
if (unlikely(bnx2fc_debug_level & LOG_MISC)) \
|
||||
pr_info(fmt, ##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define BNX2FC_MISC_DBG(fmt, arg...) \
|
||||
BNX2FC_CHK_LOGGING(LOG_MISC, \
|
||||
printk(KERN_INFO PFX fmt, ##arg))
|
||||
|
||||
#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
|
||||
do { \
|
||||
if (!io_req || !io_req->port || !io_req->port->lport || \
|
||||
!io_req->port->lport->host) \
|
||||
BNX2FC_CHK_LOGGING(LOG_IO, \
|
||||
printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_IO, \
|
||||
shost_printk(KERN_INFO, \
|
||||
(io_req)->port->lport->host, \
|
||||
PFX "xid:0x%x " fmt, \
|
||||
(io_req)->xid, ##arg)); \
|
||||
} while (0)
|
||||
|
||||
#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \
|
||||
do { \
|
||||
if (!tgt || !tgt->port || !tgt->port->lport || \
|
||||
!tgt->port->lport->host || !tgt->rport) \
|
||||
BNX2FC_CHK_LOGGING(LOG_TGT, \
|
||||
printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_TGT, \
|
||||
shost_printk(KERN_INFO, \
|
||||
(tgt)->port->lport->host, \
|
||||
PFX "port:%x " fmt, \
|
||||
(tgt)->rport->port_id, ##arg)); \
|
||||
} while (0)
|
||||
|
||||
|
||||
#define BNX2FC_HBA_DBG(lport, fmt, arg...) \
|
||||
do { \
|
||||
if (!lport || !lport->host) \
|
||||
BNX2FC_CHK_LOGGING(LOG_HBA, \
|
||||
printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
|
||||
else \
|
||||
BNX2FC_CHK_LOGGING(LOG_HBA, \
|
||||
shost_printk(KERN_INFO, lport->host, \
|
||||
PFX fmt, ##arg)); \
|
||||
} while (0)
|
||||
__printf(2, 3)
|
||||
void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...);
|
||||
__printf(2, 3)
|
||||
void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...);
|
||||
|
||||
#endif
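The remaining BNX2FC_ELS_DBG/BNX2FC_MISC_DBG macros keep the classic do { ... } while (0) wrapper so the expansion still behaves as a single statement under a bare if/else. A small sketch of that idiom; the ##__VA_ARGS__ form is the GNU extension the kernel relies on, and the names are made up:

#include <stdio.h>

#define LOG_ELS 0x04
static unsigned int debug_level = LOG_ELS;

/* do { ... } while (0) makes the macro expand to a single statement,
 * so it composes safely with a bare if/else at the call site. */
#define ELS_DBG(fmt, ...)                                               \
        do {                                                            \
                if (debug_level & LOG_ELS)                              \
                        printf("bnx2fc: " fmt, ##__VA_ARGS__);          \
        } while (0)

int main(void)
{
        int busy = 0;

        if (busy)
                ELS_DBG("deferring ELS, busy=%d\n", busy);
        else
                ELS_DBG("sending ELS now\n");
        return 0;
}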
@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
|
|||
|
||||
#define DRV_MODULE_NAME "bnx2fc"
|
||||
#define DRV_MODULE_VERSION BNX2FC_VERSION
|
||||
#define DRV_MODULE_RELDATE "Apr 24, 2012"
|
||||
#define DRV_MODULE_RELDATE "Jun 04, 2012"
|
||||
|
||||
|
||||
static char version[] __devinitdata =
|
||||
|
@ -286,7 +286,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
|
|||
struct fcoe_port *port;
|
||||
struct fcoe_hdr *hp;
|
||||
struct bnx2fc_rport *tgt;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
u8 sof, eof;
|
||||
u32 crc;
|
||||
unsigned int hlen, tlen, elen;
|
||||
|
@ -412,7 +412,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
|
|||
}
|
||||
|
||||
/*update tx stats */
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
stats->TxFrames++;
|
||||
stats->TxWords += wlen;
|
||||
put_cpu();
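The dev_stats to stats renames in this and the following hunks all touch the same per-CPU counter pattern: pick the current CPU's slot, bump it without a lock, and sum the slots when reporting. A user-space approximation using sched_getcpu() in place of get_cpu()/put_cpu(); it is only a sketch, since userspace threads are not pinned the way the kernel path is:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS 64

/* One counter slot per CPU avoids a shared cache line and a global lock,
 * mirroring what per_cpu_ptr(lport->stats, get_cpu()) does in the driver.
 * A reader sums all slots to get the total. */
struct tx_stats {
        unsigned long tx_frames;
        unsigned long tx_words;
} __attribute__((aligned(64)));

static struct tx_stats stats[MAX_CPUS];

static void account_tx(unsigned int words)
{
        int cpu = sched_getcpu();       /* the kernel path pins with get_cpu()/put_cpu() */

        if (cpu < 0 || cpu >= MAX_CPUS)
                cpu = 0;
        stats[cpu].tx_frames++;
        stats[cpu].tx_words += words;
}

int main(void)
{
        unsigned long frames = 0;

        account_tx(16);
        for (int i = 0; i < MAX_CPUS; i++)
                frames += stats[i].tx_frames;
        printf("tx_frames=%lu\n", frames);
        return 0;
}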
@ -522,7 +522,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
|
|||
u32 fr_len;
|
||||
struct fc_lport *lport;
|
||||
struct fcoe_rcv_info *fr;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
struct fc_frame_header *fh;
|
||||
struct fcoe_crc_eof crc_eof;
|
||||
struct fc_frame *fp;
|
||||
|
@ -551,7 +551,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
|
|||
skb_pull(skb, sizeof(struct fcoe_hdr));
|
||||
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
|
||||
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
stats->RxFrames++;
|
||||
stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
|
||||
|
||||
|
@ -942,7 +942,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
|
|||
FC_PORTTYPE_UNKNOWN;
|
||||
mutex_unlock(&lport->lp_mutex);
|
||||
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
|
||||
per_cpu_ptr(lport->dev_stats,
|
||||
per_cpu_ptr(lport->stats,
|
||||
get_cpu())->LinkFailureCount++;
|
||||
put_cpu();
|
||||
fcoe_clean_pending_queue(lport);
|
||||
|
@ -2062,11 +2062,11 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
|
|||
struct fcoe_ctlr *ctlr;
|
||||
struct bnx2fc_interface *interface;
|
||||
struct bnx2fc_hba *hba;
|
||||
struct net_device *phys_dev;
|
||||
struct net_device *phys_dev = netdev;
|
||||
struct fc_lport *lport;
|
||||
struct ethtool_drvinfo drvinfo;
|
||||
int rc = 0;
|
||||
int vlan_id;
|
||||
int vlan_id = 0;
|
||||
|
||||
BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
|
||||
if (fip_mode != FIP_MODE_FABRIC) {
|
||||
|
@ -2084,14 +2084,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
|
|||
}
|
||||
|
||||
/* obtain physical netdev */
|
||||
if (netdev->priv_flags & IFF_802_1Q_VLAN) {
|
||||
if (netdev->priv_flags & IFF_802_1Q_VLAN)
|
||||
phys_dev = vlan_dev_real_dev(netdev);
|
||||
vlan_id = vlan_dev_vlan_id(netdev);
|
||||
} else {
|
||||
printk(KERN_ERR PFX "Not a vlan device\n");
|
||||
rc = -EINVAL;
|
||||
goto netdev_err;
|
||||
}
|
||||
|
||||
/* verify if the physical device is a netxtreme2 device */
|
||||
if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
|
||||
memset(&drvinfo, 0, sizeof(drvinfo));
|
||||
|
@ -2126,9 +2121,13 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
|
|||
goto ifput_err;
|
||||
}
|
||||
|
||||
if (netdev->priv_flags & IFF_802_1Q_VLAN) {
|
||||
vlan_id = vlan_dev_vlan_id(netdev);
|
||||
interface->vlan_enabled = 1;
|
||||
}
|
||||
|
||||
ctlr = bnx2fc_to_ctlr(interface);
|
||||
interface->vlan_id = vlan_id;
|
||||
interface->vlan_enabled = 1;
|
||||
|
||||
interface->timer_work_queue =
|
||||
create_singlethread_workqueue("bnx2fc_timer_wq");
|
||||
|
@ -2195,13 +2194,10 @@ mod_err:
|
|||
**/
|
||||
static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
|
||||
{
|
||||
struct list_head *list;
|
||||
struct list_head *temp;
|
||||
struct bnx2fc_hba *hba;
|
||||
|
||||
/* Called with bnx2fc_dev_lock held */
|
||||
list_for_each_safe(list, temp, &adapter_list) {
|
||||
hba = (struct bnx2fc_hba *)list;
|
||||
list_for_each_entry(hba, &adapter_list, list) {
|
||||
if (hba->cnic == cnic)
|
||||
return hba;
|
||||
}
|
||||
|
@ -2295,15 +2291,17 @@ static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
|
|||
|
||||
static bool bnx2fc_match(struct net_device *netdev)
|
||||
{
|
||||
mutex_lock(&bnx2fc_dev_lock);
|
||||
if (netdev->priv_flags & IFF_802_1Q_VLAN) {
|
||||
struct net_device *phys_dev = vlan_dev_real_dev(netdev);
|
||||
struct net_device *phys_dev = netdev;
|
||||
|
||||
if (bnx2fc_hba_lookup(phys_dev)) {
|
||||
mutex_unlock(&bnx2fc_dev_lock);
|
||||
return true;
|
||||
}
|
||||
mutex_lock(&bnx2fc_dev_lock);
|
||||
if (netdev->priv_flags & IFF_802_1Q_VLAN)
|
||||
phys_dev = vlan_dev_real_dev(netdev);
|
||||
|
||||
if (bnx2fc_hba_lookup(phys_dev)) {
|
||||
mutex_unlock(&bnx2fc_dev_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
mutex_unlock(&bnx2fc_dev_lock);
|
||||
return false;
|
||||
}
|
||||
|
@ -2333,9 +2331,9 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
|
|||
|
||||
p = &per_cpu(bnx2fc_percpu, cpu);
|
||||
|
||||
thread = kthread_create(bnx2fc_percpu_io_thread,
|
||||
(void *)p,
|
||||
"bnx2fc_thread/%d", cpu);
|
||||
thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
|
||||
(void *)p, cpu_to_node(cpu),
|
||||
"bnx2fc_thread/%d", cpu);
|
||||
/* bind thread to the cpu */
|
||||
if (likely(!IS_ERR(thread))) {
|
||||
kthread_bind(thread, cpu);
|
||||
|
|
|
@ -1244,7 +1244,9 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
|
|||
if (disable_kcqe->completion_status) {
|
||||
printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
|
||||
disable_kcqe->completion_status);
|
||||
return;
|
||||
set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
|
||||
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
|
||||
wake_up_interruptible(&tgt->upld_wait);
|
||||
} else {
|
||||
/* disable successful */
|
||||
BNX2FC_TGT_DBG(tgt, "disable successful\n");
|
||||
|
|
|
@ -405,11 +405,10 @@ free_cmd_pool:
|
|||
goto free_cmgr;
|
||||
|
||||
for (i = 0; i < num_possible_cpus() + 1; i++) {
|
||||
struct list_head *list;
|
||||
struct list_head *tmp;
|
||||
struct bnx2fc_cmd *tmp, *io_req;
|
||||
|
||||
list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
|
||||
struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
|
||||
list_for_each_entry_safe(io_req, tmp,
|
||||
&cmgr->free_list[i], link) {
|
||||
list_del(&io_req->link);
|
||||
kfree(io_req);
|
||||
}
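Several hunks in bnx2fc_io.c and bnx2fc_tgt.c replace list_for_each_safe() plus a cast with list_for_each_entry_safe(), which resolves the containing structure via container_of() instead of assuming the list_head is the first member. A self-contained sketch with a minimal intrusive list; the list helpers below are simplified stand-ins for <linux/list.h>:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}
static void list_del(struct list_head *n)
{
        n->prev->next = n->next; n->next->prev = n->prev;
}

/* Entry-based safe iteration: 'tmp' is fetched before the body runs,
 * so the current entry may be unlinked and freed. */
#define list_for_each_entry_safe(pos, tmp, head, member)                      \
        for (pos = container_of((head)->next, __typeof__(*pos), member),      \
             tmp = container_of(pos->member.next, __typeof__(*pos), member);  \
             &pos->member != (head);                                          \
             pos = tmp,                                                       \
             tmp = container_of(tmp->member.next, __typeof__(*tmp), member))

struct cmd { int xid; struct list_head link; };

int main(void)
{
        struct list_head free_list;
        struct cmd *c, *tmp;

        list_init(&free_list);
        for (int i = 0; i < 3; i++) {
                c = malloc(sizeof(*c));
                c->xid = i;
                list_add_tail(&c->link, &free_list);
        }

        /* Safe teardown: the cast-based form only worked while the
         * list_head happened to sit at offset zero in the structure. */
        list_for_each_entry_safe(c, tmp, &free_list, link) {
                printf("freeing xid %d\n", c->xid);
                list_del(&c->link);
                free(c);
        }
        return 0;
}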
@ -1436,9 +1435,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
|
|||
{
|
||||
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
|
||||
struct bnx2fc_rport *tgt = io_req->tgt;
|
||||
struct list_head *list;
|
||||
struct list_head *tmp;
|
||||
struct bnx2fc_cmd *cmd;
|
||||
struct bnx2fc_cmd *cmd, *tmp;
|
||||
int tm_lun = sc_cmd->device->lun;
|
||||
int rc = 0;
|
||||
int lun;
|
||||
|
@ -1449,9 +1446,8 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
|
|||
* Walk thru the active_ios queue and ABORT the IO
|
||||
* that matches with the LUN that was reset
|
||||
*/
|
||||
list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
|
||||
list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
|
||||
BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
|
||||
cmd = (struct bnx2fc_cmd *)list;
|
||||
lun = cmd->sc_cmd->device->lun;
|
||||
if (lun == tm_lun) {
|
||||
/* Initiate ABTS on this cmd */
|
||||
|
@ -1476,9 +1472,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
|
|||
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
|
||||
{
|
||||
struct bnx2fc_rport *tgt = io_req->tgt;
|
||||
struct list_head *list;
|
||||
struct list_head *tmp;
|
||||
struct bnx2fc_cmd *cmd;
|
||||
struct bnx2fc_cmd *cmd, *tmp;
|
||||
int rc = 0;
|
||||
|
||||
/* called with tgt_lock held */
|
||||
|
@ -1487,9 +1481,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
|
|||
* Walk thru the active_ios queue and ABORT the IO
|
||||
* that matches with the LUN that was reset
|
||||
*/
|
||||
list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
|
||||
list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
|
||||
BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
|
||||
cmd = (struct bnx2fc_cmd *)list;
|
||||
/* Initiate ABTS */
|
||||
if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
|
||||
&cmd->req_flags)) {
|
||||
|
@ -1980,7 +1973,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
|
|||
struct bnx2fc_interface *interface = port->priv;
|
||||
struct bnx2fc_hba *hba = interface->hba;
|
||||
struct fc_lport *lport = port->lport;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
int task_idx, index;
|
||||
u16 xid;
|
||||
|
||||
|
@ -1991,7 +1984,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
|
|||
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
|
||||
sc_cmd->SCp.ptr = (char *)io_req;
|
||||
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
|
||||
io_req->io_req_flags = BNX2FC_READ;
|
||||
stats->InputRequests++;
|
||||
|
|
|
@ -150,8 +150,7 @@ tgt_init_err:
|
|||
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
|
||||
{
|
||||
struct bnx2fc_cmd *io_req;
|
||||
struct list_head *list;
|
||||
struct list_head *tmp;
|
||||
struct bnx2fc_cmd *tmp;
|
||||
int rc;
|
||||
int i = 0;
|
||||
BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
|
||||
|
@ -160,9 +159,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
|
|||
spin_lock_bh(&tgt->tgt_lock);
|
||||
tgt->flush_in_prog = 1;
|
||||
|
||||
list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
|
||||
list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
|
||||
i++;
|
||||
io_req = (struct bnx2fc_cmd *)list;
|
||||
list_del_init(&io_req->link);
|
||||
io_req->on_active_queue = 0;
|
||||
BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
|
||||
|
@ -181,13 +179,18 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
|
|||
|
||||
set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
|
||||
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
|
||||
rc = bnx2fc_initiate_cleanup(io_req);
|
||||
BUG_ON(rc);
|
||||
|
||||
/* Do not issue cleanup when disable request failed */
|
||||
if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
|
||||
bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
|
||||
else {
|
||||
rc = bnx2fc_initiate_cleanup(io_req);
|
||||
BUG_ON(rc);
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
|
||||
list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
|
||||
i++;
|
||||
io_req = (struct bnx2fc_cmd *)list;
|
||||
list_del_init(&io_req->link);
|
||||
io_req->on_tmf_queue = 0;
|
||||
BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
|
||||
|
@ -195,9 +198,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
|
|||
complete(&io_req->tm_done);
|
||||
}
|
||||
|
||||
list_for_each_safe(list, tmp, &tgt->els_queue) {
|
||||
list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
|
||||
i++;
|
||||
io_req = (struct bnx2fc_cmd *)list;
|
||||
list_del_init(&io_req->link);
|
||||
io_req->on_active_queue = 0;
|
||||
|
||||
|
@ -212,13 +214,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
|
|||
io_req->cb_arg = NULL;
|
||||
}
|
||||
|
||||
rc = bnx2fc_initiate_cleanup(io_req);
|
||||
BUG_ON(rc);
|
||||
/* Do not issue cleanup when disable request failed */
|
||||
if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
|
||||
bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
|
||||
else {
|
||||
rc = bnx2fc_initiate_cleanup(io_req);
|
||||
BUG_ON(rc);
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
|
||||
list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
|
||||
i++;
|
||||
io_req = (struct bnx2fc_cmd *)list;
|
||||
list_del_init(&io_req->link);
|
||||
|
||||
BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
|
||||
|
@ -321,9 +327,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
|
|||
|
||||
del_timer_sync(&tgt->upld_timer);
|
||||
|
||||
} else
|
||||
} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
|
||||
printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
|
||||
" not sent to FW\n");
|
||||
} else {
|
||||
printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
|
||||
" not sent to FW\n");
|
||||
}
|
||||
|
||||
/* Free session resources */
|
||||
bnx2fc_free_session_resc(hba, tgt);
|
||||
|
|
|
@ -438,8 +438,8 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
|
|||
if (submode)
|
||||
wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
|
||||
FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
|
||||
req->tunnel_to_proxy = htonl(wr_ulp_mode) |
|
||||
FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1);
|
||||
req->tunnel_to_proxy = htonl(wr_ulp_mode |
|
||||
FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
|
||||
req->plen = htonl(len);
|
||||
if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
|
||||
cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
|
||||
|
|
|
@ -468,7 +468,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
|
|||
|
||||
/*
|
||||
* scsi_dh_attach - Attach device handler
|
||||
* @sdev - sdev the handler should be attached to
|
||||
* @q - Request queue that is associated with the scsi_device
|
||||
* the handler should be attached to
|
||||
* @name - name of the handler to attach
|
||||
*/
|
||||
int scsi_dh_attach(struct request_queue *q, const char *name)
|
||||
|
@ -498,7 +499,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_attach);
|
|||
|
||||
/*
|
||||
* scsi_dh_detach - Detach device handler
|
||||
* @sdev - sdev the handler should be detached from
|
||||
* @q - Request queue that is associated with the scsi_device
|
||||
* the handler should be detached from
|
||||
*
|
||||
* This function will detach the device handler only
|
||||
* if the sdev is not part of the internal list, ie
|
||||
|
@ -527,6 +529,38 @@ void scsi_dh_detach(struct request_queue *q)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(scsi_dh_detach);
|
||||
|
||||
/*
|
||||
* scsi_dh_attached_handler_name - Get attached device handler's name
|
||||
* @q - Request queue that is associated with the scsi_device
|
||||
* that may have a device handler attached
|
||||
* @gfp - the GFP mask used in the kmalloc() call when allocating memory
|
||||
*
|
||||
* Returns name of attached handler, NULL if no handler is attached.
|
||||
* Caller must take care to free the returned string.
|
||||
*/
|
||||
const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct scsi_device *sdev;
|
||||
const char *handler_name = NULL;
|
||||
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
sdev = q->queuedata;
|
||||
if (!sdev || !get_device(&sdev->sdev_gendev))
|
||||
sdev = NULL;
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
if (!sdev)
|
||||
return NULL;
|
||||
|
||||
if (sdev->scsi_dh_data)
|
||||
handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp);
|
||||
|
||||
put_device(&sdev->sdev_gendev);
|
||||
return handler_name;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
|
||||
|
||||
static struct notifier_block scsi_dh_nb = {
|
||||
.notifier_call = scsi_dh_notifier
|
||||
};
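scsi_dh_attached_handler_name() above hands back a kstrdup()'d copy that the caller owns. A user-space sketch of the intended calling convention, with strdup()/free() standing in for kstrdup()/kfree(); the helper name and handler string here are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for scsi_dh_attached_handler_name(): returns a heap copy of the
 * attached handler's name, or NULL if none is attached.  Ownership of the
 * string passes to the caller, exactly as with the kstrdup()'d kernel value. */
static char *attached_handler_name(const char *current_handler)
{
        return current_handler ? strdup(current_handler) : NULL;
}

int main(void)
{
        char *name = attached_handler_name("alua");

        if (name) {
                printf("device handler already attached: %s\n", name);
                free(name);             /* caller must free, as with kfree() */
        } else {
                printf("no handler attached, ok to pick one\n");
        }
        return 0;
}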
@ -46,13 +46,16 @@
|
|||
#define TPGS_SUPPORT_OFFLINE 0x40
|
||||
#define TPGS_SUPPORT_TRANSITION 0x80
|
||||
|
||||
#define RTPG_FMT_MASK 0x70
|
||||
#define RTPG_FMT_EXT_HDR 0x10
|
||||
|
||||
#define TPGS_MODE_UNINITIALIZED -1
|
||||
#define TPGS_MODE_NONE 0x0
|
||||
#define TPGS_MODE_IMPLICIT 0x1
|
||||
#define TPGS_MODE_EXPLICIT 0x2
|
||||
|
||||
#define ALUA_INQUIRY_SIZE 36
|
||||
#define ALUA_FAILOVER_TIMEOUT (60 * HZ)
|
||||
#define ALUA_FAILOVER_TIMEOUT 60
|
||||
#define ALUA_FAILOVER_RETRIES 5
|
||||
|
||||
/* flags passed from user level */
|
||||
|
@ -68,6 +71,7 @@ struct alua_dh_data {
|
|||
unsigned char inq[ALUA_INQUIRY_SIZE];
|
||||
unsigned char *buff;
|
||||
int bufflen;
|
||||
unsigned char transition_tmo;
|
||||
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
|
||||
int senselen;
|
||||
struct scsi_device *sdev;
|
||||
|
@ -128,7 +132,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
|
|||
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
|
||||
REQ_FAILFAST_DRIVER;
|
||||
rq->retries = ALUA_FAILOVER_RETRIES;
|
||||
rq->timeout = ALUA_FAILOVER_TIMEOUT;
|
||||
rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ;
|
||||
|
||||
return rq;
|
||||
}
|
||||
|
@ -174,7 +178,8 @@ done:
|
|||
* submit_rtpg - Issue a REPORT TARGET GROUP STATES command
|
||||
* @sdev: sdev the command should be sent to
|
||||
*/
|
||||
static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
|
||||
static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h,
|
||||
bool rtpg_ext_hdr_req)
|
||||
{
|
||||
struct request *rq;
|
||||
int err = SCSI_DH_RES_TEMP_UNAVAIL;
|
||||
|
@ -185,7 +190,10 @@ static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
|
|||
|
||||
/* Prepare the command. */
|
||||
rq->cmd[0] = MAINTENANCE_IN;
|
||||
rq->cmd[1] = MI_REPORT_TARGET_PGS;
|
||||
if (rtpg_ext_hdr_req)
|
||||
rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
|
||||
else
|
||||
rq->cmd[1] = MI_REPORT_TARGET_PGS;
|
||||
rq->cmd[6] = (h->bufflen >> 24) & 0xff;
|
||||
rq->cmd[7] = (h->bufflen >> 16) & 0xff;
|
||||
rq->cmd[8] = (h->bufflen >> 8) & 0xff;
|
||||
|
@ -518,11 +526,18 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
|
|||
int len, k, off, valid_states = 0;
|
||||
unsigned char *ucp;
|
||||
unsigned err;
|
||||
unsigned long expiry, interval = 1000;
|
||||
bool rtpg_ext_hdr_req = 1;
|
||||
unsigned long expiry, interval = 0;
|
||||
unsigned int tpg_desc_tbl_off;
|
||||
unsigned char orig_transition_tmo;
|
||||
|
||||
if (!h->transition_tmo)
|
||||
expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
|
||||
else
|
||||
expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);
|
||||
|
||||
expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
|
||||
retry:
|
||||
err = submit_rtpg(sdev, h);
|
||||
err = submit_rtpg(sdev, h, rtpg_ext_hdr_req);
|
||||
|
||||
if (err == SCSI_DH_IO && h->senselen > 0) {
|
||||
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
|
||||
|
@ -530,6 +545,21 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
|
|||
if (!err)
|
||||
return SCSI_DH_IO;
|
||||
|
||||
/*
|
||||
* submit_rtpg() has failed on existing arrays
|
||||
* when requesting extended header info, and
|
||||
* the array doesn't support extended headers,
|
||||
* even though it shouldn't according to T10.
|
||||
* The retry without rtpg_ext_hdr_req set
|
||||
* handles this.
|
||||
*/
|
||||
if (rtpg_ext_hdr_req == 1 &&
|
||||
sense_hdr.sense_key == ILLEGAL_REQUEST &&
|
||||
sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
|
||||
rtpg_ext_hdr_req = 0;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
err = alua_check_sense(sdev, &sense_hdr);
|
||||
if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry))
|
||||
goto retry;
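The comment above documents the fallback: arrays that reject the extended-header RTPG with ILLEGAL REQUEST / INVALID FIELD IN CDB get one retry in the legacy format. A compact sketch of that decision, where do_rtpg() is a hypothetical callback used only to make the example runnable:

#include <stdbool.h>
#include <stdio.h>

#define ILLEGAL_REQUEST 0x05
/* ASC/ASCQ 0x24/0x00 = INVALID FIELD IN CDB */

struct sense { unsigned char key, asc, ascq; };

/* Issue RTPG, first with the extended-header bit set; if the target rejects
 * the CDB, fall back to the plain format once rather than failing the path. */
static int rtpg_with_fallback(int (*do_rtpg)(bool ext_hdr, struct sense *s))
{
        struct sense s = { 0 };
        bool ext_hdr = true;

retry:
        if (do_rtpg(ext_hdr, &s) == 0)
                return 0;

        if (ext_hdr && s.key == ILLEGAL_REQUEST &&
            s.asc == 0x24 && s.ascq == 0x00) {
                ext_hdr = false;        /* array predates the extended format */
                goto retry;
        }
        return -1;
}

/* Fake target that only understands the legacy format. */
static int legacy_rtpg(bool ext_hdr, struct sense *s)
{
        if (ext_hdr) {
                s->key = ILLEGAL_REQUEST; s->asc = 0x24; s->ascq = 0x00;
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("rtpg: %s\n", rtpg_with_fallback(legacy_rtpg) ? "failed" : "ok");
        return 0;
}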
@ -556,7 +586,28 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
|
|||
goto retry;
|
||||
}
|
||||
|
||||
for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
|
||||
orig_transition_tmo = h->transition_tmo;
|
||||
if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
|
||||
h->transition_tmo = h->buff[5];
|
||||
else
|
||||
h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
|
||||
|
||||
if (orig_transition_tmo != h->transition_tmo) {
|
||||
sdev_printk(KERN_INFO, sdev,
|
||||
"%s: transition timeout set to %d seconds\n",
|
||||
ALUA_DH_NAME, h->transition_tmo);
|
||||
expiry = jiffies + h->transition_tmo * HZ;
|
||||
}
|
||||
|
||||
if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
|
||||
tpg_desc_tbl_off = 8;
|
||||
else
|
||||
tpg_desc_tbl_off = 4;
|
||||
|
||||
for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
|
||||
k < len;
|
||||
k += off, ucp += off) {
|
||||
|
||||
if (h->group_id == (ucp[2] << 8) + ucp[3]) {
|
||||
h->state = ucp[0] & 0x0f;
|
||||
h->pref = ucp[0] >> 7;
|
||||
|
@ -581,7 +632,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
|
|||
case TPGS_STATE_TRANSITIONING:
|
||||
if (time_before(jiffies, expiry)) {
|
||||
/* State transition, retry */
|
||||
interval *= 2;
|
||||
interval += 2000;
|
||||
msleep(interval);
|
||||
goto retry;
|
||||
}
|
||||
|
@ -691,9 +742,9 @@ static int alua_activate(struct scsi_device *sdev,
|
|||
stpg = 0;
|
||||
break;
|
||||
case TPGS_STATE_STANDBY:
|
||||
case TPGS_STATE_UNAVAILABLE:
|
||||
stpg = 1;
|
||||
break;
|
||||
case TPGS_STATE_UNAVAILABLE:
|
||||
case TPGS_STATE_OFFLINE:
|
||||
err = SCSI_DH_IO;
|
||||
break;
|
||||
|
|
|
@ -1529,7 +1529,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
|
|||
|
||||
return 0;
|
||||
err:
|
||||
per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
|
||||
per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
|
||||
put_cpu();
|
||||
err2:
|
||||
kfree_skb(skb);
|
||||
|
@ -1569,7 +1569,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
|
|||
struct ethhdr *eh;
|
||||
struct fcoe_crc_eof *cp;
|
||||
struct sk_buff *skb;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
struct fc_frame_header *fh;
|
||||
unsigned int hlen; /* header length implies the version */
|
||||
unsigned int tlen; /* trailer length */
|
||||
|
@ -1680,7 +1680,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
|
|||
skb_shinfo(skb)->gso_size = 0;
|
||||
}
|
||||
/* update tx stats: regardless if LLD fails */
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
stats->TxFrames++;
|
||||
stats->TxWords += wlen;
|
||||
put_cpu();
|
||||
|
@ -1714,7 +1714,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
|
|||
struct fcoe_interface *fcoe;
|
||||
struct fc_frame_header *fh;
|
||||
struct sk_buff *skb = (struct sk_buff *)fp;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
|
||||
/*
|
||||
* We only check CRC if no offload is available and if it is
|
||||
|
@ -1745,7 +1745,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
|
|||
return 0;
|
||||
}
|
||||
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
stats->InvalidCRCCount++;
|
||||
if (stats->InvalidCRCCount < 5)
|
||||
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
|
||||
|
@ -1762,7 +1762,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
|
|||
u32 fr_len;
|
||||
struct fc_lport *lport;
|
||||
struct fcoe_rcv_info *fr;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
struct fcoe_crc_eof crc_eof;
|
||||
struct fc_frame *fp;
|
||||
struct fcoe_port *port;
|
||||
|
@ -1793,7 +1793,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
|
|||
*/
|
||||
hp = (struct fcoe_hdr *) skb_network_header(skb);
|
||||
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
|
||||
if (stats->ErrorFrames < 5)
|
||||
printk(KERN_WARNING "fcoe: FCoE version "
|
||||
|
@ -1851,23 +1851,25 @@ static int fcoe_percpu_receive_thread(void *arg)
|
|||
|
||||
set_user_nice(current, -20);
|
||||
|
||||
retry:
|
||||
while (!kthread_should_stop()) {
|
||||
|
||||
spin_lock_bh(&p->fcoe_rx_list.lock);
|
||||
skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
|
||||
|
||||
if (!skb_queue_len(&tmp)) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
spin_unlock_bh(&p->fcoe_rx_list.lock);
|
||||
schedule();
|
||||
set_current_state(TASK_RUNNING);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
spin_unlock_bh(&p->fcoe_rx_list.lock);
|
||||
|
||||
while ((skb = __skb_dequeue(&tmp)) != NULL)
|
||||
fcoe_recv_frame(skb);
|
||||
|
||||
spin_lock_bh(&p->fcoe_rx_list.lock);
|
||||
if (!skb_queue_len(&p->fcoe_rx_list)) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
spin_unlock_bh(&p->fcoe_rx_list.lock);
|
||||
schedule();
|
||||
set_current_state(TASK_RUNNING);
|
||||
} else
|
||||
spin_unlock_bh(&p->fcoe_rx_list.lock);
|
||||
}
|
||||
return 0;
|
||||
}
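The reworked receive thread above splices the whole pending queue into a private list under one lock acquisition and processes the batch unlocked, only sleeping when the shared queue is empty. A user-space sketch of the splice-then-process shape, with a plain array queue standing in for the sk_buff list:

#include <pthread.h>
#include <stdio.h>

#define QMAX 16

struct queue {
        pthread_mutex_t lock;
        int items[QMAX];
        int len;
};

/* Take the lock once, move every queued item to a private batch (like
 * skb_queue_splice_init()), drop the lock, then process without holding it. */
static int drain_batch(struct queue *q)
{
        int batch[QMAX];
        int n;

        pthread_mutex_lock(&q->lock);
        n = q->len;
        for (int i = 0; i < n; i++)
                batch[i] = q->items[i];
        q->len = 0;                     /* queue is empty; producers may refill */
        pthread_mutex_unlock(&q->lock);

        for (int i = 0; i < n; i++)     /* process outside the lock */
                printf("handled frame %d\n", batch[i]);

        return n;                       /* 0 means: go back to sleep */
}

int main(void)
{
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .len = 3,
                           .items = { 10, 11, 12 } };

        while (drain_batch(&q) > 0)
                ;                       /* a real thread would schedule() when empty */
        return 0;
}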
@ -1970,7 +1972,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
|
|||
struct fcoe_ctlr *ctlr;
|
||||
struct fcoe_interface *fcoe;
|
||||
struct fcoe_port *port;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
u32 link_possible = 1;
|
||||
u32 mfs;
|
||||
int rc = NOTIFY_OK;
|
||||
|
@ -2024,7 +2026,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
|
|||
if (link_possible && !fcoe_link_ok(lport))
|
||||
fcoe_ctlr_link_up(ctlr);
|
||||
else if (fcoe_ctlr_link_down(ctlr)) {
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
stats->LinkFailureCount++;
|
||||
put_cpu();
|
||||
fcoe_clean_pending_queue(lport);
|
||||
|
|
|
@ -788,11 +788,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
|
|||
unsigned long deadline;
|
||||
unsigned long sel_time = 0;
|
||||
struct list_head del_list;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
|
||||
INIT_LIST_HEAD(&del_list);
|
||||
|
||||
stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(fip->lp->stats, get_cpu());
|
||||
|
||||
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
|
||||
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
|
||||
|
@ -1104,8 +1104,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
|
|||
struct fc_frame_header *fh = NULL;
|
||||
struct fip_desc *desc;
|
||||
struct fip_encaps *els;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fcoe_fcf *sel;
|
||||
struct fc_stats *stats;
|
||||
enum fip_desc_type els_dtype = 0;
|
||||
u8 els_op;
|
||||
u8 sub;
|
||||
|
@ -1249,7 +1249,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
|
|||
fr_dev(fp) = lport;
|
||||
fr_encaps(fp) = els_dtype;
|
||||
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
stats->RxFrames++;
|
||||
stats->RxWords += skb->len / FIP_BPW;
|
||||
put_cpu();
|
||||
|
@ -1353,7 +1353,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
|
|||
ntoh24(vp->fd_fc_id));
|
||||
if (vn_port && (vn_port == lport)) {
|
||||
mutex_lock(&fip->ctlr_mutex);
|
||||
per_cpu_ptr(lport->dev_stats,
|
||||
per_cpu_ptr(lport->stats,
|
||||
get_cpu())->VLinkFailureCount++;
|
||||
put_cpu();
|
||||
fcoe_ctlr_reset(fip);
|
||||
|
@ -1383,8 +1383,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
|
|||
* followed by physical port
|
||||
*/
|
||||
mutex_lock(&fip->ctlr_mutex);
|
||||
per_cpu_ptr(lport->dev_stats,
|
||||
get_cpu())->VLinkFailureCount++;
|
||||
per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
|
||||
put_cpu();
|
||||
fcoe_ctlr_reset(fip);
|
||||
mutex_unlock(&fip->ctlr_mutex);
|
||||
|
|
|
@ -102,7 +102,7 @@ static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
|
|||
int ret;
|
||||
|
||||
ret = kstrtoul(buf, 0, val);
|
||||
if (ret || *val < 0)
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
/*
|
||||
* Check for overflow; dev_loss_tmo is u32
|
||||
|
|
|
@ -89,7 +89,7 @@ void __fcoe_get_lesb(struct fc_lport *lport,
|
|||
{
|
||||
unsigned int cpu;
|
||||
u32 lfc, vlfc, mdac;
|
||||
struct fcoe_dev_stats *devst;
|
||||
struct fc_stats *stats;
|
||||
struct fcoe_fc_els_lesb *lesb;
|
||||
struct rtnl_link_stats64 temp;
|
||||
|
||||
|
@ -99,10 +99,10 @@ void __fcoe_get_lesb(struct fc_lport *lport,
|
|||
lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
|
||||
memset(lesb, 0, sizeof(*lesb));
|
||||
for_each_possible_cpu(cpu) {
|
||||
devst = per_cpu_ptr(lport->dev_stats, cpu);
|
||||
lfc += devst->LinkFailureCount;
|
||||
vlfc += devst->VLinkFailureCount;
|
||||
mdac += devst->MissDiscAdvCount;
|
||||
stats = per_cpu_ptr(lport->stats, cpu);
|
||||
lfc += stats->LinkFailureCount;
|
||||
vlfc += stats->VLinkFailureCount;
|
||||
mdac += stats->MissDiscAdvCount;
|
||||
}
|
||||
lesb->lesb_link_fail = htonl(lfc);
|
||||
lesb->lesb_vlink_fail = htonl(vlfc);
|
||||
|
@ -502,7 +502,7 @@ static int __init fcoe_transport_init(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int __exit fcoe_transport_exit(void)
|
||||
static int fcoe_transport_exit(void)
|
||||
{
|
||||
struct fcoe_transport *ft;
|
||||
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
#include "scsi_logging.h"
|
||||
|
||||
|
||||
static atomic_t scsi_host_next_hn; /* host_no for next new host */
|
||||
static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
|
||||
|
||||
|
||||
static void scsi_host_cls_release(struct device *dev)
|
||||
|
@ -290,6 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
|
|||
struct Scsi_Host *shost = dev_to_shost(dev);
|
||||
struct device *parent = dev->parent;
|
||||
struct request_queue *q;
|
||||
void *queuedata;
|
||||
|
||||
scsi_proc_hostdir_rm(shost->hostt);
|
||||
|
||||
|
@ -299,9 +300,9 @@ static void scsi_host_dev_release(struct device *dev)
|
|||
destroy_workqueue(shost->work_q);
|
||||
q = shost->uspace_req_q;
|
||||
if (q) {
|
||||
kfree(q->queuedata);
|
||||
q->queuedata = NULL;
|
||||
scsi_free_queue(q);
|
||||
queuedata = q->queuedata;
|
||||
blk_cleanup_queue(q);
|
||||
kfree(queuedata);
|
||||
}
|
||||
|
||||
scsi_destroy_command_freelist(shost);
|
||||
|
|
|
@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
|
|||
|
||||
static char driver_name[] = "hptiop";
|
||||
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
|
||||
static const char driver_ver[] = "v1.6 (090910)";
|
||||
static const char driver_ver[] = "v1.6 (091225)";
|
||||
|
||||
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
|
||||
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
|
||||
|
@ -958,6 +958,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
|
|||
{
|
||||
struct Scsi_Host *host = NULL;
|
||||
struct hptiop_hba *hba;
|
||||
struct hptiop_adapter_ops *iop_ops;
|
||||
struct hpt_iop_request_get_config iop_config;
|
||||
struct hpt_iop_request_set_config set_config;
|
||||
dma_addr_t start_phy;
|
||||
|
@ -978,7 +979,8 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
|
|||
pci_set_master(pcidev);
|
||||
|
||||
/* Enable 64bit DMA if possible */
|
||||
if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
|
||||
iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
|
||||
if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
|
||||
if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
|
||||
printk(KERN_ERR "hptiop: fail to set dma_mask\n");
|
||||
goto disable_pci_device;
|
||||
|
@ -998,7 +1000,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
|
|||
|
||||
hba = (struct hptiop_hba *)host->hostdata;
|
||||
|
||||
hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
|
||||
hba->ops = iop_ops;
|
||||
hba->pcidev = pcidev;
|
||||
hba->host = host;
|
||||
hba->initialized = 0;
|
||||
|
@ -1239,6 +1241,7 @@ static struct hptiop_adapter_ops hptiop_itl_ops = {
|
|||
.iop_intr = iop_intr_itl,
|
||||
.post_msg = hptiop_post_msg_itl,
|
||||
.post_req = hptiop_post_req_itl,
|
||||
.hw_dma_bit_mask = 64,
|
||||
};
|
||||
|
||||
static struct hptiop_adapter_ops hptiop_mv_ops = {
|
||||
|
@ -1254,6 +1257,7 @@ static struct hptiop_adapter_ops hptiop_mv_ops = {
|
|||
.iop_intr = iop_intr_mv,
|
||||
.post_msg = hptiop_post_msg_mv,
|
||||
.post_req = hptiop_post_req_mv,
|
||||
.hw_dma_bit_mask = 33,
|
||||
};
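With hw_dma_bit_mask in the ops table, the probe path can request the addressing width each adapter family actually supports (64-bit for ITL, 33-bit for MV) and still fall back to 32-bit. A sketch of that selection logic with a fake set_dma_mask(); the 48-bit platform limit below is invented purely to exercise the fallback:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

struct adapter_ops { const char *name; int hw_dma_bit_mask; };

/* Stand-in for pci_set_dma_mask(): pretend the platform only supports
 * up to 48 address bits, so a 64-bit request fails and we fall back. */
static int set_dma_mask(uint64_t mask)
{
        return mask > DMA_BIT_MASK(48) ? -1 : 0;
}

static int setup_dma(const struct adapter_ops *ops)
{
        /* Ask for what the adapter can address, not a hard-coded 64 bits. */
        if (set_dma_mask(DMA_BIT_MASK(ops->hw_dma_bit_mask)) == 0)
                return ops->hw_dma_bit_mask;
        if (set_dma_mask(DMA_BIT_MASK(32)) == 0)
                return 32;
        return -1;
}

int main(void)
{
        struct adapter_ops itl = { "itl", 64 }, mv = { "mv", 33 };

        printf("%s: %d-bit DMA\n", itl.name, setup_dma(&itl));
        printf("%s: %d-bit DMA\n", mv.name, setup_dma(&mv));
        return 0;
}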
static struct pci_device_id hptiop_id_table[] = {
|
||||
|
|
|
@ -297,6 +297,7 @@ struct hptiop_adapter_ops {
|
|||
int (*iop_intr)(struct hptiop_hba *hba);
|
||||
void (*post_msg)(struct hptiop_hba *hba, u32 msg);
|
||||
void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
|
||||
int hw_dma_bit_mask;
|
||||
};
|
||||
|
||||
#define HPT_IOCTL_RESULT_OK 0
|
||||
|
|
|
@ -166,6 +166,9 @@ static struct scsi_host_template isci_sht = {
|
|||
.sg_tablesize = SG_ALL,
|
||||
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.eh_abort_handler = sas_eh_abort_handler,
|
||||
.eh_device_reset_handler = sas_eh_device_reset_handler,
|
||||
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
|
||||
.target_destroy = sas_target_destroy,
|
||||
.ioctl = sas_ioctl,
|
||||
.shost_attrs = isci_host_attrs,
|
||||
|
|
|
@ -99,11 +99,6 @@ struct fc_exch_mgr {
|
|||
u16 max_xid;
|
||||
u16 pool_max_index;
|
||||
|
||||
/*
|
||||
* currently exchange mgr stats are updated but not used.
|
||||
* either stats can be expose via sysfs or remove them
|
||||
* all together if not used XXX
|
||||
*/
|
||||
struct {
|
||||
atomic_t no_free_exch;
|
||||
atomic_t no_free_exch_xid;
|
||||
|
@ -124,7 +119,7 @@ struct fc_exch_mgr {
|
|||
* for each anchor to determine if that EM should be used. The last
|
||||
* anchor in the list will always match to handle any exchanges not
|
||||
* handled by other EMs. The non-default EMs would be added to the
|
||||
* anchor list by HW that provides FCoE offloads.
|
||||
* anchor list by HW that provides offloads.
|
||||
*/
|
||||
struct fc_exch_mgr_anchor {
|
||||
struct list_head ema_list;
|
||||
|
@ -338,6 +333,52 @@ static void fc_exch_release(struct fc_exch *ep)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_exch_timer_cancel() - cancel exch timer
|
||||
* @ep: The exchange whose timer to be canceled
|
||||
*/
|
||||
static inline void fc_exch_timer_cancel(struct fc_exch *ep)
|
||||
{
|
||||
if (cancel_delayed_work(&ep->timeout_work)) {
|
||||
FC_EXCH_DBG(ep, "Exchange timer canceled\n");
|
||||
atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
|
||||
}
|
||||
}
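fc_exch_timer_cancel() above pairs the cancel with the reference drop in one place instead of repeating the two-step dance at each call site. A tiny sketch of why that pairing wants to be a helper, with a plain counter standing in for the atomic refcount:

#include <stdbool.h>
#include <stdio.h>

struct exch {
        int refcnt;
        bool timer_pending;
};

/* Fold "cancel the delayed work and, only if it was still pending, drop the
 * reference the timer held" into one helper, so callers cannot get the
 * pairing wrong. */
static void exch_timer_cancel(struct exch *ep)
{
        if (ep->timer_pending) {        /* cancel_delayed_work() returned true */
                ep->timer_pending = false;
                ep->refcnt--;           /* drop hold taken when the timer was armed */
                printf("timer canceled, refcnt=%d\n", ep->refcnt);
        }
}

int main(void)
{
        struct exch ep = { .refcnt = 2, .timer_pending = true };

        exch_timer_cancel(&ep);
        exch_timer_cancel(&ep);         /* second cancel is a harmless no-op */
        return 0;
}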
/**
|
||||
* fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
|
||||
* the exchange lock held
|
||||
* @ep: The exchange whose timer will start
|
||||
* @timer_msec: The timeout period
|
||||
*
|
||||
* Used for upper level protocols to time out the exchange.
|
||||
* The timer is cancelled when it fires or when the exchange completes.
|
||||
*/
|
||||
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
|
||||
unsigned int timer_msec)
|
||||
{
|
||||
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
|
||||
return;
|
||||
|
||||
FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
|
||||
|
||||
if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
|
||||
msecs_to_jiffies(timer_msec)))
|
||||
fc_exch_hold(ep); /* hold for timer */
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_exch_timer_set() - Lock the exchange and set the timer
|
||||
* @ep: The exchange whose timer will start
|
||||
* @timer_msec: The timeout period
|
||||
*/
|
||||
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
|
||||
{
|
||||
spin_lock_bh(&ep->ex_lock);
|
||||
fc_exch_timer_set_locked(ep, timer_msec);
|
||||
spin_unlock_bh(&ep->ex_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_exch_done_locked() - Complete an exchange with the exchange lock held
|
||||
* @ep: The exchange that is complete
|
||||
|
@ -359,8 +400,7 @@ static int fc_exch_done_locked(struct fc_exch *ep)
|
|||
|
||||
if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
|
||||
ep->state |= FC_EX_DONE;
|
||||
if (cancel_delayed_work(&ep->timeout_work))
|
||||
atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
|
||||
fc_exch_timer_cancel(ep);
|
||||
rc = 0;
|
||||
}
|
||||
return rc;
|
||||
|
@ -423,40 +463,6 @@ static void fc_exch_delete(struct fc_exch *ep)
|
|||
fc_exch_release(ep); /* drop hold for exch in mp */
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
|
||||
* the exchange lock held
|
||||
* @ep: The exchange whose timer will start
|
||||
* @timer_msec: The timeout period
|
||||
*
|
||||
* Used for upper level protocols to time out the exchange.
|
||||
* The timer is cancelled when it fires or when the exchange completes.
|
||||
*/
|
||||
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
|
||||
unsigned int timer_msec)
|
||||
{
|
||||
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
|
||||
return;
|
||||
|
||||
FC_EXCH_DBG(ep, "Exchange timer armed\n");
|
||||
|
||||
if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
|
||||
msecs_to_jiffies(timer_msec)))
|
||||
fc_exch_hold(ep); /* hold for timer */
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_exch_timer_set() - Lock the exchange and set the timer
|
||||
* @ep: The exchange whose timer will start
|
||||
* @timer_msec: The timeout period
|
||||
*/
|
||||
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
|
||||
{
|
||||
spin_lock_bh(&ep->ex_lock);
|
||||
fc_exch_timer_set_locked(ep, timer_msec);
|
||||
spin_unlock_bh(&ep->ex_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_seq_send() - Send a frame using existing sequence/exchange pair
|
||||
* @lport: The local port that the exchange will be sent on
|
||||
|
@ -986,7 +992,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
|
|||
/*
|
||||
* Update sequence_id based on incoming last
|
||||
* frame of sequence exchange. This is needed
|
||||
* for FCoE target where DDP has been used
|
||||
* for FC target where DDP has been used
|
||||
* on target where, stack is indicated only
|
||||
* about last frame's (payload _header) header.
|
||||
* Whereas "seq_id" which is part of
|
||||
|
@ -1549,8 +1555,10 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
|
|||
FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
|
||||
fc_exch_rctl_name(fh->fh_r_ctl));
|
||||
|
||||
if (cancel_delayed_work_sync(&ep->timeout_work))
|
||||
if (cancel_delayed_work_sync(&ep->timeout_work)) {
|
||||
FC_EXCH_DBG(ep, "Exchange timer canceled\n");
|
||||
fc_exch_release(ep); /* release from pending timer hold */
|
||||
}
|
||||
|
||||
spin_lock_bh(&ep->ex_lock);
|
||||
switch (fh->fh_r_ctl) {
|
||||
|
@ -1737,8 +1745,7 @@ static void fc_exch_reset(struct fc_exch *ep)
|
|||
spin_lock_bh(&ep->ex_lock);
|
||||
fc_exch_abort_locked(ep, 0);
|
||||
ep->state |= FC_EX_RST_CLEANUP;
|
||||
if (cancel_delayed_work(&ep->timeout_work))
|
||||
atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
|
||||
fc_exch_timer_cancel(ep);
|
||||
resp = ep->resp;
|
||||
ep->resp = NULL;
|
||||
if (ep->esb_stat & ESB_ST_REC_QUAL)
|
||||
|
@ -2133,10 +2140,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
|
|||
ep->esb_stat &= ~ESB_ST_REC_QUAL;
|
||||
atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
|
||||
}
|
||||
if (ep->esb_stat & ESB_ST_COMPLETE) {
|
||||
if (cancel_delayed_work(&ep->timeout_work))
|
||||
atomic_dec(&ep->ex_refcnt); /* drop timer hold */
|
||||
}
|
||||
if (ep->esb_stat & ESB_ST_COMPLETE)
|
||||
fc_exch_timer_cancel(ep);
|
||||
|
||||
spin_unlock_bh(&ep->ex_lock);
|
||||
|
||||
|
@ -2155,6 +2160,31 @@ out:
|
|||
fc_exch_release(ep); /* drop hold from fc_exch_find */
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_exch_update_stats() - update exches stats to lport
|
||||
* @lport: The local port to update exchange manager stats
|
||||
*/
|
||||
void fc_exch_update_stats(struct fc_lport *lport)
|
||||
{
|
||||
struct fc_host_statistics *st;
|
||||
struct fc_exch_mgr_anchor *ema;
|
||||
struct fc_exch_mgr *mp;
|
||||
|
||||
st = &lport->host_stats;
|
||||
|
||||
list_for_each_entry(ema, &lport->ema_list, ema_list) {
|
||||
mp = ema->mp;
|
||||
st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
|
||||
st->fc_no_free_exch_xid +=
|
||||
atomic_read(&mp->stats.no_free_exch_xid);
|
||||
st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
|
||||
st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
|
||||
st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
|
||||
st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(fc_exch_update_stats);
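fc_exch_update_stats() leaves the hot-path counters as per-manager atomics and only folds them into the host statistics when somebody asks for them. A C11-atomics sketch of the same read-side aggregation; the field list is trimmed to two counters for brevity:

#include <stdatomic.h>
#include <stdio.h>

struct em_stats { atomic_long no_free_exch; atomic_long xid_busy; };
struct host_stats { long fc_no_free_exch; long fc_xid_busy; };

/* Each exchange manager keeps its own atomic counters on the hot path;
 * only when statistics are read are they summed into per-host totals. */
static void update_host_stats(struct host_stats *st,
                              struct em_stats *ems, int nr_ems)
{
        for (int i = 0; i < nr_ems; i++) {
                st->fc_no_free_exch += atomic_load(&ems[i].no_free_exch);
                st->fc_xid_busy     += atomic_load(&ems[i].xid_busy);
        }
}

int main(void)
{
        struct em_stats ems[2] = { 0 };
        struct host_stats st = { 0 };

        atomic_fetch_add(&ems[0].no_free_exch, 3);
        atomic_fetch_add(&ems[1].xid_busy, 1);
        update_host_stats(&st, ems, 2);
        printf("no_free_exch=%ld xid_busy=%ld\n",
               st.fc_no_free_exch, st.fc_xid_busy);
        return 0;
}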
/**
|
||||
* fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
|
||||
* @lport: The local port to add the exchange manager to
|
||||
|
|
|
@ -158,6 +158,9 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
|
|||
fsp->timer.data = (unsigned long)fsp;
|
||||
INIT_LIST_HEAD(&fsp->list);
|
||||
spin_lock_init(&fsp->scsi_pkt_lock);
|
||||
} else {
|
||||
per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
|
||||
put_cpu();
|
||||
}
|
||||
return fsp;
|
||||
}
|
||||
|
@ -264,6 +267,9 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
|
|||
if (!fsp->seq_ptr)
|
||||
return -EINVAL;
|
||||
|
||||
per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
|
||||
put_cpu();
|
||||
|
||||
fsp->state |= FC_SRB_ABORT_PENDING;
|
||||
return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
|
||||
}
|
||||
|
@ -420,6 +426,8 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
|
|||
if (likely(fp))
|
||||
return fp;
|
||||
|
||||
per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
|
||||
put_cpu();
|
||||
/* error case */
|
||||
fc_fcp_can_queue_ramp_down(lport);
|
||||
return NULL;
|
||||
|
@ -434,7 +442,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
|
|||
{
|
||||
struct scsi_cmnd *sc = fsp->cmd;
|
||||
struct fc_lport *lport = fsp->lp;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
struct fc_frame_header *fh;
|
||||
size_t start_offset;
|
||||
size_t offset;
|
||||
|
@ -496,7 +504,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
|
|||
|
||||
if (~crc != le32_to_cpu(fr_crc(fp))) {
|
||||
crc_err:
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
stats->ErrorFrames++;
|
||||
/* per cpu count, not total count, but OK for limit */
|
||||
if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
|
||||
|
@ -1372,10 +1380,10 @@ static void fc_fcp_timeout(unsigned long data)
|
|||
|
||||
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
|
||||
|
||||
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
|
||||
fc_fcp_rec(fsp);
|
||||
else if (fsp->state & FC_SRB_RCV_STATUS)
|
||||
if (fsp->state & FC_SRB_RCV_STATUS)
|
||||
fc_fcp_complete_locked(fsp);
|
||||
else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
|
||||
fc_fcp_rec(fsp);
|
||||
else
|
||||
fc_fcp_recovery(fsp, FC_TIMED_OUT);
|
||||
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
|
||||
|
@ -1786,7 +1794,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
|
|||
struct fc_rport_libfc_priv *rpriv;
|
||||
int rval;
|
||||
int rc = 0;
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
|
||||
rval = fc_remote_port_chkready(rport);
|
||||
if (rval) {
|
||||
|
@ -1835,7 +1843,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
|
|||
/*
|
||||
* setup the data direction
|
||||
*/
|
||||
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
|
||||
stats = per_cpu_ptr(lport->stats, get_cpu());
|
||||
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
|
||||
fsp->req_flags = FC_SRB_READ;
|
||||
stats->InputRequests++;
|
||||
|
|
|
@ -49,7 +49,7 @@ u32 fc_frame_crc_check(struct fc_frame *fp)
|
|||
EXPORT_SYMBOL(fc_frame_crc_check);
|
||||
|
||||
/*
|
||||
* Allocate a frame intended to be sent via fcoe_xmit.
|
||||
* Allocate a frame intended to be sent.
|
||||
* Get an sk_buff for the frame and set the length.
|
||||
*/
|
||||
struct fc_frame *_fc_frame_alloc(size_t len)
|
||||
|
|
|
@ -299,47 +299,54 @@ EXPORT_SYMBOL(fc_get_host_speed);
|
|||
*/
|
||||
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
|
||||
{
|
||||
struct fc_host_statistics *fcoe_stats;
|
||||
struct fc_host_statistics *fc_stats;
|
||||
struct fc_lport *lport = shost_priv(shost);
|
||||
struct timespec v0, v1;
|
||||
unsigned int cpu;
|
||||
u64 fcp_in_bytes = 0;
|
||||
u64 fcp_out_bytes = 0;
|
||||
|
||||
fcoe_stats = &lport->host_stats;
|
||||
memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
|
||||
fc_stats = &lport->host_stats;
|
||||
memset(fc_stats, 0, sizeof(struct fc_host_statistics));
|
||||
|
||||
jiffies_to_timespec(jiffies, &v0);
|
||||
jiffies_to_timespec(lport->boot_time, &v1);
|
||||
fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
|
||||
fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct fcoe_dev_stats *stats;
|
||||
struct fc_stats *stats;
|
||||
|
||||
stats = per_cpu_ptr(lport->dev_stats, cpu);
|
||||
stats = per_cpu_ptr(lport->stats, cpu);
|
||||
|
||||
fcoe_stats->tx_frames += stats->TxFrames;
|
||||
fcoe_stats->tx_words += stats->TxWords;
|
||||
fcoe_stats->rx_frames += stats->RxFrames;
|
||||
fcoe_stats->rx_words += stats->RxWords;
|
||||
fcoe_stats->error_frames += stats->ErrorFrames;
|
||||
fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
|
||||
fcoe_stats->fcp_input_requests += stats->InputRequests;
|
||||
fcoe_stats->fcp_output_requests += stats->OutputRequests;
|
||||
fcoe_stats->fcp_control_requests += stats->ControlRequests;
|
||||
fc_stats->tx_frames += stats->TxFrames;
|
||||
fc_stats->tx_words += stats->TxWords;
|
||||
fc_stats->rx_frames += stats->RxFrames;
|
||||
fc_stats->rx_words += stats->RxWords;
|
||||
fc_stats->error_frames += stats->ErrorFrames;
|
||||
fc_stats->invalid_crc_count += stats->InvalidCRCCount;
|
||||
fc_stats->fcp_input_requests += stats->InputRequests;
|
||||
fc_stats->fcp_output_requests += stats->OutputRequests;
|
||||
fc_stats->fcp_control_requests += stats->ControlRequests;
|
||||
fcp_in_bytes += stats->InputBytes;
|
||||
fcp_out_bytes += stats->OutputBytes;
|
||||
fcoe_stats->link_failure_count += stats->LinkFailureCount;
|
||||
fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
|
||||
fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
|
||||
fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
|
||||
fc_stats->link_failure_count += stats->LinkFailureCount;
|
||||
}
|
||||
fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
|
||||
fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
|
||||
fcoe_stats->lip_count = -1;
|
||||
fcoe_stats->nos_count = -1;
|
||||
fcoe_stats->loss_of_sync_count = -1;
|
||||
fcoe_stats->loss_of_signal_count = -1;
|
||||
fcoe_stats->prim_seq_protocol_err_count = -1;
|
||||
fcoe_stats->dumped_frames = -1;
|
||||
return fcoe_stats;
|
||||
fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
|
||||
fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
|
||||
fc_stats->lip_count = -1;
|
||||
fc_stats->nos_count = -1;
|
||||
fc_stats->loss_of_sync_count = -1;
|
||||
fc_stats->loss_of_signal_count = -1;
|
||||
fc_stats->prim_seq_protocol_err_count = -1;
|
||||
fc_stats->dumped_frames = -1;
|
||||
|
||||
/* update exches stats */
|
||||
fc_exch_update_stats(lport);
|
||||
|
||||
return fc_stats;
|
||||
}
|
||||
EXPORT_SYMBOL(fc_get_host_stats);
|
||||
|
||||
|
@ -973,7 +980,8 @@ drop:
|
|||
rcu_read_unlock();
|
||||
FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
|
||||
fc_frame_free(fp);
|
||||
lport->tt.exch_done(sp);
|
||||
if (sp)
|
||||
lport->tt.exch_done(sp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1590,8 +1598,9 @@ static void fc_lport_timeout(struct work_struct *work)
|
|||
case LPORT_ST_RPA:
|
||||
case LPORT_ST_DHBA:
|
||||
case LPORT_ST_DPRT:
|
||||
fc_lport_enter_ms(lport, lport->state);
|
||||
break;
|
||||
FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
|
||||
fc_lport_state(lport));
|
||||
/* fall thru */
|
||||
case LPORT_ST_SCR:
|
||||
fc_lport_enter_scr(lport);
|
||||
break;
|
||||
|
|
|
@ -523,6 +523,31 @@ static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
|
|||
i->dft->lldd_ata_set_dmamode(dev);
|
||||
}
|
||||
|
||||
static void sas_ata_sched_eh(struct ata_port *ap)
|
||||
{
|
||||
struct domain_device *dev = ap->private_data;
|
||||
struct sas_ha_struct *ha = dev->port->ha;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ha->lock, flags);
|
||||
if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
|
||||
ha->eh_active++;
|
||||
ata_std_sched_eh(ap);
|
||||
spin_unlock_irqrestore(&ha->lock, flags);
|
||||
}
|
||||
|
||||
void sas_ata_end_eh(struct ata_port *ap)
|
||||
{
|
||||
struct domain_device *dev = ap->private_data;
|
||||
struct sas_ha_struct *ha = dev->port->ha;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ha->lock, flags);
|
||||
if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
|
||||
ha->eh_active--;
|
||||
spin_unlock_irqrestore(&ha->lock, flags);
|
||||
}
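sas_ata_sched_eh()/sas_ata_end_eh() above keep ha->eh_active balanced by guarding both directions with the SAS_DEV_EH_PENDING bit, so repeated scheduling cannot over-count. A user-space sketch of that idempotent accounting, with a mutex and a boolean in place of test_and_set_bit()/test_and_clear_bit():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
        bool eh_pending;        /* stands in for SAS_DEV_EH_PENDING in dev->state */
};

struct ha {
        pthread_mutex_t lock;
        int eh_active;
};

/* The per-device flag makes the bookkeeping idempotent: scheduling error
 * handling twice for the same device bumps eh_active only once, and the
 * matching end/unregister path drops it exactly once. */
static void sched_eh(struct ha *ha, struct dev *d)
{
        pthread_mutex_lock(&ha->lock);
        if (!d->eh_pending) {
                d->eh_pending = true;
                ha->eh_active++;
        }
        pthread_mutex_unlock(&ha->lock);
}

static void end_eh(struct ha *ha, struct dev *d)
{
        pthread_mutex_lock(&ha->lock);
        if (d->eh_pending) {
                d->eh_pending = false;
                ha->eh_active--;
        }
        pthread_mutex_unlock(&ha->lock);
}

int main(void)
{
        struct ha ha = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct dev d = { 0 };

        sched_eh(&ha, &d);
        sched_eh(&ha, &d);      /* no double count */
        end_eh(&ha, &d);
        printf("eh_active=%d\n", ha.eh_active);
        return 0;
}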
static struct ata_port_operations sas_sata_ops = {
|
||||
.prereset = ata_std_prereset,
|
||||
.hardreset = sas_ata_hard_reset,
|
||||
|
@ -536,6 +561,8 @@ static struct ata_port_operations sas_sata_ops = {
|
|||
.port_start = ata_sas_port_start,
|
||||
.port_stop = ata_sas_port_stop,
|
||||
.set_dmamode = sas_ata_set_dmamode,
|
||||
.sched_eh = sas_ata_sched_eh,
|
||||
.end_eh = sas_ata_end_eh,
|
||||
};
|
||||
|
||||
static struct ata_port_info sata_port_info = {
|
||||
|
@ -591,7 +618,6 @@ void sas_ata_task_abort(struct sas_task *task)
|
|||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
blk_abort_request(qc->scsicmd->request);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
scsi_schedule_eh(qc->scsicmd->device->host);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -708,10 +734,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
|
|||
struct ata_port *ap = dev->sata_dev.ap;
|
||||
struct sas_ha_struct *ha = dev->port->ha;
|
||||
|
||||
/* hold a reference over eh since we may be racing with final
|
||||
* remove once all commands are completed
|
||||
*/
|
||||
kref_get(&dev->kref);
|
||||
sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
|
||||
ata_scsi_port_error_handler(ha->core.shost, ap);
|
||||
sas_put_device(dev);
|
||||
|
@ -720,7 +742,7 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
|
|||
void sas_ata_strategy_handler(struct Scsi_Host *shost)
|
||||
{
|
||||
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
|
||||
LIST_HEAD(async);
|
||||
ASYNC_DOMAIN_EXCLUSIVE(async);
|
||||
int i;
|
||||
|
||||
/* it's ok to defer revalidation events during ata eh, these
|
||||
|
@ -742,6 +764,13 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
|
|||
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
|
||||
if (!dev_is_sata(dev))
|
||||
continue;
|
||||
|
||||
/* hold a reference over eh since we may be
|
||||
* racing with final remove once all commands
|
||||
* are completed
|
||||
*/
|
||||
kref_get(&dev->kref);
|
||||
|
||||
async_schedule_domain(async_sas_ata_eh, dev, &async);
|
||||
}
|
||||
spin_unlock(&port->dev_list_lock);
|
||||
|
|
|
@ -39,18 +39,13 @@ void sas_init_dev(struct domain_device *dev)
|
|||
{
|
||||
switch (dev->dev_type) {
|
||||
case SAS_END_DEV:
|
||||
INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
|
||||
break;
|
||||
case EDGE_DEV:
|
||||
case FANOUT_DEV:
|
||||
INIT_LIST_HEAD(&dev->ex_dev.children);
|
||||
mutex_init(&dev->ex_dev.cmd_mutex);
|
||||
break;
|
||||
case SATA_DEV:
|
||||
case SATA_PM:
|
||||
case SATA_PM_PORT:
|
||||
case SATA_PENDING:
|
||||
INIT_LIST_HEAD(&dev->sata_dev.children);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -286,6 +281,8 @@ void sas_free_device(struct kref *kref)
|
|||
|
||||
static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
|
||||
{
|
||||
struct sas_ha_struct *ha = port->ha;
|
||||
|
||||
sas_notify_lldd_dev_gone(dev);
|
||||
if (!dev->parent)
|
||||
dev->port->port_dev = NULL;
|
||||
|
@ -294,8 +291,18 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
|
|||
|
||||
spin_lock_irq(&port->dev_list_lock);
|
||||
list_del_init(&dev->dev_list_node);
|
||||
if (dev_is_sata(dev))
|
||||
sas_ata_end_eh(dev->sata_dev.ap);
|
||||
spin_unlock_irq(&port->dev_list_lock);
|
||||
|
||||
spin_lock_irq(&ha->lock);
|
||||
if (dev->dev_type == SAS_END_DEV &&
|
||||
!list_empty(&dev->ssp_dev.eh_list_node)) {
|
||||
list_del_init(&dev->ssp_dev.eh_list_node);
|
||||
ha->eh_active--;
|
||||
}
|
||||
spin_unlock_irq(&ha->lock);
|
||||
|
||||
sas_put_device(dev);
|
||||
}
|
||||
|
||||
|
@ -488,9 +495,9 @@ static void sas_chain_event(int event, unsigned long *pending,
|
|||
if (!test_and_set_bit(event, pending)) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ha->state_lock, flags);
|
||||
spin_lock_irqsave(&ha->lock, flags);
|
||||
sas_chain_work(ha, sw);
|
||||
spin_unlock_irqrestore(&ha->state_lock, flags);
|
||||
spin_unlock_irqrestore(&ha->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -47,9 +47,9 @@ static void sas_queue_event(int event, unsigned long *pending,
|
|||
if (!test_and_set_bit(event, pending)) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ha->state_lock, flags);
|
||||
spin_lock_irqsave(&ha->lock, flags);
|
||||
sas_queue_work(ha, work);
|
||||
spin_unlock_irqrestore(&ha->state_lock, flags);
|
||||
spin_unlock_irqrestore(&ha->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -61,18 +61,18 @@ void __sas_drain_work(struct sas_ha_struct *ha)
|
|||
|
||||
set_bit(SAS_HA_DRAINING, &ha->state);
|
||||
/* flush submitters */
|
||||
spin_lock_irq(&ha->state_lock);
|
||||
spin_unlock_irq(&ha->state_lock);
|
||||
spin_lock_irq(&ha->lock);
|
||||
spin_unlock_irq(&ha->lock);
|
||||
|
||||
drain_workqueue(wq);
|
||||
|
||||
spin_lock_irq(&ha->state_lock);
|
||||
spin_lock_irq(&ha->lock);
|
||||
clear_bit(SAS_HA_DRAINING, &ha->state);
|
||||
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
|
||||
list_del_init(&sw->drain_node);
|
||||
sas_queue_work(ha, sw);
|
||||
}
|
||||
spin_unlock_irq(&ha->state_lock);
|
||||
spin_unlock_irq(&ha->lock);
|
||||
}
|
||||
|
||||
int sas_drain_work(struct sas_ha_struct *ha)
|
||||
|
|
|
@ -51,14 +51,14 @@ static void smp_task_timedout(unsigned long _task)
|
|||
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
|
||||
spin_unlock_irqrestore(&task->task_state_lock, flags);
|
||||
|
||||
complete(&task->completion);
|
||||
complete(&task->slow_task->completion);
|
||||
}
|
||||
|
||||
static void smp_task_done(struct sas_task *task)
|
||||
{
|
||||
if (!del_timer(&task->timer))
|
||||
if (!del_timer(&task->slow_task->timer))
|
||||
return;
|
||||
complete(&task->completion);
|
||||
complete(&task->slow_task->completion);
|
||||
}
|
||||
|
||||
/* Give it some long enough timeout. In seconds. */
|
||||
|
@ -79,7 +79,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
|
|||
break;
|
||||
}
|
||||
|
||||
task = sas_alloc_task(GFP_KERNEL);
|
||||
task = sas_alloc_slow_task(GFP_KERNEL);
|
||||
if (!task) {
|
||||
res = -ENOMEM;
|
||||
break;
|
||||
|
@ -91,20 +91,20 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
|
|||
|
||||
task->task_done = smp_task_done;
|
||||
|
||||
task->timer.data = (unsigned long) task;
|
||||
task->timer.function = smp_task_timedout;
|
||||
task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
|
||||
add_timer(&task->timer);
|
||||
task->slow_task->timer.data = (unsigned long) task;
|
||||
task->slow_task->timer.function = smp_task_timedout;
|
||||
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
|
||||
add_timer(&task->slow_task->timer);
|
||||
|
||||
res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
|
||||
|
||||
if (res) {
|
||||
del_timer(&task->timer);
|
||||
del_timer(&task->slow_task->timer);
|
||||
SAS_DPRINTK("executing SMP task failed:%d\n", res);
|
||||
break;
|
||||
}
|
||||
|
||||
wait_for_completion(&task->completion);
|
||||
wait_for_completion(&task->slow_task->completion);
|
||||
res = -ECOMM;
|
||||
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
|
||||
SAS_DPRINTK("smp task timed out or aborted\n");
|
||||
|
@ -868,7 +868,7 @@ static struct domain_device *sas_ex_discover_end_dev(
|
|||
}
|
||||
|
||||
/* See if this phy is part of a wide port */
|
||||
static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
|
||||
static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
|
||||
{
|
||||
struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
|
||||
int i;
|
||||
|
@ -884,11 +884,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
|
|||
sas_port_add_phy(ephy->port, phy->phy);
|
||||
phy->port = ephy->port;
|
||||
phy->phy_state = PHY_DEVICE_DISCOVERED;
|
||||
return 0;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return -ENODEV;
|
||||
return false;
|
||||
}
|
||||
|
||||
static struct domain_device *sas_ex_discover_expander(
|
||||
|
@ -1030,8 +1030,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
|
|||
return res;
|
||||
}
|
||||
|
||||
res = sas_ex_join_wide_port(dev, phy_id);
|
||||
if (!res) {
|
||||
if (sas_ex_join_wide_port(dev, phy_id)) {
|
||||
SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
|
||||
phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
|
||||
return res;
|
||||
|
@ -1077,8 +1076,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
|
|||
if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
|
||||
SAS_ADDR(child->sas_addr)) {
|
||||
ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
|
||||
res = sas_ex_join_wide_port(dev, i);
|
||||
if (!res)
|
||||
if (sas_ex_join_wide_port(dev, i))
|
||||
SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
|
||||
i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
|
||||
|
||||
|
@ -1943,32 +1941,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
|
|||
{
|
||||
struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
|
||||
struct domain_device *child;
|
||||
bool found = false;
|
||||
int res, i;
|
||||
int res;
|
||||
|
||||
SAS_DPRINTK("ex %016llx phy%d new device attached\n",
|
||||
SAS_ADDR(dev->sas_addr), phy_id);
|
||||
res = sas_ex_phy_discover(dev, phy_id);
|
||||
if (res)
|
||||
goto out;
|
||||
/* to support the wide port inserted */
|
||||
for (i = 0; i < dev->ex_dev.num_phys; i++) {
|
||||
struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
|
||||
if (i == phy_id)
|
||||
continue;
|
||||
if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
|
||||
SAS_ADDR(ex_phy->attached_sas_addr)) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (found) {
|
||||
sas_ex_join_wide_port(dev, phy_id);
|
||||
return res;
|
||||
|
||||
if (sas_ex_join_wide_port(dev, phy_id))
|
||||
return 0;
|
||||
}
|
||||
|
||||
res = sas_ex_discover_devices(dev, phy_id);
|
||||
if (!res)
|
||||
goto out;
|
||||
if (res)
|
||||
return res;
|
||||
list_for_each_entry(child, &dev->ex_dev.children, siblings) {
|
||||
if (SAS_ADDR(child->sas_addr) ==
|
||||
SAS_ADDR(ex_phy->attached_sas_addr)) {
|
||||
|
@ -1978,7 +1964,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
|
|||
break;
|
||||
}
|
||||
}
|
||||
out:
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -2005,6 +1990,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
|
|||
u8 sas_addr[8];
|
||||
int res;
|
||||
|
||||
memset(sas_addr, 0, 8);
|
||||
res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
|
||||
switch (res) {
|
||||
case SMP_RESP_NO_PHY:
|
||||
|
@ -2017,9 +2003,13 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
|
|||
return res;
|
||||
case SMP_RESP_FUNC_ACC:
|
||||
break;
|
||||
case -ECOMM:
|
||||
break;
|
||||
default:
|
||||
return res;
|
||||
}
|
||||
|
||||
if (SAS_ADDR(sas_addr) == 0) {
|
||||
if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
|
||||
phy->phy_state = PHY_EMPTY;
|
||||
sas_unregister_devs_sas_addr(dev, phy_id, last);
|
||||
return res;
|
||||
|
@ -2109,9 +2099,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
|
|||
struct domain_device *dev = NULL;
|
||||
|
||||
res = sas_find_bcast_dev(port_dev, &dev);
|
||||
if (res)
|
||||
goto out;
|
||||
if (dev) {
|
||||
while (res == 0 && dev) {
|
||||
struct expander_device *ex = &dev->ex_dev;
|
||||
int i = 0, phy_id;
|
||||
|
||||
|
@ -2123,8 +2111,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
|
|||
res = sas_rediscover(dev, phy_id);
|
||||
i = phy_id + 1;
|
||||
} while (i < ex->num_phys);
|
||||
|
||||
dev = NULL;
|
||||
res = sas_find_bcast_dev(port_dev, &dev);
|
||||
}
|
||||
out:
|
||||
return res;
|
||||
}
|
||||
|
||||
|
|
|
@ -48,18 +48,37 @@ struct sas_task *sas_alloc_task(gfp_t flags)
|
|||
INIT_LIST_HEAD(&task->list);
|
||||
spin_lock_init(&task->task_state_lock);
|
||||
task->task_state_flags = SAS_TASK_STATE_PENDING;
|
||||
init_timer(&task->timer);
|
||||
init_completion(&task->completion);
|
||||
}
|
||||
|
||||
return task;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sas_alloc_task);
|
||||
|
||||
struct sas_task *sas_alloc_slow_task(gfp_t flags)
{
	struct sas_task *task = sas_alloc_task(flags);
	struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);

	if (!task || !slow) {
		if (task)
			kmem_cache_free(sas_task_cache, task);
		kfree(slow);
		return NULL;
	}

	task->slow_task = slow;
	init_timer(&slow->timer);
	init_completion(&slow->completion);

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
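
As a rough illustration of the split (not taken from the patch), an internal command would now park its timer and completion in the slow_task allocation rather than on the sas_task itself, mirroring the smp_execute_task changes earlier in this diff; demo_timedout and demo_issue_internal are made-up names and the LLDD hand-off is elided.

static void demo_timedout(unsigned long _task)
{
	struct sas_task *task = (struct sas_task *)_task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}

static int demo_issue_internal(struct domain_device *dev)
{
	struct sas_task *task = sas_alloc_slow_task(GFP_KERNEL);

	if (!task)
		return -ENOMEM;

	task->dev = dev;
	task->slow_task->timer.data = (unsigned long)task;
	task->slow_task->timer.function = demo_timedout;
	task->slow_task->timer.expires = jiffies + 10 * HZ;
	add_timer(&task->slow_task->timer);

	/* ... hand the task to the LLDD here; on completion its done
	 * callback would del_timer(&task->slow_task->timer) and complete() ...
	 */

	wait_for_completion(&task->slow_task->completion);
	sas_free_task(task);
	return 0;
}
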
void sas_free_task(struct sas_task *task)
|
||||
{
|
||||
if (task) {
|
||||
BUG_ON(!list_empty(&task->list));
|
||||
kfree(task->slow_task);
|
||||
kmem_cache_free(sas_task_cache, task);
|
||||
}
|
||||
}
|
||||
|
@ -114,9 +133,11 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
|
|||
sas_ha->lldd_queue_size = 128; /* Sanity */
|
||||
|
||||
set_bit(SAS_HA_REGISTERED, &sas_ha->state);
|
||||
spin_lock_init(&sas_ha->state_lock);
|
||||
spin_lock_init(&sas_ha->lock);
|
||||
mutex_init(&sas_ha->drain_mutex);
|
||||
init_waitqueue_head(&sas_ha->eh_wait_q);
|
||||
INIT_LIST_HEAD(&sas_ha->defer_q);
|
||||
INIT_LIST_HEAD(&sas_ha->eh_dev_q);
|
||||
|
||||
error = sas_register_phys(sas_ha);
|
||||
if (error) {
|
||||
|
@ -163,9 +184,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
|
|||
* events to be queued, and flush any in-progress drainers
|
||||
*/
|
||||
mutex_lock(&sas_ha->drain_mutex);
|
||||
spin_lock_irq(&sas_ha->state_lock);
|
||||
spin_lock_irq(&sas_ha->lock);
|
||||
clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
|
||||
spin_unlock_irq(&sas_ha->state_lock);
|
||||
spin_unlock_irq(&sas_ha->lock);
|
||||
__sas_drain_work(sas_ha);
|
||||
mutex_unlock(&sas_ha->drain_mutex);
|
||||
|
||||
|
@ -411,9 +432,9 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
|
|||
d->reset_result = 0;
|
||||
d->hard_reset = hard_reset;
|
||||
|
||||
spin_lock_irq(&ha->state_lock);
|
||||
spin_lock_irq(&ha->lock);
|
||||
sas_queue_work(ha, &d->reset_work);
|
||||
spin_unlock_irq(&ha->state_lock);
|
||||
spin_unlock_irq(&ha->lock);
|
||||
|
||||
rc = sas_drain_work(ha);
|
||||
if (rc == 0)
|
||||
|
@ -438,9 +459,9 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
|
|||
d->enable_result = 0;
|
||||
d->enable = enable;
|
||||
|
||||
spin_lock_irq(&ha->state_lock);
|
||||
spin_lock_irq(&ha->lock);
|
||||
sas_queue_work(ha, &d->enable_work);
|
||||
spin_unlock_irq(&ha->state_lock);
|
||||
spin_unlock_irq(&ha->lock);
|
||||
|
||||
rc = sas_drain_work(ha);
|
||||
if (rc == 0)
|
||||
|
|
|
@ -460,14 +460,109 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(sas_get_local_phy);
|
||||
|
||||
static void sas_wait_eh(struct domain_device *dev)
|
||||
{
|
||||
struct sas_ha_struct *ha = dev->port->ha;
|
||||
DEFINE_WAIT(wait);
|
||||
|
||||
if (dev_is_sata(dev)) {
|
||||
ata_port_wait_eh(dev->sata_dev.ap);
|
||||
return;
|
||||
}
|
||||
retry:
|
||||
spin_lock_irq(&ha->lock);
|
||||
|
||||
while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
|
||||
prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
|
||||
spin_unlock_irq(&ha->lock);
|
||||
schedule();
|
||||
spin_lock_irq(&ha->lock);
|
||||
}
|
||||
finish_wait(&ha->eh_wait_q, &wait);
|
||||
|
||||
spin_unlock_irq(&ha->lock);
|
||||
|
||||
/* make sure SCSI EH is complete */
|
||||
if (scsi_host_in_recovery(ha->core.shost)) {
|
||||
msleep(10);
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(sas_wait_eh);
|
||||
|
||||
static int sas_queue_reset(struct domain_device *dev, int reset_type, int lun, int wait)
|
||||
{
|
||||
struct sas_ha_struct *ha = dev->port->ha;
|
||||
int scheduled = 0, tries = 100;
|
||||
|
||||
/* ata: promote lun reset to bus reset */
|
||||
if (dev_is_sata(dev)) {
|
||||
sas_ata_schedule_reset(dev);
|
||||
if (wait)
|
||||
sas_ata_wait_eh(dev);
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
while (!scheduled && tries--) {
|
||||
spin_lock_irq(&ha->lock);
|
||||
if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
|
||||
!test_bit(reset_type, &dev->state)) {
|
||||
scheduled = 1;
|
||||
ha->eh_active++;
|
||||
list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
|
||||
set_bit(SAS_DEV_EH_PENDING, &dev->state);
|
||||
set_bit(reset_type, &dev->state);
|
||||
int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
|
||||
scsi_schedule_eh(ha->core.shost);
|
||||
}
|
||||
spin_unlock_irq(&ha->lock);
|
||||
|
||||
if (wait)
|
||||
sas_wait_eh(dev);
|
||||
|
||||
if (scheduled)
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
SAS_DPRINTK("%s reset of %s failed\n",
|
||||
reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
|
||||
dev_name(&dev->rphy->dev));
|
||||
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return FAILED;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	res = i->dft->lldd_abort_task(task);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
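
Purely as an illustration (no driver in this series is being quoted), a libsas LLDD host template could wire the newly exported handler in alongside the existing libsas EH entry points; every other template field is omitted from this sketch.

static struct scsi_host_template demo_sht = {
	.module			= THIS_MODULE,
	.name			= "demo-libsas-lldd",
	.eh_abort_handler	= sas_eh_abort_handler,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	/* queuecommand, can_queue, etc. left out of this sketch */
};
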
/* Attempt to send a LUN reset message to a device */
|
||||
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct domain_device *dev = cmd_to_domain_dev(cmd);
|
||||
struct sas_internal *i =
|
||||
to_sas_internal(dev->port->ha->core.shost->transportt);
|
||||
struct scsi_lun lun;
|
||||
int res;
|
||||
struct scsi_lun lun;
|
||||
struct Scsi_Host *host = cmd->device->host;
|
||||
struct domain_device *dev = cmd_to_domain_dev(cmd);
|
||||
struct sas_internal *i = to_sas_internal(host->transportt);
|
||||
|
||||
if (current != host->ehandler)
|
||||
return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
|
||||
|
||||
int_to_scsilun(cmd->device->lun, &lun);
|
||||
|
||||
|
@ -481,21 +576,22 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
|
|||
return FAILED;
|
||||
}
|
||||
|
||||
/* Attempt to send a phy (bus) reset */
|
||||
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct domain_device *dev = cmd_to_domain_dev(cmd);
|
||||
struct sas_phy *phy = sas_get_local_phy(dev);
|
||||
int res;
|
||||
struct Scsi_Host *host = cmd->device->host;
|
||||
struct domain_device *dev = cmd_to_domain_dev(cmd);
|
||||
struct sas_internal *i = to_sas_internal(host->transportt);
|
||||
|
||||
res = sas_phy_reset(phy, 1);
|
||||
if (res)
|
||||
SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
|
||||
kobject_name(&phy->dev.kobj),
|
||||
res);
|
||||
sas_put_local_phy(phy);
|
||||
if (current != host->ehandler)
|
||||
return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
|
||||
|
||||
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
|
||||
if (!i->dft->lldd_I_T_nexus_reset)
|
||||
return FAILED;
|
||||
|
||||
res = i->dft->lldd_I_T_nexus_reset(dev);
|
||||
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
|
||||
res == -ENODEV)
|
||||
return SUCCESS;
|
||||
|
||||
return FAILED;
|
||||
|
@ -667,16 +763,53 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
|
|||
goto out;
|
||||
}
|
||||
|
||||
static void sas_eh_handle_resets(struct Scsi_Host *shost)
|
||||
{
|
||||
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
|
||||
struct sas_internal *i = to_sas_internal(shost->transportt);
|
||||
|
||||
/* handle directed resets to sas devices */
|
||||
spin_lock_irq(&ha->lock);
|
||||
while (!list_empty(&ha->eh_dev_q)) {
|
||||
struct domain_device *dev;
|
||||
struct ssp_device *ssp;
|
||||
|
||||
ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
|
||||
list_del_init(&ssp->eh_list_node);
|
||||
dev = container_of(ssp, typeof(*dev), ssp_dev);
|
||||
kref_get(&dev->kref);
|
||||
WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");
|
||||
|
||||
spin_unlock_irq(&ha->lock);
|
||||
|
||||
if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
|
||||
i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);
|
||||
|
||||
if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
|
||||
i->dft->lldd_I_T_nexus_reset(dev);
|
||||
|
||||
sas_put_device(dev);
|
||||
spin_lock_irq(&ha->lock);
|
||||
clear_bit(SAS_DEV_EH_PENDING, &dev->state);
|
||||
ha->eh_active--;
|
||||
}
|
||||
spin_unlock_irq(&ha->lock);
|
||||
}
|
||||
|
||||
|
||||
void sas_scsi_recover_host(struct Scsi_Host *shost)
|
||||
{
|
||||
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
|
||||
unsigned long flags;
|
||||
LIST_HEAD(eh_work_q);
|
||||
int tries = 0;
|
||||
bool retry;
|
||||
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
retry:
|
||||
tries++;
|
||||
retry = true;
|
||||
spin_lock_irq(shost->host_lock);
|
||||
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
|
||||
shost->host_eh_scheduled = 0;
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
|
||||
__func__, shost->host_busy, shost->host_failed);
|
||||
|
@ -705,13 +838,26 @@ out:
|
|||
if (ha->lldd_max_execute_num > 1)
|
||||
wake_up_process(ha->core.queue_thread);
|
||||
|
||||
sas_eh_handle_resets(shost);
|
||||
|
||||
/* now link into libata eh --- if we have any ata devices */
|
||||
sas_ata_strategy_handler(shost);
|
||||
|
||||
scsi_eh_flush_done_q(&ha->eh_done_q);
|
||||
|
||||
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n",
|
||||
__func__, shost->host_busy, shost->host_failed);
|
||||
/* check if any new eh work was scheduled during the last run */
|
||||
spin_lock_irq(&ha->lock);
|
||||
if (ha->eh_active == 0) {
|
||||
shost->host_eh_scheduled = 0;
|
||||
retry = false;
|
||||
}
|
||||
spin_unlock_irq(&ha->lock);
|
||||
|
||||
if (retry)
|
||||
goto retry;
|
||||
|
||||
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
|
||||
__func__, shost->host_busy, shost->host_failed, tries);
|
||||
}
|
||||
|
||||
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
|
||||
|
@ -988,9 +1134,13 @@ void sas_task_abort(struct sas_task *task)
|
|||
|
||||
/* Escape for libsas internal commands */
|
||||
if (!sc) {
|
||||
if (!del_timer(&task->timer))
|
||||
struct sas_task_slow *slow = task->slow_task;
|
||||
|
||||
if (!slow)
|
||||
return;
|
||||
task->timer.function(task->timer.data);
|
||||
if (!del_timer(&slow->timer))
|
||||
return;
|
||||
slow->timer.function(slow->timer.data);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1003,7 +1153,6 @@ void sas_task_abort(struct sas_task *task)
|
|||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
blk_abort_request(sc->request);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
scsi_schedule_eh(sc->device->host);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -22,7 +22,9 @@
|
|||
ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
|
||||
ccflags-$(GCOV) += -O0
|
||||
|
||||
ifdef WARNINGS_BECOME_ERRORS
|
||||
ccflags-y += -Werror
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
|
||||
|
||||
|
|
|
@ -96,6 +96,10 @@ struct lpfc_sli2_slim;
|
|||
/* queue dump line buffer size */
|
||||
#define LPFC_LBUF_SZ 128
|
||||
|
||||
/* mailbox system shutdown options */
|
||||
#define LPFC_MBX_NO_WAIT 0
|
||||
#define LPFC_MBX_WAIT 1
|
||||
|
||||
enum lpfc_polling_flags {
|
||||
ENABLE_FCP_RING_POLLING = 0x1,
|
||||
DISABLE_FCP_RING_INT = 0x2
|
||||
|
|
|
@ -3617,6 +3617,91 @@ lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
|
|||
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
|
||||
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
|
||||
|
||||
/**
|
||||
* lpfc_fcp_imax_store
|
||||
*
|
||||
* @dev: class device that is converted into a Scsi_host.
|
||||
* @attr: device attribute, not used.
|
||||
* @buf: string with the number of fast-path FCP interrupts per second.
|
||||
* @count: unused variable.
|
||||
*
|
||||
* Description:
|
||||
* If val is in a valid range [636,651042], then set the adapter's
|
||||
* maximum number of fast-path FCP interrupts per second.
|
||||
*
|
||||
* Returns:
|
||||
* length of the buf on success if val is in range the intended mode
|
||||
* is supported.
|
||||
* -EINVAL if val out of range or intended mode is not supported.
|
||||
**/
|
||||
static ssize_t
lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int val = 0, i;

	/* Sanity check on user data */
	if (!isdigit(buf[0]))
		return -EINVAL;
	if (sscanf(buf, "%i", &val) != 1)
		return -EINVAL;

	/* Value range is [636,651042] */
	if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST)
		return -EINVAL;

	phba->cfg_fcp_imax = (uint32_t)val;
	for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY)
		lpfc_modify_fcp_eq_delay(phba, i);

	return strlen(buf);
}
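
For a rough sense of scale (assuming LPFC_DMULT_CONST equals the documented upper bound 651042 of the valid range), the EQ delay multiplier programmed for the default lpfc_fcp_imax of 10000 interrupts per second works out as below, mirroring the dmult computation in lpfc_modify_fcp_eq_delay() later in this diff.

uint16_t dmult = 651042 / 10000 - 1;	/* integer division gives 65, so dmult = 64 */
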
/*
|
||||
# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
|
||||
#
|
||||
# Value range is [636,651042]. Default value is 10000.
|
||||
*/
|
||||
static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX;
|
||||
module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(lpfc_fcp_imax,
|
||||
"Set the maximum number of fast-path FCP interrupts per second");
|
||||
lpfc_param_show(fcp_imax)
|
||||
|
||||
/**
|
||||
* lpfc_fcp_imax_init - Set the initial sr-iov virtual function enable
|
||||
* @phba: lpfc_hba pointer.
|
||||
* @val: link speed value.
|
||||
*
|
||||
* Description:
|
||||
* If val is in a valid range [636,651042], then initialize the adapter's
|
||||
* maximum number of fast-path FCP interrupts per second.
|
||||
*
|
||||
* Returns:
|
||||
* zero if val saved.
|
||||
* -EINVAL val out of range
|
||||
**/
|
||||
static int
|
||||
lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
|
||||
{
|
||||
if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) {
|
||||
phba->cfg_fcp_imax = val;
|
||||
return 0;
|
||||
}
|
||||
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3016 fcp_imax: %d out of range, using default\n", val);
|
||||
phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
|
||||
lpfc_fcp_imax_show, lpfc_fcp_imax_store);
|
||||
|
||||
/*
|
||||
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
|
||||
# Value range is [2,3]. Default value is 3.
|
||||
|
@ -3757,14 +3842,6 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
|
|||
LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
|
||||
"MSI-X (2), if possible");
|
||||
|
||||
/*
|
||||
# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
|
||||
#
|
||||
# Value range is [636,651042]. Default value is 10000.
|
||||
*/
|
||||
LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
|
||||
"Set the maximum number of fast-path FCP interrupts per second");
|
||||
|
||||
/*
|
||||
# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
|
||||
#
|
||||
|
|
|
@ -183,7 +183,7 @@ int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
|
|||
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
|
||||
int lpfc_online(struct lpfc_hba *);
|
||||
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
|
||||
void lpfc_offline_prep(struct lpfc_hba *);
|
||||
void lpfc_offline_prep(struct lpfc_hba *, int);
|
||||
void lpfc_offline(struct lpfc_hba *);
|
||||
void lpfc_reset_hba(struct lpfc_hba *);
|
||||
|
||||
|
@ -273,7 +273,7 @@ int lpfc_sli_host_down(struct lpfc_vport *);
|
|||
int lpfc_sli_hba_down(struct lpfc_hba *);
|
||||
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
|
||||
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
|
||||
void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
|
||||
void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *, int);
|
||||
int lpfc_sli_check_eratt(struct lpfc_hba *);
|
||||
void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
|
||||
struct lpfc_sli_ring *, uint32_t);
|
||||
|
|
|
@ -395,8 +395,13 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
|
|||
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
|
||||
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
|
||||
break;
|
||||
if (fcp_cqidx >= phba->cfg_fcp_eq_count)
|
||||
return;
|
||||
if (phba->intr_type == MSIX) {
|
||||
if (fcp_cqidx >= phba->cfg_fcp_eq_count)
|
||||
return;
|
||||
} else {
|
||||
if (fcp_cqidx > 0)
|
||||
return;
|
||||
}
|
||||
|
||||
printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
|
||||
fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
|
||||
|
@ -426,8 +431,13 @@ lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
|
|||
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
|
||||
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
|
||||
break;
|
||||
if (fcp_cqidx >= phba->cfg_fcp_eq_count)
|
||||
return;
|
||||
if (phba->intr_type == MSIX) {
|
||||
if (fcp_cqidx >= phba->cfg_fcp_eq_count)
|
||||
return;
|
||||
} else {
|
||||
if (fcp_cqidx > 0)
|
||||
return;
|
||||
}
|
||||
|
||||
if (phba->cfg_fcp_eq_count == 0) {
|
||||
fcp_eqidx = -1;
|
||||
|
|
|
@ -530,7 +530,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
|
|||
break;
|
||||
case LPFC_EVT_OFFLINE_PREP:
|
||||
if (phba->link_state >= LPFC_LINK_DOWN)
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
|
||||
*(int *)(evtp->evt_arg1) = 0;
|
||||
complete((struct completion *)(evtp->evt_arg2));
|
||||
break;
|
||||
|
|
|
@ -874,6 +874,7 @@ struct mbox_header {
|
|||
#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
|
||||
#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
|
||||
#define LPFC_MBOX_OPCODE_NOP 0x21
|
||||
#define LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY 0x29
|
||||
#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
|
||||
#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
|
||||
#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
|
||||
|
@ -940,6 +941,13 @@ struct eq_context {
|
|||
uint32_t reserved3;
|
||||
};
|
||||
|
||||
struct eq_delay_info {
|
||||
uint32_t eq_id;
|
||||
uint32_t phase;
|
||||
uint32_t delay_multi;
|
||||
};
|
||||
#define LPFC_MAX_EQ_DELAY 8
|
||||
|
||||
struct sgl_page_pairs {
|
||||
uint32_t sgl_pg0_addr_lo;
|
||||
uint32_t sgl_pg0_addr_hi;
|
||||
|
@ -1002,6 +1010,19 @@ struct lpfc_mbx_eq_create {
|
|||
} u;
|
||||
};
|
||||
|
||||
struct lpfc_mbx_modify_eq_delay {
|
||||
struct mbox_header header;
|
||||
union {
|
||||
struct {
|
||||
uint32_t num_eq;
|
||||
struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
|
||||
} request;
|
||||
struct {
|
||||
uint32_t word0;
|
||||
} response;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct lpfc_mbx_eq_destroy {
|
||||
struct mbox_header header;
|
||||
union {
|
||||
|
@ -2875,6 +2896,7 @@ struct lpfc_mqe {
|
|||
struct lpfc_mbx_mq_create mq_create;
|
||||
struct lpfc_mbx_mq_create_ext mq_create_ext;
|
||||
struct lpfc_mbx_eq_create eq_create;
|
||||
struct lpfc_mbx_modify_eq_delay eq_delay;
|
||||
struct lpfc_mbx_cq_create cq_create;
|
||||
struct lpfc_mbx_wq_create wq_create;
|
||||
struct lpfc_mbx_rq_create rq_create;
|
||||
|
@ -3084,6 +3106,28 @@ struct lpfc_acqe_fc_la {
|
|||
#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2
|
||||
};
|
||||
|
||||
struct lpfc_acqe_misconfigured_event {
	struct {
		uint32_t word0;
#define lpfc_sli_misconfigured_port0_SHIFT	0
#define lpfc_sli_misconfigured_port0_MASK	0x000000FF
#define lpfc_sli_misconfigured_port0_WORD	word0
#define lpfc_sli_misconfigured_port1_SHIFT	8
#define lpfc_sli_misconfigured_port1_MASK	0x000000FF
#define lpfc_sli_misconfigured_port1_WORD	word0
#define lpfc_sli_misconfigured_port2_SHIFT	16
#define lpfc_sli_misconfigured_port2_MASK	0x000000FF
#define lpfc_sli_misconfigured_port2_WORD	word0
#define lpfc_sli_misconfigured_port3_SHIFT	24
#define lpfc_sli_misconfigured_port3_MASK	0x000000FF
#define lpfc_sli_misconfigured_port3_WORD	word0
	} theEvent;
#define LPFC_SLI_EVENT_STATUS_VALID		0x00
#define LPFC_SLI_EVENT_STATUS_NOT_PRESENT	0x01
#define LPFC_SLI_EVENT_STATUS_WRONG_TYPE	0x02
#define LPFC_SLI_EVENT_STATUS_UNSUPPORTED	0x03
};
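
A hand-rolled equivalent of the bf_get() accessors for these fields, shown only to make the per-port packing in word0 explicit; it is a fragment, and the misconfigured pointer is the one used in lpfc_sli4_async_sli_evt() later in this diff.

uint8_t port1_status = (misconfigured->theEvent.word0 >>
			lpfc_sli_misconfigured_port1_SHIFT) &
		       lpfc_sli_misconfigured_port1_MASK;
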
struct lpfc_acqe_sli {
|
||||
uint32_t event_data1;
|
||||
uint32_t event_data2;
|
||||
|
@ -3094,6 +3138,7 @@ struct lpfc_acqe_sli {
|
|||
#define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3
|
||||
#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4
|
||||
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
|
||||
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -73,6 +73,8 @@ static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
|
|||
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
|
||||
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
|
||||
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
|
||||
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
|
||||
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
|
||||
|
||||
static struct scsi_transport_template *lpfc_transport_template = NULL;
|
||||
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
|
||||
|
@ -1169,7 +1171,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
|
|||
spin_lock_irq(&phba->hbalock);
|
||||
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
|
||||
|
||||
lpfc_offline(phba);
|
||||
lpfc_reset_barrier(phba);
|
||||
|
@ -1193,7 +1195,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
|
|||
static void
|
||||
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
|
||||
{
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
|
||||
lpfc_offline(phba);
|
||||
lpfc_sli4_brdreset(phba);
|
||||
lpfc_hba_down_post(phba);
|
||||
|
@ -1251,7 +1253,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
|
|||
* There was a firmware error. Take the hba offline and then
|
||||
* attempt to restart it.
|
||||
*/
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
|
||||
lpfc_offline(phba);
|
||||
|
||||
/* Wait for the ER1 bit to clear.*/
|
||||
|
@ -1372,7 +1374,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
|
|||
* There was a firmware error. Take the hba offline and then
|
||||
* attempt to restart it.
|
||||
*/
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
|
||||
lpfc_offline(phba);
|
||||
lpfc_sli_brdrestart(phba);
|
||||
if (lpfc_online(phba) == 0) { /* Initialize the HBA */
|
||||
|
@ -1427,6 +1429,54 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
|
|||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
* @mbx_action: flag for mailbox shutdown action.
|
||||
*
|
||||
* This routine is invoked to perform an SLI4 port PCI function reset in
|
||||
* response to port status register polling attention. It waits for port
|
||||
* status register (ERR, RDY, RN) bits before proceeding with function reset.
|
||||
* During this process, interrupt vectors are freed and later requested
|
||||
* for handling possible port resource change.
|
||||
**/
|
||||
static int
|
||||
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
|
||||
{
|
||||
int rc;
|
||||
uint32_t intr_mode;
|
||||
|
||||
/*
|
||||
* On error status condition, driver need to wait for port
|
||||
* ready before performing reset.
|
||||
*/
|
||||
rc = lpfc_sli4_pdev_status_reg_wait(phba);
|
||||
if (!rc) {
|
||||
/* need reset: attempt for port recovery */
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2887 Reset Needed: Attempting Port "
|
||||
"Recovery...\n");
|
||||
lpfc_offline_prep(phba, mbx_action);
|
||||
lpfc_offline(phba);
|
||||
/* release interrupt for possible resource change */
|
||||
lpfc_sli4_disable_intr(phba);
|
||||
lpfc_sli_brdrestart(phba);
|
||||
/* request and enable interrupt */
|
||||
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
|
||||
if (intr_mode == LPFC_INTR_ERROR) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3175 Failed to enable interrupt\n");
|
||||
return -EIO;
|
||||
} else {
|
||||
phba->intr_mode = intr_mode;
|
||||
}
|
||||
rc = lpfc_online(phba);
|
||||
if (rc == 0)
|
||||
lpfc_unblock_mgmt_io(phba);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
|
@ -1506,30 +1556,18 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
|
|||
reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3145 Port Down: Provisioning\n");
|
||||
/*
|
||||
* On error status condition, driver need to wait for port
|
||||
* ready before performing reset.
|
||||
*/
|
||||
rc = lpfc_sli4_pdev_status_reg_wait(phba);
|
||||
if (!rc) {
|
||||
/* need reset: attempt for port recovery */
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2887 Reset Needed: Attempting Port "
|
||||
"Recovery...\n");
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline(phba);
|
||||
lpfc_sli_brdrestart(phba);
|
||||
if (lpfc_online(phba) == 0) {
|
||||
lpfc_unblock_mgmt_io(phba);
|
||||
/* don't report event on forced debug dump */
|
||||
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
|
||||
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
|
||||
return;
|
||||
else
|
||||
break;
|
||||
}
|
||||
/* fall through for not able to recover */
|
||||
|
||||
/* Check port status register for function reset */
|
||||
rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
|
||||
if (rc == 0) {
|
||||
/* don't report event on forced debug dump */
|
||||
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
|
||||
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
|
||||
return;
|
||||
else
|
||||
break;
|
||||
}
|
||||
/* fall through for not able to recover */
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3152 Unrecoverable error, bring the port "
|
||||
"offline\n");
|
||||
|
@ -2494,15 +2532,19 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
|
|||
* driver prepares the HBA interface for online or offline.
|
||||
**/
|
||||
static void
|
||||
lpfc_block_mgmt_io(struct lpfc_hba * phba)
|
||||
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
|
||||
{
|
||||
unsigned long iflag;
|
||||
uint8_t actcmd = MBX_HEARTBEAT;
|
||||
unsigned long timeout;
|
||||
|
||||
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
|
||||
spin_lock_irqsave(&phba->hbalock, iflag);
|
||||
phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||||
if (mbx_action == LPFC_MBX_NO_WAIT)
|
||||
return;
|
||||
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
|
||||
spin_lock_irqsave(&phba->hbalock, iflag);
|
||||
if (phba->sli.mbox_active) {
|
||||
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
|
||||
/* Determine how long we might wait for the active mailbox
|
||||
|
@ -2592,7 +2634,7 @@ lpfc_online(struct lpfc_hba *phba)
|
|||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
"0458 Bring Adapter online\n");
|
||||
|
||||
lpfc_block_mgmt_io(phba);
|
||||
lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
|
||||
|
||||
if (!lpfc_sli_queue_setup(phba)) {
|
||||
lpfc_unblock_mgmt_io(phba);
|
||||
|
@ -2660,7 +2702,7 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
|
|||
* queue to make it ready to be brought offline.
|
||||
**/
|
||||
void
|
||||
lpfc_offline_prep(struct lpfc_hba * phba)
|
||||
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
|
||||
{
|
||||
struct lpfc_vport *vport = phba->pport;
|
||||
struct lpfc_nodelist *ndlp, *next_ndlp;
|
||||
|
@ -2671,7 +2713,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
|
|||
if (vport->fc_flag & FC_OFFLINE_MODE)
|
||||
return;
|
||||
|
||||
lpfc_block_mgmt_io(phba);
|
||||
lpfc_block_mgmt_io(phba, mbx_action);
|
||||
|
||||
lpfc_linkdown(phba);
|
||||
|
||||
|
@ -2718,7 +2760,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
|
|||
}
|
||||
lpfc_destroy_vport_work_array(phba, vports);
|
||||
|
||||
lpfc_sli_mbox_sys_shutdown(phba);
|
||||
lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3684,12 +3726,76 @@ out_free_pmb:
|
|||
static void
|
||||
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
|
||||
{
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"2901 Async SLI event - Event Data1:x%08x Event Data2:"
|
||||
"x%08x SLI Event Type:%d",
|
||||
acqe_sli->event_data1, acqe_sli->event_data2,
|
||||
bf_get(lpfc_trailer_type, acqe_sli));
|
||||
return;
|
||||
char port_name;
|
||||
char message[80];
|
||||
uint8_t status;
|
||||
struct lpfc_acqe_misconfigured_event *misconfigured;
|
||||
|
||||
/* special case misconfigured event as it contains data for all ports */
|
||||
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
|
||||
LPFC_SLI_INTF_IF_TYPE_2) ||
|
||||
(bf_get(lpfc_trailer_type, acqe_sli) !=
|
||||
LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"2901 Async SLI event - Event Data1:x%08x Event Data2:"
|
||||
"x%08x SLI Event Type:%d\n",
|
||||
acqe_sli->event_data1, acqe_sli->event_data2,
|
||||
bf_get(lpfc_trailer_type, acqe_sli));
|
||||
return;
|
||||
}
|
||||
|
||||
port_name = phba->Port[0];
|
||||
if (port_name == 0x00)
|
||||
port_name = '?'; /* get port name is empty */
|
||||
|
||||
misconfigured = (struct lpfc_acqe_misconfigured_event *)
|
||||
&acqe_sli->event_data1;
|
||||
|
||||
/* fetch the status for this port */
|
||||
switch (phba->sli4_hba.lnk_info.lnk_no) {
|
||||
case LPFC_LINK_NUMBER_0:
|
||||
status = bf_get(lpfc_sli_misconfigured_port0,
|
||||
&misconfigured->theEvent);
|
||||
break;
|
||||
case LPFC_LINK_NUMBER_1:
|
||||
status = bf_get(lpfc_sli_misconfigured_port1,
|
||||
&misconfigured->theEvent);
|
||||
break;
|
||||
case LPFC_LINK_NUMBER_2:
|
||||
status = bf_get(lpfc_sli_misconfigured_port2,
|
||||
&misconfigured->theEvent);
|
||||
break;
|
||||
case LPFC_LINK_NUMBER_3:
|
||||
status = bf_get(lpfc_sli_misconfigured_port3,
|
||||
&misconfigured->theEvent);
|
||||
break;
|
||||
default:
|
||||
status = ~LPFC_SLI_EVENT_STATUS_VALID;
|
||||
break;
|
||||
}
|
||||
|
||||
switch (status) {
|
||||
case LPFC_SLI_EVENT_STATUS_VALID:
|
||||
return; /* no message if the sfp is okay */
|
||||
case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
|
||||
sprintf(message, "Not installed");
|
||||
break;
|
||||
case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
|
||||
sprintf(message,
|
||||
"Optics of two types installed");
|
||||
break;
|
||||
case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
|
||||
sprintf(message, "Incompatible optics");
|
||||
break;
|
||||
default:
|
||||
/* firmware is reporting a status we don't know about */
|
||||
sprintf(message, "Unknown event status x%02x", status);
|
||||
break;
|
||||
}
|
||||
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"3176 Misconfigured Physical Port - "
|
||||
"Port Name %c %s\n", port_name, message);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4312,7 +4418,7 @@ lpfc_reset_hba(struct lpfc_hba *phba)
|
|||
phba->link_state = LPFC_HBA_ERROR;
|
||||
return;
|
||||
}
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
|
||||
lpfc_offline(phba);
|
||||
lpfc_sli_brdrestart(phba);
|
||||
lpfc_online(phba);
|
||||
|
@ -5514,14 +5620,45 @@ lpfc_destroy_shost(struct lpfc_hba *phba)
|
|||
static void
|
||||
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
|
||||
{
|
||||
uint32_t old_mask;
|
||||
uint32_t old_guard;
|
||||
|
||||
int pagecnt = 10;
|
||||
if (lpfc_prot_mask && lpfc_prot_guard) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"1478 Registering BlockGuard with the "
|
||||
"SCSI layer\n");
|
||||
scsi_host_set_prot(shost, lpfc_prot_mask);
|
||||
scsi_host_set_guard(shost, lpfc_prot_guard);
|
||||
|
||||
old_mask = lpfc_prot_mask;
|
||||
old_guard = lpfc_prot_guard;
|
||||
|
||||
/* Only allow supported values */
|
||||
lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
|
||||
SHOST_DIX_TYPE0_PROTECTION |
|
||||
SHOST_DIX_TYPE1_PROTECTION);
|
||||
lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
|
||||
|
||||
/* DIF Type 1 protection for profiles AST1/C1 is end to end */
|
||||
if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
|
||||
lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
|
||||
|
||||
if (lpfc_prot_mask && lpfc_prot_guard) {
|
||||
if ((old_mask != lpfc_prot_mask) ||
|
||||
(old_guard != lpfc_prot_guard))
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"1475 Registering BlockGuard with the "
|
||||
"SCSI layer: mask %d guard %d\n",
|
||||
lpfc_prot_mask, lpfc_prot_guard);
|
||||
|
||||
scsi_host_set_prot(shost, lpfc_prot_mask);
|
||||
scsi_host_set_guard(shost, lpfc_prot_guard);
|
||||
} else
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"1479 Not Registering BlockGuard with the SCSI "
|
||||
"layer, Bad protection parameters: %d %d\n",
|
||||
old_mask, old_guard);
|
||||
}
|
||||
|
||||
if (!_dump_buf_data) {
|
||||
while (pagecnt) {
|
||||
spin_lock_init(&_dump_buf_lock);
|
||||
|
@ -8859,7 +8996,7 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
|
|||
"0473 PCI device Power Management suspend.\n");
|
||||
|
||||
/* Bring down the device */
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
|
||||
lpfc_offline(phba);
|
||||
kthread_stop(phba->worker_thread);
|
||||
|
||||
|
@ -8985,7 +9122,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
|
|||
"2710 PCI channel disable preparing for reset\n");
|
||||
|
||||
/* Block any management I/Os to the device */
|
||||
lpfc_block_mgmt_io(phba);
|
||||
lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
|
||||
|
||||
/* Block all SCSI devices' I/Os on the host */
|
||||
lpfc_scsi_dev_block(phba);
|
||||
|
@ -9129,7 +9266,7 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
|
|||
phba->intr_mode = intr_mode;
|
||||
|
||||
/* Take device offline, it will perform cleanup */
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
|
||||
lpfc_offline(phba);
|
||||
lpfc_sli_brdrestart(phba);
|
||||
|
||||
|
@ -9603,7 +9740,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
|
|||
"2843 PCI device Power Management suspend.\n");
|
||||
|
||||
/* Bring down the device */
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
|
||||
lpfc_offline(phba);
|
||||
kthread_stop(phba->worker_thread);
|
||||
|
||||
|
@ -9729,7 +9866,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
|
|||
"2826 PCI channel disable preparing for reset\n");
|
||||
|
||||
/* Block any management I/Os to the device */
|
||||
lpfc_block_mgmt_io(phba);
|
||||
lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
|
||||
|
||||
/* Block all SCSI devices' I/Os on the host */
|
||||
lpfc_scsi_dev_block(phba);
|
||||
|
@ -9902,7 +10039,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
|
|||
*/
|
||||
if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
|
||||
/* Perform device reset */
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
|
||||
lpfc_offline(phba);
|
||||
lpfc_sli_brdrestart(phba);
|
||||
/* Bring the device back online */
|
||||
|
|
|
@ -4275,10 +4275,8 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
|
|||
* Catch race where our node has transitioned, but the
|
||||
* transport is still transitioning.
|
||||
*/
|
||||
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
|
||||
cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
|
||||
goto out_fail_command;
|
||||
}
|
||||
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
|
||||
goto out_tgt_busy;
|
||||
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
|
||||
goto out_tgt_busy;
|
||||
|
||||
|
@ -4412,12 +4410,12 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
|||
struct lpfc_iocbq *abtsiocb;
|
||||
struct lpfc_scsi_buf *lpfc_cmd;
|
||||
IOCB_t *cmd, *icmd;
|
||||
int ret = SUCCESS;
|
||||
int ret = SUCCESS, status = 0;
|
||||
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
|
||||
|
||||
ret = fc_block_scsi_eh(cmnd);
|
||||
if (ret)
|
||||
return ret;
|
||||
status = fc_block_scsi_eh(cmnd);
|
||||
if (status)
|
||||
return status;
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
/* driver queued commands are in process of being flushed */
|
||||
|
@ -4435,7 +4433,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
|||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
|
||||
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
|
||||
"x%x ID %d LUN %d\n",
|
||||
ret, cmnd->device->id, cmnd->device->lun);
|
||||
SUCCESS, cmnd->device->id, cmnd->device->lun);
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -4762,7 +4760,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
|
|||
unsigned tgt_id = cmnd->device->id;
|
||||
unsigned int lun_id = cmnd->device->lun;
|
||||
struct lpfc_scsi_event_header scsi_event;
|
||||
int status;
|
||||
int status, ret = SUCCESS;
|
||||
|
||||
if (!rdata) {
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
|
||||
|
@ -4803,9 +4801,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
|
|||
* So, continue on.
|
||||
* We will report success if all the i/o aborts successfully.
|
||||
*/
|
||||
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
|
||||
ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
|
||||
LPFC_CTX_LUN);
|
||||
return status;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4829,7 +4827,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
|
|||
unsigned tgt_id = cmnd->device->id;
|
||||
unsigned int lun_id = cmnd->device->lun;
|
||||
struct lpfc_scsi_event_header scsi_event;
|
||||
int status;
|
||||
int status, ret = SUCCESS;
|
||||
|
||||
if (!rdata) {
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
|
||||
|
@ -4870,9 +4868,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
|
|||
* So, continue on.
|
||||
* We will report success if all the i/o aborts successfully.
|
||||
*/
|
||||
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
|
||||
LPFC_CTX_TGT);
|
||||
return status;
|
||||
ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
|
||||
LPFC_CTX_TGT);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4982,7 +4980,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
|
|||
struct lpfc_hba *phba = vport->phba;
|
||||
int rc, ret = SUCCESS;
|
||||
|
||||
lpfc_offline_prep(phba);
|
||||
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
|
||||
lpfc_offline(phba);
|
||||
rc = lpfc_sli_brdrestart(phba);
|
||||
if (rc)
|
||||
|
|
|
@ -8984,7 +8984,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
|
|||
int i;
|
||||
|
||||
/* Shutdown the mailbox command sub-system */
|
||||
lpfc_sli_mbox_sys_shutdown(phba);
|
||||
lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
|
||||
|
||||
lpfc_hba_down_prep(phba);
|
||||
|
||||
|
@ -9996,11 +9996,17 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
|
|||
* sub-system flush routine to gracefully bring down mailbox sub-system.
|
||||
**/
|
||||
void
|
||||
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
|
||||
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
|
||||
{
|
||||
struct lpfc_sli *psli = &phba->sli;
|
||||
unsigned long timeout;
|
||||
|
||||
if (mbx_action == LPFC_MBX_NO_WAIT) {
|
||||
/* delay 100ms for port state */
|
||||
msleep(100);
|
||||
lpfc_sli_mbox_sys_flush(phba);
|
||||
return;
|
||||
}
|
||||
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
|
@ -12041,6 +12047,83 @@ out_fail:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
|
||||
* @phba: HBA structure that indicates port to create a queue on.
|
||||
* @startq: The starting FCP EQ to modify
|
||||
*
|
||||
* This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
|
||||
*
|
||||
* The @phba struct is used to send mailbox command to HBA. The @startq
|
||||
* is used to get the starting FCP EQ to change.
|
||||
* This function is asynchronous and will wait for the mailbox
|
||||
* command to finish before continuing.
|
||||
*
|
||||
* On success this function will return a zero. If unable to allocate enough
|
||||
* memory this function will return -ENOMEM. If the queue create mailbox command
|
||||
* fails this function will return -ENXIO.
|
||||
**/
|
||||
uint32_t
|
||||
lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
|
||||
{
|
||||
struct lpfc_mbx_modify_eq_delay *eq_delay;
|
||||
LPFC_MBOXQ_t *mbox;
|
||||
struct lpfc_queue *eq;
|
||||
int cnt, rc, length, status = 0;
|
||||
uint32_t shdr_status, shdr_add_status;
|
||||
int fcp_eqidx;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
uint16_t dmult;
|
||||
|
||||
if (startq >= phba->cfg_fcp_eq_count)
|
||||
return 0;
|
||||
|
||||
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!mbox)
|
||||
return -ENOMEM;
|
||||
length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||||
LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
|
||||
length, LPFC_SLI4_MBX_EMBED);
|
||||
eq_delay = &mbox->u.mqe.un.eq_delay;
|
||||
|
||||
/* Calculate delay multiper from maximum interrupt per second */
|
||||
dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1;
|
||||
|
||||
cnt = 0;
|
||||
for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count;
|
||||
fcp_eqidx++) {
|
||||
eq = phba->sli4_hba.fp_eq[fcp_eqidx];
|
||||
if (!eq)
|
||||
continue;
|
||||
eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
|
||||
eq_delay->u.request.eq[cnt].phase = 0;
|
||||
eq_delay->u.request.eq[cnt].delay_multi = dmult;
|
||||
cnt++;
|
||||
if (cnt >= LPFC_MAX_EQ_DELAY)
|
||||
break;
|
||||
}
|
||||
eq_delay->u.request.num_eq = cnt;
|
||||
|
||||
mbox->vport = phba->pport;
|
||||
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
|
||||
mbox->context1 = NULL;
|
||||
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
|
||||
shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
|
||||
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
||||
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
|
||||
if (shdr_status || shdr_add_status || rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2512 MODIFY_EQ_DELAY mailbox failed with "
|
||||
"status x%x add_status x%x, mbx status x%x\n",
|
||||
shdr_status, shdr_add_status, rc);
|
||||
status = -ENXIO;
|
||||
}
|
||||
mempool_free(mbox, phba->mbox_mem_pool);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_eq_create - Create an Event Queue on the HBA
|
||||
* @phba: HBA structure that indicates port to create a queue on.
|
||||
|
@ -12228,8 +12311,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
|
|||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"0361 Unsupported CQ count. (%d)\n",
|
||||
cq->entry_count);
|
||||
if (cq->entry_count < 256)
|
||||
return -EINVAL;
|
||||
if (cq->entry_count < 256) {
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/* otherwise default to smallest count (drop through) */
|
||||
case 256:
|
||||
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
|
||||
|
@ -12420,8 +12505,10 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
|
|||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"0362 Unsupported MQ count. (%d)\n",
|
||||
mq->entry_count);
|
||||
if (mq->entry_count < 16)
|
||||
return -EINVAL;
|
||||
if (mq->entry_count < 16) {
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/* otherwise default to smallest count (drop through) */
|
||||
case 16:
|
||||
bf_set(lpfc_mq_context_ring_size,
|
||||
|
@ -12710,8 +12797,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"2535 Unsupported RQ count. (%d)\n",
|
||||
hrq->entry_count);
|
||||
if (hrq->entry_count < 512)
|
||||
return -EINVAL;
|
||||
if (hrq->entry_count < 512) {
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/* otherwise default to smallest count (drop through) */
|
||||
case 512:
|
||||
bf_set(lpfc_rq_context_rqe_count,
|
||||
|
@ -12791,8 +12880,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"2536 Unsupported RQ count. (%d)\n",
|
||||
drq->entry_count);
|
||||
if (drq->entry_count < 512)
|
||||
return -EINVAL;
|
||||
if (drq->entry_count < 512) {
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/* otherwise default to smallest count (drop through) */
|
||||
case 512:
|
||||
bf_set(lpfc_rq_context_rqe_count,
|
||||
|
@ -15855,24 +15946,18 @@ lpfc_drain_txq(struct lpfc_hba *phba)
|
|||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
|
||||
piocbq = lpfc_sli_ringtx_get(phba, pring);
|
||||
if (!piocbq) {
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"2823 txq empty and txq_cnt is %d\n ",
|
||||
pring->txq_cnt);
|
||||
break;
|
||||
}
|
||||
sglq = __lpfc_sli_get_sglq(phba, piocbq);
|
||||
if (!sglq) {
|
||||
__lpfc_sli_ringtx_put(phba, pring, piocbq);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
break;
|
||||
} else {
|
||||
if (!piocbq) {
|
||||
/* The txq_cnt out of sync. This should
|
||||
* never happen
|
||||
*/
|
||||
sglq = __lpfc_clear_active_sglq(phba,
|
||||
sglq->sli4_lxritag);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"2823 txq empty and txq_cnt is %d\n ",
|
||||
pring->txq_cnt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* The xri and iocb resources secured,
|
||||
|
|
|
@ -598,6 +598,7 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
|
|||
uint32_t);
|
||||
void lpfc_sli4_queue_free(struct lpfc_queue *);
|
||||
uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
|
||||
uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
|
||||
uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
|
||||
struct lpfc_queue *, uint32_t, uint32_t);
|
||||
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
* included with this package. *
|
||||
*******************************************************************/
|
||||
|
||||
#define LPFC_DRIVER_VERSION "8.3.31"
|
||||
#define LPFC_DRIVER_VERSION "8.3.32"
|
||||
#define LPFC_DRIVER_NAME "lpfc"
|
||||
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
|
||||
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
|
||||
|
|
|
@ -524,7 +524,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
|
|||
mega_passthru *pthru;
|
||||
scb_t *scb;
|
||||
mbox_t *mbox;
|
||||
long seg;
|
||||
u32 seg;
|
||||
char islogical;
|
||||
int max_ldrv_num;
|
||||
int channel = 0;
|
||||
|
@ -858,7 +858,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
|
|||
|
||||
/* Calculate Scatter-Gather info */
|
||||
mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
|
||||
(u32 *)&mbox->m_out.xferaddr, (u32 *)&seg);
|
||||
(u32 *)&mbox->m_out.xferaddr, &seg);
|
||||
|
||||
return scb;
|
||||
|
||||
|
|
|
@ -2731,7 +2731,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
|
|||
}
|
||||
|
||||
out:
|
||||
spin_unlock_irq(&adapter->lock);
|
||||
spin_unlock(&adapter->lock);
|
||||
return rval;
|
||||
}
|
||||
|
||||
|
|
|
@ -1158,6 +1158,7 @@ extern struct scsi_transport_template *mpt2sas_transport_template;
|
|||
extern int scsi_internal_device_block(struct scsi_device *sdev);
|
||||
extern u8 mpt2sas_stm_zero_smid_handler(struct MPT2SAS_ADAPTER *ioc,
|
||||
u8 msix_index, u32 reply);
|
||||
extern int scsi_internal_device_unblock(struct scsi_device *sdev);
|
||||
extern int scsi_internal_device_unblock(struct scsi_device *sdev,
|
||||
enum scsi_device_state new_state);
|
||||
|
||||
#endif /* MPT2SAS_BASE_H_INCLUDED */
|
||||
|
|
|
@ -2904,7 +2904,7 @@ _scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
|
|||
dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, "
|
||||
"handle(0x%04x)\n",
|
||||
sas_device_priv_data->sas_target->handle));
|
||||
scsi_internal_device_unblock(sdev);
|
||||
scsi_internal_device_unblock(sdev, SDEV_RUNNING);
|
||||
}
|
||||
}
|
||||
/**
|
||||
|
@ -2933,7 +2933,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
|
|||
"sas address(0x%016llx)\n", ioc->name,
|
||||
(unsigned long long)sas_address));
|
||||
sas_device_priv_data->block = 0;
|
||||
scsi_internal_device_unblock(sdev);
|
||||
scsi_internal_device_unblock(sdev, SDEV_RUNNING);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -885,7 +885,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
|
|||
struct completion *completion, int is_tmf,
|
||||
struct mvs_tmf_task *tmf)
|
||||
{
|
||||
struct domain_device *dev = task->dev;
|
||||
struct mvs_info *mvi = NULL;
|
||||
u32 rc = 0;
|
||||
u32 pass = 0;
|
||||
|
@ -1365,9 +1364,9 @@ void mvs_dev_gone(struct domain_device *dev)
|
|||
|
||||
static void mvs_task_done(struct sas_task *task)
|
||||
{
|
||||
if (!del_timer(&task->timer))
|
||||
if (!del_timer(&task->slow_task->timer))
|
||||
return;
|
||||
complete(&task->completion);
|
||||
complete(&task->slow_task->completion);
|
||||
}
|
||||
|
||||
static void mvs_tmf_timedout(unsigned long data)
|
||||
|
@ -1375,7 +1374,7 @@ static void mvs_tmf_timedout(unsigned long data)
|
|||
struct sas_task *task = (struct sas_task *)data;
|
||||
|
||||
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
|
||||
complete(&task->completion);
|
||||
complete(&task->slow_task->completion);
|
||||
}
|
||||
|
||||
#define MVS_TASK_TIMEOUT 20
|
||||
|
@ -1386,7 +1385,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
|
|||
struct sas_task *task = NULL;
|
||||
|
||||
for (retry = 0; retry < 3; retry++) {
|
||||
task = sas_alloc_task(GFP_KERNEL);
|
||||
task = sas_alloc_slow_task(GFP_KERNEL);
|
||||
if (!task)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -1396,20 +1395,20 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
|
|||
memcpy(&task->ssp_task, parameter, para_len);
|
||||
task->task_done = mvs_task_done;
|
||||
|
||||
task->timer.data = (unsigned long) task;
|
||||
task->timer.function = mvs_tmf_timedout;
|
||||
task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
|
||||
add_timer(&task->timer);
|
||||
task->slow_task->timer.data = (unsigned long) task;
|
||||
task->slow_task->timer.function = mvs_tmf_timedout;
|
||||
task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
|
||||
add_timer(&task->slow_task->timer);
|
||||
|
||||
res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
|
||||
|
||||
if (res) {
|
||||
del_timer(&task->timer);
|
||||
del_timer(&task->slow_task->timer);
|
||||
mv_printk("executing internel task failed:%d\n", res);
|
||||
goto ex_err;
|
||||
}
|
||||
|
||||
wait_for_completion(&task->completion);
|
||||
wait_for_completion(&task->slow_task->completion);
|
||||
res = TMF_RESP_FUNC_FAILED;
|
||||
/* Even TMF timed out, return direct. */
|
||||
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
|
||||
|
|
|
@ -650,9 +650,9 @@ int pm8001_dev_found(struct domain_device *dev)
|
|||
|
||||
static void pm8001_task_done(struct sas_task *task)
|
||||
{
|
||||
if (!del_timer(&task->timer))
|
||||
if (!del_timer(&task->slow_task->timer))
|
||||
return;
|
||||
complete(&task->completion);
|
||||
complete(&task->slow_task->completion);
|
||||
}
|
||||
|
||||
static void pm8001_tmf_timedout(unsigned long data)
|
||||
|
@ -660,7 +660,7 @@ static void pm8001_tmf_timedout(unsigned long data)
|
|||
struct sas_task *task = (struct sas_task *)data;
|
||||
|
||||
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
|
||||
complete(&task->completion);
|
||||
complete(&task->slow_task->completion);
|
||||
}
|
||||
|
||||
#define PM8001_TASK_TIMEOUT 20
|
||||
|
@ -683,7 +683,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
|
|||
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
|
||||
|
||||
for (retry = 0; retry < 3; retry++) {
|
||||
task = sas_alloc_task(GFP_KERNEL);
|
||||
task = sas_alloc_slow_task(GFP_KERNEL);
|
||||
if (!task)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -691,21 +691,21 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
|
|||
task->task_proto = dev->tproto;
|
||||
memcpy(&task->ssp_task, parameter, para_len);
|
||||
task->task_done = pm8001_task_done;
|
||||
task->timer.data = (unsigned long)task;
|
||||
task->timer.function = pm8001_tmf_timedout;
|
||||
task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
|
||||
add_timer(&task->timer);
|
||||
task->slow_task->timer.data = (unsigned long)task;
|
||||
task->slow_task->timer.function = pm8001_tmf_timedout;
|
||||
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
|
||||
add_timer(&task->slow_task->timer);
|
||||
|
||||
res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf);
|
||||
|
||||
if (res) {
|
||||
del_timer(&task->timer);
|
||||
del_timer(&task->slow_task->timer);
|
||||
PM8001_FAIL_DBG(pm8001_ha,
|
||||
pm8001_printk("Executing internal task "
|
||||
"failed\n"));
|
||||
goto ex_err;
|
||||
}
|
||||
wait_for_completion(&task->completion);
|
||||
wait_for_completion(&task->slow_task->completion);
|
||||
res = -TMF_RESP_FUNC_FAILED;
|
||||
/* Even TMF timed out, return direct. */
|
||||
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
|
||||
|
@ -765,17 +765,17 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
|
|||
struct sas_task *task = NULL;
|
||||
|
||||
for (retry = 0; retry < 3; retry++) {
|
||||
task = sas_alloc_task(GFP_KERNEL);
|
||||
task = sas_alloc_slow_task(GFP_KERNEL);
|
||||
if (!task)
|
||||
return -ENOMEM;
|
||||
|
||||
task->dev = dev;
|
||||
task->task_proto = dev->tproto;
|
||||
task->task_done = pm8001_task_done;
|
||||
task->timer.data = (unsigned long)task;
|
||||
task->timer.function = pm8001_tmf_timedout;
|
||||
task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
|
||||
add_timer(&task->timer);
|
||||
task->slow_task->timer.data = (unsigned long)task;
|
||||
task->slow_task->timer.function = pm8001_tmf_timedout;
|
||||
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
|
||||
add_timer(&task->slow_task->timer);
|
||||
|
||||
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
|
||||
if (res)
|
||||
|
@ -789,13 +789,13 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
|
|||
pm8001_dev, flag, task_tag, ccb_tag);
|
||||
|
||||
if (res) {
|
||||
del_timer(&task->timer);
|
||||
del_timer(&task->slow_task->timer);
|
||||
PM8001_FAIL_DBG(pm8001_ha,
|
||||
pm8001_printk("Executing internal task "
|
||||
"failed\n"));
|
||||
goto ex_err;
|
||||
}
|
||||
wait_for_completion(&task->completion);
|
||||
wait_for_completion(&task->slow_task->completion);
|
||||
res = TMF_RESP_FUNC_FAILED;
|
||||
/* Even TMF timed out, return direct. */
|
||||
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
|
||||
|
@ -962,8 +962,9 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
|
|||
struct pm8001_device *pm8001_dev;
|
||||
struct pm8001_hba_info *pm8001_ha;
|
||||
struct sas_phy *phy;
|
||||
|
||||
if (!dev || !dev->lldd_dev)
|
||||
return -1;
|
||||
return -ENODEV;
|
||||
|
||||
pm8001_dev = dev->lldd_dev;
|
||||
pm8001_ha = pm8001_find_ha_by_dev(dev);
|
||||
|
|
|
@ -685,7 +685,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
|
|||
pcix_set_mmrbc(ha->pdev, 2048);
|
||||
|
||||
/* PCIe -- adjust Maximum Read Request Size (2048). */
|
||||
if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
|
||||
if (pci_is_pcie(ha->pdev))
|
||||
pcie_set_readrq(ha->pdev, 2048);
|
||||
|
||||
pci_disable_rom(ha->pdev);
|
||||
|
@ -721,7 +721,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha)
|
|||
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
|
||||
|
||||
/* PCIe -- adjust Maximum Read Request Size (2048). */
|
||||
if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
|
||||
if (pci_is_pcie(ha->pdev))
|
||||
pcie_set_readrq(ha->pdev, 2048);
|
||||
|
||||
pci_disable_rom(ha->pdev);
|
||||
|
|
|
@ -1620,7 +1620,7 @@ qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
|
|||
char lwstr[6];
|
||||
uint16_t lnk;
|
||||
|
||||
pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
|
||||
pcie_reg = pci_pcie_cap(ha->pdev);
|
||||
pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
|
||||
ha->link_width = (lnk >> 4) & 0x3f;
|
||||
|
||||
|
@ -2528,7 +2528,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
|
|||
}
|
||||
|
||||
/* Negotiated Link width */
|
||||
pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
|
||||
pcie_cap = pci_pcie_cap(ha->pdev);
|
||||
pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
|
||||
ha->link_width = (lnk >> 4) & 0x3f;
|
||||
|
||||
|
|
|
@ -482,12 +482,12 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
|
|||
uint32_t pci_bus;
|
||||
int pcie_reg;
|
||||
|
||||
pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
|
||||
pcie_reg = pci_pcie_cap(ha->pdev);
|
||||
if (pcie_reg) {
|
||||
char lwstr[6];
|
||||
uint16_t pcie_lstat, lspeed, lwidth;
|
||||
|
||||
pcie_reg += 0x12;
|
||||
pcie_reg += PCI_EXP_LNKCAP;
|
||||
pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
|
||||
lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
|
||||
lwidth = (pcie_lstat &
|
||||
|
|
|
@ -279,6 +279,7 @@ struct qla_ddb_index {
|
|||
struct list_head list;
|
||||
uint16_t fw_ddb_idx;
|
||||
struct dev_db_entry fw_ddb;
|
||||
uint8_t flash_isid[6];
|
||||
};
|
||||
|
||||
#define DDB_IPADDR_LEN 64
|
||||
|
|
|
@ -183,7 +183,8 @@ int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
|
|||
int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
|
||||
struct ddb_entry *ddb_entry, uint32_t state);
|
||||
void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
|
||||
int qla4xxx_post_aen_work(struct scsi_qla_host *ha, uint32_t aen_code,
|
||||
int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
|
||||
enum iscsi_host_event_code aen_code,
|
||||
uint32_t data_size, uint8_t *data);
|
||||
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
|
||||
uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
|
||||
|
|
|
@ -126,7 +126,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
|
|||
|
||||
qla4xxx_init_response_q_entries(ha);
|
||||
|
||||
/* Initialize mabilbox active array */
|
||||
/* Initialize mailbox active array */
|
||||
for (i = 0; i < MAX_MRB; i++)
|
||||
ha->active_mrb_array[i] = NULL;
|
||||
|
||||
|
|
|
@ -1590,7 +1590,7 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
|
|||
}
|
||||
|
||||
/* Negotiated Link width */
|
||||
pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
|
||||
pcie_cap = pci_pcie_cap(ha->pdev);
|
||||
pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
|
||||
ha->link_width = (lnk >> 4) & 0x3f;
|
||||
|
||||
|
|
|
@ -4299,7 +4299,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
|
|||
}
|
||||
|
||||
static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
|
||||
struct ql4_tuple_ddb *tddb)
|
||||
struct ql4_tuple_ddb *tddb,
|
||||
uint8_t *flash_isid)
|
||||
{
|
||||
uint16_t options = 0;
|
||||
|
||||
|
@ -4314,7 +4315,12 @@ static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
|
|||
sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
|
||||
|
||||
tddb->port = le16_to_cpu(fw_ddb_entry->port);
|
||||
memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid));
|
||||
|
||||
if (flash_isid == NULL)
|
||||
memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
|
||||
sizeof(tddb->isid));
|
||||
else
|
||||
memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
|
||||
}
|
||||
|
||||
static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
|
||||
|
@ -4385,7 +4391,7 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
|
|||
goto exit_check;
|
||||
}
|
||||
|
||||
qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
|
||||
qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
|
||||
|
||||
for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
|
||||
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
|
||||
|
@ -4407,6 +4413,102 @@ exit_check:
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_check_existing_isid - check if target with same isid exist
|
||||
* in target list
|
||||
* @list_nt: list of target
|
||||
* @isid: isid to check
|
||||
*
|
||||
* This routine return QLA_SUCCESS if target with same isid exist
|
||||
**/
|
||||
static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
|
||||
{
|
||||
struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
|
||||
struct dev_db_entry *fw_ddb_entry;
|
||||
|
||||
list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
|
||||
fw_ddb_entry = &nt_ddb_idx->fw_ddb;
|
||||
|
||||
if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
|
||||
sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
}
|
||||
return QLA_ERROR;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_update_isid - compare ddbs and updated isid
|
||||
* @ha: Pointer to host adapter structure.
|
||||
* @list_nt: list of nt target
|
||||
* @fw_ddb_entry: firmware ddb entry
|
||||
*
|
||||
* This routine update isid if ddbs have same iqn, same isid and
|
||||
* different IP addr.
|
||||
* Return QLA_SUCCESS if isid is updated.
|
||||
**/
|
||||
static int qla4xxx_update_isid(struct scsi_qla_host *ha,
|
||||
struct list_head *list_nt,
|
||||
struct dev_db_entry *fw_ddb_entry)
|
||||
{
|
||||
uint8_t base_value, i;
|
||||
|
||||
base_value = fw_ddb_entry->isid[1] & 0x1f;
|
||||
for (i = 0; i < 8; i++) {
|
||||
fw_ddb_entry->isid[1] = (base_value | (i << 5));
|
||||
if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
|
||||
break;
|
||||
}
|
||||
|
||||
if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
|
||||
return QLA_ERROR;
|
||||
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_should_update_isid - check if isid need to update
|
||||
* @ha: Pointer to host adapter structure.
|
||||
* @old_tddb: ddb tuple
|
||||
* @new_tddb: ddb tuple
|
||||
*
|
||||
* Return QLA_SUCCESS if different IP, different PORT, same iqn,
|
||||
* same isid
|
||||
**/
|
||||
static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
|
||||
struct ql4_tuple_ddb *old_tddb,
|
||||
struct ql4_tuple_ddb *new_tddb)
|
||||
{
|
||||
if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
|
||||
/* Same ip */
|
||||
if (old_tddb->port == new_tddb->port)
|
||||
return QLA_ERROR;
|
||||
}
|
||||
|
||||
if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
|
||||
/* different iqn */
|
||||
return QLA_ERROR;
|
||||
|
||||
if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
|
||||
sizeof(old_tddb->isid)))
|
||||
/* different isid */
|
||||
return QLA_ERROR;
|
||||
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
|
||||
* @ha: Pointer to host adapter structure.
|
||||
* @list_nt: list of nt target.
|
||||
* @fw_ddb_entry: firmware ddb entry.
|
||||
*
|
||||
* This routine check if fw_ddb_entry already exists in list_nt to avoid
|
||||
* duplicate ddb in list_nt.
|
||||
* Return QLA_SUCCESS if duplicate ddb exit in list_nl.
|
||||
* Note: This function also update isid of DDB if required.
|
||||
**/
|
||||
|
||||
static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
|
||||
struct list_head *list_nt,
|
||||
struct dev_db_entry *fw_ddb_entry)
|
||||
|
@ -4414,7 +4516,7 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
|
|||
struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
|
||||
struct ql4_tuple_ddb *fw_tddb = NULL;
|
||||
struct ql4_tuple_ddb *tmp_tddb = NULL;
|
||||
int ret = QLA_ERROR;
|
||||
int rval, ret = QLA_ERROR;
|
||||
|
||||
fw_tddb = vzalloc(sizeof(*fw_tddb));
|
||||
if (!fw_tddb) {
|
||||
|
@ -4432,12 +4534,28 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
|
|||
goto exit_check;
|
||||
}
|
||||
|
||||
qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
|
||||
qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
|
||||
|
||||
list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
|
||||
qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
|
||||
if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) {
|
||||
ret = QLA_SUCCESS; /* found */
|
||||
qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
|
||||
nt_ddb_idx->flash_isid);
|
||||
ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
|
||||
/* found duplicate ddb */
|
||||
if (ret == QLA_SUCCESS)
|
||||
goto exit_check;
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
|
||||
qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
|
||||
|
||||
ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
|
||||
if (ret == QLA_SUCCESS) {
|
||||
rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
|
||||
if (rval == QLA_SUCCESS)
|
||||
ret = QLA_ERROR;
|
||||
else
|
||||
ret = QLA_SUCCESS;
|
||||
|
||||
goto exit_check;
|
||||
}
|
||||
}
|
||||
|
@ -4788,14 +4906,26 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
|
|||
|
||||
nt_ddb_idx->fw_ddb_idx = idx;
|
||||
|
||||
memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
|
||||
sizeof(struct dev_db_entry));
|
||||
/* Copy original isid as it may get updated in function
|
||||
* qla4xxx_update_isid(). We need original isid in
|
||||
* function qla4xxx_compare_tuple_ddb to find duplicate
|
||||
* target */
|
||||
memcpy(&nt_ddb_idx->flash_isid[0],
|
||||
&fw_ddb_entry->isid[0],
|
||||
sizeof(nt_ddb_idx->flash_isid));
|
||||
|
||||
if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
|
||||
fw_ddb_entry) == QLA_SUCCESS) {
|
||||
ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
|
||||
fw_ddb_entry);
|
||||
if (ret == QLA_SUCCESS) {
|
||||
/* free nt_ddb_idx and do not add to list_nt */
|
||||
vfree(nt_ddb_idx);
|
||||
goto continue_next_nt;
|
||||
}
|
||||
|
||||
/* Copy updated isid */
|
||||
memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
|
||||
sizeof(struct dev_db_entry));
|
||||
|
||||
list_add_tail(&nt_ddb_idx->list, list_nt);
|
||||
} else if (is_reset == RESET_ADAPTER) {
|
||||
if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
|
||||
|
|
|
@ -5,4 +5,4 @@
|
|||
* See LICENSE.qla4xxx for copyright and licensing details.
|
||||
*/
|
||||
|
||||
#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
|
||||
#define QLA4XXX_DRIVER_VERSION "5.02.00-k18"
|
||||
|
|
|
@@ -54,6 +54,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/async.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

@@ -91,7 +92,7 @@ EXPORT_SYMBOL(scsi_logging_level);
#endif

/* sd, scsi core and power management need to coordinate flushing async actions */
LIST_HEAD(scsi_sd_probe_domain);
ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);

/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.

@@ -1354,6 +1355,7 @@ static void __exit exit_scsi(void)
scsi_exit_devinfo();
scsi_exit_procfs();
scsi_exit_queue();
async_unregister_domain(&scsi_sd_probe_domain);
}

subsys_initcall(init_scsi);
|
|
|
@@ -1687,6 +1687,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* requests are started.
*/
scsi_run_host_queues(shost);

/*
* if eh is active and host_eh_scheduled is pending we need to re-run
* recovery. we do this check after scsi_run_host_queues() to allow
* everything pent up since the last eh run a chance to make forward
* progress before we sync again. Either we'll immediately re-run
* recovery or scsi_device_unbusy() will wake us again when these
* pending commands complete.
*/
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_eh_scheduled)
if (scsi_host_set_state(shost, SHOST_RECOVERY))
WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
spin_unlock_irqrestore(shost->host_lock, flags);
}

/**

@@ -1804,15 +1818,14 @@ int scsi_error_handler(void *data)
* We never actually get interrupted because kthread_run
* disables signal delivery for the created thread.
*/
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
shost->host_failed != shost->host_busy) {
SCSI_LOG_ERROR_RECOVERY(1,
printk("Error handler scsi_eh_%d sleeping\n",
shost->host_no));
schedule();
set_current_state(TASK_INTERRUPTIBLE);
continue;
}

@@ -1849,7 +1862,6 @@ int scsi_error_handler(void *data)
scsi_restart_operations(shost);
if (!shost->eh_noresume)
scsi_autopm_put_host(shost);
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
|
|
|
@@ -109,7 +109,7 @@ static void scsi_unprep_request(struct request *req)
* for a requeue after completion, which should only occur in this
* file.
*/
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;

@@ -155,15 +155,14 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)

/*
* Requeue this command. It will go before all other commands
* that are already in the queue.
* that are already in the queue. Schedule requeue work under
* lock such that the kblockd_schedule_work() call happens
* before blk_cleanup_queue() finishes.
*/
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
spin_unlock_irqrestore(q->queue_lock, flags);

kblockd_schedule_work(q, &device->requeue_work);

return 0;
spin_unlock_irqrestore(q->queue_lock, flags);
}

/*

@@ -185,9 +184,9 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
* Notes: This could be called either from an interrupt context or a
* normal process context.
*/
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
return __scsi_queue_insert(cmd, reason, 1);
__scsi_queue_insert(cmd, reason, 1);
}
/**
* scsi_execute - insert request and wait for the result
||||
|
@ -406,10 +405,6 @@ static void scsi_run_queue(struct request_queue *q)
|
|||
LIST_HEAD(starved_list);
|
||||
unsigned long flags;
|
||||
|
||||
/* if the device is dead, sdev will be NULL, so no queue to run */
|
||||
if (!sdev)
|
||||
return;
|
||||
|
||||
shost = sdev->host;
|
||||
if (scsi_target(sdev)->single_lun)
|
||||
scsi_single_lun_run(sdev);
|
||||
|
@ -483,15 +478,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
|
|||
*/
|
||||
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct scsi_device *sdev = cmd->device;
|
||||
struct request *req = cmd->request;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* We need to hold a reference on the device to avoid the queue being
|
||||
* killed after the unlock and before scsi_run_queue is invoked which
|
||||
* may happen because scsi_unprep_request() puts the command which
|
||||
* releases its reference on the device.
|
||||
*/
|
||||
get_device(&sdev->sdev_gendev);
|
||||
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
scsi_unprep_request(req);
|
||||
blk_requeue_request(q, req);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
scsi_run_queue(q);
|
||||
|
||||
put_device(&sdev->sdev_gendev);
|
||||
}
|
||||
|
||||
void scsi_next_command(struct scsi_cmnd *cmd)
|
||||
|
@ -1173,6 +1179,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
|
|||
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
|
||||
switch (sdev->sdev_state) {
|
||||
case SDEV_OFFLINE:
|
||||
case SDEV_TRANSPORT_OFFLINE:
|
||||
/*
|
||||
* If the device is offline we refuse to process any
|
||||
* commands. The device must be brought online
|
||||
|
@ -1370,16 +1377,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
|
|||
* may be changed after request stacking drivers call the function,
|
||||
* regardless of taking lock or not.
|
||||
*
|
||||
* When scsi can't dispatch I/Os anymore and needs to kill I/Os
|
||||
* (e.g. !sdev), scsi needs to return 'not busy'.
|
||||
* Otherwise, request stacking drivers may hold requests forever.
|
||||
* When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
|
||||
* needs to return 'not busy'. Otherwise, request stacking drivers
|
||||
* may hold requests forever.
|
||||
*/
|
||||
static int scsi_lld_busy(struct request_queue *q)
|
||||
{
|
||||
struct scsi_device *sdev = q->queuedata;
|
||||
struct Scsi_Host *shost;
|
||||
|
||||
if (!sdev)
|
||||
if (blk_queue_dead(q))
|
||||
return 0;
|
||||
|
||||
shost = sdev->host;
|
||||
|
@ -1490,12 +1497,6 @@ static void scsi_request_fn(struct request_queue *q)
|
|||
struct scsi_cmnd *cmd;
|
||||
struct request *req;
|
||||
|
||||
if (!sdev) {
|
||||
while ((req = blk_peek_request(q)) != NULL)
|
||||
scsi_kill_request(req, q);
|
||||
return;
|
||||
}
|
||||
|
||||
if(!get_device(&sdev->sdev_gendev))
|
||||
/* We must be tearing the block queue down already */
|
||||
return;
|
||||
|
@ -1697,20 +1698,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
|
|||
return q;
|
||||
}
|
||||
|
||||
void scsi_free_queue(struct request_queue *q)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
WARN_ON(q->queuedata);
|
||||
|
||||
/* cause scsi_request_fn() to kill all non-finished requests */
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
q->request_fn(q);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
blk_cleanup_queue(q);
|
||||
}
|
||||
|
||||
/*
|
||||
* Function: scsi_block_requests()
|
||||
*
|
||||
|
@ -2081,6 +2068,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
|
|||
switch (oldstate) {
|
||||
case SDEV_CREATED:
|
||||
case SDEV_OFFLINE:
|
||||
case SDEV_TRANSPORT_OFFLINE:
|
||||
case SDEV_QUIESCE:
|
||||
case SDEV_BLOCK:
|
||||
break;
|
||||
|
@ -2093,6 +2081,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
|
|||
switch (oldstate) {
|
||||
case SDEV_RUNNING:
|
||||
case SDEV_OFFLINE:
|
||||
case SDEV_TRANSPORT_OFFLINE:
|
||||
break;
|
||||
default:
|
||||
goto illegal;
|
||||
|
@ -2100,6 +2089,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
|
|||
break;
|
||||
|
||||
case SDEV_OFFLINE:
|
||||
case SDEV_TRANSPORT_OFFLINE:
|
||||
switch (oldstate) {
|
||||
case SDEV_CREATED:
|
||||
case SDEV_RUNNING:
|
||||
|
@ -2136,6 +2126,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
|
|||
case SDEV_RUNNING:
|
||||
case SDEV_QUIESCE:
|
||||
case SDEV_OFFLINE:
|
||||
case SDEV_TRANSPORT_OFFLINE:
|
||||
case SDEV_BLOCK:
|
||||
break;
|
||||
default:
|
||||
|
@ -2148,6 +2139,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
|
|||
case SDEV_CREATED:
|
||||
case SDEV_RUNNING:
|
||||
case SDEV_OFFLINE:
|
||||
case SDEV_TRANSPORT_OFFLINE:
|
||||
case SDEV_CANCEL:
|
||||
break;
|
||||
default:
|
||||
|
@ -2405,7 +2397,6 @@ EXPORT_SYMBOL(scsi_target_resume);
|
|||
* (which must be a legal transition). When the device is in this
|
||||
* state, all commands are deferred until the scsi lld reenables
|
||||
* the device with scsi_device_unblock or device_block_tmo fires.
|
||||
* This routine assumes the host_lock is held on entry.
|
||||
*/
|
||||
int
|
||||
scsi_internal_device_block(struct scsi_device *sdev)
|
||||
|
@ -2438,6 +2429,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
|
|||
/**
|
||||
* scsi_internal_device_unblock - resume a device after a block request
|
||||
* @sdev: device to resume
|
||||
* @new_state: state to set devices to after unblocking
|
||||
*
|
||||
* Called by scsi lld's or the midlayer to restart the device queue
|
||||
* for the previously suspended scsi device. Called from interrupt or
|
||||
|
@ -2447,25 +2439,29 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
|
|||
*
|
||||
* Notes:
|
||||
* This routine transitions the device to the SDEV_RUNNING state
|
||||
* (which must be a legal transition) allowing the midlayer to
|
||||
* goose the queue for this device. This routine assumes the
|
||||
* host_lock is held upon entry.
|
||||
* or to one of the offline states (which must be a legal transition)
|
||||
* allowing the midlayer to goose the queue for this device.
|
||||
*/
|
||||
int
|
||||
scsi_internal_device_unblock(struct scsi_device *sdev)
|
||||
scsi_internal_device_unblock(struct scsi_device *sdev,
|
||||
enum scsi_device_state new_state)
|
||||
{
|
||||
struct request_queue *q = sdev->request_queue;
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* Try to transition the scsi device to SDEV_RUNNING
|
||||
* and goose the device queue if successful.
|
||||
|
||||
/*
|
||||
* Try to transition the scsi device to SDEV_RUNNING or one of the
|
||||
* offlined states and goose the device queue if successful.
|
||||
*/
|
||||
if (sdev->sdev_state == SDEV_BLOCK)
|
||||
sdev->sdev_state = SDEV_RUNNING;
|
||||
else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
|
||||
sdev->sdev_state = SDEV_CREATED;
|
||||
else if (sdev->sdev_state != SDEV_CANCEL &&
|
||||
sdev->sdev_state = new_state;
|
||||
else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
|
||||
if (new_state == SDEV_TRANSPORT_OFFLINE ||
|
||||
new_state == SDEV_OFFLINE)
|
||||
sdev->sdev_state = new_state;
|
||||
else
|
||||
sdev->sdev_state = SDEV_CREATED;
|
||||
} else if (sdev->sdev_state != SDEV_CANCEL &&
|
||||
sdev->sdev_state != SDEV_OFFLINE)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -2506,26 +2502,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block);
|
|||
static void
|
||||
device_unblock(struct scsi_device *sdev, void *data)
|
||||
{
|
||||
scsi_internal_device_unblock(sdev);
|
||||
scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
|
||||
}
|
||||
|
||||
static int
|
||||
target_unblock(struct device *dev, void *data)
|
||||
{
|
||||
if (scsi_is_target_device(dev))
|
||||
starget_for_each_device(to_scsi_target(dev), NULL,
|
||||
starget_for_each_device(to_scsi_target(dev), data,
|
||||
device_unblock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
scsi_target_unblock(struct device *dev)
|
||||
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
|
||||
{
|
||||
if (scsi_is_target_device(dev))
|
||||
starget_for_each_device(to_scsi_target(dev), NULL,
|
||||
starget_for_each_device(to_scsi_target(dev), &new_state,
|
||||
device_unblock);
|
||||
else
|
||||
device_for_each_child(dev, NULL, target_unblock);
|
||||
device_for_each_child(dev, &new_state, target_unblock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(scsi_target_unblock);
|
||||
|
||||
|
|
|
@ -76,23 +76,24 @@ static int scsi_bus_resume_common(struct device *dev)
|
|||
{
|
||||
int err = 0;
|
||||
|
||||
if (scsi_is_sdev_device(dev)) {
|
||||
/*
|
||||
* Parent device may have runtime suspended as soon as
|
||||
* it is woken up during the system resume.
|
||||
*
|
||||
* Resume it on behalf of child.
|
||||
*/
|
||||
pm_runtime_get_sync(dev->parent);
|
||||
err = scsi_dev_type_resume(dev);
|
||||
pm_runtime_put_sync(dev->parent);
|
||||
}
|
||||
/*
|
||||
* Parent device may have runtime suspended as soon as
|
||||
* it is woken up during the system resume.
|
||||
*
|
||||
* Resume it on behalf of child.
|
||||
*/
|
||||
pm_runtime_get_sync(dev->parent);
|
||||
|
||||
if (scsi_is_sdev_device(dev))
|
||||
err = scsi_dev_type_resume(dev);
|
||||
if (err == 0) {
|
||||
pm_runtime_disable(dev);
|
||||
pm_runtime_set_active(dev);
|
||||
pm_runtime_enable(dev);
|
||||
}
|
||||
|
||||
pm_runtime_put_sync(dev->parent);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@@ -2,6 +2,8 @@
#define _SCSI_PRIV_H

#include <linux/device.h>
#include <linux/async.h>
#include <scsi/scsi_device.h>

struct request_queue;
struct request;

@@ -79,12 +81,11 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev);
extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_next_command(struct scsi_cmnd *cmd);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
extern void scsi_free_queue(struct request_queue *q);
extern int scsi_init_queue(void);
extern void scsi_exit_queue(void);
struct request_queue;

@@ -163,7 +164,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM_RUNTIME */

extern struct list_head scsi_sd_probe_domain;
extern struct async_domain scsi_sd_probe_domain;

/*
* internal scsi timeout functions: for use by mid-layer and transport

@@ -172,6 +173,7 @@ extern struct list_head scsi_sd_probe_domain;

#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
extern int scsi_internal_device_block(struct scsi_device *sdev);
extern int scsi_internal_device_unblock(struct scsi_device *sdev);
extern int scsi_internal_device_unblock(struct scsi_device *sdev,
enum scsi_device_state new_state);

#endif /* _SCSI_PRIV_H */
|
|
|
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)

do {
if (list_empty(&scanning_hosts))
goto out;
return 0;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.

@@ -179,26 +179,11 @@ int scsi_complete_async_scans(void)
}
done:
spin_unlock(&async_scan_lock);

kfree(data);

out:
async_synchronize_full_domain(&scsi_sd_probe_domain);

return 0;
}

/* Only exported for the benefit of scsi_wait_scan */
EXPORT_SYMBOL_GPL(scsi_complete_async_scans);

#ifndef MODULE
/*
* For async scanning we need to wait for all the scans to complete before
* trying to mount the root fs. Otherwise non-modular drivers may not be ready
* yet.
*/
late_initcall(scsi_complete_async_scans);
#endif

/**
* scsi_unlock_floptical - unlock device via a special MODE SENSE command
* @sdev: scsi device to send command to
||||
|
@ -1717,6 +1702,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
|
|||
{
|
||||
struct scsi_device *sdev;
|
||||
shost_for_each_device(sdev, shost) {
|
||||
/* target removed before the device could be added */
|
||||
if (sdev->sdev_state == SDEV_DEL)
|
||||
continue;
|
||||
if (!scsi_host_scan_allowed(shost) ||
|
||||
scsi_sysfs_add_sdev(sdev) != 0)
|
||||
__scsi_remove_device(sdev);
|
||||
|
@ -1842,14 +1830,13 @@ static void do_scsi_scan_host(struct Scsi_Host *shost)
|
|||
}
|
||||
}
|
||||
|
||||
static int do_scan_async(void *_data)
|
||||
static void do_scan_async(void *_data, async_cookie_t c)
|
||||
{
|
||||
struct async_scan_data *data = _data;
|
||||
struct Scsi_Host *shost = data->shost;
|
||||
|
||||
do_scsi_scan_host(shost);
|
||||
scsi_finish_async_scan(data);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1858,7 +1845,6 @@ static int do_scan_async(void *_data)
|
|||
**/
|
||||
void scsi_scan_host(struct Scsi_Host *shost)
|
||||
{
|
||||
struct task_struct *p;
|
||||
struct async_scan_data *data;
|
||||
|
||||
if (strncmp(scsi_scan_type, "none", 4) == 0)
|
||||
|
@ -1873,9 +1859,11 @@ void scsi_scan_host(struct Scsi_Host *shost)
|
|||
return;
|
||||
}
|
||||
|
||||
p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
|
||||
if (IS_ERR(p))
|
||||
do_scan_async(data);
|
||||
/* register with the async subsystem so wait_for_device_probe()
|
||||
* will flush this work
|
||||
*/
|
||||
async_schedule(do_scan_async, data);
|
||||
|
||||
/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_scan_host);
|
||||
|
|
|
@ -35,6 +35,7 @@ static const struct {
|
|||
{ SDEV_DEL, "deleted" },
|
||||
{ SDEV_QUIESCE, "quiesce" },
|
||||
{ SDEV_OFFLINE, "offline" },
|
||||
{ SDEV_TRANSPORT_OFFLINE, "transport-offline" },
|
||||
{ SDEV_BLOCK, "blocked" },
|
||||
{ SDEV_CREATED_BLOCK, "created-blocked" },
|
||||
};
|
||||
|
@ -966,16 +967,20 @@ void __scsi_remove_device(struct scsi_device *sdev)
|
|||
device_del(dev);
|
||||
} else
|
||||
put_device(&sdev->sdev_dev);
|
||||
|
||||
/*
|
||||
* Stop accepting new requests and wait until all queuecommand() and
|
||||
* scsi_run_queue() invocations have finished before tearing down the
|
||||
* device.
|
||||
*/
|
||||
scsi_device_set_state(sdev, SDEV_DEL);
|
||||
blk_cleanup_queue(sdev->request_queue);
|
||||
cancel_work_sync(&sdev->requeue_work);
|
||||
|
||||
if (sdev->host->hostt->slave_destroy)
|
||||
sdev->host->hostt->slave_destroy(sdev);
|
||||
transport_destroy_device(dev);
|
||||
|
||||
/* cause the request function to reject all I/O requests */
|
||||
sdev->request_queue->queuedata = NULL;
|
||||
|
||||
/* Freeing the queue signals to block that we're done */
|
||||
scsi_free_queue(sdev->request_queue);
|
||||
put_device(dev);
|
||||
}
|
||||
|
||||
|
@ -1000,7 +1005,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
|
|||
struct scsi_device *sdev;
|
||||
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
starget->reap_ref++;
|
||||
restart:
|
||||
list_for_each_entry(sdev, &shost->__devices, siblings) {
|
||||
if (sdev->channel != starget->channel ||
|
||||
|
@ -1014,14 +1018,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
|
|||
goto restart;
|
||||
}
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
scsi_target_reap(starget);
|
||||
}
|
||||
|
||||
static int __remove_child (struct device * dev, void * data)
|
||||
{
|
||||
if (scsi_is_target_device(dev))
|
||||
__scsi_remove_target(to_scsi_target(dev));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1034,14 +1030,34 @@ static int __remove_child (struct device * dev, void * data)
|
|||
*/
|
||||
void scsi_remove_target(struct device *dev)
|
||||
{
|
||||
if (scsi_is_target_device(dev)) {
|
||||
__scsi_remove_target(to_scsi_target(dev));
|
||||
return;
|
||||
}
|
||||
struct Scsi_Host *shost = dev_to_shost(dev->parent);
|
||||
struct scsi_target *starget, *found;
|
||||
unsigned long flags;
|
||||
|
||||
get_device(dev);
|
||||
device_for_each_child(dev, NULL, __remove_child);
|
||||
put_device(dev);
|
||||
restart:
|
||||
found = NULL;
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
list_for_each_entry(starget, &shost->__targets, siblings) {
|
||||
if (starget->state == STARGET_DEL)
|
||||
continue;
|
||||
if (starget->dev.parent == dev || &starget->dev == dev) {
|
||||
found = starget;
|
||||
found->reap_ref++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
|
||||
if (found) {
|
||||
__scsi_remove_target(found);
|
||||
scsi_target_reap(found);
|
||||
/* in the case where @dev has multiple starget children,
|
||||
* continue removing.
|
||||
*
|
||||
* FIXME: does such a case exist?
|
||||
*/
|
||||
goto restart;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_remove_target);
|
||||
|
||||
|
|
|
@ -1744,6 +1744,15 @@ fc_host_statistic(fcp_output_requests);
|
|||
fc_host_statistic(fcp_control_requests);
|
||||
fc_host_statistic(fcp_input_megabytes);
|
||||
fc_host_statistic(fcp_output_megabytes);
|
||||
fc_host_statistic(fcp_packet_alloc_failures);
|
||||
fc_host_statistic(fcp_packet_aborts);
|
||||
fc_host_statistic(fcp_frame_alloc_failures);
|
||||
fc_host_statistic(fc_no_free_exch);
|
||||
fc_host_statistic(fc_no_free_exch_xid);
|
||||
fc_host_statistic(fc_xid_not_found);
|
||||
fc_host_statistic(fc_xid_busy);
|
||||
fc_host_statistic(fc_seq_not_found);
|
||||
fc_host_statistic(fc_non_bls_resp);
|
||||
|
||||
static ssize_t
|
||||
fc_reset_statistics(struct device *dev, struct device_attribute *attr,
|
||||
|
@ -1784,6 +1793,15 @@ static struct attribute *fc_statistics_attrs[] = {
|
|||
&device_attr_host_fcp_control_requests.attr,
|
||||
&device_attr_host_fcp_input_megabytes.attr,
|
||||
&device_attr_host_fcp_output_megabytes.attr,
|
||||
&device_attr_host_fcp_packet_alloc_failures.attr,
|
||||
&device_attr_host_fcp_packet_aborts.attr,
|
||||
&device_attr_host_fcp_frame_alloc_failures.attr,
|
||||
&device_attr_host_fc_no_free_exch.attr,
|
||||
&device_attr_host_fc_no_free_exch_xid.attr,
|
||||
&device_attr_host_fc_xid_not_found.attr,
|
||||
&device_attr_host_fc_xid_busy.attr,
|
||||
&device_attr_host_fc_seq_not_found.attr,
|
||||
&device_attr_host_fc_non_bls_resp.attr,
|
||||
&device_attr_host_reset_statistics.attr,
|
||||
NULL
|
||||
};
|
||||
|
@ -2477,11 +2495,9 @@ static void fc_terminate_rport_io(struct fc_rport *rport)
|
|||
i->f->terminate_rport_io(rport);
|
||||
|
||||
/*
|
||||
* must unblock to flush queued IO. The caller will have set
|
||||
* the port_state or flags, so that fc_remote_port_chkready will
|
||||
* fail IO.
|
||||
* Must unblock to flush queued IO. scsi-ml will fail incoming reqs.
|
||||
*/
|
||||
scsi_target_unblock(&rport->dev);
|
||||
scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2812,8 +2828,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
|
|||
|
||||
/* if target, initiate a scan */
|
||||
if (rport->scsi_target_id != -1) {
|
||||
scsi_target_unblock(&rport->dev);
|
||||
|
||||
scsi_target_unblock(&rport->dev,
|
||||
SDEV_RUNNING);
|
||||
spin_lock_irqsave(shost->host_lock,
|
||||
flags);
|
||||
rport->flags |= FC_RPORT_SCAN_PENDING;
|
||||
|
@ -2882,7 +2898,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
|
|||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
|
||||
if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
|
||||
scsi_target_unblock(&rport->dev);
|
||||
scsi_target_unblock(&rport->dev, SDEV_RUNNING);
|
||||
|
||||
/* initiate a scan of the target */
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
|
@ -3087,7 +3103,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
|
|||
/* ensure any stgt delete functions are done */
|
||||
fc_flush_work(shost);
|
||||
|
||||
scsi_target_unblock(&rport->dev);
|
||||
scsi_target_unblock(&rport->dev, SDEV_RUNNING);
|
||||
/* initiate a scan of the target */
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
rport->flags |= FC_RPORT_SCAN_PENDING;
|
||||
|
@ -3131,7 +3147,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
|
|||
"blocked FC remote port time out: no longer"
|
||||
" a FCP target, removing starget\n");
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
scsi_target_unblock(&rport->dev);
|
||||
scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
|
||||
fc_queue_work(shost, &rport->stgt_delete_work);
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -907,7 +907,7 @@ static void session_recovery_timedout(struct work_struct *work)
|
|||
session->transport->session_recovery_timedout(session);
|
||||
|
||||
ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
|
||||
scsi_target_unblock(&session->dev);
|
||||
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
|
||||
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
|
||||
}
|
||||
|
||||
|
@ -930,7 +930,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
|
|||
session->state = ISCSI_SESSION_LOGGED_IN;
|
||||
spin_unlock_irqrestore(&session->lock, flags);
|
||||
/* start IO */
|
||||
scsi_target_unblock(&session->dev);
|
||||
scsi_target_unblock(&session->dev, SDEV_RUNNING);
|
||||
/*
|
||||
* Only do kernel scanning if the driver is properly hooked into
|
||||
* the async scanning code (drivers like iscsi_tcp do login and
|
||||
|
@ -1180,7 +1180,7 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
|
|||
session->state = ISCSI_SESSION_FREE;
|
||||
spin_unlock_irqrestore(&session->lock, flags);
|
||||
|
||||
scsi_target_unblock(&session->dev);
|
||||
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
|
||||
/* flush running scans then delete devices */
|
||||
scsi_flush_work(shost);
|
||||
__iscsi_unbind_session(&session->unbind_work);
|
||||
|
|
|
@@ -1,37 +0,0 @@
/*
* scsi_wait_scan.c
*
* Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
*
* This is a simple module to wait until all the async scans are
* complete. The idea is to use it in initrd/initramfs scripts. You
* modprobe it after all the modprobes of the root SCSI drivers and it
* will wait until they have all finished scanning their busses before
* allowing the boot to proceed
*/

#include <linux/module.h>
#include <linux/device.h>
#include "scsi_priv.h"

static int __init wait_scan_init(void)
{
/*
* First we need to wait for device probing to finish;
* the drivers we just loaded might just still be probing
* and might not yet have reached the scsi async scanning
*/
wait_for_device_probe();
return 0;
}

static void __exit wait_scan_exit(void)
{
}

MODULE_DESCRIPTION("SCSI wait for scans");
MODULE_AUTHOR("James Bottomley");
MODULE_LICENSE("GPL");

late_initcall(wait_scan_init);
module_exit(wait_scan_exit);
|
@@ -2261,8 +2261,13 @@ bad_sense:
sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");

defaults:
sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
sdkp->WCE = 0;
if (sdp->wce_default_on) {
sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n");
sdkp->WCE = 1;
} else {
sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
sdkp->WCE = 0;
}
sdkp->RCD = 0;
sdkp->DPOFUA = 0;
}

@@ -2704,6 +2709,7 @@ static int sd_probe(struct device *dev)
sdkp->disk = gd;
sdkp->index = index;
atomic_set(&sdkp->openers, 0);
atomic_set(&sdkp->device->ioerr_cnt, 0);

if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
|
|
@ -232,11 +232,11 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
|
|||
* the host controller
|
||||
* @reg_hcs - host controller status register value
|
||||
*
|
||||
* Returns 0 if device present, non-zero if no device detected
|
||||
* Returns 1 if device present, 0 if no device detected
|
||||
*/
|
||||
static inline int ufshcd_is_device_present(u32 reg_hcs)
|
||||
{
|
||||
return (DEVICE_PRESENT & reg_hcs) ? 0 : -1;
|
||||
return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -911,7 +911,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
|
|||
|
||||
/* check if device present */
|
||||
reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
|
||||
if (ufshcd_is_device_present(reg)) {
|
||||
if (!ufshcd_is_device_present(reg)) {
|
||||
dev_err(&hba->pdev->dev, "cc: Device not present\n");
|
||||
err = -ENXIO;
|
||||
goto out;
|
||||
|
@ -1163,6 +1163,8 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
|
|||
if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
|
||||
task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
|
||||
task_result = FAILED;
|
||||
else
|
||||
task_result = SUCCESS;
|
||||
} else {
|
||||
task_result = FAILED;
|
||||
dev_err(&hba->pdev->dev,
|
||||
|
@ -1556,7 +1558,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
|
|||
goto out;
|
||||
}
|
||||
clear_bit(free_slot, &hba->tm_condition);
|
||||
return ufshcd_task_req_compl(hba, free_slot);
|
||||
err = ufshcd_task_req_compl(hba, free_slot);
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
@ -1580,7 +1582,7 @@ static int ufshcd_device_reset(struct scsi_cmnd *cmd)
|
|||
tag = cmd->request->tag;
|
||||
|
||||
err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
|
||||
if (err)
|
||||
if (err == FAILED)
|
||||
goto out;
|
||||
|
||||
for (pos = 0; pos < hba->nutrs; pos++) {
|
||||
|
@ -1620,7 +1622,7 @@ static int ufshcd_host_reset(struct scsi_cmnd *cmd)
|
|||
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
|
||||
return SUCCESS;
|
||||
|
||||
return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED;
|
||||
return ufshcd_do_reset(hba);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1652,7 +1654,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
|
|||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
|
||||
err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
|
||||
if (err)
|
||||
if (err == FAILED)
|
||||
goto out;
|
||||
|
||||
scsi_dma_unmap(cmd);
|
||||
|
@ -1953,24 +1955,7 @@ static struct pci_driver ufshcd_pci_driver = {
|
|||
#endif
|
||||
};
|
||||
|
||||
/**
|
||||
* ufshcd_init - Driver registration routine
|
||||
*/
|
||||
static int __init ufshcd_init(void)
|
||||
{
|
||||
return pci_register_driver(&ufshcd_pci_driver);
|
||||
}
|
||||
module_init(ufshcd_init);
|
||||
|
||||
/**
|
||||
* ufshcd_exit - Driver exit clean-up routine
|
||||
*/
|
||||
static void __exit ufshcd_exit(void)
|
||||
{
|
||||
pci_unregister_driver(&ufshcd_pci_driver);
|
||||
}
|
||||
module_exit(ufshcd_exit);
|
||||
|
||||
module_pci_driver(ufshcd_pci_driver);
|
||||
|
||||
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
|
||||
"Vinayak Holikatti <h.vinayak@samsung.com>");
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <scsi/scsi_cmnd.h>
|
||||
|
||||
#define VIRTIO_SCSI_MEMPOOL_SZ 64
|
||||
#define VIRTIO_SCSI_EVENT_LEN 8
|
||||
|
||||
/* Command queue element */
|
||||
struct virtio_scsi_cmd {
|
||||
|
@ -43,20 +44,42 @@ struct virtio_scsi_cmd {
|
|||
} resp;
|
||||
} ____cacheline_aligned_in_smp;
|
||||
|
||||
/* Driver instance state */
|
||||
struct virtio_scsi {
|
||||
/* Protects ctrl_vq, req_vq and sg[] */
|
||||
struct virtio_scsi_event_node {
|
||||
struct virtio_scsi *vscsi;
|
||||
struct virtio_scsi_event event;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
struct virtio_scsi_vq {
|
||||
/* Protects vq */
|
||||
spinlock_t vq_lock;
|
||||
|
||||
struct virtio_device *vdev;
|
||||
struct virtqueue *ctrl_vq;
|
||||
struct virtqueue *event_vq;
|
||||
struct virtqueue *req_vq;
|
||||
struct virtqueue *vq;
|
||||
};
|
||||
|
||||
/* Per-target queue state */
|
||||
struct virtio_scsi_target_state {
|
||||
/* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */
|
||||
spinlock_t tgt_lock;
|
||||
|
||||
/* For sglist construction when adding commands to the virtqueue. */
|
||||
struct scatterlist sg[];
|
||||
};
|
||||
|
||||
/* Driver instance state */
|
||||
struct virtio_scsi {
|
||||
struct virtio_device *vdev;
|
||||
|
||||
struct virtio_scsi_vq ctrl_vq;
|
||||
struct virtio_scsi_vq event_vq;
|
||||
struct virtio_scsi_vq req_vq;
|
||||
|
||||
/* Get some buffers ready for event vq */
|
||||
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	struct virtio_scsi_target_state *tgt[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

@@ -147,26 +170,25 @@ static void virtscsi_complete_cmd(void *buf)

static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	void *buf;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vscsi->vq_lock, flags);

	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(buf);
	} while (!virtqueue_enable_cb(vq));

	spin_unlock_irqrestore(&vscsi->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	unsigned long flags;

	spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
	virtscsi_vq_done(vq, virtscsi_complete_cmd);
	spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
};

static void virtscsi_complete_free(void *buf)

@@ -181,12 +203,123 @@ static void virtscsi_complete_free(void *buf)

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	unsigned long flags;

	spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
	virtscsi_vq_done(vq, virtscsi_complete_free);
	spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
};

static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int ret;
	struct scatterlist sg;
	unsigned long flags;

	sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
	if (ret >= 0)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return ret;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		scsi_add_device(shost, 0, target, lun);
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
				shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
	}
}

static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (event->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	default:
		pr_err("Unsupport virtio scsi event %x\n", event->event);
	}
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	schedule_work(&event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	virtscsi_vq_done(vq, virtscsi_complete_free);
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	unsigned long flags;

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
	virtscsi_vq_done(vq, virtscsi_complete_event);
	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
};

static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,

@@ -212,25 +345,17 @@ static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
 * @req_size   : size of the request buffer
 * @resp_size  : size of the response buffer
 *
 * Called with vq_lock held.
 * Called with tgt_lock held.
 */
static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
			     struct virtio_scsi_cmd *cmd,
			     unsigned *out_num, unsigned *in_num,
			     size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sg = vscsi->sg;
	struct scatterlist *sg = tgt->sg;
	unsigned int idx = 0;

	if (sc) {
		struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
		BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

		/* TODO: check feature bit and fail if unsupported? */
		BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
	}

	/* Request header. */
	sg_set_buf(&sg[idx++], &cmd->req, req_size);

@@ -250,7 +375,8 @@ static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
	*in_num = idx - *out_num;
}

static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
			     struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size, gfp_t gfp)
{

@@ -258,24 +384,35 @@ static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vscsi->vq_lock, flags);
	spin_lock_irqsave(&tgt->tgt_lock, flags);
	virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);

	virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);

	ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp);
	spin_lock(&vq->vq_lock);
	ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
	spin_unlock(&tgt->tgt_lock);
	if (ret >= 0)
		virtqueue_kick(vq);
	ret = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vscsi->vq_lock, flags);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (ret > 0)
		virtqueue_notify(vq->vq);
	return ret;
}

static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
	struct virtio_scsi_cmd *cmd;
	int ret;

	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported? */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

@@ -300,7 +437,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);

	if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd,
	if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
			      GFP_ATOMIC) >= 0)
		ret = 0;

@@ -312,10 +449,11 @@ out:
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
	if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
			      GFP_NOIO) < 0)
		goto out;

@@ -408,11 +546,63 @@ static struct scsi_host_template virtscsi_host_template = {
				  &__val, sizeof(__val)); \
	})

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
	struct virtio_device *vdev, int sg_elems)
{
	struct virtio_scsi_target_state *tgt;
	gfp_t gfp_mask = GFP_KERNEL;

	/* We need extra sg elements at head and tail. */
	tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
		      gfp_mask);

	if (!tgt)
		return NULL;

	spin_lock_init(&tgt->tgt_lock);
	sg_init_table(tgt->sg, sg_elems + 2);
	return tgt;
}

static void virtscsi_scan(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;

	scsi_scan_host(shost);
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	u32 i, num_targets;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	num_targets = sh->max_id;
	for (i = 0; i < num_targets; i++) {
		kfree(vscsi->tgt[i]);
		vscsi->tgt[i] = NULL;
	}

	vdev->config->del_vqs(vdev);
}

static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
			 struct virtio_scsi *vscsi, int num_targets)
{
	int err;
	struct virtqueue *vqs[3];
	u32 i, sg_elems;

	vq_callback_t *callbacks[] = {
		virtscsi_ctrl_done,
		virtscsi_event_done,

@@ -429,13 +619,32 @@ static int virtscsi_init(struct virtio_device *vdev,
	if (err)
		return err;

	vscsi->ctrl_vq = vqs[0];
	vscsi->event_vq = vqs[1];
	vscsi->req_vq = vqs[2];
	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	virtscsi_init_vq(&vscsi->req_vq, vqs[2]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
	return 0;

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	/* We need to know how many segments before we allocate. */
	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;

	for (i = 0; i < num_targets; i++) {
		vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
		if (!vscsi->tgt[i]) {
			err = -ENOMEM;
			goto out;
		}
	}
	err = 0;

out:
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int __devinit virtscsi_probe(struct virtio_device *vdev)

@@ -443,31 +652,25 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;

	/* We need to know how many segments before we allocate.
	 * We need an extra sg elements at head and tail.
	 */
	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;

	/* Allocate memory and link the structs together. */
	num_targets = virtscsi_config_get(vdev, max_target) + 1;
	shost = scsi_host_alloc(&virtscsi_host_template,
		sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));
		sizeof(*vscsi)
		+ num_targets * sizeof(struct virtio_scsi_target_state));

	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vdev->priv = shost;

	/* Random initializations. */
	spin_lock_init(&vscsi->vq_lock);
	sg_init_table(vscsi->sg, sg_elems + 2);

	err = virtscsi_init(vdev, vscsi);
	err = virtscsi_init(vdev, vscsi, num_targets);
	if (err)
		goto virtscsi_init_failed;

@@ -475,15 +678,16 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
	shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	scsi_scan_host(shost);

	/*
	 * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
	 * after VIRTIO_CONFIG_S_DRIVER_OK has been set..
	 */
	return 0;

scsi_add_host_failed:

@@ -493,17 +697,13 @@ virtscsi_init_failed:
	return err;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
}

static void __devexit virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);

@@ -523,7 +723,7 @@ static int virtscsi_restore(struct virtio_device *vdev)
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	return virtscsi_init(vdev, vscsi);
	return virtscsi_init(vdev, vscsi, sh->max_id);
}
#endif

@@ -532,11 +732,18 @@ static struct virtio_device_id id_table[] = {
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
	.scan = virtscsi_scan,
#ifdef CONFIG_PM
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
@@ -236,6 +236,11 @@ static int slave_configure(struct scsi_device *sdev)
				      US_FL_SCM_MULT_TARG)) &&
				us->protocol == USB_PR_BULK)
			us->use_last_sector_hacks = 1;

		/* Check if write cache default on flag is set or not */
		if (us->fflags & US_FL_WRITE_CACHE)
			sdev->wce_default_on = 1;

	} else {

		/* Non-disk-type devices don't need to blacklist any pages
@@ -1267,6 +1267,12 @@ UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
		0 ),

/* Reported by Namjae Jeon <namjae.jeon@samsung.com> */
UNUSUAL_DEV(0x0bc2, 0x2300, 0x0000, 0x9999,
		"Seagate",
		"Portable HDD",
		USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_WRITE_CACHE),

/* Reported by Ben Efros <ben@pc-doctor.com> */
UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
		"Seagate",

@@ -1468,6 +1474,12 @@ UNUSUAL_DEV( 0x1058, 0x0704, 0x0000, 0x9999,
		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
		US_FL_SANE_SENSE),

/* Reported by Namjae Jeon <namjae.jeon@samsung.com> */
UNUSUAL_DEV(0x1058, 0x070a, 0x0000, 0x9999,
		"Western Digital",
		"My Passport HDD",
		USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_WRITE_CACHE),

/* Reported by Fabio Venturi <f.venturi@tdnet.it>
 * The device reports a vendor-specific bDeviceClass.
 */
@@ -473,7 +473,7 @@ static void adjust_quirks(struct us_data *us)
			US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
			US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
			US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
			US_FL_INITIAL_READ10);
			US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE);

	p = quirks;
	while (*p) {

@@ -529,6 +529,9 @@ static void adjust_quirks(struct us_data *us)
		case 'o':
			f |= US_FL_CAPACITY_OK;
			break;
		case 'p':
			f |= US_FL_WRITE_CACHE;
			break;
		case 'r':
			f |= US_FL_IGNORE_RESIDUE;
			break;
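The new 'p' flag can also be requested at boot time through the existing usb-storage quirks override, which takes vid:pid:flags triples. As an illustration only (reusing the Seagate 0bc2:2300 device added to the unusual_devs list above), a kernel command-line entry along these lines should turn the write-cache default on for that disk:

	usb-storage.quirks=0bc2:2300:p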
@@ -141,8 +141,11 @@ static int virtio_dev_probe(struct device *_d)
	err = drv->probe(dev);
	if (err)
		add_status(dev, VIRTIO_CONFIG_S_FAILED);
	else
	else {
		add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
		if (drv->scan)
			drv->scan(dev);
	}

	return err;
}
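For orientation: the hunk above calls drv->scan only after the device has been marked VIRTIO_CONFIG_S_DRIVER_OK, so a driver can defer discovery work that issues requests until the device is allowed to service them. A minimal sketch of a driver using the hook (the names below are invented for illustration; the real user in this series is virtio-scsi's virtscsi_scan shown earlier):

static int my_probe(struct virtio_device *vdev)
{
	/* set up virtqueues and driver state; do not start scanning yet */
	return 0;
}

static void my_scan(struct virtio_device *vdev)
{
	/* DRIVER_OK is already set, so requests may be issued from here on */
}

static struct virtio_driver my_driver = {
	.driver.name	= "my-virtio-driver",
	.driver.owner	= THIS_MODULE,
	.probe		= my_probe,
	.scan		= my_scan,	/* optional; skipped when NULL */
};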
@@ -9,19 +9,47 @@
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#ifndef __ASYNC_H__
#define __ASYNC_H__

#include <linux/types.h>
#include <linux/list.h>

typedef u64 async_cookie_t;
typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
struct async_domain {
	struct list_head node;
	struct list_head domain;
	int count;
	unsigned registered:1;
};

/*
 * domain participates in global async_synchronize_full
 */
#define ASYNC_DOMAIN(_name) \
	struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node),	\
				      .domain = LIST_HEAD_INIT(_name.domain),	\
				      .count = 0,				\
				      .registered = 1 }

/*
 * domain is free to go out of scope as soon as all pending work is
 * complete, this domain does not participate in async_synchronize_full
 */
#define ASYNC_DOMAIN_EXCLUSIVE(_name) \
	struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node),	\
				      .domain = LIST_HEAD_INIT(_name.domain),	\
				      .count = 0,				\
				      .registered = 0 }

extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
					    struct list_head *list);
					    struct async_domain *domain);
void async_unregister_domain(struct async_domain *domain);
extern void async_synchronize_full(void);
extern void async_synchronize_full_domain(struct list_head *list);
extern void async_synchronize_full_domain(struct async_domain *domain);
extern void async_synchronize_cookie(async_cookie_t cookie);
extern void async_synchronize_cookie_domain(async_cookie_t cookie,
					    struct list_head *list);
					    struct async_domain *domain);
#endif
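As a quick usage sketch of the new async_domain type (hypothetical names, not code from this series): a domain is declared with ASYNC_DOMAIN() or ASYNC_DOMAIN_EXCLUSIVE() and then passed to the *_domain variants declared above.

#include <linux/async.h>

static ASYNC_DOMAIN(my_domain);	/* registered: flushed by async_synchronize_full() as well */

static void my_async_work(void *data, async_cookie_t cookie)
{
	/* long-running work for "data" runs in the background */
}

static void my_kick(void *data)
{
	async_schedule_domain(my_async_work, data, &my_domain);
}

static void my_flush(void)
{
	/* wait only for work queued in this domain */
	async_synchronize_full_domain(&my_domain);
}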