36 files changed, 1232 insertions(+), 370 deletions(-)
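Note on the pattern (not part of the patch): the mptbase hunks below replace the bare pci_enable_device() call with pci_select_bars()/pci_enable_device_mem()/pci_request_selected_regions(), so only the memory BARs are enabled and claimed, and pci_release_selected_regions() undoes the claim on suspend and dispose. A minimal, hypothetical probe/remove sketch of that sequence follows; the driver name "demo" and struct demo_priv are placeholders, and error handling is trimmed to the essentials.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct demo_priv {
	int bars;	/* bitmask of claimed BARs, as mptbase keeps in ioc->bars */
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct demo_priv *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Bitmask of the memory BARs this device exposes. */
	priv->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Enable only the memory resources; I/O BARs stay untouched. */
	err = pci_enable_device_mem(pdev);
	if (err)
		goto err_free;

	/* Claim exactly the BARs selected above. */
	err = pci_request_selected_regions(pdev, priv->bars, "demo");
	if (err)
		goto err_disable;

	pci_set_drvdata(pdev, priv);
	return 0;

err_disable:
	pci_disable_device(pdev);
err_free:
	kfree(priv);
	return err;
}

static void demo_remove(struct pci_dev *pdev)
{
	struct demo_priv *priv = pci_get_drvdata(pdev);

	pci_release_selected_regions(pdev, priv->bars);
	pci_disable_device(pdev);
	kfree(priv);
}

In the actual driver the BAR bitmask is stored in the new ioc->bars field so that mpt_suspend(), mpt_resume() and mpt_adapter_dispose() can release and re-request the same regions later.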
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 425f60c..bfda731 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -1470,9 +1470,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) if (mpt_debug_level) printk(KERN_INFO MYNAM ": mpt_debug_level=%xh\n", mpt_debug_level); - if (pci_enable_device(pdev)) - return r; - ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC); if (ioc == NULL) { printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); @@ -1482,6 +1479,20 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->id = mpt_ids++; sprintf(ioc->name, "ioc%d", ioc->id); + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_enable_device_mem(pdev)) { + kfree(ioc); + printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() " + "failed\n", ioc->name); + return r; + } + if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) { + kfree(ioc); + printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with " + "MEM failed\n", ioc->name); + return r; + } + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name)); if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { @@ -1658,6 +1669,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->active = 0; CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); + /* Set IOC ptr in the pcidev's driver data. */ + pci_set_drvdata(ioc->pcidev, ioc); + /* Set lookup ptr. */ list_add_tail(&ioc->list, &ioc_list); @@ -1791,6 +1805,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state) CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); pci_disable_device(pdev); + pci_release_selected_regions(pdev, ioc->bars); pci_set_power_state(pdev, device_state); return 0; @@ -1807,7 +1822,6 @@ mpt_resume(struct pci_dev *pdev) MPT_ADAPTER *ioc = pci_get_drvdata(pdev); u32 device_state = pdev->current_state; int recovery_state; - int err; printk(MYIOC_s_INFO_FMT "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n", @@ -1815,9 +1829,18 @@ mpt_resume(struct pci_dev *pdev) pci_set_power_state(pdev, 0); pci_restore_state(pdev); - err = pci_enable_device(pdev); - if (err) - return err; + if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) { + ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM | + IORESOURCE_IO); + if (pci_enable_device(pdev)) + return 0; + } else { + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_enable_device_mem(pdev)) + return 0; + } + if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) + return 0; /* enable interrupts */ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); @@ -1878,6 +1901,7 @@ mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase) * -2 if READY but IOCFacts Failed * -3 if READY but PrimeIOCFifos Failed * -4 if READY but IOCInit Failed + * -5 if failed to enable_device and/or request_selected_regions */ static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) @@ -1976,6 +2000,18 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) } } + if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) && + (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) { + pci_release_selected_regions(ioc->pcidev, ioc->bars); + ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM | + IORESOURCE_IO); + if (pci_enable_device(ioc->pcidev)) + return -5; + if (pci_request_selected_regions(ioc->pcidev, ioc->bars, + "mpt")) + return -5; + } + /* * Device is reset now. 
It must have de-asserted the interrupt line * (if it was asserted) and it should be safe to register for the @@ -1999,7 +2035,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) irq_allocated = 1; ioc->pci_irq = ioc->pcidev->irq; pci_set_master(ioc->pcidev); /* ?? */ - pci_set_drvdata(ioc->pcidev, ioc); dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " "%d\n", ioc->name, ioc->pcidev->irq)); } @@ -2381,6 +2416,9 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc) ioc->memmap = NULL; } + pci_disable_device(ioc->pcidev); + pci_release_selected_regions(ioc->pcidev, ioc->bars); + #if defined(CONFIG_MTRR) && 0 if (ioc->mtrr_reg > 0) { mtrr_del(ioc->mtrr_reg, 0, 0); diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index b49b706..d83ea96 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -629,6 +629,7 @@ typedef struct _MPT_ADAPTER dma_addr_t HostPageBuffer_dma; int mtrr_reg; struct pci_dev *pcidev; /* struct pci_dev pointer */ + int bars; /* bitmask of BAR's that must be configured */ u8 __iomem *memmap; /* mmap address */ struct Scsi_Host *sh; /* Scsi Host pointer */ SpiCfgData spi_data; /* Scsi config. data */ diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index a5f0aaa..a7a0813 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -722,7 +722,7 @@ config SCSI_FD_MCS config SCSI_GDTH tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" - depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API && PCI_LEGACY + depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API ---help--- Formerly called GDT SCSI Disk Array Controller Support. diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index bfd0e64..c05092f 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -144,51 +144,77 @@ static char *aac_get_status_string(u32 status); */ static int nondasd = -1; -static int aac_cache = 0; +static int aac_cache; static int dacmode = -1; - +int aac_msi; int aac_commit = -1; int startup_timeout = 180; int aif_timeout = 120; module_param(nondasd, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on"); +MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices." + " 0=off, 1=on"); module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache"); +MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n" + "\tbit 0 - Disable FUA in WRITE SCSI commands\n" + "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n" + "\tbit 2 - Disable only if Battery not protecting Cache"); module_param(dacmode, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on"); +MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC." + " 0=off, 1=on"); module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on"); +MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the" + " adapter for foreign arrays.\n" + "This is typically needed in systems that do not have a BIOS." 
+ " 0=off, 1=on"); +module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(msi, "IRQ handling." + " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)"); module_param(startup_timeout, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for adapter to have it's kernel up and\nrunning. This is typically adjusted for large systems that do not have a BIOS."); +MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for" + " adapter to have it's kernel up and\n" + "running. This is typically adjusted for large systems that do not" + " have a BIOS."); module_param(aif_timeout, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for applications to pick up AIFs before\nderegistering them. This is typically adjusted for heavily burdened systems."); +MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for" + " applications to pick up AIFs before\n" + "deregistering them. This is typically adjusted for heavily burdened" + " systems."); int numacb = -1; module_param(numacb, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware."); +MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control" + " blocks (FIB) allocated. Valid values are 512 and down. Default is" + " to use suggestion from Firmware."); int acbsize = -1; module_param(acbsize, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); +MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)" + " size. Valid values are 512, 2048, 4096 and 8192. Default is to use" + " suggestion from Firmware."); int update_interval = 30 * 60; module_param(update_interval, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter."); +MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync" + " updates issued to adapter."); int check_interval = 24 * 60 * 60; module_param(check_interval, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks."); +MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health" + " checks."); int aac_check_reset = 1; module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it."); +MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the" + " adapter. a value of -1 forces the reset to adapters programmed to" + " ignore it."); int expose_physicals = -1; module_param(expose_physicals, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on"); +MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays." 
+ " -1=protect 0=off, 1=on"); -int aac_reset_devices = 0; +int aac_reset_devices; module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); @@ -1315,7 +1341,7 @@ int aac_get_adapter_info(struct aac_dev* dev) (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), dev->supplement_adapter_info.VpdInfo.Tsid); } - if (!aac_check_reset || ((aac_check_reset != 1) && + if (!aac_check_reset || ((aac_check_reset == 1) && (dev->supplement_adapter_info.SupportedOptions2 & AAC_OPTION_IGNORE_RESET))) { printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", @@ -1353,13 +1379,14 @@ int aac_get_adapter_info(struct aac_dev* dev) if (nondasd != -1) dev->nondasd_support = (nondasd!=0); - if(dev->nondasd_support != 0) { + if (dev->nondasd_support && !dev->in_reset) printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); - } dev->dac_support = 0; if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ - printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id); + if (!dev->in_reset) + printk(KERN_INFO "%s%d: 64bit support enabled.\n", + dev->name, dev->id); dev->dac_support = 1; } @@ -1369,8 +1396,9 @@ int aac_get_adapter_info(struct aac_dev* dev) if(dev->dac_support != 0) { if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { - printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", - dev->name, dev->id); + if (!dev->in_reset) + printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", + dev->name, dev->id); } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n", diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 3195d29..ace0b75 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1026,6 +1026,7 @@ struct aac_dev u8 raw_io_64; u8 printf_enabled; u8 in_reset; + u8 msi; }; #define aac_adapter_interrupt(dev) \ @@ -1881,6 +1882,7 @@ extern int startup_timeout; extern int aif_timeout; extern int expose_physicals; extern int aac_reset_devices; +extern int aac_msi; extern int aac_commit; extern int update_interval; extern int check_interval; diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 81b3692..4743449 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1458,7 +1458,7 @@ int aac_check_health(struct aac_dev * aac) printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); - if (!aac_check_reset || ((aac_check_reset != 1) && + if (!aac_check_reset || ((aac_check_reset == 1) && (aac->supplement_adapter_info.SupportedOptions2 & AAC_OPTION_IGNORE_RESET))) goto out; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index e80d2a0..ae5f74f 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -275,9 +275,9 @@ static const char *aac_info(struct Scsi_Host *shost) /** * aac_get_driver_ident - * @devtype: index into lookup table + * @devtype: index into lookup table * - * Returns a pointer to the entry in the driver lookup table. + * Returns a pointer to the entry in the driver lookup table. 
*/ struct aac_driver_ident* aac_get_driver_ident(int devtype) @@ -494,13 +494,14 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth) static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) { - struct scsi_device * sdev = to_scsi_device(dev); + struct scsi_device *sdev = to_scsi_device(dev); + struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata); if (sdev_channel(sdev) != CONTAINER_CHANNEL) return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach - ? "Hidden\n" : "JBOD"); + ? "Hidden\n" : + ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : "")); return snprintf(buf, PAGE_SIZE, "%s\n", - get_container_type(((struct aac_dev *)(sdev->host->hostdata)) - ->fsa_dev[sdev_id(sdev)].type)); + get_container_type(aac->fsa_dev[sdev_id(sdev)].type)); } static struct device_attribute aac_raid_level_attr = { @@ -641,7 +642,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) AAC_OPTION_MU_RESET) && aac_check_reset && ((aac_check_reset != 1) || - (aac->supplement_adapter_info.SupportedOptions2 & + !(aac->supplement_adapter_info.SupportedOptions2 & AAC_OPTION_IGNORE_RESET))) aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ @@ -860,8 +861,8 @@ ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf) le32_to_cpu(dev->adapter_info.serial[0])); if (len && !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ - sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len], - buf, len)) + sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len], + buf, len-1)) len = snprintf(buf, PAGE_SIZE, "%.*s\n", (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), dev->supplement_adapter_info.MfgPcbaSerialNo); @@ -1004,32 +1005,32 @@ static const struct file_operations aac_cfg_fops = { static struct scsi_host_template aac_driver_template = { .module = THIS_MODULE, - .name = "AAC", + .name = "AAC", .proc_name = AAC_DRIVERNAME, - .info = aac_info, - .ioctl = aac_ioctl, + .info = aac_info, + .ioctl = aac_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = aac_compat_ioctl, #endif - .queuecommand = aac_queuecommand, - .bios_param = aac_biosparm, + .queuecommand = aac_queuecommand, + .bios_param = aac_biosparm, .shost_attrs = aac_attrs, .slave_configure = aac_slave_configure, .change_queue_depth = aac_change_queue_depth, .sdev_attrs = aac_dev_attrs, .eh_abort_handler = aac_eh_abort, .eh_host_reset_handler = aac_eh_reset, - .can_queue = AAC_NUM_IO_FIB, - .this_id = MAXIMUM_NUM_CONTAINERS, - .sg_tablesize = 16, - .max_sectors = 128, + .can_queue = AAC_NUM_IO_FIB, + .this_id = MAXIMUM_NUM_CONTAINERS, + .sg_tablesize = 16, + .max_sectors = 128, #if (AAC_NUM_IO_FIB > 256) .cmd_per_lun = 256, #else - .cmd_per_lun = AAC_NUM_IO_FIB, + .cmd_per_lun = AAC_NUM_IO_FIB, #endif .use_clustering = ENABLE_CLUSTERING, - .emulated = 1, + .emulated = 1, }; static void __aac_shutdown(struct aac_dev * aac) @@ -1039,6 +1040,8 @@ static void __aac_shutdown(struct aac_dev * aac) aac_send_shutdown(aac); aac_adapter_disable_int(aac); free_irq(aac->pdev->irq, aac); + if (aac->msi) + pci_disable_msi(aac->pdev); } static int __devinit aac_probe_one(struct pci_dev *pdev, @@ -1254,7 +1257,7 @@ static struct pci_driver aac_pci_driver = { .id_table = aac_pci_tbl, .probe = aac_probe_one, .remove = __devexit_p(aac_remove_one), - .shutdown = aac_shutdown, + .shutdown = aac_shutdown, }; static int __init aac_init(void) @@ -1271,7 +1274,7 @@ static int __init 
aac_init(void) aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); if (aac_cfg_major < 0) { printk(KERN_WARNING - "aacraid: unable to register \"aac\" device.\n"); + "aacraid: unable to register \"aac\" device.\n"); } return 0; diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index a08bbf1..1f18b83 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -625,8 +625,11 @@ int _aac_rx_init(struct aac_dev *dev) if (aac_init_adapter(dev) == NULL) goto error_iounmap; aac_adapter_comm(dev, dev->comm_interface); - if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, + dev->msi = aac_msi && !pci_enable_msi(dev->pdev); + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { + if (dev->msi) + pci_disable_msi(dev->pdev); printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance); goto error_iounmap; diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 85b91bc..cfc3410 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -31,6 +31,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> +#include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> @@ -385,7 +386,7 @@ int aac_sa_init(struct aac_dev *dev) if(aac_init_adapter(dev) == NULL) goto error_irq; - if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev ) < 0) { printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", @@ -403,7 +404,7 @@ int aac_sa_init(struct aac_dev *dev) error_irq: aac_sa_disable_interrupt(dev); - free_irq(dev->scsi_host_ptr->irq, (void *)dev); + free_irq(dev->pdev->irq, (void *)dev); error_iounmap: diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index ccef891..3c2d688 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -566,7 +566,7 @@ typedef struct asc_dvc_var { ASC_SCSI_BIT_ID_TYPE unit_not_ready; ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; ASC_SCSI_BIT_ID_TYPE start_motor; - uchar overrun_buf[ASC_OVERRUN_BSIZE] __aligned(8); + uchar *overrun_buf; dma_addr_t overrun_dma; uchar scsi_reset_wait; uchar chip_no; @@ -13833,6 +13833,12 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost, */ if (ASC_NARROW_BOARD(boardp)) { ASC_DBG(2, "AscInitAsc1000Driver()\n"); + + asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL); + if (!asc_dvc_varp->overrun_buf) { + ret = -ENOMEM; + goto err_free_wide_mem; + } warn_code = AscInitAsc1000Driver(asc_dvc_varp); if (warn_code || asc_dvc_varp->err_code) { @@ -13840,8 +13846,10 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost, "warn 0x%x, error 0x%x\n", asc_dvc_varp->init_state, warn_code, asc_dvc_varp->err_code); - if (asc_dvc_varp->err_code) + if (asc_dvc_varp->err_code) { ret = -ENODEV; + kfree(asc_dvc_varp->overrun_buf); + } } } else { if (advansys_wide_init_chip(shost)) @@ -13894,6 +13902,7 @@ static int advansys_release(struct Scsi_Host *shost) dma_unmap_single(board->dev, board->dvc_var.asc_dvc_var.overrun_dma, ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); + kfree(board->dvc_var.asc_dvc_var.overrun_buf); } else { iounmap(board->ioremap_addr); advansys_wide_free_mem(board); diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h index fa7c529..912e6b7 100644 --- a/drivers/scsi/aic94xx/aic94xx_sas.h +++ b/drivers/scsi/aic94xx/aic94xx_sas.h @@ -292,7 +292,7 @@ 
struct scb_header { #define INITIATE_SSP_TASK 0x00 #define INITIATE_LONG_SSP_TASK 0x01 #define INITIATE_BIDIR_SSP_TASK 0x02 -#define ABORT_TASK 0x03 +#define SCB_ABORT_TASK 0x03 #define INITIATE_SSP_TMF 0x04 #define SSP_TARG_GET_DATA 0x05 #define SSP_TARG_GET_DATA_GOOD 0x06 diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index 87b2f6e..b52124f 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -369,7 +369,7 @@ int asd_abort_task(struct sas_task *task) return -ENOMEM; scb = ascb->scb; - scb->header.opcode = ABORT_TASK; + scb->header.opcode = SCB_ABORT_TASK; switch (task->task_proto) { case SAS_PROTOCOL_SATA: diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index fb5f202..a715632 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2018,6 +2018,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, * the upper layers to process. This would have been set * correctly by fas216_std_done. */ + scsi_eh_restore_cmnd(SCpnt, &info->ses); SCpnt->scsi_done(SCpnt); } @@ -2103,23 +2104,12 @@ request_sense: if (SCpnt->cmnd[0] == REQUEST_SENSE) goto done; + scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0); fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, "requesting sense"); - memset(SCpnt->cmnd, 0, sizeof (SCpnt->cmnd)); - SCpnt->cmnd[0] = REQUEST_SENSE; - SCpnt->cmnd[1] = SCpnt->device->lun << 5; - SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer); - SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]); - SCpnt->SCp.buffer = NULL; - SCpnt->SCp.buffers_residual = 0; - SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer; - SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer); - SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer); + init_SCp(SCpnt); SCpnt->SCp.Message = 0; SCpnt->SCp.Status = 0; - SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer); - SCpnt->sc_data_direction = DMA_FROM_DEVICE; - SCpnt->use_sg = 0; SCpnt->tag = 0; SCpnt->host_scribble = (void *)fas216_rq_sns_done; diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h index 00e5f05..3e73e26 100644 --- a/drivers/scsi/arm/fas216.h +++ b/drivers/scsi/arm/fas216.h @@ -16,6 +16,8 @@ #define NO_IRQ 255 #endif +#include <scsi/scsi_eh.h> + #include "queue.h" #include "msgqueue.h" @@ -311,6 +313,7 @@ typedef struct { /* miscellaneous */ int internal_done; /* flag to indicate request done */ + struct scsi_eh_save *ses; /* holds request sense restore info */ unsigned long magic_end; } FAS216_Info; diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index c825239..6d67f5c 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -642,12 +642,15 @@ static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt, *cnt, vendor, device)); pdev = NULL; - while ((pdev = pci_find_device(vendor, device, pdev)) + while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) { if (pci_enable_device(pdev)) continue; - if (*cnt >= MAXHA) + if (*cnt >= MAXHA) { + pci_dev_put(pdev); return; + } + /* GDT PCI controller found, resources are already in pdev */ pcistr[*cnt].pdev = pdev; pcistr[*cnt].irq = pdev->irq; @@ -4836,6 +4839,9 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios) if (error) goto out_free_coal_stat; list_add_tail(&ha->list, &gdth_instances); + + scsi_scan_host(shp); + return 0; out_free_coal_stat: @@ -4963,6 +4969,9 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot) if (error) goto out_free_coal_stat; list_add_tail(&ha->list, &gdth_instances); + + scsi_scan_host(shp); + return 0; 
out_free_ccb_phys: @@ -5100,6 +5109,9 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr) if (error) goto out_free_coal_stat; list_add_tail(&ha->list, &gdth_instances); + + scsi_scan_host(shp); + return 0; out_free_coal_stat: diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 83567b9..2ab2d24 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -307,6 +307,7 @@ struct lpfc_vport { uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ + uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */ struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; struct lpfc_name fc_nodename; /* fc nodename */ struct lpfc_name fc_portname; /* fc portname */ @@ -392,6 +393,13 @@ enum hba_temp_state { HBA_OVER_TEMP }; +enum intr_type_t { + NONE = 0, + INTx, + MSI, + MSIX, +}; + struct lpfc_hba { struct lpfc_sli sli; uint32_t sli_rev; /* SLI2 or SLI3 */ @@ -409,7 +417,7 @@ struct lpfc_hba { /* This flag is set while issuing */ /* INIT_LINK mailbox command */ #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ -#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */ +#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ struct lpfc_sli2_slim *slim2p; struct lpfc_dmabuf hbqslimp; @@ -487,6 +495,8 @@ struct lpfc_hba { wait_queue_head_t *work_wait; struct task_struct *worker_thread; + uint32_t hbq_in_use; /* HBQs in use flag */ + struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ uint32_t hbq_count; /* Count of configured HBQs */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ @@ -555,7 +565,8 @@ struct lpfc_hba { mempool_t *nlp_mem_pool; struct fc_host_statistics link_stats; - uint8_t using_msi; + enum intr_type_t intr_type; + struct msix_entry msix_entries[1]; struct list_head port_list; struct lpfc_vport *pport; /* physical lpfc_vport pointer */ @@ -595,6 +606,8 @@ struct lpfc_hba { unsigned long last_completion_time; struct timer_list hb_tmofunc; uint8_t hb_outstanding; + /* ndlp reference management */ + spinlock_t ndlp_lock; /* * Following bit will be set for all buffer tags which are not * associated with any HBQ. diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4bae4a2..b12a841 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1191,7 +1191,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) - if (ndlp->rport) + if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport) ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; spin_unlock_irq(shost->host_lock); } @@ -1592,9 +1592,11 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, # support this feature # 0 = MSI disabled (default) # 1 = MSI enabled -# Value range is [0,1]. Default value is 0. +# 2 = MSI-X enabled +# Value range is [0,2]. Default value is 0. 
*/ -LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); +LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " + "MSI-X (2), if possible"); /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. @@ -1946,11 +1948,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, } /* If HBA encountered an error attention, allow only DUMP - * mailbox command until the HBA is restarted. + * or RESTART mailbox commands until the HBA is restarted. */ if ((phba->pport->stopped) && - (phba->sysfs_mbox.mbox->mb.mbxCommand - != MBX_DUMP_MEMORY)) { + (phba->sysfs_mbox.mbox->mb.mbxCommand != + MBX_DUMP_MEMORY && + phba->sysfs_mbox.mbox->mb.mbxCommand != + MBX_RESTART)) { sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; @@ -2384,7 +2388,8 @@ lpfc_get_node_by_target(struct scsi_target *starget) spin_lock_irq(shost->host_lock); /* Search for this, mapped, target ID */ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && + if (NLP_CHK_NODE_ACT(ndlp) && + ndlp->nlp_state == NLP_STE_MAPPED_NODE && starget->id == ndlp->nlp_sid) { spin_unlock_irq(shost->host_lock); return ndlp; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 50fcb7c..848d977 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -53,7 +53,11 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); +void lpfc_disable_node(struct lpfc_vport *, struct lpfc_nodelist *); +struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, + struct lpfc_nodelist *, int); void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_set_disctmo(struct lpfc_vport *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 92441ce..3d0ccd9 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -294,7 +294,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, /* Save for completion so we can release these resources */ geniocb->context1 = (uint8_t *) inp; geniocb->context2 = (uint8_t *) outp; - geniocb->context_un.ndlp = ndlp; + geniocb->context_un.ndlp = lpfc_nlp_get(ndlp); /* Fill in payload, bp points to frame payload */ icmd->ulpCommand = CMD_GEN_REQUEST64_CR; @@ -489,8 +489,10 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size) */ ndlp = lpfc_findnode_did(vport, Did); - if (ndlp && (ndlp->nlp_type & - NLP_FCP_TARGET)) + if (ndlp && + NLP_CHK_NODE_ACT(ndlp) + && (ndlp->nlp_type & + NLP_FCP_TARGET)) lpfc_setup_disc_node (vport, Did); else if (lpfc_ns_cmd(vport, @@ -773,7 +775,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "0267 NameServer GFF Rsp " "x%x Error (%d %d) Data: x%x x%x\n", did, irsp->ulpStatus, irsp->un.ulpWord[4], - vport->fc_flag, vport->fc_rscn_id_cnt) + vport->fc_flag, vport->fc_rscn_id_cnt); } /* This is a target port, unregistered port, or the GFF_ID failed */ @@ -1064,7 +1066,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, int rc = 0; ndlp = lpfc_findnode_did(vport, NameServer_DID); - if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) + || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { rc=1; goto ns_cmd_exit; } @@ -1213,8 +1216,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, cmpl = lpfc_cmpl_ct_cmd_rff_id; break; } - lpfc_nlp_get(ndlp); - + /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count + * to hold ndlp reference for the corresponding callback function. + */ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { /* On success, The cmpl function will free the buffers */ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, @@ -1222,9 +1226,13 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, cmdcode, ndlp->nlp_DID, 0); return 0; } - rc=6; + + /* Decrement ndlp reference count to release ndlp reference held + * for the failed command's callback function. + */ lpfc_nlp_put(ndlp); + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); ns_cmd_free_bmp: kfree(bmp); @@ -1271,6 +1279,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + goto fail_out; + if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { /* FDMI rsp failed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -1294,6 +1305,8 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); break; } + +fail_out: lpfc_ct_free_iocb(phba, cmdiocb); return; } @@ -1650,12 +1663,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) bpl->tus.w = le32_to_cpu(bpl->tus.w); cmpl = lpfc_cmpl_ct_cmd_fdmi; - lpfc_nlp_get(ndlp); + /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count + * to hold ndlp reference for the corresponding callback function. + */ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) return 0; + /* Decrement ndlp reference count to release ndlp reference held + * for the failed command's callback function. 
+ */ lpfc_nlp_put(ndlp); + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); fdmi_cmd_free_bmp: kfree(bmp); @@ -1698,7 +1717,7 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) struct lpfc_nodelist *ndlp; ndlp = lpfc_findnode_did(vport, FDMI_DID); - if (ndlp) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { if (init_utsname()->nodename[0] != '\0') lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index cfe81c5..2db0b74 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -73,6 +73,12 @@ struct lpfc_nodelist { uint8_t nlp_fcp_info; /* class info, bits 0-3 */ #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ + uint16_t nlp_usg_map; /* ndlp management usage bitmap */ +#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */ +#define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */ +#define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */ +#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */ + struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ struct fc_rport *rport; /* Corresponding FC transport port structure */ @@ -85,25 +91,51 @@ struct lpfc_nodelist { }; /* Defines for nlp_flag (uint32) */ -#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */ -#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */ -#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */ -#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ -#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ -#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ -#define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */ -#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ -#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ -#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ -#define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */ -#define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */ -#define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful +#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ +#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ +#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ +#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ +#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ +#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ +#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */ +#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ +#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ +#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ +#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ +#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ +#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful ACC */ -#define NLP_NPR_ADISC 0x2000000 /* 
Issue ADISC when dq'ed from +#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from NPR list */ -#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */ -#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ +#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ +#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ +#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ + +/* ndlp usage management macros */ +#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ + & NLP_USG_NODE_ACT_BIT) \ + && \ + !((ndlp)->nlp_usg_map \ + & NLP_USG_FREE_ACK_BIT)) +#define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ + |= NLP_USG_NODE_ACT_BIT) +#define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ + = NLP_USG_NODE_ACT_BIT) +#define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \ + &= ~NLP_USG_NODE_ACT_BIT) +#define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ + & NLP_USG_IACT_REQ_BIT) +#define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \ + |= NLP_USG_IACT_REQ_BIT) +#define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ + & NLP_USG_FREE_REQ_BIT) +#define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \ + |= NLP_USG_FREE_REQ_BIT) +#define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ + & NLP_USG_FREE_ACK_BIT) +#define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \ + |= NLP_USG_FREE_ACK_BIT) /* There are 4 different double linked lists nodelist entries can reside on. * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c6b739d..cbb68a9 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -113,6 +113,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, if (elsiocb == NULL) return NULL; + icmd = &elsiocb->iocb; /* fill in BDEs for command */ @@ -134,9 +135,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, if (!prsp || !prsp->virt) goto els_iocb_free_prsp_exit; INIT_LIST_HEAD(&prsp->list); - } else { + } else prsp = NULL; - } /* Allocate buffer for Buffer ptr list */ pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); @@ -246,7 +246,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) sp = &phba->fc_fabparam; ndlp = lpfc_findnode_did(vport, Fabric_DID); - if (!ndlp) { + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { err = 1; goto fail; } @@ -282,6 +282,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; mbox->vport = vport; + /* increment the reference count on ndlp to hold reference + * for the callback routine. + */ mbox->context2 = lpfc_nlp_get(ndlp); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); @@ -293,6 +296,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) return 0; fail_issue_reg_login: + /* decrement the reference count on ndlp just incremented + * for the failed mbox command. 
+ */ lpfc_nlp_put(ndlp); mp = (struct lpfc_dmabuf *) mbox->context1; lpfc_mbuf_free(phba, mp->virt, mp->phys); @@ -381,6 +387,8 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if ((np->nlp_state != NLP_STE_NPR_NODE) || !(np->nlp_flag & NLP_NPR_ADISC)) continue; @@ -456,6 +464,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, mempool_free(mbox, phba->mbox_mem_pool); goto fail; } + /* Decrement ndlp reference count indicating that ndlp can be + * safely released when other references to it are done. + */ lpfc_nlp_put(ndlp); ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); @@ -467,22 +478,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) goto fail; - lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, + NLP_STE_UNUSED_NODE); + if(!ndlp) + goto fail; } memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); + /* Set state will put ndlp onto node list if not already done */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); - } else { - /* This side will wait for the PLOGI */ + } else + /* This side will wait for the PLOGI, decrement ndlp reference + * count indicating that ndlp can be released when other + * references to it are done. + */ lpfc_nlp_put(ndlp); - } /* If we are pt2pt with another NPort, force NPIV off! */ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; @@ -728,16 +746,21 @@ lpfc_initial_flogi(struct lpfc_vport *vport) if (!ndlp) return 0; lpfc_nlp_init(vport, ndlp, Fabric_DID); - } else { - lpfc_dequeue_node(vport, ndlp); + /* Put ndlp onto node list */ + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + /* re-setup ndlp without removing from node list */ + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) + return 0; } - if (lpfc_issue_els_flogi(vport, ndlp, 0)) { + if (lpfc_issue_els_flogi(vport, ndlp, 0)) /* This decrement of reference count to node shall kick off * the release of the node. */ lpfc_nlp_put(ndlp); - } + return 1; } @@ -755,9 +778,15 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) if (!ndlp) return 0; lpfc_nlp_init(vport, ndlp, Fabric_DID); - } else { - lpfc_dequeue_node(vport, ndlp); + /* Put ndlp onto node list */ + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + /* re-setup ndlp without removing from node list */ + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) + return 0; } + if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { /* decrement node reference count to trigger the release of * the node. 
@@ -816,7 +845,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, */ new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); - if (new_ndlp == ndlp) + if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) return ndlp; if (!new_ndlp) { @@ -827,8 +856,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); if (!new_ndlp) return ndlp; - lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); + } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { + new_ndlp = lpfc_enable_node(vport, new_ndlp, + NLP_STE_UNUSED_NODE); + if (!new_ndlp) + return ndlp; } lpfc_unreg_rpi(vport, new_ndlp); @@ -839,6 +872,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + /* Set state will put new_ndlp on to node list if not already done */ lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); /* Move this back to NPR state */ @@ -912,7 +946,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->un.elsreq64.remoteID); ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); - if (!ndlp) { + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0136 PLOGI completes to NPort x%x " "with no ndlp. Data: x%x x%x x%x\n", @@ -962,12 +996,11 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /* PLOGI failed */ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if (lpfc_error_lost_link(irsp)) { + if (lpfc_error_lost_link(irsp)) rc = NLP_STE_FREED_NODE; - } else { + else rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI); - } } else { /* Good status, call state machine */ prsp = list_entry(((struct lpfc_dmabuf *) @@ -1015,8 +1048,10 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ ndlp = lpfc_findnode_did(vport, did); - /* If ndlp if not NULL, we will bump the reference count on it */ + if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) + ndlp = NULL; + /* If ndlp is not NULL, we will bump the reference count on it */ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, ELS_CMD_PLOGI); @@ -1097,18 +1132,15 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /* PRLI failed */ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if (lpfc_error_lost_link(irsp)) { + if (lpfc_error_lost_link(irsp)) goto out; - } else { + else lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); - } - } else { + } else /* Good status, call state machine */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI); - } - out: lpfc_els_free_iocb(phba, cmdiocb); return; @@ -1275,15 +1307,13 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /* ADISC failed */ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if (!lpfc_error_lost_link(irsp)) { + if (!lpfc_error_lost_link(irsp)) lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC); - } - } else { + } else /* Good status, call state machine */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC); - } if (disc && vport->num_disc_nodes) { /* Check to see if there are more ADISCs to be sent */ @@ -1443,14 +1473,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, else lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); - } else { + } else /* Good status, call state machine. 
* This will unregister the rpi if needed. */ lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); - } - out: lpfc_els_free_iocb(phba, cmdiocb); return; @@ -1556,11 +1584,19 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = (sizeof(uint32_t) + sizeof(SCR)); - ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); - if (!ndlp) - return 1; - lpfc_nlp_init(vport, ndlp, nportid); + ndlp = lpfc_findnode_did(vport, nportid); + if (!ndlp) { + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) + return 1; + lpfc_nlp_init(vport, ndlp, nportid); + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) + return 1; + } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_SCR); @@ -1623,11 +1659,19 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = (sizeof(uint32_t) + sizeof(FARP)); - ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); - if (!ndlp) - return 1; - lpfc_nlp_init(vport, ndlp, nportid); + ndlp = lpfc_findnode_did(vport, nportid); + if (!ndlp) { + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) + return 1; + lpfc_nlp_init(vport, ndlp, nportid); + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) + return 1; + } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_RNID); @@ -1657,7 +1701,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); ondlp = lpfc_findnode_did(vport, nportid); - if (ondlp) { + if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { memcpy(&fp->OportName, &ondlp->nlp_portname, sizeof(struct lpfc_name)); memcpy(&fp->OnodeName, &ondlp->nlp_nodename, @@ -1690,6 +1734,7 @@ void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_work_evt *evtp; spin_lock_irq(shost->host_lock); nlp->nlp_flag &= ~NLP_DELAY_TMO; @@ -1697,8 +1742,12 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) del_timer_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; - if (!list_empty(&nlp->els_retry_evt.evt_listp)) + if (!list_empty(&nlp->els_retry_evt.evt_listp)) { list_del_init(&nlp->els_retry_evt.evt_listp); + /* Decrement nlp reference count held for the delayed retry */ + evtp = &nlp->els_retry_evt; + lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); + } if (nlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(shost->host_lock); @@ -1842,13 +1891,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, cmd = *elscmd++; } - if (ndlp) + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) did = ndlp->nlp_DID; else { /* We should only hit this case for retrying PLOGI */ did = irsp->un.elsreq64.remoteID; ndlp = lpfc_findnode_did(vport, did); - if (!ndlp && (cmd != ELS_CMD_PLOGI)) + if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) + && (cmd != ELS_CMD_PLOGI)) return 1; } @@ -1870,18 +1920,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; case IOERR_ILLEGAL_COMMAND: - if 
((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) && - (cmd == ELS_CMD_FDISC)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0124 FDISC failed (3/6) " - "retrying...\n"); - lpfc_mbx_unreg_vpi(vport); - retry = 1; - /* FDISC retry policy */ - maxretry = 48; - if (cmdiocb->retry >= 32) - delay = 1000; - } + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0124 Retry illegal cmd x%x " + "retry:x%x delay:x%x\n", + cmd, cmdiocb->retry, delay); + retry = 1; + /* All command's retry policy */ + maxretry = 8; + if (cmdiocb->retry > 2) + delay = 1000; break; case IOERR_NO_RESOURCES: @@ -1967,6 +2014,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, break; case LSRJT_LOGICAL_ERR: + /* There are some cases where switches return this + * error when they are not ready and should be returning + * Logical Busy. We should delay every time. + */ + if (cmd == ELS_CMD_FDISC && + stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { + maxretry = 3; + delay = 1000; + retry = 1; + break; + } case LSRJT_PROTOCOL_ERR: if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && (cmd == ELS_CMD_FDISC) && @@ -1996,7 +2054,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, retry = 1; if ((cmd == ELS_CMD_FLOGI) && - (phba->fc_topology != TOPOLOGY_LOOP)) { + (phba->fc_topology != TOPOLOGY_LOOP) && + !lpfc_error_lost_link(irsp)) { /* FLOGI retry policy */ retry = 1; maxretry = 48; @@ -2322,6 +2381,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((rspiocb->iocb.ulpStatus == 0) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { lpfc_unreg_rpi(vport, ndlp); + /* Increment reference count to ndlp to hold the + * reference to ndlp for the callback function. + */ mbox->context2 = lpfc_nlp_get(ndlp); mbox->vport = vport; if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { @@ -2335,9 +2397,13 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, NLP_STE_REG_LOGIN_ISSUE); } if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) - != MBX_NOT_FINISHED) { + != MBX_NOT_FINISHED) goto out; - } + else + /* Decrement the ndlp reference count we + * set for this failed mailbox command. 
+ */ + lpfc_nlp_put(ndlp); /* ELS rsp: Cannot issue reg_login for <NPortid> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, @@ -2796,6 +2862,8 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS ADISCs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state == NLP_STE_NPR_NODE && (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { @@ -2833,6 +2901,8 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS PLOGIs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state == NLP_STE_NPR_NODE && (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && @@ -2869,6 +2939,16 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport) struct lpfc_hba *phba = vport->phba; int i; + spin_lock_irq(shost->host_lock); + if (vport->fc_rscn_flush) { + /* Another thread is walking fc_rscn_id_list on this vport */ + spin_unlock_irq(shost->host_lock); + return; + } + /* Indicate we are walking lpfc_els_flush_rscn on this vport */ + vport->fc_rscn_flush = 1; + spin_unlock_irq(shost->host_lock); + for (i = 0; i < vport->fc_rscn_id_cnt; i++) { lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); vport->fc_rscn_id_list[i] = NULL; @@ -2878,6 +2958,8 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport) vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); spin_unlock_irq(shost->host_lock); lpfc_can_disctmo(vport); + /* Indicate we are done walking this fc_rscn_id_list */ + vport->fc_rscn_flush = 0; } int @@ -2887,6 +2969,7 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) D_ID rscn_did; uint32_t *lp; uint32_t payload_len, i; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ns_did.un.word = did; @@ -2898,6 +2981,15 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) if (vport->fc_flag & FC_RSCN_DISCOVERY) return did; + spin_lock_irq(shost->host_lock); + if (vport->fc_rscn_flush) { + /* Another thread is walking fc_rscn_id_list on this vport */ + spin_unlock_irq(shost->host_lock); + return 0; + } + /* Indicate we are walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 1; + spin_unlock_irq(shost->host_lock); for (i = 0; i < vport->fc_rscn_id_cnt; i++) { lp = vport->fc_rscn_id_list[i]->virt; payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); @@ -2908,16 +3000,16 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) switch (rscn_did.un.b.resv) { case 0: /* Single N_Port ID effected */ if (ns_did.un.word == rscn_did.un.word) - return did; + goto return_did_out; break; case 1: /* Whole N_Port Area effected */ if ((ns_did.un.b.domain == rscn_did.un.b.domain) && (ns_did.un.b.area == rscn_did.un.b.area)) - return did; + goto return_did_out; break; case 2: /* Whole N_Port Domain effected */ if (ns_did.un.b.domain == rscn_did.un.b.domain) - return did; + goto return_did_out; break; default: /* Unknown Identifier in RSCN node */ @@ -2926,11 +3018,17 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) "RSCN payload Data: x%x\n", rscn_did.un.word); case 3: /* Whole Fabric effected */ - return did; + goto return_did_out; } } } + /* Indicate we are done with walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 0; return 0; +return_did_out: + /* Indicate we are done with walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 0; + return did; 
} static int @@ -2943,7 +3041,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) */ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || + if (!NLP_CHK_NODE_ACT(ndlp) || + ndlp->nlp_state == NLP_STE_UNUSED_NODE || lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) continue; @@ -2971,7 +3070,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, uint32_t *lp, *datap; IOCB_t *icmd; uint32_t payload_len, length, nportid, *cmd; - int rscn_cnt = vport->fc_rscn_id_cnt; + int rscn_cnt; int rscn_id = 0, hba_id = 0; int i; @@ -2984,7 +3083,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* RSCN received */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0214 RSCN received Data: x%x x%x x%x x%x\n", - vport->fc_flag, payload_len, *lp, rscn_cnt); + vport->fc_flag, payload_len, *lp, + vport->fc_rscn_id_cnt); for (i = 0; i < payload_len/sizeof(uint32_t); i++) fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_RSCN, lp[i]); @@ -3022,7 +3122,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, "0214 Ignore RSCN " "Data: x%x x%x x%x x%x\n", vport->fc_flag, payload_len, - *lp, rscn_cnt); + *lp, vport->fc_rscn_id_cnt); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", ndlp->nlp_DID, vport->port_state, @@ -3034,6 +3134,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } } + spin_lock_irq(shost->host_lock); + if (vport->fc_rscn_flush) { + /* Another thread is walking fc_rscn_id_list on this vport */ + spin_unlock_irq(shost->host_lock); + vport->fc_flag |= FC_RSCN_DISCOVERY; + return 0; + } + /* Indicate we are walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 1; + spin_unlock_irq(shost->host_lock); + /* Get the array count after sucessfully have the token */ + rscn_cnt = vport->fc_rscn_id_cnt; /* If we are already processing an RSCN, save the received * RSCN payload buffer, cmdiocb->context2 to process later. 
*/ @@ -3055,7 +3167,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if ((rscn_cnt) && (payload_len + length <= LPFC_BPL_SIZE)) { *cmd &= ELS_CMD_MASK; - *cmd |= be32_to_cpu(payload_len + length); + *cmd |= cpu_to_be32(payload_len + length); memcpy(((uint8_t *)cmd) + length, lp, payload_len); } else { @@ -3066,7 +3178,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, */ cmdiocb->context2 = NULL; } - /* Deferred RSCN */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0235 Deferred RSCN " @@ -3083,9 +3194,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, vport->fc_rscn_id_cnt, vport->fc_flag, vport->port_state); } + /* Indicate we are done walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 0; /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - /* send RECOVERY event for ALL nodes that match RSCN payload */ lpfc_rscn_recovery_check(vport); spin_lock_irq(shost->host_lock); @@ -3093,7 +3205,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, spin_unlock_irq(shost->host_lock); return 0; } - lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RSCN: did:x%x/ste:x%x flg:x%x", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); @@ -3102,20 +3213,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, vport->fc_flag |= FC_RSCN_MODE; spin_unlock_irq(shost->host_lock); vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; + /* Indicate we are done walking fc_rscn_id_list on this vport */ + vport->fc_rscn_flush = 0; /* * If we zero, cmdiocb->context2, the calling routine will * not try to free it. */ cmdiocb->context2 = NULL; - lpfc_set_disctmo(vport); - /* Send back ACC */ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - /* send RECOVERY event for ALL nodes that match RSCN payload */ lpfc_rscn_recovery_check(vport); - return lpfc_els_handle_rscn(vport); } @@ -3145,7 +3254,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) vport->num_disc_nodes = 0; ndlp = lpfc_findnode_did(vport, NameServer_DID); - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp) + && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { /* Good ndlp, issue CT Request to NameServer */ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) /* Wait for NameServer query cmpl before we can @@ -3155,25 +3265,35 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) /* If login to NameServer does not exist, issue one */ /* Good status, issue PLOGI to NameServer */ ndlp = lpfc_findnode_did(vport, NameServer_DID); - if (ndlp) + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) /* Wait for NameServer login cmpl before we can continue */ return 1; - ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); - if (!ndlp) { - lpfc_els_flush_rscn(vport); - return 0; + if (ndlp) { + ndlp = lpfc_enable_node(vport, ndlp, + NLP_STE_PLOGI_ISSUE); + if (!ndlp) { + lpfc_els_flush_rscn(vport); + return 0; + } + ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; } else { + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) { + lpfc_els_flush_rscn(vport); + return 0; + } lpfc_nlp_init(vport, ndlp, NameServer_DID); - ndlp->nlp_type |= NLP_FABRIC; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); - lpfc_issue_els_plogi(vport, NameServer_DID, 0); - /* Wait for NameServer login cmpl before we can - continue */ - return 1; } + ndlp->nlp_type |= NLP_FABRIC; + lpfc_issue_els_plogi(vport, NameServer_DID, 0); + /* 
Wait for NameServer login cmpl before we can + * continue + */ + return 1; } lpfc_els_flush_rscn(vport); @@ -3672,6 +3792,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state != NLP_STE_NPR_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { @@ -3697,6 +3819,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state != NLP_STE_NPR_NODE) continue; @@ -3936,7 +4060,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t cmd, did, newnode, rjt_err = 0; IOCB_t *icmd = &elsiocb->iocb; - if (vport == NULL || elsiocb->context2 == NULL) + if (!vport || !(elsiocb->context2)) goto dropit; newnode = 0; @@ -3971,14 +4095,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, lpfc_nlp_init(vport, ndlp, did); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); newnode = 1; - if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { + if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) ndlp->nlp_type |= NLP_FABRIC; + } else { + if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, + NLP_STE_UNUSED_NODE); + if (!ndlp) + goto dropit; } - } - else { if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { /* This is simular to the new node path */ - lpfc_nlp_get(ndlp); + ndlp = lpfc_nlp_get(ndlp); + if (!ndlp) + goto dropit; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); newnode = 1; } @@ -3987,6 +4117,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvFrame++; if (elsiocb->context1) lpfc_nlp_put(elsiocb->context1); + elsiocb->context1 = lpfc_nlp_get(ndlp); elsiocb->vport = vport; @@ -4007,8 +4138,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); if (vport->port_state < LPFC_DISC_AUTH) { - rjt_err = LSRJT_UNABLE_TPC; - break; + if (!(phba->pport->fc_flag & FC_PT2PT) || + (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { + rjt_err = LSRJT_UNABLE_TPC; + break; + } + /* We get here, and drop thru, if we are PT2PT with + * another NPort and the other side has initiated + * the PLOGI before responding to our FLOGI. + */ } shost = lpfc_shost_from_vport(vport); @@ -4251,15 +4389,15 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, vport = lpfc_find_vport_by_vpid(phba, vpi); } } - /* If there are no BDEs associated - * with this IOCB, there is nothing to do. - */ + /* If there are no BDEs associated + * with this IOCB, there is nothing to do. 
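Editor's note: two details in the lpfc_els_handle_rscn() hunks above are easy to miss. First, the merged-payload length word is now written with cpu_to_be32() instead of be32_to_cpu(); the two helpers perform the same byte swap, so this is a semantic fix (CPU order to wire order) rather than a behavioural one. Second, when the NameServer node exists but has been deactivated it is revived with lpfc_enable_node() instead of being replaced by a fresh allocation. Roughly, with the error handling condensed:

ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && NLP_CHK_NODE_ACT(ndlp))
	return 1;			/* NameServer login already in flight */

if (ndlp) {
	/* a node exists but was deactivated: revive it in place */
	ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_PLOGI_ISSUE);
	if (!ndlp)
		return 0;		/* real code flushes the RSCN list first */
	ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
} else {
	/* no node at all: allocate and initialize a fresh one */
	ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
	if (!ndlp)
		return 0;		/* real code flushes the RSCN list first */
	lpfc_nlp_init(vport, ndlp, NameServer_DID);
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
}
ndlp->nlp_type |= NLP_FABRIC;
lpfc_issue_els_plogi(vport, NameServer_DID, 0);
return 1;				/* wait for NameServer login completion */

Note also that lpfc_nlp_get() can now fail, which is why lpfc_els_unsol_buffer() consumes and checks its return value before using the node.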
+ */ if (icmd->ulpBdeCount == 0) return; - /* type of ELS cmd is first 32bit word - * in packet - */ + /* type of ELS cmd is first 32bit word + * in packet + */ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { elsiocb->context2 = bdeBuf1; } else { @@ -4314,6 +4452,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) } lpfc_nlp_init(vport, ndlp, NameServer_DID); ndlp->nlp_type |= NLP_FABRIC; + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) { + if (phba->fc_topology == TOPOLOGY_LOOP) { + lpfc_disc_start(vport); + return; + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0348 NameServer login: node freed\n"); + return; + } } lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); @@ -4360,6 +4510,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) switch (mb->mbxStatus) { case 0x11: /* unsupported feature */ case 0x9603: /* max_vpi exceeded */ + case 0x9602: /* Link event since CLEAR_LA */ /* giving up on vport registration */ lpfc_vport_set_state(vport, FC_VPORT_FAILED); spin_lock_irq(shost->host_lock); @@ -4373,7 +4524,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); - lpfc_initial_fdisc(vport); + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_initial_flogi(vport); + else + lpfc_initial_fdisc(vport); break; } @@ -4471,7 +4625,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, irsp->un.ulpWord[4]); if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_nlp_put(ndlp); /* giving up on FDISC. 
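Editor's note: lpfc_cmpl_reg_new_vport() above now treats mailbox status 0x9602 (link event since CLEAR_LA) like the other fatal registration errors, and when the vport must redo REG_VPI it restarts the fabric login with the command appropriate to the port type. Condensed sketch of that branch:

/* When REG_VPI must be redone, restart the fabric login: the physical
 * port sends FLOGI, NPIV virtual ports send FDISC. */
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
if (vport->port_type == LPFC_PHYSICAL_PORT)
	lpfc_initial_flogi(vport);
else
	lpfc_initial_fdisc(vport);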
Cancel discovery timer */ lpfc_can_disctmo(vport); @@ -4492,8 +4645,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { - if (np->nlp_state != NLP_STE_NPR_NODE - || !(np->nlp_flag & NLP_NPR_ADISC)) + if (!NLP_CHK_NODE_ACT(ndlp) || + (np->nlp_state != NLP_STE_NPR_NODE) || + !(np->nlp_flag & NLP_NPR_ADISC)) continue; spin_lock_irq(shost->host_lock); np->nlp_flag &= ~NLP_NPR_ADISC; @@ -4599,6 +4753,8 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, { struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; + struct lpfc_nodelist *ndlp; + ndlp = (struct lpfc_nodelist *)cmdiocb->context1; irsp = &rspiocb->iocb; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, @@ -4607,6 +4763,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_els_free_iocb(phba, cmdiocb); vport->unreg_vpi_cmpl = VPORT_ERROR; + + /* Trigger the release of the ndlp after logo */ + lpfc_nlp_put(ndlp); } int @@ -4686,11 +4845,12 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) repeat: iocb = NULL; spin_lock_irqsave(&phba->hbalock, iflags); - /* Post any pending iocb to the SLI layer */ + /* Post any pending iocb to the SLI layer */ if (atomic_read(&phba->fabric_iocb_count) == 0) { list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), list); if (iocb) + /* Increment fabric iocb count to hold the position */ atomic_inc(&phba->fabric_iocb_count); } spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -4737,9 +4897,7 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) int blocked; blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); - /* Start a timer to unblock fabric - * iocbs after 100ms - */ + /* Start a timer to unblock fabric iocbs after 100ms */ if (!blocked) mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); @@ -4787,8 +4945,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, atomic_dec(&phba->fabric_iocb_count); if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { - /* Post any pending iocbs to HBA */ - lpfc_resume_fabric_iocbs(phba); + /* Post any pending iocbs to HBA */ + lpfc_resume_fabric_iocbs(phba); } } @@ -4807,6 +4965,9 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) ready = atomic_read(&phba->fabric_iocb_count) == 0 && !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); + if (ready) + /* Increment fabric iocb count to hold the position */ + atomic_inc(&phba->fabric_iocb_count); spin_unlock_irqrestore(&phba->hbalock, iflags); if (ready) { iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; @@ -4817,7 +4978,6 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) "Fabric sched2: ste:x%x", iocb->vport->port_state, 0, 0); - atomic_inc(&phba->fabric_iocb_count); ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); if (ret == IOCB_ERROR) { diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index dc042bd..bd572d6 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
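Editor's note: the driver allows only one outstanding fabric iocb at a time, tracked by phba->fabric_iocb_count. The hunks above move the atomic_inc() inside the hbalock-covered decision so the slot is reserved before the lock is dropped; previously two threads could both observe a count of zero and each issue a command. A condensed sketch of the fixed ordering:

unsigned long iflags;
int ready;

spin_lock_irqsave(&phba->hbalock, iflags);
ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
	!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
if (ready)
	/* reserve the single outstanding-fabric-iocb slot while the lock
	 * is still held, so a concurrent caller cannot also see count == 0 */
	atomic_inc(&phba->fabric_iocb_count);
spin_unlock_irqrestore(&phba->hbalock, iflags);

if (ready) {
	/* ... build and issue the iocb; on IOCB_ERROR the count is
	 *     decremented again by the error path ... */
}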
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -272,9 +272,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) if (!(vport->load_flag & FC_UNLOADING) && !(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) { + (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); - } } @@ -566,9 +565,10 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) int rc; list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; - if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || ((vport->port_type == LPFC_NPIV_PORT) && (ndlp->nlp_DID == NameServer_DID))) @@ -629,9 +629,8 @@ lpfc_linkdown(struct lpfc_hba *phba) LPFC_MBOXQ_t *mb; int i; - if (phba->link_state == LPFC_LINK_DOWN) { + if (phba->link_state == LPFC_LINK_DOWN) return 0; - } spin_lock_irq(&phba->hbalock); if (phba->link_state > LPFC_LINK_DOWN) { phba->link_state = LPFC_LINK_DOWN; @@ -684,20 +683,21 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) struct lpfc_nodelist *ndlp; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; - if (ndlp->nlp_type & NLP_FABRIC) { - /* On Linkup its safe to clean up the ndlp - * from Fabric connections. - */ + /* On Linkup its safe to clean up the ndlp + * from Fabric connections. + */ if (ndlp->nlp_DID != Fabric_DID) lpfc_unreg_rpi(vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { - /* Fail outstanding IO now since device is - * marked for PLOGI. - */ + /* Fail outstanding IO now since device is + * marked for PLOGI. 
+ */ lpfc_unreg_rpi(vport, ndlp); } } @@ -799,21 +799,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); + mempool_free(pmb, phba->mbox_mem_pool); return; - vport->num_disc_nodes = 0; - /* go thru NPR nodes and issue ELS PLOGIs */ - if (vport->fc_npr_cnt) - lpfc_els_disc_plogi(vport); - - if (!vport->num_disc_nodes) { - spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~FC_NDISC_ACTIVE; - spin_unlock_irq(shost->host_lock); - } - - vport->port_state = LPFC_VPORT_READY; - out: /* Device Discovery completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -1133,7 +1121,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (la->attType == AT_LINK_UP) { phba->fc_stat.LinkUp++; if (phba->link_flag & LS_LOOPBACK_MODE) { - lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1306 Link Up Event in loop back mode " "x%x received Data: x%x x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, @@ -1150,11 +1138,21 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_mbx_process_link_up(phba, la); } else { phba->fc_stat.LinkDown++; - lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + if (phba->link_flag & LS_LOOPBACK_MODE) { + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, + "1308 Link Down Event in loop back mode " + "x%x received " + "Data: x%x x%x x%x\n", + la->eventTag, phba->fc_eventTag, + phba->pport->port_state, vport->fc_flag); + } + else { + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1305 Link Down Event x%x received " "Data: x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, phba->pport->port_state, vport->fc_flag); + } lpfc_mbx_issue_link_down(phba); } @@ -1305,7 +1303,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); - lpfc_nlp_put(ndlp); if (phba->fc_topology == TOPOLOGY_LOOP) { /* FLOGI failed, use loop map to make discovery list */ @@ -1313,6 +1310,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Start discovery */ lpfc_disc_start(vport); + /* Decrement the reference count to ndlp after the + * reference to the ndlp are done. + */ + lpfc_nlp_put(ndlp); return; } @@ -1320,6 +1321,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0258 Register Fabric login error: 0x%x\n", mb->mbxStatus); + /* Decrement the reference count to ndlp after the reference + * to the ndlp are done. + */ + lpfc_nlp_put(ndlp); return; } @@ -1327,8 +1332,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ - if (vport->port_state == LPFC_FABRIC_CFG_LINK) { vports = lpfc_create_vport_work_array(phba); if (vports != NULL) @@ -1356,6 +1359,11 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); + + /* Drop the reference count from the mbox at the end after + * all the current reference to the ndlp have been done. + */ + lpfc_nlp_put(ndlp); return; } @@ -1463,9 +1471,8 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * registered the port. 
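Editor's note: lpfc_mbx_cmpl_fabric_reg_login() holds a node reference on behalf of the mailbox command. The hunks above move lpfc_nlp_put() from the top of each path to the very end, after the rpi assignment, the state change and the discovery kick-off, so the node cannot be released while the completion handler is still using it. In outline (assuming the reference was taken when the mailbox was issued):

/* use the node first ... */
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
/* ... start discovery, free the mailbox and its DMA buffer ... */

/* ... and only then drop the reference the mailbox was holding */
lpfc_nlp_put(ndlp);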
*/ if (ndlp->rport && ndlp->rport->dd_data && - ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) { + ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) lpfc_nlp_put(ndlp); - } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport add: did:x%x flg:x%x type x%x", @@ -1660,6 +1667,18 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } void +lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if (list_empty(&ndlp->nlp_listp)) { + spin_lock_irq(shost->host_lock); + list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); + spin_unlock_irq(shost->host_lock); + } +} + +void lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -1672,7 +1691,80 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->nlp_listp); spin_unlock_irq(shost->host_lock); lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, - NLP_STE_UNUSED_NODE); + NLP_STE_UNUSED_NODE); +} + +void +lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) + lpfc_cancel_retry_delay_tmo(vport, ndlp); + if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) + lpfc_nlp_counters(vport, ndlp->nlp_state, -1); + lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, + NLP_STE_UNUSED_NODE); +} + +struct lpfc_nodelist * +lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int state) +{ + struct lpfc_hba *phba = vport->phba; + uint32_t did; + unsigned long flags; + + if (!ndlp) + return NULL; + + spin_lock_irqsave(&phba->ndlp_lock, flags); + /* The ndlp should not be in memory free mode */ + if (NLP_CHK_FREE_REQ(ndlp)) { + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + "0277 lpfc_enable_node: ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + (void *)ndlp, ndlp->nlp_usg_map, + atomic_read(&ndlp->kref.refcount)); + return NULL; + } + /* The ndlp should not already be in active mode */ + if (NLP_CHK_NODE_ACT(ndlp)) { + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + "0278 lpfc_enable_node: ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + (void *)ndlp, ndlp->nlp_usg_map, + atomic_read(&ndlp->kref.refcount)); + return NULL; + } + + /* Keep the original DID */ + did = ndlp->nlp_DID; + + /* re-initialize ndlp except of ndlp linked list pointer */ + memset((((char *)ndlp) + sizeof (struct list_head)), 0, + sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); + INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); + INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); + init_timer(&ndlp->nlp_delayfunc); + ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; + ndlp->nlp_delayfunc.data = (unsigned long)ndlp; + ndlp->nlp_DID = did; + ndlp->vport = vport; + ndlp->nlp_sid = NLP_NO_SID; + /* ndlp management re-initialize */ + kref_init(&ndlp->kref); + NLP_INT_NODE_ACT(ndlp); + + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + + if (state != NLP_STE_UNUSED_NODE) + lpfc_nlp_set_state(vport, ndlp, state); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, + "node enable: did:x%x", + ndlp->nlp_DID, 0, 0); + return ndlp; } void @@ -1972,7 +2064,21 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) "Data: x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); - lpfc_dequeue_node(vport, ndlp); + if 
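Editor's note: the memset() in the new lpfc_enable_node() above wipes every field of the node except its embedded list linkage, so the object can be recycled without ever leaving vport->fc_nodes. This only works because nlp_listp is the first member of struct lpfc_nodelist; lpfc additionally saves the DID across the wipe, re-arms the delay timer and event lists, and re-initializes the kref. A generic, self-contained form of the trick (node_reinit() and struct node are illustrative, not lpfc names):

#include <linux/list.h>
#include <linux/string.h>

/* Zero every field of an object except its embedded list_head, so the
 * object can be reused in place while it stays queued on a list.
 * Safe only when the list_head is the first member. */
struct node {
	struct list_head link;	/* must stay first for the offset math below */
	int state;
	int did;
};

static void node_reinit(struct node *n, int did)
{
	memset((char *)n + sizeof(struct list_head), 0,
	       sizeof(*n) - sizeof(struct list_head));
	n->did = did;		/* restore the identity worth keeping */
}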
(NLP_CHK_FREE_REQ(ndlp)) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + "0280 lpfc_cleanup_node: ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + (void *)ndlp, ndlp->nlp_usg_map, + atomic_read(&ndlp->kref.refcount)); + lpfc_dequeue_node(vport, ndlp); + } else { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + "0281 lpfc_cleanup_node: ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + (void *)ndlp, ndlp->nlp_usg_map, + atomic_read(&ndlp->kref.refcount)); + lpfc_disable_node(vport, ndlp); + } /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { @@ -1994,12 +2100,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } list_del(&mb->list); mempool_free(mb, phba->mbox_mem_pool); - lpfc_nlp_put(ndlp); + /* We shall not invoke the lpfc_nlp_put to decrement + * the ndlp reference count as we are in the process + * of lpfc_nlp_release. + */ } } spin_unlock_irq(&phba->hbalock); - lpfc_els_abort(phba,ndlp); + lpfc_els_abort(phba, ndlp); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -2057,7 +2167,6 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } } } - lpfc_cleanup_node(vport, ndlp); /* @@ -2182,7 +2291,16 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); return ndlp; + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); + if (!ndlp) + return NULL; + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_NPR_2B_DISC; + spin_unlock_irq(shost->host_lock); + return ndlp; } + if (vport->fc_flag & FC_RSCN_MODE) { if (lpfc_rscn_payload_check(vport, did)) { /* If we've already recieved a PLOGI from this NPort @@ -2363,6 +2481,7 @@ lpfc_disc_start(struct lpfc_vport *vport) * continue discovery. 
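Editor's note: lpfc_cleanup_node() above now distinguishes two outcomes. If a final free has been requested (NLP_CHK_FREE_REQ) the node is removed from the vport list for good; otherwise it is merely deactivated and left queued so lpfc_enable_node() can recycle it later. Stripped of the log messages:

if (NLP_CHK_FREE_REQ(ndlp))
	lpfc_dequeue_node(vport, ndlp);		/* final release: off the list */
else
	lpfc_disable_node(vport, ndlp);		/* keep it queued, but inactive */

The related hunk stops calling lpfc_nlp_put() for the queued-mailbox reference here, because this code already runs from lpfc_nlp_release() and another put would re-enter the release path.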
*/ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE)) { lpfc_issue_reg_vpi(phba, vport); return; @@ -2485,6 +2604,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport) if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { lpfc_free_tx(phba, ndlp); @@ -2572,6 +2693,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Start discovery by sending FLOGI, clean up old rpis */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state != NLP_STE_NPR_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { @@ -2618,7 +2741,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) "NameServer login\n"); /* Next look for NameServer ndlp */ ndlp = lpfc_findnode_did(vport, NameServer_DID); - if (ndlp) + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) lpfc_els_abort(phba, ndlp); /* ReStart discovery */ @@ -2897,6 +3020,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_sid = NLP_NO_SID; INIT_LIST_HEAD(&ndlp->nlp_listp); kref_init(&ndlp->kref); + NLP_INT_NODE_ACT(ndlp); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node init: did:x%x", @@ -2911,6 +3035,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, static void lpfc_nlp_release(struct kref *kref) { + struct lpfc_hba *phba; + unsigned long flags; struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, kref); @@ -2918,8 +3044,24 @@ lpfc_nlp_release(struct kref *kref) "node release: did:x%x flg:x%x type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, + "0279 lpfc_nlp_release: ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + (void *)ndlp, ndlp->nlp_usg_map, + atomic_read(&ndlp->kref.refcount)); + + /* remove ndlp from action. */ lpfc_nlp_remove(ndlp->vport, ndlp); - mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); + + /* clear the ndlp active flag for all release cases */ + phba = ndlp->vport->phba; + spin_lock_irqsave(&phba->ndlp_lock, flags); + NLP_CLR_NODE_ACT(ndlp); + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + + /* free ndlp memory for final ndlp release */ + if (NLP_CHK_FREE_REQ(ndlp)) + mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); } /* This routine bumps the reference count for a ndlp structure to ensure @@ -2929,37 +3071,108 @@ lpfc_nlp_release(struct kref *kref) struct lpfc_nodelist * lpfc_nlp_get(struct lpfc_nodelist *ndlp) { + struct lpfc_hba *phba; + unsigned long flags; + if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, "node get: did:x%x flg:x%x refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount)); - kref_get(&ndlp->kref); + /* The check of ndlp usage to prevent incrementing the + * ndlp reference count that is in the process of being + * released. 
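Editor's note: lpfc_nlp_release() above now only returns the node's memory to the mempool when a free was explicitly requested; otherwise the object is just marked inactive so it can be revived later. Condensed from the new release callback, with the log messages omitted:

static void lpfc_nlp_release_sketch(struct kref *kref)
{
	struct lpfc_nodelist *ndlp =
		container_of(kref, struct lpfc_nodelist, kref);
	struct lpfc_hba *phba = ndlp->vport->phba;
	unsigned long flags;

	lpfc_nlp_remove(ndlp->vport, ndlp);	/* take the node out of action */

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);			/* no new references allowed */
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* memory goes back to the mempool only if a free was requested;
	 * otherwise the object stays around for lpfc_enable_node() */
	if (NLP_CHK_FREE_REQ(ndlp))
		mempool_free(ndlp, phba->nlp_mem_pool);
}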
+ */ + phba = ndlp->vport->phba; + spin_lock_irqsave(&phba->ndlp_lock, flags); + if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, + "0276 lpfc_nlp_get: ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + (void *)ndlp, ndlp->nlp_usg_map, + atomic_read(&ndlp->kref.refcount)); + return NULL; + } else + kref_get(&ndlp->kref); + spin_unlock_irqrestore(&phba->ndlp_lock, flags); } return ndlp; } - /* This routine decrements the reference count for a ndlp structure. If the - * count goes to 0, this indicates the the associated nodelist should be freed. + * count goes to 0, this indicates the the associated nodelist should be + * freed. Returning 1 indicates the ndlp resource has been released; on the + * other hand, returning 0 indicates the ndlp resource has not been released + * yet. */ int lpfc_nlp_put(struct lpfc_nodelist *ndlp) { - if (ndlp) { - lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node put: did:x%x flg:x%x refcnt:x%x", - ndlp->nlp_DID, ndlp->nlp_flag, - atomic_read(&ndlp->kref.refcount)); + struct lpfc_hba *phba; + unsigned long flags; + + if (!ndlp) + return 1; + + lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, + "node put: did:x%x flg:x%x refcnt:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, + atomic_read(&ndlp->kref.refcount)); + phba = ndlp->vport->phba; + spin_lock_irqsave(&phba->ndlp_lock, flags); + /* Check the ndlp memory free acknowledge flag to avoid the + * possible race condition that kref_put got invoked again + * after previous one has done ndlp memory free. + */ + if (NLP_CHK_FREE_ACK(ndlp)) { + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, + "0274 lpfc_nlp_put: ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + (void *)ndlp, ndlp->nlp_usg_map, + atomic_read(&ndlp->kref.refcount)); + return 1; } - return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; + /* Check the ndlp inactivate log flag to avoid the possible + * race condition that kref_put got invoked again after ndlp + * is already in inactivating state. + */ + if (NLP_CHK_IACT_REQ(ndlp)) { + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, + "0275 lpfc_nlp_put: ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + (void *)ndlp, ndlp->nlp_usg_map, + atomic_read(&ndlp->kref.refcount)); + return 1; + } + /* For last put, mark the ndlp usage flags to make sure no + * other kref_get and kref_put on the same ndlp shall get + * in between the process when the final kref_put has been + * invoked on this ndlp. + */ + if (atomic_read(&ndlp->kref.refcount) == 1) { + /* Indicate ndlp is put to inactive state. */ + NLP_SET_IACT_REQ(ndlp); + /* Acknowledge ndlp memory free has been seen. */ + if (NLP_CHK_FREE_REQ(ndlp)) + NLP_SET_FREE_ACK(ndlp); + } + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + /* Note, the kref_put returns 1 when decrementing a reference + * count that was 1, it invokes the release callback function, + * but it still left the reference count as 1 (not actually + * performs the last decrementation). Otherwise, it actually + * decrements the reference count and returns 0. + */ + return kref_put(&ndlp->kref, lpfc_nlp_release); } /* This routine free's the specified nodelist if it is not in use - * by any other discovery thread. This routine returns 1 if the ndlp - * is not being used by anyone and has been freed. 
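Editor's note: lpfc_nlp_get() and lpfc_nlp_put() above now cooperate through the usage flags: get refuses to hand out a reference once the node is inactive or free-acknowledged (roughly what kref_get_unless_zero() later offers in mainline), and put marks the node under ndlp_lock before performing the final drop so a racing get or a second put backs off. The last-put marking, condensed:

spin_lock_irqsave(&phba->ndlp_lock, flags);
if (atomic_read(&ndlp->kref.refcount) == 1) {
	/* final put in progress: fence off concurrent get/put */
	NLP_SET_IACT_REQ(ndlp);
	if (NLP_CHK_FREE_REQ(ndlp))
		NLP_SET_FREE_ACK(ndlp);	/* the memory really will be freed */
}
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
return kref_put(&ndlp->kref, lpfc_nlp_release);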
A return value of - * 0 indicates it is being used by another discovery thread and the - * refcount is left unchanged. + * by any other discovery thread. This routine returns 1 if the + * ndlp has been freed. A return value of 0 indicates the ndlp is + * not yet been released. */ int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) @@ -2968,11 +3181,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) "node not used: did:x%x flg:x%x refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, atomic_read(&ndlp->kref.refcount)); - - if (atomic_read(&ndlp->kref.refcount) == 1) { - lpfc_nlp_put(ndlp); - return 1; - } + if (atomic_read(&ndlp->kref.refcount) == 1) + if (lpfc_nlp_put(ndlp)) + return 1; return 0; } - diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 041f83e..7773b94 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -581,6 +581,7 @@ struct ls_rjt { /* Structure is in Big Endian format */ #define LSEXP_INVALID_O_SID 0x15 #define LSEXP_INVALID_OX_RX 0x17 #define LSEXP_CMD_IN_PROGRESS 0x19 +#define LSEXP_PORT_LOGIN_REQ 0x1E #define LSEXP_INVALID_NPORT_ID 0x1F #define LSEXP_INVALID_SEQ_ID 0x21 #define LSEXP_INVALID_XCHG 0x23 @@ -1376,11 +1377,26 @@ typedef struct { /* FireFly BIU registers */ #define CMD_QUE_XRI64_CX 0xB3 #define CMD_IOCB_RCV_SEQ64_CX 0xB5 #define CMD_IOCB_RCV_ELS64_CX 0xB7 +#define CMD_IOCB_RET_XRI64_CX 0xB9 #define CMD_IOCB_RCV_CONT64_CX 0xBB #define CMD_GEN_REQUEST64_CR 0xC2 #define CMD_GEN_REQUEST64_CX 0xC3 +/* Unhandled SLI-3 Commands */ +#define CMD_IOCB_XMIT_MSEQ64_CR 0xB0 +#define CMD_IOCB_XMIT_MSEQ64_CX 0xB1 +#define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1 +#define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD +#define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6 +#define CMD_IOCB_ABORT_EXTENDED_CN 0xBA +#define CMD_IOCB_RET_HBQE64_CN 0xCA +#define CMD_IOCB_FCP_IBIDIR64_CR 0xAC +#define CMD_IOCB_FCP_IBIDIR64_CX 0xAD +#define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF +#define CMD_IOCB_LOGENTRY_CN 0x94 +#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 + #define CMD_MAX_IOCB_CMD 0xE6 #define CMD_IOCB_MASK 0xff diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6cfeba7..2284375 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -461,11 +461,21 @@ lpfc_config_port_post(struct lpfc_hba *phba) int lpfc_hba_down_prep(struct lpfc_hba *phba) { + struct lpfc_vport **vports; + int i; /* Disable interrupts */ writel(0, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ - lpfc_cleanup_discovery_resources(phba->pport); + if (phba->pport->load_flag & FC_UNLOADING) + lpfc_cleanup_discovery_resources(phba->pport); + else { + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) + lpfc_cleanup_discovery_resources(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); + } return 0; } @@ -1422,9 +1432,32 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_port_link_failure(vport); list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, + NLP_STE_UNUSED_NODE); + if (!ndlp) + continue; + spin_lock_irq(&phba->ndlp_lock); + NLP_SET_FREE_REQ(ndlp); + spin_unlock_irq(&phba->ndlp_lock); + /* Trigger the release of the ndlp memory */ + lpfc_nlp_put(ndlp); + continue; + } + spin_lock_irq(&phba->ndlp_lock); + if (NLP_CHK_FREE_REQ(ndlp)) { + /* The ndlp should not be in memory free mode already */ + spin_unlock_irq(&phba->ndlp_lock); + continue; + } else + /* Indicate request for freeing ndlp memory */ + NLP_SET_FREE_REQ(ndlp); + spin_unlock_irq(&phba->ndlp_lock); + if (ndlp->nlp_type & NLP_FABRIC) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); + lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } @@ -1438,6 +1471,17 @@ lpfc_cleanup(struct lpfc_vport *vport) if (i++ > 3000) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0233 Nodelist not empty\n"); + list_for_each_entry_safe(ndlp, next_ndlp, + &vport->fc_nodes, nlp_listp) { + lpfc_printf_vlog(ndlp->vport, KERN_ERR, + LOG_NODE, + "0282: did:x%x ndlp:x%p " + "usgmap:x%x refcnt:%d\n", + ndlp->nlp_DID, (void *)ndlp, + ndlp->nlp_usg_map, + atomic_read( + &ndlp->kref.refcount)); + } break; } @@ -1586,6 +1630,8 @@ lpfc_offline_prep(struct lpfc_hba * phba) list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; if (ndlp->nlp_type & NLP_FABRIC) { @@ -1695,9 +1741,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) vport = (struct lpfc_vport *) shost->hostdata; vport->phba = phba; - vport->load_flag |= FC_LOADING; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + vport->fc_rscn_flush = 0; lpfc_get_vport_cfgparam(vport); shost->unique_id = instance; @@ -1879,6 +1925,42 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) spin_unlock_irq(shost->host_lock); } +static int +lpfc_enable_msix(struct lpfc_hba *phba) +{ + int error; + + phba->msix_entries[0].entry = 0; + phba->msix_entries[0].vector = 0; + + error = pci_enable_msix(phba->pcidev, phba->msix_entries, + ARRAY_SIZE(phba->msix_entries)); + if (error) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0420 Enable MSI-X failed (%d), continuing " + "with MSI\n", error); + pci_disable_msix(phba->pcidev); + return error; + } + + error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, + LPFC_DRIVER_NAME, phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0421 MSI-X request_irq failed (%d), " + "continuing with MSI\n", error); + pci_disable_msix(phba->pcidev); + } + return error; +} + +static void 
+lpfc_disable_msix(struct lpfc_hba *phba) +{ + free_irq(phba->msix_entries[0].vector, phba); + pci_disable_msix(phba->pcidev); +} + static int __devinit lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { @@ -1905,6 +1987,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) spin_lock_init(&phba->hbalock); + /* Initialize ndlp management spinlock */ + spin_lock_init(&phba->ndlp_lock); + phba->pcidev = pdev; /* Assign an unused board number */ @@ -2002,6 +2087,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); + INIT_LIST_HEAD(&phba->hbqbuf_in_list); + /* Initialize the SLI Layer to run with lpfc HBAs. */ lpfc_sli_setup(phba); lpfc_sli_queue_setup(phba); @@ -2077,24 +2164,36 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) lpfc_debugfs_initialize(vport); pci_set_drvdata(pdev, shost); + phba->intr_type = NONE; - if (phba->cfg_use_msi) { + if (phba->cfg_use_msi == 2) { + error = lpfc_enable_msix(phba); + if (!error) + phba->intr_type = MSIX; + } + + /* Fallback to MSI if MSI-X initialization failed */ + if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { retval = pci_enable_msi(phba->pcidev); if (!retval) - phba->using_msi = 1; + phba->intr_type = MSI; else lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0452 Enable MSI failed, continuing " "with IRQ\n"); } - retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, - LPFC_DRIVER_NAME, phba); - if (retval) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0451 Enable interrupt handler failed\n"); - error = retval; - goto out_disable_msi; + /* MSI-X is the only case the doesn't need to call request_irq */ + if (phba->intr_type != MSIX) { + retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (retval) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " + "interrupt handler failed\n"); + error = retval; + goto out_disable_msi; + } else if (phba->intr_type != MSI) + phba->intr_type = INTx; } phba->MBslimaddr = phba->slim_memmap_p; @@ -2139,9 +2238,14 @@ out_remove_device: out_free_irq: lpfc_stop_phba_timers(phba); phba->pport->work_port_events = 0; - free_irq(phba->pcidev->irq, phba); + + if (phba->intr_type == MSIX) + lpfc_disable_msix(phba); + else + free_irq(phba->pcidev->irq, phba); + out_disable_msi: - if (phba->using_msi) + if (phba->intr_type == MSI) pci_disable_msi(phba->pcidev); destroy_port(vport); out_kthread_stop: @@ -2214,10 +2318,13 @@ lpfc_pci_remove_one(struct pci_dev *pdev) lpfc_debugfs_terminate(vport); - /* Release the irq reservation */ - free_irq(phba->pcidev->irq, phba); - if (phba->using_msi) - pci_disable_msi(phba->pcidev); + if (phba->intr_type == MSIX) + lpfc_disable_msix(phba); + else { + free_irq(phba->pcidev->irq, phba); + if (phba->intr_type == MSI) + pci_disable_msi(phba->pcidev); + } pci_set_drvdata(pdev, NULL); scsi_host_put(shost); @@ -2276,10 +2383,13 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, pring = &psli->ring[psli->fcp_ring]; lpfc_sli_abort_iocb_ring(phba, pring); - /* Release the irq reservation */ - free_irq(phba->pcidev->irq, phba); - if (phba->using_msi) - pci_disable_msi(phba->pcidev); + if (phba->intr_type == MSIX) + lpfc_disable_msix(phba); + else { + free_irq(phba->pcidev->irq, phba); + if (phba->intr_type == MSI) + pci_disable_msi(phba->pcidev); + } /* Request a slot reset. 
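Editor's note: interrupt setup in lpfc_pci_probe_one() is now a recorded cascade: try MSI-X when cfg_use_msi == 2, fall back to MSI when cfg_use_msi >= 1, and finally to a shared INTx line, with phba->intr_type remembering which mode succeeded so remove and error-recovery paths can undo exactly what was set up (lpfc_disable_msix() versus free_irq() plus pci_disable_msi()). Condensed from the probe path:

phba->intr_type = NONE;
if (phba->cfg_use_msi == 2 && !lpfc_enable_msix(phba))
	phba->intr_type = MSIX;	/* MSI-X path already did its request_irq() */
if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE &&
    !pci_enable_msi(phba->pcidev))
	phba->intr_type = MSI;
if (phba->intr_type != MSIX) {
	/* MSI and legacy INTx share a single request_irq() call */
	if (request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
			LPFC_DRIVER_NAME, phba))
		goto out_disable_msi;	/* as in the real probe error path */
	if (phba->intr_type != MSI)
		phba->intr_type = INTx;
}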
*/ return PCI_ERS_RESULT_NEED_RESET; diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index c5841d7..39fd2b8 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2005 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -35,11 +35,15 @@ #define LOG_ALL_MSG 0xffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ + do { \ { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ - fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ + } while (0) #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ + do { \ { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ - fmt, phba->brd_no, ##arg); } + fmt, phba->brd_no, ##arg); } \ + } while (0) diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 6dc5ab8..3c0cebc 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -264,19 +264,30 @@ void lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) { struct hbq_dmabuf *hbq_entry; + unsigned long flags; + + if (!mp) + return; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { + /* Check whether HBQ is still in use */ + spin_lock_irqsave(&phba->hbalock, flags); + if (!phba->hbq_in_use) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); + list_del(&hbq_entry->dbuf.list); if (hbq_entry->tag == -1) { (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) (phba, hbq_entry); } else { lpfc_sli_free_hbq(phba, hbq_entry); } + spin_unlock_irqrestore(&phba->hbalock, flags); } else { lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } return; } - diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4a0e340..d513813 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
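Editor's note: the lpfc_printf_log()/lpfc_printf_vlog() macros above gain the standard do { } while (0) wrapper so that a multi-statement macro behaves as one real statement inside an un-braced if/else, which is presumably also why the LOG_MBOX_CANNOT_ISSUE_DATA() call sites later in lpfc_sli.c now carry their trailing semicolons. A small stand-alone illustration, with printf standing in for the real logging (LOG2 is a made-up name):

#include <stdio.h>

/* A bare-braced macro such as
 *     #define LOG2(a, b)  { printf("%s", a); printf("%s", b); }
 * breaks in "if (c) LOG2(x, y); else ...": the expansion "{ ... };" ends
 * with a stray empty statement that detaches the else (compile error).
 * do { } while (0) makes the body a single statement that still wants the
 * trailing semicolon, so the macro composes like a function call. */
#define LOG2(a, b)  do { printf("%s", a); printf("%s", b); } while (0)

int main(void)
{
	int verbose = 1;

	if (verbose)
		LOG2("hello ", "world\n");
	else
		LOG2("quiet ", "mode\n");
	return 0;
}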
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -249,6 +249,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; struct lpfc_dmabuf *pcmd; + struct lpfc_work_evt *evtp; uint32_t *lp; IOCB_t *icmd; struct serv_parm *sp; @@ -435,8 +436,14 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, del_timer_sync(&ndlp->nlp_delayfunc); ndlp->nlp_last_elscmd = 0; - if (!list_empty(&ndlp->els_retry_evt.evt_listp)) + if (!list_empty(&ndlp->els_retry_evt.evt_listp)) { list_del_init(&ndlp->els_retry_evt.evt_listp); + /* Decrement ndlp reference count held for the + * delayed retry + */ + evtp = &ndlp->els_retry_evt; + lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); + } if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { spin_lock_irq(shost->host_lock); @@ -638,13 +645,15 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 0; } - /* Check config parameter use-adisc or FCP-2 */ - if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || - ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag |= NLP_NPR_ADISC; - spin_unlock_irq(shost->host_lock); - return 1; + if (!(vport->fc_flag & FC_PT2PT)) { + /* Check config parameter use-adisc or FCP-2 */ + if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || + ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_NPR_ADISC; + spin_unlock_irq(shost->host_lock); + return 1; + } } ndlp->nlp_flag &= ~NLP_NPR_ADISC; lpfc_unreg_rpi(vport, ndlp); @@ -656,7 +665,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, - "0253 Illegal State Transition: node x%x " + "0271 Illegal State Transition: node x%x " "event x%x, state x%x Data: x%x x%x\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); @@ -674,7 +683,7 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, - "0253 Illegal State Transition: node x%x " + "0272 Illegal State Transition: node x%x " "event x%x, state x%x Data: x%x x%x\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); @@ -2144,8 +2153,11 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t cur_state, rc; uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t); + uint32_t got_ndlp = 0; + + if (lpfc_nlp_get(ndlp)) + got_ndlp = 1; - lpfc_nlp_get(ndlp); cur_state = ndlp->nlp_state; /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ @@ -2162,15 +2174,24 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, rc = (func) (vport, ndlp, arg, evt); /* DSM out state <rc> on NPort <nlp_DID> */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + if (got_ndlp) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x Data: x%x\n", rc, ndlp->nlp_DID, ndlp->nlp_flag); - lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, - "DSM out: ste:%d did:x%x flg:x%x", - rc, ndlp->nlp_DID, ndlp->nlp_flag); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, + "DSM out: ste:%d did:x%x flg:x%x", + rc, ndlp->nlp_DID, ndlp->nlp_flag); + /* Decrement the ndlp reference count held for this function */ + 
lpfc_nlp_put(ndlp); + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "0212 DSM out state %d on NPort free\n", rc); - lpfc_nlp_put(ndlp); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, + "DSM out: ste:%d did:x%x flg:x%x", + rc, 0, 0); + } return rc; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index fc5c3a4..70255c1 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -1283,6 +1283,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) match = 0; spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && i == ndlp->nlp_sid && ndlp->rport) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index fdd01e3..f532064 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -203,8 +203,25 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case CMD_IOCB_RCV_SEQ64_CX: case CMD_IOCB_RCV_ELS64_CX: case CMD_IOCB_RCV_CONT64_CX: + case CMD_IOCB_RET_XRI64_CX: type = LPFC_UNSOL_IOCB; break; + case CMD_IOCB_XMIT_MSEQ64_CR: + case CMD_IOCB_XMIT_MSEQ64_CX: + case CMD_IOCB_RCV_SEQ_LIST64_CX: + case CMD_IOCB_RCV_ELS_LIST64_CX: + case CMD_IOCB_CLOSE_EXTENDED_CN: + case CMD_IOCB_ABORT_EXTENDED_CN: + case CMD_IOCB_RET_HBQE64_CN: + case CMD_IOCB_FCP_IBIDIR64_CR: + case CMD_IOCB_FCP_IBIDIR64_CX: + case CMD_IOCB_FCP_ITASKMGT64_CX: + case CMD_IOCB_LOGENTRY_CN: + case CMD_IOCB_LOGENTRY_ASYNC_CN: + printk("%s - Unhandled SLI-3 Command x%x\n", + __FUNCTION__, iocb_cmnd); + type = LPFC_UNKNOWN_IOCB; + break; default: type = LPFC_UNKNOWN_IOCB; break; @@ -529,10 +546,13 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) { struct lpfc_dmabuf *dmabuf, *next_dmabuf; struct hbq_dmabuf *hbq_buf; + unsigned long flags; int i, hbq_count; + uint32_t hbqno; hbq_count = lpfc_sli_hbq_count(); /* Return all memory used by all HBQs */ + spin_lock_irqsave(&phba->hbalock, flags); for (i = 0; i < hbq_count; ++i) { list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->hbqs[i].hbq_buffer_list, list) { @@ -542,6 +562,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) } phba->hbqs[i].buffer_count = 0; } + /* Return all HBQ buffer that are in-fly */ + list_for_each_entry_safe(dmabuf, next_dmabuf, + &phba->hbqbuf_in_list, list) { + hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); + list_del(&hbq_buf->dbuf.list); + if (hbq_buf->tag == -1) { + (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) + (phba, hbq_buf); + } else { + hbqno = hbq_buf->tag >> 16; + if (hbqno >= LPFC_MAX_HBQS) + (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) + (phba, hbq_buf); + else + 
(phba->hbqs[hbqno].hbq_free_buffer)(phba, + hbq_buf); + } + } + + /* Mark the HBQs not in use */ + phba->hbq_in_use = 0; + spin_unlock_irqrestore(&phba->hbalock, flags); } static struct lpfc_hbq_entry * @@ -603,6 +645,7 @@ static int lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) { uint32_t i, start, end; + unsigned long flags; struct hbq_dmabuf *hbq_buffer; if (!phba->hbqs[hbqno].hbq_alloc_buffer) { @@ -615,6 +658,13 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) end = lpfc_hbq_defs[hbqno]->entry_count; } + /* Check whether HBQ is still in use */ + spin_lock_irqsave(&phba->hbalock, flags); + if (!phba->hbq_in_use) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return 0; + } + /* Populate HBQ entries */ for (i = start; i < end; i++) { hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); @@ -626,6 +676,8 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) else (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); } + + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; } @@ -910,16 +962,29 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) uint32_t hbqno; void *virt; /* virtual address ptr */ dma_addr_t phys; /* mapped address */ + unsigned long flags; + + /* Check whether HBQ is still in use */ + spin_lock_irqsave(&phba->hbalock, flags); + if (!phba->hbq_in_use) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return NULL; + } hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); - if (hbq_entry == NULL) + if (hbq_entry == NULL) { + spin_unlock_irqrestore(&phba->hbalock, flags); return NULL; + } list_del(&hbq_entry->dbuf.list); hbqno = tag >> 16; new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); - if (new_hbq_entry == NULL) + if (new_hbq_entry == NULL) { + list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list); + spin_unlock_irqrestore(&phba->hbalock, flags); return &hbq_entry->dbuf; + } new_hbq_entry->tag = -1; phys = new_hbq_entry->dbuf.phys; virt = new_hbq_entry->dbuf.virt; @@ -928,6 +993,9 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) hbq_entry->dbuf.phys = phys; hbq_entry->dbuf.virt = virt; lpfc_sli_free_hbq(phba, hbq_entry); + list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list); + spin_unlock_irqrestore(&phba->hbalock, flags); + return &new_hbq_entry->dbuf; } @@ -951,6 +1019,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t Rctl, Type; uint32_t match, i; struct lpfc_iocbq *iocbq; + struct lpfc_dmabuf *dmzbuf; match = 0; irsp = &(saveq->iocb); @@ -972,6 +1041,29 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return 1; } + if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && + (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { + if (irsp->ulpBdeCount > 0) { + dmzbuf = lpfc_sli_get_buff(phba, pring, + irsp->un.ulpWord[3]); + lpfc_in_buf_free(phba, dmzbuf); + } + + if (irsp->ulpBdeCount > 1) { + dmzbuf = lpfc_sli_get_buff(phba, pring, + irsp->unsli3.sli3Words[3]); + lpfc_in_buf_free(phba, dmzbuf); + } + + if (irsp->ulpBdeCount > 2) { + dmzbuf = lpfc_sli_get_buff(phba, pring, + irsp->unsli3.sli3Words[7]); + lpfc_in_buf_free(phba, dmzbuf); + } + + return 1; + } + if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { if (irsp->ulpBdeCount != 0) { saveq->context2 = lpfc_sli_get_buff(phba, pring, @@ -2293,6 +2385,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) /* Initialize the struct lpfc_sli_hbq structure for each hbq */ phba->link_state = 
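Editor's note: phba->hbq_in_use is set when the HBQs are configured and cleared, under hbalock, once lpfc_sli_hbqbuf_free_all() has reclaimed every buffer (including those tracked on the new phba->hbqbuf_in_list). Every other HBQ path touched by this patch (refill, buffer replacement, lpfc_in_buf_free()) re-checks the flag under the same lock first, so it degenerates to a no-op after teardown instead of touching freed HBQ memory. The guard, as used in lpfc_sli_hbqbuf_fill_hbqs():

spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use) {
	/* HBQs already torn down: nothing to post or reclaim */
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 0;
}
/* ... post or reclaim HBQ buffers while still holding hbalock ... */
spin_unlock_irqrestore(&phba->hbalock, flags);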
LPFC_INIT_MBX_CMDS; + phba->hbq_in_use = 1; hbq_entry_index = 0; for (hbqno = 0; hbqno < hbq_count; ++hbqno) { @@ -2404,9 +2497,7 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) if ((pmb->mb.un.varCfgPort.sli_mode == 3) && (!pmb->mb.un.varCfgPort.cMA)) { rc = -ENXIO; - goto do_prep_failed; } - return rc; do_prep_failed: mempool_free(pmb, phba->mbox_mem_pool); @@ -2625,14 +2716,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); return MBX_NOT_FINISHED; } if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); return MBX_NOT_FINISHED; } diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4b633d3..ca540d1 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.2.4" +#define LPFC_DRIVER_VERSION "8.2.5" #define LPFC_DRIVER_NAME "lpfc" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 9fad766..86d05be 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2006 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -327,7 +327,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) * up and ready to FDISC. */ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { lpfc_set_disctmo(vport); lpfc_initial_fdisc(vport); @@ -358,7 +359,8 @@ disable_vport(struct fc_vport *fc_vport) long timeout; ndlp = lpfc_findnode_did(vport, Fabric_DID); - if (ndlp && phba->link_state >= LPFC_LINK_UP) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp) + && phba->link_state >= LPFC_LINK_UP) { vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = msecs_to_jiffies(phba->fc_ratov * 2000); if (!lpfc_issue_els_npiv_logo(vport, ndlp)) @@ -372,6 +374,8 @@ disable_vport(struct fc_vport *fc_vport) * calling lpfc_cleanup_rpis(vport, 1) */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { + if (!NLP_CHK_NODE_ACT(ndlp)) + continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; lpfc_disc_state_machine(vport, ndlp, NULL, @@ -414,7 +418,8 @@ enable_vport(struct fc_vport *fc_vport) * up and ready to FDISC. 
*/ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp) + && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { lpfc_set_disctmo(vport); lpfc_initial_fdisc(vport); @@ -498,7 +503,41 @@ lpfc_vport_delete(struct fc_vport *fc_vport) scsi_remove_host(lpfc_shost_from_vport(vport)); ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); - if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && + + /* In case of driver unload, we shall not perform fabric logo as the + * worker thread already stopped at this stage and, in this case, we + * can safely skip the fabric logo. + */ + if (phba->pport->load_flag & FC_UNLOADING) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && + phba->link_state >= LPFC_LINK_UP) { + /* First look for the Fabric ndlp */ + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) + goto skip_logo; + else if (!NLP_CHK_NODE_ACT(ndlp)) { + ndlp = lpfc_enable_node(vport, ndlp, + NLP_STE_UNUSED_NODE); + if (!ndlp) + goto skip_logo; + } + /* Remove ndlp from vport npld list */ + lpfc_dequeue_node(vport, ndlp); + + /* Indicate free memory when release */ + spin_lock_irq(&phba->ndlp_lock); + NLP_SET_FREE_REQ(ndlp); + spin_unlock_irq(&phba->ndlp_lock); + /* Kick off release ndlp when it can be safely done */ + lpfc_nlp_put(ndlp); + } + goto skip_logo; + } + + /* Otherwise, we will perform fabric logo as needed */ + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && + ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && phba->link_state >= LPFC_LINK_UP) { if (vport->cfg_enable_da_id) { timeout = msecs_to_jiffies(phba->fc_ratov * 2000); @@ -519,8 +558,27 @@ lpfc_vport_delete(struct fc_vport *fc_vport) if (!ndlp) goto skip_logo; lpfc_nlp_init(vport, ndlp, Fabric_DID); + /* Indicate free memory when release */ + NLP_SET_FREE_REQ(ndlp); } else { + if (!NLP_CHK_NODE_ACT(ndlp)) + ndlp = lpfc_enable_node(vport, ndlp, + NLP_STE_UNUSED_NODE); + if (!ndlp) + goto skip_logo; + + /* Remove ndlp from vport npld list */ lpfc_dequeue_node(vport, ndlp); + spin_lock_irq(&phba->ndlp_lock); + if (!NLP_CHK_FREE_REQ(ndlp)) + /* Indicate free memory when release */ + NLP_SET_FREE_REQ(ndlp); + else { + /* Skip this if ndlp is already in free mode */ + spin_unlock_irq(&phba->ndlp_lock); + goto skip_logo; + } + spin_unlock_irq(&phba->ndlp_lock); } vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = msecs_to_jiffies(phba->fc_ratov * 2000); @@ -534,9 +592,9 @@ skip_logo: lpfc_sli_host_down(vport); lpfc_stop_vport_timers(vport); - lpfc_unreg_all_rpis(vport); if (!(phba->pport->load_flag & FC_UNLOADING)) { + lpfc_unreg_all_rpis(vport); lpfc_unreg_default_rpis(vport); /* * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index b6587a6..0ad215e 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -59,7 +59,6 @@ EXPORT_SYMBOL(mraid_mm_register_adp); EXPORT_SYMBOL(mraid_mm_unregister_adp); EXPORT_SYMBOL(mraid_mm_adapter_app_handle); -static int majorno; static uint32_t drvr_ver = 0x02200207; static int adapters_count_g; @@ -76,6 +75,12 @@ static const struct file_operations lsi_fops = { .owner = THIS_MODULE, }; +static struct miscdevice megaraid_mm_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "megadev0", + .fops = &lsi_fops, +}; + /** * mraid_mm_open - open routine for char node interface * @inode : unused @@ -1184,15 +1189,16 @@ 
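Editor's note: megaraid_mm above stops claiming a character major of its own and registers the static struct miscdevice instead, so /dev/megadev0 lives under the shared misc major with a dynamically assigned minor and there is no major number to remember for unregistration (the actual conversion of mraid_mm_init()/mraid_mm_exit() follows below). A minimal stand-alone sketch of the same pattern; demo_* names are illustrative and the fops carry no ioctl handling:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick a minor */
	.name  = "megadev0",		/* node name under /dev */
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	return misc_register(&demo_dev);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");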
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index b6587a6..0ad215e 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -59,7 +59,6 @@
 EXPORT_SYMBOL(mraid_mm_register_adp);
 EXPORT_SYMBOL(mraid_mm_unregister_adp);
 EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
-static int majorno;
 static uint32_t drvr_ver = 0x02200207;
 
 static int adapters_count_g;
@@ -76,6 +75,12 @@
 static const struct file_operations lsi_fops = {
 	.owner	= THIS_MODULE,
 };
 
+static struct miscdevice megaraid_mm_dev = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "megadev0",
+	.fops	= &lsi_fops,
+};
+
 /**
  * mraid_mm_open - open routine for char node interface
  * @inode	: unused
@@ -1184,15 +1189,16 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
 static int __init
 mraid_mm_init(void)
 {
+	int err;
+
 	// Announce the driver version
 	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
 		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
 
-	majorno = register_chrdev(0, "megadev", &lsi_fops);
-
-	if (majorno < 0) {
-		con_log(CL_ANN, ("megaraid cmm: cannot get major\n"));
-		return majorno;
+	err = misc_register(&megaraid_mm_dev);
+	if (err < 0) {
+		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
+		return err;
 	}
 
 	init_waitqueue_head(&wait_q);
@@ -1230,7 +1236,7 @@
 mraid_mm_exit(void)
 {
 	con_log(CL_DLEVEL1 , ("exiting common mod\n"));
 
-	unregister_chrdev(majorno, "megadev");
+	misc_deregister(&megaraid_mm_dev);
 }
 
 module_init(mraid_mm_init);
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index c8762b2..55b425c 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -22,6 +22,7 @@
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/list.h>
+#include <linux/miscdevice.h>
 
 #include "mbox_defs.h"
 #include "megaraid_ioctl.h"
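The megaraid_mm hunks above stop claiming an entire character major with register_chrdev() and instead register a dynamic-minor misc device. A self-contained sketch of that registration pattern; the "exampledev" node name and empty fops are placeholders, not the driver's:

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/miscdevice.h>

	static const struct file_operations example_fops = {
		.owner = THIS_MODULE,
	};

	static struct miscdevice example_dev = {
		.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick the minor */
		.name  = "exampledev",		/* appears as /dev/exampledev */
		.fops  = &example_fops,
	};

	static int __init example_init(void)
	{
		return misc_register(&example_dev);
	}

	static void __exit example_exit(void)
	{
		misc_deregister(&example_dev);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");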
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 2a6e4f4..a57fed4 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -416,11 +416,11 @@ static int ses_intf_add(struct class_device *cdev,
 	int i, j, types, len, components = 0;
 	int err = -ENOMEM;
 	struct enclosure_device *edev;
-	struct ses_component *scomp;
+	struct ses_component *scomp = NULL;
 
 	if (!scsi_device_enclosure(sdev)) {
 		/* not an enclosure, but might be in one */
-		edev = enclosure_find(&sdev->host->shost_gendev);
+		edev = enclosure_find(&sdev->host->shost_gendev);
 		if (edev) {
 			ses_match_to_enclosure(edev, sdev);
 			class_device_put(&edev->cdev);
@@ -456,9 +456,6 @@ static int ses_intf_add(struct class_device *cdev,
 	if (!buf)
 		goto err_free;
 
-	ses_dev->page1 = buf;
-	ses_dev->page1_len = len;
-
 	result = ses_recv_diag(sdev, 1, buf, len);
 	if (result)
 		goto recv_failed;
@@ -473,6 +470,9 @@ static int ses_intf_add(struct class_device *cdev,
 		    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
 			components += type_ptr[1];
 	}
+	ses_dev->page1 = buf;
+	ses_dev->page1_len = len;
+	buf = NULL;
 
 	result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
 	if (result)
@@ -489,6 +489,7 @@ static int ses_intf_add(struct class_device *cdev,
 		goto recv_failed;
 	ses_dev->page2 = buf;
 	ses_dev->page2_len = len;
+	buf = NULL;
 
 	/* The additional information page --- allows us
 	 * to match up the devices */
@@ -506,11 +507,12 @@ static int ses_intf_add(struct class_device *cdev,
 		goto recv_failed;
 	ses_dev->page10 = buf;
 	ses_dev->page10_len = len;
+	buf = NULL;
 
  no_page10:
-	scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
+	scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
 	if (!scomp)
-		goto err_free;
+		goto err_free;
 
 	edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id,
 				  components, &ses_enclosure_callbacks);
@@ -521,10 +523,9 @@ static int ses_intf_add(struct class_device *cdev,
 	edev->scratch = ses_dev;
 	for (i = 0; i < components; i++)
-		edev->component[i].scratch = scomp++;
+		edev->component[i].scratch = scomp + i;
 
 	/* Page 7 for the descriptors is optional */
-	buf = NULL;
 	result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
 	if (result)
 		goto simple_populate;
@@ -532,6 +533,8 @@ static int ses_intf_add(struct class_device *cdev,
 	len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
 	/* add 1 for trailing '\0' we'll use */
 	buf = kzalloc(len + 1, GFP_KERNEL);
+	if (!buf)
+		goto simple_populate;
 	result = ses_recv_diag(sdev, 7, buf, len);
 	if (result) {
  simple_populate:
@@ -598,6 +601,7 @@ static int ses_intf_add(struct class_device *cdev,
 	err = -ENODEV;
  err_free:
 	kfree(buf);
+	kfree(scomp);
 	kfree(ses_dev->page10);
 	kfree(ses_dev->page2);
 	kfree(ses_dev->page1);
@@ -630,6 +634,7 @@ static void ses_intf_remove(struct class_device *cdev,
 	ses_dev = edev->scratch;
 	edev->scratch = NULL;
 
+	kfree(ses_dev->page10);
 	kfree(ses_dev->page1);
 	kfree(ses_dev->page2);
 	kfree(ses_dev);
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 6325901..f7d2795 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -187,10 +187,10 @@
 #define sym53c416_base_2 sym53c416_2
 #define sym53c416_base_3 sym53c416_3
 
-static unsigned int sym53c416_base[2] = {0,0};
-static unsigned int sym53c416_base_1[2] = {0,0};
-static unsigned int sym53c416_base_2[2] = {0,0};
-static unsigned int sym53c416_base_3[2] = {0,0};
+static unsigned int sym53c416_base[2];
+static unsigned int sym53c416_base_1[2];
+static unsigned int sym53c416_base_2[2];
+static unsigned int sym53c416_base_3[2];
 
 #endif
@@ -621,25 +621,25 @@ int __init sym53c416_detect(struct scsi_host_template *tpnt)
 	int ints[3];
 
 	ints[0] = 2;
-	if(sym53c416_base)
+	if(sym53c416_base[0])
 	{
 		ints[1] = sym53c416_base[0];
 		ints[2] = sym53c416_base[1];
 		sym53c416_setup(NULL, ints);
 	}
-	if(sym53c416_base_1)
+	if(sym53c416_base_1[0])
 	{
 		ints[1] = sym53c416_base_1[0];
 		ints[2] = sym53c416_base_1[1];
 		sym53c416_setup(NULL, ints);
 	}
-	if(sym53c416_base_2)
+	if(sym53c416_base_2[0])
 	{
 		ints[1] = sym53c416_base_2[0];
 		ints[2] = sym53c416_base_2[1];
 		sym53c416_setup(NULL, ints);
 	}
-	if(sym53c416_base_3)
+	if(sym53c416_base_3[0])
 	{
 		ints[1] = sym53c416_base_3[0];
 		ints[2] = sym53c416_base_3[1];
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index d1299e9..530ff4c 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <scsi/scsi.h>
 
 struct request_queue;
 struct block_device;
@@ -25,12 +26,15 @@ struct blk_queue_tags;
  * NONE: Self evident. Host adapter is not capable of scatter-gather.
  * ALL: Means that the host adapter module can do scatter-gather,
  *      and that there is no limit to the size of the table to which
- *      we scatter/gather data.
+ *      we scatter/gather data.  The value we set here is the maximum
+ *      single element sglist.  To use chained sglists, the adapter
+ *      has to set a value beyond ALL (and correctly use the chain
+ *      handling API.
  * Anything else: Indicates the maximum number of chains that can be
  *      used in one scatter-gather request.
  */
 #define SG_NONE 0
-#define SG_ALL 0xff
+#define SG_ALL SCSI_MAX_SG_SEGMENTS
 
 #define MODE_UNKNOWN 0x00
 #define MODE_INITIATOR 0x01
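The ses.c hunks above clear the local buf pointer as soon as a diagnostic page buffer is handed over to ses_dev, so the shared err_free path can free whatever is still locally owned without double-freeing (kfree(NULL) is a no-op), and scomp is now allocated zeroed and freed on error. A small sketch of that ownership hand-off, with struct pages standing in for ses_dev and assuming the structure starts out zeroed, as ses_dev does via kzalloc:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct pages {
		void *page1;
		void *page2;
	};

	/* p is assumed zero-initialised before the call */
	static int grab_pages(struct pages *p)
	{
		void *buf;

		buf = kzalloc(64, GFP_KERNEL);
		if (!buf)
			goto err_free;
		p->page1 = buf;
		buf = NULL;		/* ownership moved; err_free must not free it again */

		buf = kzalloc(64, GFP_KERNEL);
		if (!buf)
			goto err_free;
		p->page2 = buf;
		buf = NULL;

		return 0;

	 err_free:
		kfree(buf);		/* only frees a buffer not yet handed over */
		kfree(p->page1);
		kfree(p->page2);
		return -ENOMEM;
	}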