author     James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-06-10 14:47:26 -0400
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-06-10 14:47:26 -0400
commit     f0cd91a68acdc9b49d7f6738b514a426da627649 (patch)
tree       8ad73564015794197583b094217ae0a71e71e753 /drivers/scsi
parent     60eef25701d25e99c991dd0f4a9f3832a0c3ad3e (diff)
parent     128e6ced247cda88f96fa9f2e4ba8b2c4a681560 (diff)

Merge ../linux-2.6
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 4
-rw-r--r--  drivers/scsi/advansys.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 12
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 30
-rw-r--r--  drivers/scsi/libata-core.c | 12
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 95
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 134
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 68
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 59
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 7
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 6
-rw-r--r--  drivers/scsi/ppa.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 19
-rw-r--r--  drivers/scsi/sata_mv.c | 134
-rw-r--r--  drivers/scsi/sata_sil24.c | 6
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 29
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r--  drivers/scsi/sim710.c | 2
-rw-r--r--  drivers/scsi/st.c | 2
29 files changed, 410 insertions(+), 307 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 13ad88a064b7..44728ae3fe77 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -446,7 +446,9 @@ config SCSI_DPT_I2O
 
 config SCSI_ADVANSYS
 	tristate "AdvanSys SCSI support"
-	depends on (ISA || EISA || PCI) && SCSI && BROKEN
+	depends on SCSI
+	depends on ISA || EISA || PCI
+	depends on BROKEN || X86_32
 	help
 	  This is a driver for all SCSI host adapters manufactured by
 	  AdvanSys. It is documented in the kernel source in
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 28b93057b607..2a419634b256 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2051,7 +2051,7 @@ STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
 #define ADV_VADDR_TO_U32   virt_to_bus
 #define ADV_U32_TO_VADDR   bus_to_virt
 
-#define AdvPortAddr  ulong		/* Virtual memory address size */
+#define AdvPortAddr  void __iomem *	/* Virtual memory address size */
 
 /*
  * Define Adv Library required memory access macros.
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index cb30d9c1153d..0c9c2f400bf6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -219,6 +219,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		ahc->flags |= AHC_39BIT_ADDRESSING;
 	} else {
 		if (dma_set_mask(dev, DMA_32BIT_MASK)) {
+			ahc_free(ahc);
 			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
 			return (-ENODEV);
 		}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 02fed4a02eb3..63cab2d74552 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -2042,12 +2042,12 @@ ahc_pci_resume(struct ahc_softc *ahc)
 	 * that the OS doesn't know about and rely on our chip
 	 * reset handler to handle the rest.
 	 */
-	ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4,
-			     ahc->bus_softc.pci_softc.devconfig);
-	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1,
-			     ahc->bus_softc.pci_softc.command);
-	ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1,
-			     ahc->bus_softc.pci_softc.csize_lattime);
+	ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
+			     ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
+	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+			     ahc->bus_softc.pci_softc.command, /*bytes*/1);
+	ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
+			     ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
 	if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
 		struct seeprom_descriptor sd;
 		u_int sxfrctl1;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 12cb743d0ce5..944fc1203ebd 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -738,7 +738,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 {
 	struct viosrp_adapter_info *req;
 	struct srp_event_struct *evt_struct;
+	dma_addr_t addr;
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct) {
 		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
@@ -756,10 +757,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 
 	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
 	req->common.length = sizeof(hostdata->madapter_info);
-	req->buffer = dma_map_single(hostdata->dev,
-				     &hostdata->madapter_info,
-				     sizeof(hostdata->madapter_info),
-				     DMA_BIDIRECTIONAL);
+	req->buffer = addr = dma_map_single(hostdata->dev,
+					    &hostdata->madapter_info,
+					    sizeof(hostdata->madapter_info),
+					    DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(req->buffer)) {
 		printk(KERN_ERR
@@ -769,8 +770,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 		return;
 	}
 
-	if (ibmvscsi_send_srp_event(evt_struct, hostdata))
+	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
 		printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
+		dma_unmap_single(hostdata->dev,
+				 addr,
+				 sizeof(hostdata->madapter_info),
+				 DMA_BIDIRECTIONAL);
+	}
 };
 
 /**
@@ -1258,6 +1264,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 {
 	struct viosrp_host_config *host_config;
 	struct srp_event_struct *evt_struct;
+	dma_addr_t addr;
 	int rc;
 
 	evt_struct = get_event_struct(&hostdata->pool);
@@ -1278,8 +1285,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 	memset(host_config, 0x00, sizeof(*host_config));
 	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
 	host_config->common.length = length;
-	host_config->buffer = dma_map_single(hostdata->dev, buffer, length,
-					     DMA_BIDIRECTIONAL);
+	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
+						    length,
+						    DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(host_config->buffer)) {
 		printk(KERN_ERR
@@ -1290,11 +1298,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
 	init_completion(&evt_struct->comp);
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
-	if (rc == 0) {
+	if (rc == 0)
 		wait_for_completion(&evt_struct->comp);
-		dma_unmap_single(hostdata->dev, host_config->buffer,
-				 length, DMA_BIDIRECTIONAL);
-	}
+	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
 
 	return rc;
 }
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index bd147207f25d..b046ffa22101 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -864,6 +864,9 @@ static unsigned int ata_id_xfermask(const u16 *id)
 /**
  * ata_port_queue_task - Queue port_task
  * @ap: The ata_port to queue port_task for
+ * @fn: workqueue function to be scheduled
+ * @data: data value to pass to workqueue function
+ * @delay: delay time for workqueue function
  *
  * Schedule @fn(@data) for execution after @delay jiffies using
  * port_task. There is one port_task per port and it's the
@@ -2739,6 +2742,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
  * ata_dev_init_params - Issue INIT DEV PARAMS command
  * @ap: Port associated with device @dev
  * @dev: Device to which command will be sent
+ * @heads: Number of heads (taskfile parameter)
+ * @sectors: Number of sectors (taskfile parameter)
  *
  * LOCKING:
  * Kernel thread context (may sleep)
@@ -3638,6 +3643,8 @@ static void ata_pio_block(struct ata_port *ap)
 
 		ata_pio_sector(qc);
 	}
+
+	ata_altstatus(ap); /* flush */
 }
 
 static void ata_pio_error(struct ata_port *ap)
@@ -3754,11 +3761,14 @@ static void atapi_packet_task(void *_data)
 		spin_lock_irqsave(&ap->host_set->lock, flags);
 		ap->flags &= ~ATA_FLAG_NOINTR;
 		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+		ata_altstatus(ap); /* flush */
+
 		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
 			ap->ops->bmdma_start(qc);	/* initiate bmdma */
 		spin_unlock_irqrestore(&ap->host_set->lock, flags);
 	} else {
 		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
+		ata_altstatus(ap); /* flush */
 
 		/* PIO commands are handled by polling */
 		ap->hsm_task_state = HSM_ST;
@@ -4287,6 +4297,7 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
 {
 	if (ap->flags & ATA_FLAG_SUSPENDED) {
+		ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
 		ap->flags &= ~ATA_FLAG_SUSPENDED;
 		ata_set_mode(ap);
 	}
@@ -4302,6 +4313,7 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
  * ata_device_suspend - prepare a device for suspend
  * @ap: port the device is connected to
  * @dev: the device to suspend
+ * @state: target power management state
  *
  * Flush the cache on the drive, if appropriate, then issue a
  * standbynow command.
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fad607b2e6f4..ee22173fce43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -27,7 +27,6 @@ void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
 		   uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 8932b1be2b60..41cf5d3ea6ce 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -113,6 +113,7 @@ struct lpfc_nodelist {
 #define NLP_NPR_ADISC     0x2000000 /* Issue ADISC when dq'ed from
 				       NPR list */
 #define NLP_DELAY_REMOVE  0x4000000 /* Defer removal till end of DSM */
+#define NLP_NODEV_REMOVE  0x8000000 /* Defer removal till discovery ends */
 
 /* Defines for list searchs */
 #define NLP_SEARCH_MAPPED 0x1       /* search mapped */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4813beaaca8f..283b7d824c34 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -302,10 +302,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
302 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0)) 302 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
303 goto fail_free_mbox; 303 goto fail_free_mbox;
304 304
305 /*
306 * set_slim mailbox command needs to execute first,
307 * queue this command to be processed later.
308 */
309 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 305 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
310 mbox->context2 = ndlp; 306 mbox->context2 = ndlp;
311 307
@@ -781,25 +777,26 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
781 if (disc && phba->num_disc_nodes) { 777 if (disc && phba->num_disc_nodes) {
782 /* Check to see if there are more PLOGIs to be sent */ 778 /* Check to see if there are more PLOGIs to be sent */
783 lpfc_more_plogi(phba); 779 lpfc_more_plogi(phba);
784 }
785 780
786 if (phba->num_disc_nodes == 0) { 781 if (phba->num_disc_nodes == 0) {
787 spin_lock_irq(phba->host->host_lock); 782 spin_lock_irq(phba->host->host_lock);
788 phba->fc_flag &= ~FC_NDISC_ACTIVE; 783 phba->fc_flag &= ~FC_NDISC_ACTIVE;
789 spin_unlock_irq(phba->host->host_lock); 784 spin_unlock_irq(phba->host->host_lock);
790 785
791 lpfc_can_disctmo(phba); 786 lpfc_can_disctmo(phba);
792 if (phba->fc_flag & FC_RSCN_MODE) { 787 if (phba->fc_flag & FC_RSCN_MODE) {
793 /* Check to see if more RSCNs came in while we were 788 /*
794 * processing this one. 789 * Check to see if more RSCNs came in while
795 */ 790 * we were processing this one.
796 if ((phba->fc_rscn_id_cnt == 0) && 791 */
797 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { 792 if ((phba->fc_rscn_id_cnt == 0) &&
798 spin_lock_irq(phba->host->host_lock); 793 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
799 phba->fc_flag &= ~FC_RSCN_MODE; 794 spin_lock_irq(phba->host->host_lock);
800 spin_unlock_irq(phba->host->host_lock); 795 phba->fc_flag &= ~FC_RSCN_MODE;
801 } else { 796 spin_unlock_irq(phba->host->host_lock);
802 lpfc_els_handle_rscn(phba); 797 } else {
798 lpfc_els_handle_rscn(phba);
799 }
803 } 800 }
804 } 801 }
805 } 802 }
@@ -1263,7 +1260,7 @@ lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1263 psli = &phba->sli; 1260 psli = &phba->sli;
1264 pring = &psli->ring[LPFC_ELS_RING]; 1261 pring = &psli->ring[LPFC_ELS_RING];
1265 1262
1266 cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name)); 1263 cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
1267 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 1264 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1268 ndlp->nlp_DID, ELS_CMD_LOGO); 1265 ndlp->nlp_DID, ELS_CMD_LOGO);
1269 if (!elsiocb) 1266 if (!elsiocb)
@@ -1451,22 +1448,23 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
1451 * PLOGIs to be sent 1448 * PLOGIs to be sent
1452 */ 1449 */
1453 lpfc_more_plogi(phba); 1450 lpfc_more_plogi(phba);
1454 }
1455 1451
1456 if (phba->num_disc_nodes == 0) { 1452 if (phba->num_disc_nodes == 0) {
1457 phba->fc_flag &= ~FC_NDISC_ACTIVE; 1453 phba->fc_flag &= ~FC_NDISC_ACTIVE;
1458 lpfc_can_disctmo(phba); 1454 lpfc_can_disctmo(phba);
1459 if (phba->fc_flag & FC_RSCN_MODE) { 1455 if (phba->fc_flag & FC_RSCN_MODE) {
1460 /* Check to see if more RSCNs 1456 /*
1461 * came in while we were 1457 * Check to see if more RSCNs
1462 * processing this one. 1458 * came in while we were
1463 */ 1459 * processing this one.
1464 if((phba->fc_rscn_id_cnt==0) && 1460 */
1465 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { 1461 if((phba->fc_rscn_id_cnt==0) &&
1466 phba->fc_flag &= ~FC_RSCN_MODE; 1462 !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
1467 } 1463 phba->fc_flag &= ~FC_RSCN_MODE;
1468 else { 1464 }
1469 lpfc_els_handle_rscn(phba); 1465 else {
1466 lpfc_els_handle_rscn(phba);
1467 }
1470 } 1468 }
1471 } 1469 }
1472 } 1470 }
@@ -1872,9 +1870,6 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1872 if (mbox) { 1870 if (mbox) {
1873 if ((rspiocb->iocb.ulpStatus == 0) 1871 if ((rspiocb->iocb.ulpStatus == 0)
1874 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 1872 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1875 /* set_slim mailbox command needs to execute first,
1876 * queue this command to be processed later.
1877 */
1878 lpfc_unreg_rpi(phba, ndlp); 1873 lpfc_unreg_rpi(phba, ndlp);
1879 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 1874 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1880 mbox->context2 = ndlp; 1875 mbox->context2 = ndlp;
@@ -1920,6 +1915,7 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1920 uint8_t *pcmd; 1915 uint8_t *pcmd;
1921 uint16_t cmdsize; 1916 uint16_t cmdsize;
1922 int rc; 1917 int rc;
1918 ELS_PKT *els_pkt_ptr;
1923 1919
1924 psli = &phba->sli; 1920 psli = &phba->sli;
1925 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1921 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
@@ -1958,6 +1954,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1958 pcmd += sizeof (uint32_t); 1954 pcmd += sizeof (uint32_t);
1959 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); 1955 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
1960 break; 1956 break;
1957 case ELS_CMD_PRLO:
1958 cmdsize = sizeof (uint32_t) + sizeof (PRLO);
1959 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1960 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
1961 if (!elsiocb)
1962 return 1;
1963
1964 icmd = &elsiocb->iocb;
1965 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1966 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1967
1968 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
1969 sizeof (uint32_t) + sizeof (PRLO));
1970 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
1971 els_pkt_ptr = (ELS_PKT *) pcmd;
1972 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
1973 break;
1961 default: 1974 default:
1962 return 1; 1975 return 1;
1963 } 1976 }
@@ -2498,7 +2511,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2498 /* If we are about to begin discovery, just ACC the RSCN. 2511 /* If we are about to begin discovery, just ACC the RSCN.
2499 * Discovery processing will satisfy it. 2512 * Discovery processing will satisfy it.
2500 */ 2513 */
2501 if (phba->hba_state < LPFC_NS_QRY) { 2514 if (phba->hba_state <= LPFC_NS_QRY) {
2502 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 2515 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2503 newnode); 2516 newnode);
2504 return 0; 2517 return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 6721e679df62..adb086009ae0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -311,8 +311,8 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
311 evtp->evt_arg2 = arg2; 311 evtp->evt_arg2 = arg2;
312 evtp->evt = evt; 312 evtp->evt = evt;
313 313
314 list_add_tail(&evtp->evt_listp, &phba->work_list);
315 spin_lock_irq(phba->host->host_lock); 314 spin_lock_irq(phba->host->host_lock);
315 list_add_tail(&evtp->evt_listp, &phba->work_list);
316 if (phba->work_wait) 316 if (phba->work_wait)
317 wake_up(phba->work_wait); 317 wake_up(phba->work_wait);
318 spin_unlock_irq(phba->host->host_lock); 318 spin_unlock_irq(phba->host->host_lock);
@@ -1071,10 +1071,6 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1071 /* initialize static port data */ 1071 /* initialize static port data */
1072 rport->maxframe_size = ndlp->nlp_maxframe; 1072 rport->maxframe_size = ndlp->nlp_maxframe;
1073 rport->supported_classes = ndlp->nlp_class_sup; 1073 rport->supported_classes = ndlp->nlp_class_sup;
1074 if ((rport->scsi_target_id != -1) &&
1075 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1076 ndlp->nlp_sid = rport->scsi_target_id;
1077 }
1078 rdata = rport->dd_data; 1074 rdata = rport->dd_data;
1079 rdata->pnode = ndlp; 1075 rdata->pnode = ndlp;
1080 1076
@@ -1087,6 +1083,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1087 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 1083 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1088 fc_remote_port_rolechg(rport, rport_ids.roles); 1084 fc_remote_port_rolechg(rport, rport_ids.roles);
1089 1085
1086 if ((rport->scsi_target_id != -1) &&
1087 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1088 ndlp->nlp_sid = rport->scsi_target_id;
1089 }
1090 1090
1091 return; 1091 return;
1092} 1092}
@@ -1238,6 +1238,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1238 evt_listp); 1238 evt_listp);
1239 1239
1240 } 1240 }
1241 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1241 nlp->nlp_type |= NLP_FC_NODE; 1242 nlp->nlp_type |= NLP_FC_NODE;
1242 break; 1243 break;
1243 case NLP_MAPPED_LIST: 1244 case NLP_MAPPED_LIST:
@@ -1258,6 +1259,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1258 evt_listp); 1259 evt_listp);
1259 1260
1260 } 1261 }
1262 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1261 break; 1263 break;
1262 case NLP_NPR_LIST: 1264 case NLP_NPR_LIST:
1263 nlp->nlp_flag |= list; 1265 nlp->nlp_flag |= list;
@@ -1402,6 +1404,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1402 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 1404 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1403 return 1; 1405 return 1;
1404 case CMD_ELS_REQUEST64_CR: 1406 case CMD_ELS_REQUEST64_CR:
1407 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1408 return 1;
1405 case CMD_XMIT_ELS_RSP64_CX: 1409 case CMD_XMIT_ELS_RSP64_CX:
1406 if (iocb->context1 == (uint8_t *) ndlp) 1410 if (iocb->context1 == (uint8_t *) ndlp)
1407 return 1; 1411 return 1;
@@ -1901,10 +1905,8 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1901 */ 1905 */
1902 if (ndlp->nlp_flag & NLP_DELAY_TMO) 1906 if (ndlp->nlp_flag & NLP_DELAY_TMO)
1903 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1907 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1904 } else { 1908 } else
1905 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1906 ndlp = NULL; 1909 ndlp = NULL;
1907 }
1908 } else { 1910 } else {
1909 flg = ndlp->nlp_flag & NLP_LIST_MASK; 1911 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1910 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST)) 1912 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 54d04188f7cc..eedf98801366 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -449,6 +449,7 @@ struct serv_parm { /* Structure is in Big Endian format */
 #define ELS_CMD_RRQ       0x12000000
 #define ELS_CMD_PRLI      0x20100014
 #define ELS_CMD_PRLO      0x21100014
+#define ELS_CMD_PRLO_ACC  0x02100014
 #define ELS_CMD_PDISC     0x50000000
 #define ELS_CMD_FDISC     0x51000000
 #define ELS_CMD_ADISC     0x52000000
@@ -484,6 +485,7 @@ struct serv_parm { /* Structure is in Big Endian format */
 #define ELS_CMD_RRQ       0x12
 #define ELS_CMD_PRLI      0x14001020
 #define ELS_CMD_PRLO      0x14001021
+#define ELS_CMD_PRLO_ACC  0x14001002
 #define ELS_CMD_PDISC     0x50
 #define ELS_CMD_FDISC     0x51
 #define ELS_CMD_ADISC     0x52
@@ -1539,6 +1541,7 @@ typedef struct {
 
 #define FLAGS_TOPOLOGY_FAILOVER  0x0400  /* Bit 10 */
 #define FLAGS_LINK_SPEED         0x0800  /* Bit 11 */
+#define FLAGS_IMED_ABORT         0x04000 /* Bit 14 */
 
 	uint32_t link_speed;
 #define LINK_SPEED_AUTO          0       /* Auto selection */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 66d5d003555d..908d0f27706f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -294,15 +294,6 @@ lpfc_config_port_post(struct lpfc_hba * phba)
294 } 294 }
295 } 295 }
296 296
297 /* This should turn on DELAYED ABTS for ELS timeouts */
298 lpfc_set_slim(phba, pmb, 0x052198, 0x1);
299 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
300 phba->hba_state = LPFC_HBA_ERROR;
301 mempool_free( pmb, phba->mbox_mem_pool);
302 return -EIO;
303 }
304
305
306 lpfc_read_config(phba, pmb); 297 lpfc_read_config(phba, pmb);
307 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 298 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
308 lpfc_printf_log(phba, 299 lpfc_printf_log(phba,
@@ -804,7 +795,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
804 int max_speed; 795 int max_speed;
805 char * ports; 796 char * ports;
806 char * bus; 797 char * bus;
807 } m; 798 } m = {"<Unknown>", 0, "", ""};
808 799
809 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 800 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
810 ports = (hdrtype == 0x80) ? "2-port " : ""; 801 ports = (hdrtype == 0x80) ? "2-port " : "";
@@ -1627,7 +1618,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1627 1618
1628 error = lpfc_alloc_sysfs_attr(phba); 1619 error = lpfc_alloc_sysfs_attr(phba);
1629 if (error) 1620 if (error)
1630 goto out_kthread_stop; 1621 goto out_remove_host;
1631 1622
1632 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ, 1623 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
1633 LPFC_DRIVER_NAME, phba); 1624 LPFC_DRIVER_NAME, phba);
@@ -1644,8 +1635,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1644 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 1635 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1645 1636
1646 error = lpfc_sli_hba_setup(phba); 1637 error = lpfc_sli_hba_setup(phba);
1647 if (error) 1638 if (error) {
1639 error = -ENODEV;
1648 goto out_free_irq; 1640 goto out_free_irq;
1641 }
1649 1642
1650 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1643 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1651 spin_lock_irq(phba->host->host_lock); 1644 spin_lock_irq(phba->host->host_lock);
@@ -1700,6 +1693,9 @@ out_free_irq:
1700 free_irq(phba->pcidev->irq, phba); 1693 free_irq(phba->pcidev->irq, phba);
1701out_free_sysfs_attr: 1694out_free_sysfs_attr:
1702 lpfc_free_sysfs_attr(phba); 1695 lpfc_free_sysfs_attr(phba);
1696out_remove_host:
1697 fc_remove_host(phba->host);
1698 scsi_remove_host(phba->host);
1703out_kthread_stop: 1699out_kthread_stop:
1704 kthread_stop(phba->worker_thread); 1700 kthread_stop(phba->worker_thread);
1705out_free_iocbq: 1701out_free_iocbq:
@@ -1721,12 +1717,14 @@ out_iounmap_slim:
1721out_idr_remove: 1717out_idr_remove:
1722 idr_remove(&lpfc_hba_index, phba->brd_no); 1718 idr_remove(&lpfc_hba_index, phba->brd_no);
1723out_put_host: 1719out_put_host:
1720 phba->host = NULL;
1724 scsi_host_put(host); 1721 scsi_host_put(host);
1725out_release_regions: 1722out_release_regions:
1726 pci_release_regions(pdev); 1723 pci_release_regions(pdev);
1727out_disable_device: 1724out_disable_device:
1728 pci_disable_device(pdev); 1725 pci_disable_device(pdev);
1729out: 1726out:
1727 pci_set_drvdata(pdev, NULL);
1730 return error; 1728 return error;
1731} 1729}
1732 1730
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index c585e2b2e589..e42f22aaf71b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -200,6 +200,9 @@ lpfc_init_link(struct lpfc_hba * phba,
200 break; 200 break;
201 } 201 }
202 202
203 /* Enable asynchronous ABTS responses from firmware */
204 mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
205
203 /* NEW_FEATURE 206 /* NEW_FEATURE
204 * Setting up the link speed 207 * Setting up the link speed
205 */ 208 */
@@ -292,36 +295,6 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
292 return; 295 return;
293} 296}
294 297
295/***********************************************/
296
297/* command to write slim */
298/***********************************************/
299void
300lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
301 uint32_t value)
302{
303 MAILBOX_t *mb;
304
305 mb = &pmb->mb;
306 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
307
308 /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
309 /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
310
311 /*
312 * Always turn on DELAYED ABTS for ELS timeouts
313 */
314 if ((addr == 0x052198) && (value == 0))
315 value = 1;
316
317 mb->un.varWords[0] = addr;
318 mb->un.varWords[1] = value;
319
320 mb->mbxCommand = MBX_SET_SLIM;
321 mb->mbxOwner = OWN_HOST;
322 return;
323}
324
325/**********************************************/ 298/**********************************************/
326/* lpfc_read_nv Issue a READ CONFIG */ 299/* lpfc_read_nv Issue a READ CONFIG */
327/* mailbox command */ 300/* mailbox command */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3d77bd999b70..27d60ad897cd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -465,14 +465,18 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
465static int 465static int
466lpfc_rcv_logo(struct lpfc_hba * phba, 466lpfc_rcv_logo(struct lpfc_hba * phba,
467 struct lpfc_nodelist * ndlp, 467 struct lpfc_nodelist * ndlp,
468 struct lpfc_iocbq *cmdiocb) 468 struct lpfc_iocbq *cmdiocb,
469 uint32_t els_cmd)
469{ 470{
470 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */ 471 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
471 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary 472 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
472 * PLOGIs during LOGO storms from a device. 473 * PLOGIs during LOGO storms from a device.
473 */ 474 */
474 ndlp->nlp_flag |= NLP_LOGO_ACC; 475 ndlp->nlp_flag |= NLP_LOGO_ACC;
475 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 476 if (els_cmd == ELS_CMD_PRLO)
477 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
478 else
479 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
476 480
477 if (!(ndlp->nlp_type & NLP_FABRIC) || 481 if (!(ndlp->nlp_type & NLP_FABRIC) ||
478 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 482 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
@@ -681,7 +685,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
681 /* software abort outstanding PLOGI */ 685 /* software abort outstanding PLOGI */
682 lpfc_els_abort(phba, ndlp, 1); 686 lpfc_els_abort(phba, ndlp, 1);
683 687
684 lpfc_rcv_logo(phba, ndlp, cmdiocb); 688 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
685 return ndlp->nlp_state; 689 return ndlp->nlp_state;
686} 690}
687 691
@@ -788,10 +792,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
788 if (lpfc_reg_login 792 if (lpfc_reg_login
789 (phba, irsp->un.elsreq64.remoteID, 793 (phba, irsp->un.elsreq64.remoteID,
790 (uint8_t *) sp, mbox, 0) == 0) { 794 (uint8_t *) sp, mbox, 0) == 0) {
791 /* set_slim mailbox command needs to
792 * execute first, queue this command to
793 * be processed later.
794 */
795 switch (ndlp->nlp_DID) { 795 switch (ndlp->nlp_DID) {
796 case NameServer_DID: 796 case NameServer_DID:
797 mbox->mbox_cmpl = 797 mbox->mbox_cmpl =
@@ -832,11 +832,17 @@ static uint32_t
832lpfc_device_rm_plogi_issue(struct lpfc_hba * phba, 832lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
833 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 833 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
834{ 834{
835 /* software abort outstanding PLOGI */ 835 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
836 lpfc_els_abort(phba, ndlp, 1); 836 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
837 return ndlp->nlp_state;
838 }
839 else {
840 /* software abort outstanding PLOGI */
841 lpfc_els_abort(phba, ndlp, 1);
837 842
838 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 843 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
839 return NLP_STE_FREED_NODE; 844 return NLP_STE_FREED_NODE;
845 }
840} 846}
841 847
842static uint32_t 848static uint32_t
@@ -851,7 +857,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
851 ndlp->nlp_state = NLP_STE_NPR_NODE; 857 ndlp->nlp_state = NLP_STE_NPR_NODE;
852 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 858 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
853 spin_lock_irq(phba->host->host_lock); 859 spin_lock_irq(phba->host->host_lock);
854 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 860 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
855 spin_unlock_irq(phba->host->host_lock); 861 spin_unlock_irq(phba->host->host_lock);
856 862
857 return ndlp->nlp_state; 863 return ndlp->nlp_state;
@@ -905,7 +911,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
905 /* software abort outstanding ADISC */ 911 /* software abort outstanding ADISC */
906 lpfc_els_abort(phba, ndlp, 0); 912 lpfc_els_abort(phba, ndlp, 0);
907 913
908 lpfc_rcv_logo(phba, ndlp, cmdiocb); 914 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
909 return ndlp->nlp_state; 915 return ndlp->nlp_state;
910} 916}
911 917
@@ -932,7 +938,7 @@ lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
932 cmdiocb = (struct lpfc_iocbq *) arg; 938 cmdiocb = (struct lpfc_iocbq *) arg;
933 939
934 /* Treat like rcv logo */ 940 /* Treat like rcv logo */
935 lpfc_rcv_logo(phba, ndlp, cmdiocb); 941 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
936 return ndlp->nlp_state; 942 return ndlp->nlp_state;
937} 943}
938 944
@@ -987,11 +993,17 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
987 struct lpfc_nodelist * ndlp, void *arg, 993 struct lpfc_nodelist * ndlp, void *arg,
988 uint32_t evt) 994 uint32_t evt)
989{ 995{
990 /* software abort outstanding ADISC */ 996 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
991 lpfc_els_abort(phba, ndlp, 1); 997 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
998 return ndlp->nlp_state;
999 }
1000 else {
1001 /* software abort outstanding ADISC */
1002 lpfc_els_abort(phba, ndlp, 1);
992 1003
993 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1004 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
994 return NLP_STE_FREED_NODE; 1005 return NLP_STE_FREED_NODE;
1006 }
995} 1007}
996 1008
997static uint32_t 1009static uint32_t
@@ -1006,7 +1018,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
1006 ndlp->nlp_state = NLP_STE_NPR_NODE; 1018 ndlp->nlp_state = NLP_STE_NPR_NODE;
1007 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1019 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1008 spin_lock_irq(phba->host->host_lock); 1020 spin_lock_irq(phba->host->host_lock);
1009 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1021 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1010 ndlp->nlp_flag |= NLP_NPR_ADISC; 1022 ndlp->nlp_flag |= NLP_NPR_ADISC;
1011 spin_unlock_irq(phba->host->host_lock); 1023 spin_unlock_irq(phba->host->host_lock);
1012 1024
@@ -1048,7 +1060,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1048 1060
1049 cmdiocb = (struct lpfc_iocbq *) arg; 1061 cmdiocb = (struct lpfc_iocbq *) arg;
1050 1062
1051 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1063 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1052 return ndlp->nlp_state; 1064 return ndlp->nlp_state;
1053} 1065}
1054 1066
@@ -1073,7 +1085,7 @@ lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
1073 struct lpfc_iocbq *cmdiocb; 1085 struct lpfc_iocbq *cmdiocb;
1074 1086
1075 cmdiocb = (struct lpfc_iocbq *) arg; 1087 cmdiocb = (struct lpfc_iocbq *) arg;
1076 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 1088 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1077 return ndlp->nlp_state; 1089 return ndlp->nlp_state;
1078} 1090}
1079 1091
@@ -1133,8 +1145,14 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
1133 struct lpfc_nodelist * ndlp, void *arg, 1145 struct lpfc_nodelist * ndlp, void *arg,
1134 uint32_t evt) 1146 uint32_t evt)
1135{ 1147{
1136 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1148 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1137 return NLP_STE_FREED_NODE; 1149 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1150 return ndlp->nlp_state;
1151 }
1152 else {
1153 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1154 return NLP_STE_FREED_NODE;
1155 }
1138} 1156}
1139 1157
1140static uint32_t 1158static uint32_t
@@ -1146,7 +1164,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
1146 ndlp->nlp_state = NLP_STE_NPR_NODE; 1164 ndlp->nlp_state = NLP_STE_NPR_NODE;
1147 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1165 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1148 spin_lock_irq(phba->host->host_lock); 1166 spin_lock_irq(phba->host->host_lock);
1149 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1167 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1150 spin_unlock_irq(phba->host->host_lock); 1168 spin_unlock_irq(phba->host->host_lock);
1151 return ndlp->nlp_state; 1169 return ndlp->nlp_state;
1152} 1170}
@@ -1186,7 +1204,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
1186 /* Software abort outstanding PRLI before sending acc */ 1204 /* Software abort outstanding PRLI before sending acc */
1187 lpfc_els_abort(phba, ndlp, 1); 1205 lpfc_els_abort(phba, ndlp, 1);
1188 1206
1189 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1207 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1190 return ndlp->nlp_state; 1208 return ndlp->nlp_state;
1191} 1209}
1192 1210
@@ -1214,7 +1232,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
1214 struct lpfc_iocbq *cmdiocb; 1232 struct lpfc_iocbq *cmdiocb;
1215 1233
1216 cmdiocb = (struct lpfc_iocbq *) arg; 1234 cmdiocb = (struct lpfc_iocbq *) arg;
1217 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 1235 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1218 return ndlp->nlp_state; 1236 return ndlp->nlp_state;
1219} 1237}
1220 1238
@@ -1278,11 +1296,17 @@ static uint32_t
1278lpfc_device_rm_prli_issue(struct lpfc_hba * phba, 1296lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
1279 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1297 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1280{ 1298{
1281 /* software abort outstanding PRLI */ 1299 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1282 lpfc_els_abort(phba, ndlp, 1); 1300 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1301 return ndlp->nlp_state;
1302 }
1303 else {
1304 /* software abort outstanding PLOGI */
1305 lpfc_els_abort(phba, ndlp, 1);
1283 1306
1284 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1307 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1285 return NLP_STE_FREED_NODE; 1308 return NLP_STE_FREED_NODE;
1309 }
1286} 1310}
1287 1311
1288 1312
@@ -1313,7 +1337,7 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
1313 ndlp->nlp_state = NLP_STE_NPR_NODE; 1337 ndlp->nlp_state = NLP_STE_NPR_NODE;
1314 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1338 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1315 spin_lock_irq(phba->host->host_lock); 1339 spin_lock_irq(phba->host->host_lock);
1316 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1340 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1317 spin_unlock_irq(phba->host->host_lock); 1341 spin_unlock_irq(phba->host->host_lock);
1318 return ndlp->nlp_state; 1342 return ndlp->nlp_state;
1319} 1343}
@@ -1351,7 +1375,7 @@ lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
1351 1375
1352 cmdiocb = (struct lpfc_iocbq *) arg; 1376 cmdiocb = (struct lpfc_iocbq *) arg;
1353 1377
1354 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1378 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1355 return ndlp->nlp_state; 1379 return ndlp->nlp_state;
1356} 1380}
1357 1381
@@ -1375,7 +1399,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
1375 1399
1376 cmdiocb = (struct lpfc_iocbq *) arg; 1400 cmdiocb = (struct lpfc_iocbq *) arg;
1377 1401
1378 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 1402 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1379 return ndlp->nlp_state; 1403 return ndlp->nlp_state;
1380} 1404}
1381 1405
@@ -1386,7 +1410,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
1386 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; 1410 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1387 ndlp->nlp_state = NLP_STE_NPR_NODE; 1411 ndlp->nlp_state = NLP_STE_NPR_NODE;
1388 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1412 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1389 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1413 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1390 lpfc_disc_set_adisc(phba, ndlp); 1414 lpfc_disc_set_adisc(phba, ndlp);
1391 1415
1392 return ndlp->nlp_state; 1416 return ndlp->nlp_state;
@@ -1424,7 +1448,7 @@ lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
1424 1448
1425 cmdiocb = (struct lpfc_iocbq *) arg; 1449 cmdiocb = (struct lpfc_iocbq *) arg;
1426 1450
1427 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1451 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1428 return ndlp->nlp_state; 1452 return ndlp->nlp_state;
1429} 1453}
1430 1454
@@ -1456,7 +1480,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
1456 spin_unlock_irq(phba->host->host_lock); 1480 spin_unlock_irq(phba->host->host_lock);
1457 1481
1458 /* Treat like rcv logo */ 1482 /* Treat like rcv logo */
1459 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1483 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
1460 return ndlp->nlp_state; 1484 return ndlp->nlp_state;
1461} 1485}
1462 1486
@@ -1469,7 +1493,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
1469 ndlp->nlp_state = NLP_STE_NPR_NODE; 1493 ndlp->nlp_state = NLP_STE_NPR_NODE;
1470 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1494 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1471 spin_lock_irq(phba->host->host_lock); 1495 spin_lock_irq(phba->host->host_lock);
1472 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1496 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1473 spin_unlock_irq(phba->host->host_lock); 1497 spin_unlock_irq(phba->host->host_lock);
1474 lpfc_disc_set_adisc(phba, ndlp); 1498 lpfc_disc_set_adisc(phba, ndlp);
1475 return ndlp->nlp_state; 1499 return ndlp->nlp_state;
@@ -1551,7 +1575,7 @@ lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
1551 1575
1552 cmdiocb = (struct lpfc_iocbq *) arg; 1576 cmdiocb = (struct lpfc_iocbq *) arg;
1553 1577
1554 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1578 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1555 return ndlp->nlp_state; 1579 return ndlp->nlp_state;
1556} 1580}
1557 1581
@@ -1617,9 +1641,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
1617 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1641 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1618{ 1642{
1619 struct lpfc_iocbq *cmdiocb, *rspiocb; 1643 struct lpfc_iocbq *cmdiocb, *rspiocb;
1644 IOCB_t *irsp;
1620 1645
1621 cmdiocb = (struct lpfc_iocbq *) arg; 1646 cmdiocb = (struct lpfc_iocbq *) arg;
1622 rspiocb = cmdiocb->context_un.rsp_iocb; 1647 rspiocb = cmdiocb->context_un.rsp_iocb;
1648
1649 irsp = &rspiocb->iocb;
1650 if (irsp->ulpStatus) {
1651 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1652 return NLP_STE_FREED_NODE;
1653 }
1623 return ndlp->nlp_state; 1654 return ndlp->nlp_state;
1624} 1655}
1625 1656
@@ -1628,9 +1659,16 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
1628 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1659 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1629{ 1660{
1630 struct lpfc_iocbq *cmdiocb, *rspiocb; 1661 struct lpfc_iocbq *cmdiocb, *rspiocb;
1662 IOCB_t *irsp;
1631 1663
1632 cmdiocb = (struct lpfc_iocbq *) arg; 1664 cmdiocb = (struct lpfc_iocbq *) arg;
1633 rspiocb = cmdiocb->context_un.rsp_iocb; 1665 rspiocb = cmdiocb->context_un.rsp_iocb;
1666
1667 irsp = &rspiocb->iocb;
1668 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1669 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1670 return NLP_STE_FREED_NODE;
1671 }
1634 return ndlp->nlp_state; 1672 return ndlp->nlp_state;
1635} 1673}
1636 1674
@@ -1649,9 +1687,16 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
1649 uint32_t evt) 1687 uint32_t evt)
1650{ 1688{
1651 struct lpfc_iocbq *cmdiocb, *rspiocb; 1689 struct lpfc_iocbq *cmdiocb, *rspiocb;
1690 IOCB_t *irsp;
1652 1691
1653 cmdiocb = (struct lpfc_iocbq *) arg; 1692 cmdiocb = (struct lpfc_iocbq *) arg;
1654 rspiocb = cmdiocb->context_un.rsp_iocb; 1693 rspiocb = cmdiocb->context_un.rsp_iocb;
1694
1695 irsp = &rspiocb->iocb;
1696 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1697 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1698 return NLP_STE_FREED_NODE;
1699 }
1655 return ndlp->nlp_state; 1700 return ndlp->nlp_state;
1656} 1701}
1657 1702
@@ -1668,7 +1713,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1668 1713
1669 if (!mb->mbxStatus) 1714 if (!mb->mbxStatus)
1670 ndlp->nlp_rpi = mb->un.varWords[0]; 1715 ndlp->nlp_rpi = mb->un.varWords[0];
1671 1716 else {
1717 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1718 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1719 return NLP_STE_FREED_NODE;
1720 }
1721 }
1672 return ndlp->nlp_state; 1722 return ndlp->nlp_state;
1673} 1723}
1674 1724
@@ -1677,6 +1727,10 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba,
1677 struct lpfc_nodelist * ndlp, void *arg, 1727 struct lpfc_nodelist * ndlp, void *arg,
1678 uint32_t evt) 1728 uint32_t evt)
1679{ 1729{
1730 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1731 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1732 return ndlp->nlp_state;
1733 }
1680 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1734 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1681 return NLP_STE_FREED_NODE; 1735 return NLP_STE_FREED_NODE;
1682} 1736}
@@ -1687,7 +1741,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1687 uint32_t evt) 1741 uint32_t evt)
1688{ 1742{
1689 spin_lock_irq(phba->host->host_lock); 1743 spin_lock_irq(phba->host->host_lock);
1690 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1744 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1691 spin_unlock_irq(phba->host->host_lock); 1745 spin_unlock_irq(phba->host->host_lock);
1692 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1746 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1693 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1747 lpfc_cancel_retry_delay_tmo(phba, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f93799873721..7dc4c2e6bed2 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -629,8 +629,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
629 struct lpfc_iocbq *piocbq; 629 struct lpfc_iocbq *piocbq;
630 IOCB_t *piocb; 630 IOCB_t *piocb;
631 struct fcp_cmnd *fcp_cmnd; 631 struct fcp_cmnd *fcp_cmnd;
632 struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device; 632 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
633 struct lpfc_rport_data *rdata = scsi_dev->hostdata;
634 struct lpfc_nodelist *ndlp = rdata->pnode; 633 struct lpfc_nodelist *ndlp = rdata->pnode;
635 634
636 if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 635 if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
@@ -665,56 +664,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
665 piocb->ulpTimeout = lpfc_cmd->timeout; 664 piocb->ulpTimeout = lpfc_cmd->timeout;
666 } 665 }
667 666
668 lpfc_cmd->rdata = rdata;
669
670 switch (task_mgmt_cmd) {
671 case FCP_LUN_RESET:
672 /* Issue LUN Reset to TGT <num> LUN <num> */
673 lpfc_printf_log(phba,
674 KERN_INFO,
675 LOG_FCP,
676 "%d:0703 Issue LUN Reset to TGT %d LUN %d "
677 "Data: x%x x%x\n",
678 phba->brd_no,
679 scsi_dev->id, scsi_dev->lun,
680 ndlp->nlp_rpi, ndlp->nlp_flag);
681
682 break;
683 case FCP_ABORT_TASK_SET:
684 /* Issue Abort Task Set to TGT <num> LUN <num> */
685 lpfc_printf_log(phba,
686 KERN_INFO,
687 LOG_FCP,
688 "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
689 "Data: x%x x%x\n",
690 phba->brd_no,
691 scsi_dev->id, scsi_dev->lun,
692 ndlp->nlp_rpi, ndlp->nlp_flag);
693
694 break;
695 case FCP_TARGET_RESET:
696 /* Issue Target Reset to TGT <num> */
697 lpfc_printf_log(phba,
698 KERN_INFO,
699 LOG_FCP,
700 "%d:0702 Issue Target Reset to TGT %d "
701 "Data: x%x x%x\n",
702 phba->brd_no,
703 scsi_dev->id, ndlp->nlp_rpi,
704 ndlp->nlp_flag);
705 break;
706 }
707
708 return (1); 667 return (1);
709} 668}
710 669
711static int 670static int
712lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) 671lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
672 unsigned tgt_id, struct lpfc_rport_data *rdata)
713{ 673{
714 struct lpfc_iocbq *iocbq; 674 struct lpfc_iocbq *iocbq;
715 struct lpfc_iocbq *iocbqrsp; 675 struct lpfc_iocbq *iocbqrsp;
716 int ret; 676 int ret;
717 677
678 lpfc_cmd->rdata = rdata;
718 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); 679 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
719 if (!ret) 680 if (!ret)
720 return FAILED; 681 return FAILED;
@@ -726,6 +687,13 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
726 if (!iocbqrsp) 687 if (!iocbqrsp)
727 return FAILED; 688 return FAILED;
728 689
690 /* Issue Target Reset to TGT <num> */
691 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
692 "%d:0702 Issue Target Reset to TGT %d "
693 "Data: x%x x%x\n",
694 phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
695 rdata->pnode->nlp_flag);
696
729 ret = lpfc_sli_issue_iocb_wait(phba, 697 ret = lpfc_sli_issue_iocb_wait(phba,
730 &phba->sli.ring[phba->sli.fcp_ring], 698 &phba->sli.ring[phba->sli.fcp_ring],
731 iocbq, iocbqrsp, lpfc_cmd->timeout); 699 iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -1021,6 +989,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1021 lpfc_cmd->pCmd = cmnd; 989 lpfc_cmd->pCmd = cmnd;
1022 lpfc_cmd->timeout = 60; 990 lpfc_cmd->timeout = 60;
1023 lpfc_cmd->scsi_hba = phba; 991 lpfc_cmd->scsi_hba = phba;
992 lpfc_cmd->rdata = rdata;
1024 993
1025 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); 994 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
1026 if (!ret) 995 if (!ret)
@@ -1033,6 +1002,11 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1033 if (iocbqrsp == NULL) 1002 if (iocbqrsp == NULL)
1034 goto out_free_scsi_buf; 1003 goto out_free_scsi_buf;
1035 1004
1005 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1006 "%d:0703 Issue LUN Reset to TGT %d LUN %d "
1007 "Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
1008 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1009
1036 ret = lpfc_sli_issue_iocb_wait(phba, 1010 ret = lpfc_sli_issue_iocb_wait(phba,
1037 &phba->sli.ring[phba->sli.fcp_ring], 1011 &phba->sli.ring[phba->sli.fcp_ring],
1038 iocbq, iocbqrsp, lpfc_cmd->timeout); 1012 iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -1104,7 +1078,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1104 int match; 1078 int match;
1105 int ret = FAILED, i, err_count = 0; 1079 int ret = FAILED, i, err_count = 0;
1106 int cnt, loopcnt; 1080 int cnt, loopcnt;
1107 unsigned int midlayer_id = 0;
1108 struct lpfc_scsi_buf * lpfc_cmd; 1081 struct lpfc_scsi_buf * lpfc_cmd;
1109 1082
1110 lpfc_block_requests(phba); 1083 lpfc_block_requests(phba);
@@ -1124,7 +1097,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1124 * targets known to the driver. Should any target reset 1097 * targets known to the driver. Should any target reset
1125 * fail, this routine returns failure to the midlayer. 1098 * fail, this routine returns failure to the midlayer.
1126 */ 1099 */
1127 midlayer_id = cmnd->device->id;
1128 for (i = 0; i < MAX_FCP_TARGET; i++) { 1100 for (i = 0; i < MAX_FCP_TARGET; i++) {
1129 /* Search the mapped list for this target ID */ 1101 /* Search the mapped list for this target ID */
1130 match = 0; 1102 match = 0;
@@ -1137,9 +1109,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1137 if (!match) 1109 if (!match)
1138 continue; 1110 continue;
1139 1111
1140 lpfc_cmd->pCmd->device->id = i; 1112 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
1141 lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data; 1113 i, ndlp->rport->dd_data);
1142 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
1143 if (ret != SUCCESS) { 1114 if (ret != SUCCESS) {
1144 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1115 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1145 "%d:0713 Bus Reset on target %d failed\n", 1116 "%d:0713 Bus Reset on target %d failed\n",
@@ -1158,7 +1129,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1158 * the targets. Unfortunately, some targets do not abide by 1129 * the targets. Unfortunately, some targets do not abide by
1159 * this forcing the driver to double check. 1130 * this forcing the driver to double check.
1160 */ 1131 */
1161 cmnd->device->id = midlayer_id;
1162 cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 1132 cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1163 0, 0, LPFC_CTX_HOST); 1133 0, 0, LPFC_CTX_HOST);
1164 if (cnt) 1134 if (cnt)
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4cf1366108b7..6b737568b831 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.1.4"
+#define LPFC_DRIVER_VERSION "8.1.6"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index d245717fee65..c33857e7b33b 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4471,7 +4471,6 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
 {
 	Scsi_Cmnd	*scmd;
 	struct scsi_device *sdev;
-	unsigned long flags = 0;
 	scb_t	*scb;
 	int	rval;
 
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c11e5ce6865e..bec1424eda85 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mbox.c 12 * FILE : megaraid_mbox.c
13 * Version : v2.20.4.7 (Nov 14 2005) 13 * Version : v2.20.4.8 (Apr 11 2006)
14 * 14 *
15 * Authors: 15 * Authors:
16 * Atul Mukker <Atul.Mukker@lsil.com> 16 * Atul Mukker <Atul.Mukker@lsil.com>
@@ -2278,6 +2278,7 @@ megaraid_mbox_dpc(unsigned long devp)
2278 unsigned long flags; 2278 unsigned long flags;
2279 uint8_t c; 2279 uint8_t c;
2280 int status; 2280 int status;
2281 uioc_t *kioc;
2281 2282
2282 2283
2283 if (!adapter) return; 2284 if (!adapter) return;
@@ -2320,6 +2321,9 @@ megaraid_mbox_dpc(unsigned long devp)
2320 // remove from local clist 2321 // remove from local clist
2321 list_del_init(&scb->list); 2322 list_del_init(&scb->list);
2322 2323
2324 kioc = (uioc_t *)scb->gp;
2325 kioc->status = 0;
2326
2323 megaraid_mbox_mm_done(adapter, scb); 2327 megaraid_mbox_mm_done(adapter, scb);
2324 2328
2325 continue; 2329 continue;
@@ -2636,6 +2640,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2636 int recovery_window; 2640 int recovery_window;
2637 int recovering; 2641 int recovering;
2638 int i; 2642 int i;
2643 uioc_t *kioc;
2639 2644
2640 adapter = SCP2ADAPTER(scp); 2645 adapter = SCP2ADAPTER(scp);
2641 raid_dev = ADAP2RAIDDEV(adapter); 2646 raid_dev = ADAP2RAIDDEV(adapter);
@@ -2655,32 +2660,51 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2655 // Also, reset all the commands currently owned by the driver 2660 // Also, reset all the commands currently owned by the driver
2656 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); 2661 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2657 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { 2662 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2658
2659 list_del_init(&scb->list); // from pending list 2663 list_del_init(&scb->list); // from pending list
2660 2664
2661 con_log(CL_ANN, (KERN_WARNING 2665 if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2662 "megaraid: %ld:%d[%d:%d], reset from pending list\n", 2666 con_log(CL_ANN, (KERN_WARNING
2663 scp->serial_number, scb->sno, 2667 "megaraid: IOCTL packet with %d[%d:%d] being reset\n",
2664 scb->dev_channel, scb->dev_target)); 2668 scb->sno, scb->dev_channel, scb->dev_target));
2665 2669
2666 scp->result = (DID_RESET << 16); 2670 scb->status = -1;
2667 scp->scsi_done(scp);
2668 2671
2669 megaraid_dealloc_scb(adapter, scb); 2672 kioc = (uioc_t *)scb->gp;
2673 kioc->status = -EFAULT;
2674
2675 megaraid_mbox_mm_done(adapter, scb);
2676 } else {
2677 if (scb->scp == scp) { // Found command
2678 con_log(CL_ANN, (KERN_WARNING
2679 "megaraid: %ld:%d[%d:%d], reset from pending list\n",
2680 scp->serial_number, scb->sno,
2681 scb->dev_channel, scb->dev_target));
2682 } else {
2683 con_log(CL_ANN, (KERN_WARNING
2684 "megaraid: IO packet with %d[%d:%d] being reset\n",
2685 scb->sno, scb->dev_channel, scb->dev_target));
2686 }
2687
2688 scb->scp->result = (DID_RESET << 16);
2689 scb->scp->scsi_done(scb->scp);
2690
2691 megaraid_dealloc_scb(adapter, scb);
2692 }
2670 } 2693 }
2671 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); 2694 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2672 2695
2673 if (adapter->outstanding_cmds) { 2696 if (adapter->outstanding_cmds) {
2674 con_log(CL_ANN, (KERN_NOTICE 2697 con_log(CL_ANN, (KERN_NOTICE
2675 "megaraid: %d outstanding commands. Max wait %d sec\n", 2698 "megaraid: %d outstanding commands. Max wait %d sec\n",
2676 adapter->outstanding_cmds, MBOX_RESET_WAIT)); 2699 adapter->outstanding_cmds,
2700 (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
2677 } 2701 }
2678 2702
2679 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; 2703 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
2680 2704
2681 recovering = adapter->outstanding_cmds; 2705 recovering = adapter->outstanding_cmds;
2682 2706
2683 for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) { 2707 for (i = 0; i < recovery_window; i++) {
2684 2708
2685 megaraid_ack_sequence(adapter); 2709 megaraid_ack_sequence(adapter);
2686 2710
@@ -2689,12 +2713,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2689 con_log(CL_ANN, ( 2713 con_log(CL_ANN, (
2690 "megaraid mbox: Wait for %d commands to complete:%d\n", 2714 "megaraid mbox: Wait for %d commands to complete:%d\n",
2691 adapter->outstanding_cmds, 2715 adapter->outstanding_cmds,
2692 MBOX_RESET_WAIT - i)); 2716 (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
2693 } 2717 }
2694 2718
2695 // bailout if no recovery happended in reset time 2719 // bailout if no recovery happended in reset time
2696 if ((i == MBOX_RESET_WAIT) && 2720 if (adapter->outstanding_cmds == 0) {
2697 (recovering == adapter->outstanding_cmds)) {
2698 break; 2721 break;
2699 } 2722 }
2700 2723
@@ -2918,12 +2941,13 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
2918 wmb(); 2941 wmb();
2919 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); 2942 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2920 2943
2921 for (i = 0; i < 0xFFFFF; i++) { 2944 for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
2922 if (mbox->numstatus != 0xFF) break; 2945 if (mbox->numstatus != 0xFF) break;
2923 rmb(); 2946 rmb();
2947 udelay(MBOX_SYNC_DELAY_200);
2924 } 2948 }
2925 2949
2926 if (i == 0xFFFFF) { 2950 if (i == MBOX_SYNC_WAIT_CNT) {
2927 // We may need to re-calibrate the counter 2951 // We may need to re-calibrate the counter
2928 con_log(CL_ANN, (KERN_CRIT 2952 con_log(CL_ANN, (KERN_CRIT
2929 "megaraid: fast sync command timed out\n")); 2953 "megaraid: fast sync command timed out\n"));
@@ -3475,7 +3499,7 @@ megaraid_cmm_register(adapter_t *adapter)
3475 adp.drvr_data = (unsigned long)adapter; 3499 adp.drvr_data = (unsigned long)adapter;
3476 adp.pdev = adapter->pdev; 3500 adp.pdev = adapter->pdev;
3477 adp.issue_uioc = megaraid_mbox_mm_handler; 3501 adp.issue_uioc = megaraid_mbox_mm_handler;
3478 adp.timeout = 300; 3502 adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
3479 adp.max_kioc = MBOX_MAX_USER_CMDS; 3503 adp.max_kioc = MBOX_MAX_USER_CMDS;
3480 3504
3481 if ((rval = mraid_mm_register_adp(&adp)) != 0) { 3505 if ((rval = mraid_mm_register_adp(&adp)) != 0) {
@@ -3702,7 +3726,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3702 unsigned long flags; 3726 unsigned long flags;
3703 3727
3704 kioc = (uioc_t *)scb->gp; 3728 kioc = (uioc_t *)scb->gp;
3705 kioc->status = 0;
3706 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; 3729 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3707 mbox64->mbox32.status = scb->status; 3730 mbox64->mbox32.status = scb->status;
3708 raw_mbox = (uint8_t *)&mbox64->mbox32; 3731 raw_mbox = (uint8_t *)&mbox64->mbox32;
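Two things stand out in the megaraid_mbox.c hunks: the reset handler now tells driver-internal IOCTL packets (scb->sno >= MBOX_MAX_SCSI_CMDS) apart from normal SCSI commands and completes each kind through its own path, and kioc->status is now set by the callers rather than inside megaraid_mbox_mm_done(). Below is a user-space sketch of that routing decision, with an illustrative MBOX_MAX_SCSI_CMDS value and stand-in completion functions.

    /* Sketch, not the driver: route each pending command to the ioctl or
     * the normal SCSI completion path, keyed on its serial number. */
    #include <stdio.h>

    #define MBOX_MAX_SCSI_CMDS 128   /* illustrative value */

    struct fake_scb {
        int sno;     /* serial number; >= MBOX_MAX_SCSI_CMDS means ioctl packet */
        int status;
    };

    static void complete_as_ioctl(struct fake_scb *scb)
    {
        scb->status = -1;   /* the driver also sets kioc->status to -EFAULT here */
        printf("ioctl packet %d returned to the management module\n", scb->sno);
    }

    static void complete_as_io(struct fake_scb *scb)
    {
        printf("io packet %d completed with DID_RESET\n", scb->sno);
    }

    static void reset_one(struct fake_scb *scb)
    {
        if (scb->sno >= MBOX_MAX_SCSI_CMDS)
            complete_as_ioctl(scb);
        else
            complete_as_io(scb);
    }

    int main(void)
    {
        struct fake_scb a = { .sno = 3 }, b = { .sno = 200 };
        reset_one(&a);
        reset_one(&b);
        return 0;
    }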
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 882fb1a0b575..868fb0ec93e7 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
21#include "megaraid_ioctl.h" 21#include "megaraid_ioctl.h"
22 22
23 23
24#define MEGARAID_VERSION "2.20.4.7" 24#define MEGARAID_VERSION "2.20.4.8"
25#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)" 25#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)"
26 26
27 27
28/* 28/*
@@ -100,6 +100,9 @@
100#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox 100#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox
101#define MBOX_RESET_WAIT 180 // wait these many seconds in reset 101#define MBOX_RESET_WAIT 180 // wait these many seconds in reset
102#define MBOX_RESET_EXT_WAIT 120 // extended wait reset 102#define MBOX_RESET_EXT_WAIT 120 // extended wait reset
103#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode
104
105#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds
103 106
104/* 107/*
105 * maximum transfer that can happen through the firmware commands issued 108 * maximum transfer that can happen through the firmware commands issued
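The two constants added above replace the bare 0xFFFFF spin in mbox_post_sync_cmd_fast() with a bounded poll: up to MBOX_SYNC_WAIT_CNT iterations with a udelay(MBOX_SYNC_DELAY_200) between checks, and exhausting the loop is treated as a timeout. A self-contained user-space model of that pattern, with usleep() standing in for udelay() and the "firmware" completing immediately:

    #include <stdio.h>
    #include <unistd.h>

    #define MBOX_SYNC_WAIT_CNT  0xFFFF
    #define MBOX_SYNC_DELAY_200 200      /* microseconds */

    static volatile unsigned char numstatus = 0xFF;   /* 0xFF == not done yet */

    int main(void)
    {
        int i;

        numstatus = 0;   /* pretend the firmware completed right away */

        for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
            if (numstatus != 0xFF)
                break;
            usleep(MBOX_SYNC_DELAY_200);
        }

        if (i == MBOX_SYNC_WAIT_CNT)
            fprintf(stderr, "sync command timed out\n");
        else
            printf("completed after %d polls\n", i);
        return 0;
    }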
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8f3ce0432295..e8f534fb336b 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -898,10 +898,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
898 898
899 adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); 899 adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
900 900
901 if (!adapter) { 901 if (!adapter)
902 rval = -ENOMEM; 902 return -ENOMEM;
903 goto memalloc_error;
904 }
905 903
906 memset(adapter, 0, sizeof(mraid_mmadp_t)); 904 memset(adapter, 0, sizeof(mraid_mmadp_t));
907 905
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index fee843fab1c7..108910f512e4 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -982,6 +982,12 @@ static int device_check(ppa_struct *dev)
982 return -ENODEV; 982 return -ENODEV;
983} 983}
984 984
985static int ppa_adjust_queue(struct scsi_device *device)
986{
987 blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
988 return 0;
989}
990
985static struct scsi_host_template ppa_template = { 991static struct scsi_host_template ppa_template = {
986 .module = THIS_MODULE, 992 .module = THIS_MODULE,
987 .proc_name = "ppa", 993 .proc_name = "ppa",
@@ -997,6 +1003,7 @@ static struct scsi_host_template ppa_template = {
997 .cmd_per_lun = 1, 1003 .cmd_per_lun = 1,
998 .use_clustering = ENABLE_CLUSTERING, 1004 .use_clustering = ENABLE_CLUSTERING,
999 .can_queue = 1, 1005 .can_queue = 1,
1006 .slave_alloc = ppa_adjust_queue,
1000}; 1007};
1001 1008
1002/*************************************************************************** 1009/***************************************************************************
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1052528c3109..ccaad0b08d35 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -589,6 +589,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
589* Either SUCCESS or FAILED. 589* Either SUCCESS or FAILED.
590* 590*
591* Note: 591* Note:
592* Only return FAILED if command not returned by firmware.
592**************************************************************************/ 593**************************************************************************/
593int 594int
594qla2xxx_eh_abort(struct scsi_cmnd *cmd) 595qla2xxx_eh_abort(struct scsi_cmnd *cmd)
@@ -599,11 +600,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
599 unsigned int id, lun; 600 unsigned int id, lun;
600 unsigned long serial; 601 unsigned long serial;
601 unsigned long flags; 602 unsigned long flags;
603 int wait = 0;
602 604
603 if (!CMD_SP(cmd)) 605 if (!CMD_SP(cmd))
604 return FAILED; 606 return SUCCESS;
605 607
606 ret = FAILED; 608 ret = SUCCESS;
607 609
608 id = cmd->device->id; 610 id = cmd->device->id;
609 lun = cmd->device->lun; 611 lun = cmd->device->lun;
@@ -631,7 +633,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
631 } else { 633 } else {
632 DEBUG3(printk("%s(%ld): abort_command " 634 DEBUG3(printk("%s(%ld): abort_command "
633 "mbx success.\n", __func__, ha->host_no)); 635 "mbx success.\n", __func__, ha->host_no));
634 ret = SUCCESS; 636 wait = 1;
635 } 637 }
636 spin_lock_irqsave(&ha->hardware_lock, flags); 638 spin_lock_irqsave(&ha->hardware_lock, flags);
637 639
@@ -640,17 +642,18 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
640 spin_unlock_irqrestore(&ha->hardware_lock, flags); 642 spin_unlock_irqrestore(&ha->hardware_lock, flags);
641 643
642 /* Wait for the command to be returned. */ 644 /* Wait for the command to be returned. */
643 if (ret == SUCCESS) { 645 if (wait) {
644 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 646 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
645 qla_printk(KERN_ERR, ha, 647 qla_printk(KERN_ERR, ha,
646 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 648 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
647 "%x.\n", ha->host_no, id, lun, serial, ret); 649 "%x.\n", ha->host_no, id, lun, serial, ret);
650 ret = FAILED;
648 } 651 }
649 } 652 }
650 653
651 qla_printk(KERN_INFO, ha, 654 qla_printk(KERN_INFO, ha,
652 "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no, 655 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
653 id, lun, serial, ret); 656 ha->host_no, id, lun, wait, serial, ret);
654 657
655 return ret; 658 return ret;
656} 659}
@@ -1687,8 +1690,8 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1687 ha->flags.online = 0; 1690 ha->flags.online = 0;
1688 1691
1689 /* Detach interrupts */ 1692 /* Detach interrupts */
1690 if (ha->pdev->irq) 1693 if (ha->host->irq)
1691 free_irq(ha->pdev->irq, ha); 1694 free_irq(ha->host->irq, ha);
1692 1695
1693 /* release io space registers */ 1696 /* release io space registers */
1694 if (ha->iobase) 1697 if (ha->iobase)
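The qla2xxx abort handler above now starts from SUCCESS, waits on the command only when the abort mailbox was accepted, and downgrades to FAILED only if that wait times out; a command with no CMD_SP is treated as already completed. A small user-space model of that decision flow (the SUCCESS/FAILED values and the parameters here are illustrative, not the midlayer's):

    #include <stdio.h>

    enum { SUCCESS = 0, FAILED = 1 };

    static int eh_abort(int cmd_outstanding, int mbx_ok, int cmd_returned)
    {
        int ret = SUCCESS;
        int wait = 0;

        if (!cmd_outstanding)         /* already completed: nothing to abort */
            return SUCCESS;

        if (mbx_ok)                   /* firmware accepted the abort request */
            wait = 1;

        if (wait && !cmd_returned)    /* accepted, but the command never came back */
            ret = FAILED;

        return ret;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               eh_abort(0, 0, 0),     /* no command: SUCCESS */
               eh_abort(1, 1, 1),     /* aborted and returned: SUCCESS */
               eh_abort(1, 1, 0));    /* abort accepted, wait timed out: FAILED */
        return 0;
    }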
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index d5fdcb9a8842..9b8bca1ac1f0 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -37,7 +37,7 @@
37#include <asm/io.h> 37#include <asm/io.h>
38 38
39#define DRV_NAME "sata_mv" 39#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.6" 40#define DRV_VERSION "0.7"
41 41
42enum { 42enum {
43 /* BAR's are enumerated in terms of pci_resource_start() terms */ 43 /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -50,6 +50,12 @@ enum {
50 50
51 MV_PCI_REG_BASE = 0, 51 MV_PCI_REG_BASE = 0,
52 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ 52 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
53 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
54 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
55 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
56 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
57 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
58
53 MV_SATAHC0_REG_BASE = 0x20000, 59 MV_SATAHC0_REG_BASE = 0x20000,
54 MV_FLASH_CTL = 0x1046c, 60 MV_FLASH_CTL = 0x1046c,
55 MV_GPIO_PORT_CTL = 0x104f0, 61 MV_GPIO_PORT_CTL = 0x104f0,
@@ -302,9 +308,6 @@ struct mv_port_priv {
302 dma_addr_t crpb_dma; 308 dma_addr_t crpb_dma;
303 struct mv_sg *sg_tbl; 309 struct mv_sg *sg_tbl;
304 dma_addr_t sg_tbl_dma; 310 dma_addr_t sg_tbl_dma;
305
306 unsigned req_producer; /* cp of req_in_ptr */
307 unsigned rsp_consumer; /* cp of rsp_out_ptr */
308 u32 pp_flags; 311 u32 pp_flags;
309}; 312};
310 313
@@ -937,8 +940,6 @@ static int mv_port_start(struct ata_port *ap)
937 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, 940 writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
938 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 941 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
939 942
940 pp->req_producer = pp->rsp_consumer = 0;
941
942 /* Don't turn on EDMA here...do it before DMA commands only. Else 943 /* Don't turn on EDMA here...do it before DMA commands only. Else
943 * we'll be unable to send non-data, PIO, etc due to restricted access 944 * we'll be unable to send non-data, PIO, etc due to restricted access
944 * to shadow regs. 945 * to shadow regs.
@@ -1022,16 +1023,16 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
1022 } 1023 }
1023} 1024}
1024 1025
1025static inline unsigned mv_inc_q_index(unsigned *index) 1026static inline unsigned mv_inc_q_index(unsigned index)
1026{ 1027{
1027 *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; 1028 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1028 return *index;
1029} 1029}
1030 1030
1031static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) 1031static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
1032{ 1032{
1033 *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1033 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1034 (last ? CRQB_CMD_LAST : 0); 1034 (last ? CRQB_CMD_LAST : 0);
1035 *cmdw = cpu_to_le16(tmp);
1035} 1036}
1036 1037
1037/** 1038/**
@@ -1053,15 +1054,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1053 u16 *cw; 1054 u16 *cw;
1054 struct ata_taskfile *tf; 1055 struct ata_taskfile *tf;
1055 u16 flags = 0; 1056 u16 flags = 0;
1057 unsigned in_index;
1056 1058
1057 if (ATA_PROT_DMA != qc->tf.protocol) 1059 if (ATA_PROT_DMA != qc->tf.protocol)
1058 return; 1060 return;
1059 1061
1060 /* the req producer index should be the same as we remember it */
1061 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1062 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1063 pp->req_producer);
1064
1065 /* Fill in command request block 1062 /* Fill in command request block
1066 */ 1063 */
1067 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) 1064 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1069,13 +1066,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
1069 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1066 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1070 flags |= qc->tag << CRQB_TAG_SHIFT; 1067 flags |= qc->tag << CRQB_TAG_SHIFT;
1071 1068
1072 pp->crqb[pp->req_producer].sg_addr = 1069 /* get current queue index from hardware */
1070 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1071 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1072
1073 pp->crqb[in_index].sg_addr =
1073 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1074 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1074 pp->crqb[pp->req_producer].sg_addr_hi = 1075 pp->crqb[in_index].sg_addr_hi =
1075 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1076 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1076 pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); 1077 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1077 1078
1078 cw = &pp->crqb[pp->req_producer].ata_cmd[0]; 1079 cw = &pp->crqb[in_index].ata_cmd[0];
1079 tf = &qc->tf; 1080 tf = &qc->tf;
1080 1081
1081 /* Sadly, the CRQB cannot accomodate all registers--there are 1082 /* Sadly, the CRQB cannot accomodate all registers--there are
@@ -1144,16 +1145,12 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1144 struct mv_port_priv *pp = ap->private_data; 1145 struct mv_port_priv *pp = ap->private_data;
1145 struct mv_crqb_iie *crqb; 1146 struct mv_crqb_iie *crqb;
1146 struct ata_taskfile *tf; 1147 struct ata_taskfile *tf;
1148 unsigned in_index;
1147 u32 flags = 0; 1149 u32 flags = 0;
1148 1150
1149 if (ATA_PROT_DMA != qc->tf.protocol) 1151 if (ATA_PROT_DMA != qc->tf.protocol)
1150 return; 1152 return;
1151 1153
1152 /* the req producer index should be the same as we remember it */
1153 WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
1154 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1155 pp->req_producer);
1156
1157 /* Fill in Gen IIE command request block 1154 /* Fill in Gen IIE command request block
1158 */ 1155 */
1159 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) 1156 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
@@ -1162,7 +1159,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1162 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 1159 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1163 flags |= qc->tag << CRQB_TAG_SHIFT; 1160 flags |= qc->tag << CRQB_TAG_SHIFT;
1164 1161
1165 crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer]; 1162 /* get current queue index from hardware */
1163 in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1164 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1165
1166 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1166 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); 1167 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1167 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); 1168 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1168 crqb->flags = cpu_to_le32(flags); 1169 crqb->flags = cpu_to_le32(flags);
@@ -1210,6 +1211,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1210{ 1211{
1211 void __iomem *port_mmio = mv_ap_base(qc->ap); 1212 void __iomem *port_mmio = mv_ap_base(qc->ap);
1212 struct mv_port_priv *pp = qc->ap->private_data; 1213 struct mv_port_priv *pp = qc->ap->private_data;
1214 unsigned in_index;
1213 u32 in_ptr; 1215 u32 in_ptr;
1214 1216
1215 if (ATA_PROT_DMA != qc->tf.protocol) { 1217 if (ATA_PROT_DMA != qc->tf.protocol) {
@@ -1221,23 +1223,20 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1221 return ata_qc_issue_prot(qc); 1223 return ata_qc_issue_prot(qc);
1222 } 1224 }
1223 1225
1224 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1226 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1227 in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1225 1228
1226 /* the req producer index should be the same as we remember it */
1227 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
1228 pp->req_producer);
1229 /* until we do queuing, the queue should be empty at this point */ 1229 /* until we do queuing, the queue should be empty at this point */
1230 WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != 1230 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1231 ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> 1231 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1232 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1233 1232
1234 mv_inc_q_index(&pp->req_producer); /* now incr producer index */ 1233 in_index = mv_inc_q_index(in_index); /* now incr producer index */
1235 1234
1236 mv_start_dma(port_mmio, pp); 1235 mv_start_dma(port_mmio, pp);
1237 1236
1238 /* and write the request in pointer to kick the EDMA to life */ 1237 /* and write the request in pointer to kick the EDMA to life */
1239 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; 1238 in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1240 in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; 1239 in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1241 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); 1240 writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1242 1241
1243 return 0; 1242 return 0;
@@ -1260,28 +1259,26 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1260{ 1259{
1261 void __iomem *port_mmio = mv_ap_base(ap); 1260 void __iomem *port_mmio = mv_ap_base(ap);
1262 struct mv_port_priv *pp = ap->private_data; 1261 struct mv_port_priv *pp = ap->private_data;
1262 unsigned out_index;
1263 u32 out_ptr; 1263 u32 out_ptr;
1264 u8 ata_status; 1264 u8 ata_status;
1265 1265
1266 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1266 out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1267 out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1267 1268
1268 /* the response consumer index should be the same as we remember it */ 1269 ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1269 WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != 1270 >> CRPB_FLAG_STATUS_SHIFT;
1270 pp->rsp_consumer);
1271
1272 ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
1273 1271
1274 /* increment our consumer index... */ 1272 /* increment our consumer index... */
1275 pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); 1273 out_index = mv_inc_q_index(out_index);
1276 1274
1277 /* and, until we do NCQ, there should only be 1 CRPB waiting */ 1275 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1278 WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> 1276 WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1279 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != 1277 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1280 pp->rsp_consumer);
1281 1278
1282 /* write out our inc'd consumer index so EDMA knows we're caught up */ 1279 /* write out our inc'd consumer index so EDMA knows we're caught up */
1283 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; 1280 out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1284 out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; 1281 out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1285 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); 1282 writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1286 1283
1287 /* Return ATA status register for completed CRPB */ 1284 /* Return ATA status register for completed CRPB */
@@ -1291,6 +1288,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1291/** 1288/**
1292 * mv_err_intr - Handle error interrupts on the port 1289 * mv_err_intr - Handle error interrupts on the port
1293 * @ap: ATA channel to manipulate 1290 * @ap: ATA channel to manipulate
1291 * @reset_allowed: bool: 0 == don't trigger from reset here
1294 * 1292 *
1295 * In most cases, just clear the interrupt and move on. However, 1293 * In most cases, just clear the interrupt and move on. However,
1296 * some cases require an eDMA reset, which is done right before 1294 * some cases require an eDMA reset, which is done right before
@@ -1301,7 +1299,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
1301 * LOCKING: 1299 * LOCKING:
1302 * Inherited from caller. 1300 * Inherited from caller.
1303 */ 1301 */
1304static void mv_err_intr(struct ata_port *ap) 1302static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1305{ 1303{
1306 void __iomem *port_mmio = mv_ap_base(ap); 1304 void __iomem *port_mmio = mv_ap_base(ap);
1307 u32 edma_err_cause, serr = 0; 1305 u32 edma_err_cause, serr = 0;
@@ -1323,9 +1321,8 @@ static void mv_err_intr(struct ata_port *ap)
1323 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); 1321 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1324 1322
1325 /* check for fatal here and recover if needed */ 1323 /* check for fatal here and recover if needed */
1326 if (EDMA_ERR_FATAL & edma_err_cause) { 1324 if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1327 mv_stop_and_reset(ap); 1325 mv_stop_and_reset(ap);
1328 }
1329} 1326}
1330 1327
1331/** 1328/**
@@ -1374,12 +1371,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1374 struct ata_port *ap = host_set->ports[port]; 1371 struct ata_port *ap = host_set->ports[port];
1375 struct mv_port_priv *pp = ap->private_data; 1372 struct mv_port_priv *pp = ap->private_data;
1376 1373
1377 hard_port = port & MV_PORT_MASK; /* range 0-3 */ 1374 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1378 handled = 0; /* ensure ata_status is set if handled++ */ 1375 handled = 0; /* ensure ata_status is set if handled++ */
1379 1376
1380 /* Note that DEV_IRQ might happen spuriously during EDMA, 1377 /* Note that DEV_IRQ might happen spuriously during EDMA,
1381 * and should be ignored in such cases. We could mask it, 1378 * and should be ignored in such cases.
1382 * but it's pretty rare and may not be worth the overhead. 1379 * The cause of this is still under investigation.
1383 */ 1380 */
1384 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1381 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1385 /* EDMA: check for response queue interrupt */ 1382 /* EDMA: check for response queue interrupt */
@@ -1393,6 +1390,11 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1393 ata_status = readb((void __iomem *) 1390 ata_status = readb((void __iomem *)
1394 ap->ioaddr.status_addr); 1391 ap->ioaddr.status_addr);
1395 handled = 1; 1392 handled = 1;
1393 /* ignore spurious intr if drive still BUSY */
1394 if (ata_status & ATA_BUSY) {
1395 ata_status = 0;
1396 handled = 0;
1397 }
1396 } 1398 }
1397 } 1399 }
1398 1400
@@ -1406,7 +1408,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1406 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1408 shift++; /* skip bit 8 in the HC Main IRQ reg */
1407 } 1409 }
1408 if ((PORT0_ERR << shift) & relevant) { 1410 if ((PORT0_ERR << shift) & relevant) {
1409 mv_err_intr(ap); 1411 mv_err_intr(ap, 1);
1410 err_mask |= AC_ERR_OTHER; 1412 err_mask |= AC_ERR_OTHER;
1411 handled = 1; 1413 handled = 1;
1412 } 1414 }
@@ -1448,6 +1450,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1448 struct ata_host_set *host_set = dev_instance; 1450 struct ata_host_set *host_set = dev_instance;
1449 unsigned int hc, handled = 0, n_hcs; 1451 unsigned int hc, handled = 0, n_hcs;
1450 void __iomem *mmio = host_set->mmio_base; 1452 void __iomem *mmio = host_set->mmio_base;
1453 struct mv_host_priv *hpriv;
1451 u32 irq_stat; 1454 u32 irq_stat;
1452 1455
1453 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); 1456 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
@@ -1469,6 +1472,17 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
1469 handled++; 1472 handled++;
1470 } 1473 }
1471 } 1474 }
1475
1476 hpriv = host_set->private_data;
1477 if (IS_60XX(hpriv)) {
1478 /* deal with the interrupt coalescing bits */
1479 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1480 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1481 writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1482 writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1483 }
1484 }
1485
1472 if (PCI_ERR & irq_stat) { 1486 if (PCI_ERR & irq_stat) {
1473 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", 1487 printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1474 readl(mmio + PCI_IRQ_CAUSE_OFS)); 1488 readl(mmio + PCI_IRQ_CAUSE_OFS));
@@ -1867,7 +1881,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1867 1881
1868 if (IS_60XX(hpriv)) { 1882 if (IS_60XX(hpriv)) {
1869 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); 1883 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1870 ifctl |= (1 << 12) | (1 << 7); 1884 ifctl |= (1 << 7); /* enable gen2i speed */
1885 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1871 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); 1886 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1872 } 1887 }
1873 1888
@@ -2031,11 +2046,14 @@ static void mv_eng_timeout(struct ata_port *ap)
2031 ap->host_set->mmio_base, ap, qc, qc->scsicmd, 2046 ap->host_set->mmio_base, ap, qc, qc->scsicmd,
2032 &qc->scsicmd->cmnd); 2047 &qc->scsicmd->cmnd);
2033 2048
2034 mv_err_intr(ap); 2049 mv_err_intr(ap, 0);
2035 mv_stop_and_reset(ap); 2050 mv_stop_and_reset(ap);
2036 2051
2037 qc->err_mask |= AC_ERR_TIMEOUT; 2052 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2038 ata_eh_qc_complete(qc); 2053 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2054 qc->err_mask |= AC_ERR_TIMEOUT;
2055 ata_eh_qc_complete(qc);
2056 }
2039} 2057}
2040 2058
2041/** 2059/**
@@ -2229,7 +2247,8 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2229 void __iomem *port_mmio = mv_port_base(mmio, port); 2247 void __iomem *port_mmio = mv_port_base(mmio, port);
2230 2248
2231 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); 2249 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2232 ifctl |= (1 << 12); 2250 ifctl |= (1 << 7); /* enable gen2i speed */
2251 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2233 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); 2252 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2234 } 2253 }
2235 2254
@@ -2330,6 +2349,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2330 if (rc) { 2349 if (rc) {
2331 return rc; 2350 return rc;
2332 } 2351 }
2352 pci_set_master(pdev);
2333 2353
2334 rc = pci_request_regions(pdev, DRV_NAME); 2354 rc = pci_request_regions(pdev, DRV_NAME);
2335 if (rc) { 2355 if (rc) {
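The sata_mv.c changes above drop the cached req_producer/rsp_consumer copies: the current index is recomputed from the EDMA in/out pointer registers each time it is needed, and mv_inc_q_index() becomes a pure function of its argument. A user-space sketch of that index handling; the queue depth, shift value, and "register" value are illustrative, while the mask arithmetic matches the hunks above.

    #include <stdio.h>
    #include <stdint.h>

    #define MV_MAX_Q_DEPTH       32                    /* illustrative depth */
    #define MV_MAX_Q_DEPTH_MASK  (MV_MAX_Q_DEPTH - 1)
    #define EDMA_REQ_Q_PTR_SHIFT 5                     /* illustrative shift */

    static unsigned inc_q_index(unsigned index)
    {
        return (index + 1) & MV_MAX_Q_DEPTH_MASK;      /* wraps 31 -> 0 */
    }

    int main(void)
    {
        uint32_t in_ptr = 31u << EDMA_REQ_Q_PTR_SHIFT; /* pretend register value */
        unsigned in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

        printf("index %u, next %u\n", in_index, inc_q_index(in_index));
        return 0;
    }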
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index f7264fd611c2..cb9082fd7e2f 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -454,7 +454,7 @@ static int sil24_softreset(struct ata_port *ap, int verbose,
454 */ 454 */
455 msleep(10); 455 msleep(10);
456 456
457 prb->ctrl = PRB_CTRL_SRST; 457 prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
458 prb->fis[1] = 0; /* no PM yet */ 458 prb->fis[1] = 0; /* no PM yet */
459 459
460 writel((u32)paddr, port + PORT_CMD_ACTIVATE); 460 writel((u32)paddr, port + PORT_CMD_ACTIVATE);
@@ -551,9 +551,9 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
551 551
552 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) { 552 if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
553 if (qc->tf.flags & ATA_TFLAG_WRITE) 553 if (qc->tf.flags & ATA_TFLAG_WRITE)
554 prb->ctrl = PRB_CTRL_PACKET_WRITE; 554 prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE);
555 else 555 else
556 prb->ctrl = PRB_CTRL_PACKET_READ; 556 prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ);
557 } else 557 } else
558 prb->ctrl = 0; 558 prb->ctrl = 0;
559 559
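The sata_sil24.c hunks wrap the PRB control constants in cpu_to_le16() because the ctrl field is stored little-endian for the hardware. Below is a portable user-space model of the byte layout that conversion guarantees; the numeric value used for PRB_CTRL_SRST is illustrative.

    #include <stdio.h>
    #include <stdint.h>

    #define PRB_CTRL_SRST 0x0040   /* illustrative flag value */

    int main(void)
    {
        uint16_t host_val = PRB_CTRL_SRST;

        /* Little-endian layout: low byte first, regardless of host order.
         * In the kernel, cpu_to_le16() produces this layout (a no-op on
         * little-endian CPUs, a byte swap on big-endian ones). */
        uint8_t le_bytes[2] = { host_val & 0xff, (host_val >> 8) & 0xff };

        printf("host 0x%04x -> le16 bytes %02x %02x\n",
               (unsigned)host_val, le_bytes[0], le_bytes[1]);
        return 0;
    }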
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index ae13e4a78d51..fb5cb4c9ac65 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -56,6 +56,8 @@ static struct {
56 {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ 56 {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */
57 {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ 57 {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */
58 {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ 58 {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */
59 {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */
60 {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */
59 {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ 61 {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */
60 {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ 62 {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */
61 {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ 63 {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */
@@ -165,6 +167,7 @@ static struct {
165 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, 167 {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
166 {"HP", "C1557A", NULL, BLIST_FORCELUN}, 168 {"HP", "C1557A", NULL, BLIST_FORCELUN},
167 {"HP", "C3323-300", "4269", BLIST_NOTQ}, 169 {"HP", "C3323-300", "4269", BLIST_NOTQ},
170 {"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
168 {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, 171 {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
169 {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, 172 {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
170 {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, 173 {"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3ca7b9d3086c..bdce9d1f5b71 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -367,7 +367,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
367 int nsegs, unsigned bufflen, gfp_t gfp) 367 int nsegs, unsigned bufflen, gfp_t gfp)
368{ 368{
369 struct request_queue *q = rq->q; 369 struct request_queue *q = rq->q;
370 int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT; 370 int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
371 unsigned int data_len = 0, len, bytes, off; 371 unsigned int data_len = 0, len, bytes, off;
372 struct page *page; 372 struct page *page;
373 struct bio *bio = NULL; 373 struct bio *bio = NULL;
@@ -1067,16 +1067,29 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1067 break; 1067 break;
1068 case NOT_READY: 1068 case NOT_READY:
1069 /* 1069 /*
1070 * If the device is in the process of becoming ready, 1070 * If the device is in the process of becoming
1071 * retry. 1071 * ready, or has a temporary blockage, retry.
1072 */ 1072 */
1073 if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { 1073 if (sshdr.asc == 0x04) {
1074 scsi_requeue_command(q, cmd); 1074 switch (sshdr.ascq) {
1075 return; 1075 case 0x01: /* becoming ready */
1076 case 0x04: /* format in progress */
1077 case 0x05: /* rebuild in progress */
1078 case 0x06: /* recalculation in progress */
1079 case 0x07: /* operation in progress */
1080 case 0x08: /* Long write in progress */
1081 case 0x09: /* self test in progress */
1082 scsi_requeue_command(q, cmd);
1083 return;
1084 default:
1085 break;
1086 }
1076 } 1087 }
1077 if (!(req->flags & REQ_QUIET)) 1088 if (!(req->flags & REQ_QUIET)) {
1078 scmd_printk(KERN_INFO, cmd, 1089 scmd_printk(KERN_INFO, cmd,
1079 "Device not ready.\n"); 1090 "Device not ready: ");
1091 scsi_print_sense_hdr("", &sshdr);
1092 }
1080 scsi_end_request(cmd, 0, this_count, 1); 1093 scsi_end_request(cmd, 0, this_count, 1);
1081 return; 1094 return;
1082 case VOLUME_OVERFLOW: 1095 case VOLUME_OVERFLOW:
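The scsi_lib.c NOT_READY hunk above widens the retry test from the single 0x04/0x01 pair to a set of ASC 0x04 "in progress" ASCQ codes, and prints the sense header when the command is finally failed. A user-space sketch of that retry decision (the function name and the extra sense codes exercised in main() are mine, not scsi_lib's):

    #include <stdio.h>
    #include <stdbool.h>

    static bool not_ready_should_retry(unsigned char asc, unsigned char ascq)
    {
        if (asc != 0x04)
            return false;

        switch (ascq) {
        case 0x01:      /* becoming ready */
        case 0x04:      /* format in progress */
        case 0x05:      /* rebuild in progress */
        case 0x06:      /* recalculation in progress */
        case 0x07:      /* operation in progress */
        case 0x08:      /* long write in progress */
        case 0x09:      /* self test in progress */
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d %d %d\n",
               not_ready_should_retry(0x04, 0x01),   /* retry */
               not_ready_should_retry(0x04, 0x02),   /* fail: init cmd required */
               not_ready_should_retry(0x3a, 0x00));  /* fail: different ASC */
        return 0;
    }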
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index ce4f806bd8ec..6da6721eb0db 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -955,7 +955,8 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
955 list_for_each_entry(rphy, &sas_host->rphy_list, list) { 955 list_for_each_entry(rphy, &sas_host->rphy_list, list) {
956 struct sas_phy *parent = dev_to_phy(rphy->dev.parent); 956 struct sas_phy *parent = dev_to_phy(rphy->dev.parent);
957 957
958 if (rphy->scsi_target_id == -1) 958 if (rphy->identify.device_type != SAS_END_DEVICE ||
959 rphy->scsi_target_id == -1)
959 continue; 960 continue;
960 961
961 if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) && 962 if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) &&
@@ -977,7 +978,6 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
977#define SETUP_TEMPLATE(attrb, field, perm, test) \ 978#define SETUP_TEMPLATE(attrb, field, perm, test) \
978 i->private_##attrb[count] = class_device_attr_##field; \ 979 i->private_##attrb[count] = class_device_attr_##field; \
979 i->private_##attrb[count].attr.mode = perm; \ 980 i->private_##attrb[count].attr.mode = perm; \
980 i->private_##attrb[count].store = NULL; \
981 i->attrb[count] = &i->private_##attrb[count]; \ 981 i->attrb[count] = &i->private_##attrb[count]; \
982 if (test) \ 982 if (test) \
983 count++ 983 count++
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 3274ab76c8d3..255886a9ac55 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -75,7 +75,7 @@ param_setup(char *str)
75 else if(!strncmp(pos, "id:", 3)) { 75 else if(!strncmp(pos, "id:", 3)) {
76 if(slot == -1) { 76 if(slot == -1) {
77 printk(KERN_WARNING "sim710: Must specify slot for id parameter\n"); 77 printk(KERN_WARNING "sim710: Must specify slot for id parameter\n");
78 } else if(slot > MAX_SLOTS) { 78 } else if(slot >= MAX_SLOTS) {
79 printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val); 79 printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val);
80 } else { 80 } else {
81 id_array[slot] = val; 81 id_array[slot] = val;
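The sim710.c hunk is a one-character off-by-one fix: slots index an array of MAX_SLOTS entries, so the rejection test must be ">= MAX_SLOTS", not "> MAX_SLOTS". A tiny sketch of the corrected bounds check, with an illustrative array size:

    #include <stdio.h>

    #define MAX_SLOTS 8            /* illustrative size */
    static int id_array[MAX_SLOTS];

    static int set_slot_id(int slot, int val)
    {
        if (slot < 0 || slot >= MAX_SLOTS)    /* slot == MAX_SLOTS is out of range */
            return -1;
        id_array[slot] = val;
        return 0;
    }

    int main(void)
    {
        /* prints "0 -1": index 7 is the last valid slot, 8 is rejected */
        printf("%d %d\n", set_slot_id(7, 5), set_slot_id(8, 5));
        return 0;
    }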
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 2691248b2cbd..ad87d73f88ee 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4054,7 +4054,7 @@ static int st_probe(struct device *dev)
4054 } 4054 }
4055 4055
4056 sdev_printk(KERN_WARNING, SDp, 4056 sdev_printk(KERN_WARNING, SDp,
4057 "Attached scsi tape %s", tape_name(tpnt)); 4057 "Attached scsi tape %s\n", tape_name(tpnt));
4058 printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n", 4058 printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n",
4059 tape_name(tpnt), tpnt->try_dio ? "yes" : "no", 4059 tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
4060 queue_dma_alignment(SDp->request_queue) + 1); 4060 queue_dma_alignment(SDp->request_queue) + 1);