author     Linus Torvalds <torvalds@linux-foundation.org>   2009-01-08 19:27:31 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-01-08 19:27:31 -0500
commit     cd764695b67386a81964f68e9c66efd9f13f4d29 (patch)
tree       504e961ab6bad164c41f4b9c1ff00c0ce7f645ee
parent     97d61b8e3aef163a75f80f4762794c154572293d (diff)
parent     ffda8c7dc492e2170bb263f7c56f286992ceb54b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (45 commits)
  [SCSI] qla2xxx: Update version number to 8.03.00-k1.
  [SCSI] qla2xxx: Add ISP81XX support.
  [SCSI] qla2xxx: Use proper request/response queues with MQ instantiations.
  [SCSI] qla2xxx: Correct MQ-chain information retrieval during a firmware dump.
  [SCSI] qla2xxx: Collapse EFT/FCE copy procedures during a firmware dump.
  [SCSI] qla2xxx: Don't pollute kernel logs with ZIO/RIO status messages.
  [SCSI] qla2xxx: Don't fallback to interrupt-polling during re-initialization with MSI-X enabled.
  [SCSI] qla2xxx: Remove support for reading/writing HW-event-log.
  [SCSI] cxgb3i: add missing include
  [SCSI] scsi_lib: fix DID_RESET status problems
  [SCSI] fc transport: restore missing dev_loss_tmo callback to LLDD
  [SCSI] aha152x_cs: Fix regression that keeps driver from using shared interrupts
  [SCSI] sd: Correctly handle 6-byte commands with DIX
  [SCSI] sd: DIF: Fix tagging on platforms with signed char
  [SCSI] sd: DIF: Show app tag on error
  [SCSI] Fix error handling for DIF/DIX
  [SCSI] scsi_lib: don't decrement busy counters when inserting commands
  [SCSI] libsas: fix test for negative unsigned and typos
  [SCSI] a2091, gvp11: kill warn_unused_result warnings
  [SCSI] fusion: Move a dereference below a NULL test
  ...

Fixed up trivial conflict due to moving the async part of sd_probe around in the async probes vs using dev_set_name() in naming.
-rw-r--r--  block/blk-map.c                        |  19
-rw-r--r--  drivers/message/fusion/mptctl.c        |   5
-rw-r--r--  drivers/misc/enclosure.c               |   8
-rw-r--r--  drivers/scsi/NCR_D700.c                |   2
-rw-r--r--  drivers/scsi/a2091.c                   |  18
-rw-r--r--  drivers/scsi/advansys.c                |   2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c     |   2
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.c       |   1
-rw-r--r--  drivers/scsi/gvp11.c                   |   8
-rw-r--r--  drivers/scsi/hosts.c                   |   6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c         |  14
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c       |  16
-rw-r--r--  drivers/scsi/ipr.c                     |   2
-rw-r--r--  drivers/scsi/ipr.h                     |   2
-rw-r--r--  drivers/scsi/lasi700.c                 |   3
-rw-r--r--  drivers/scsi/libsas/sas_discover.c     |   2
-rw-r--r--  drivers/scsi/libsas/sas_dump.c         |   2
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c     |  12
-rw-r--r--  drivers/scsi/libsas/sas_port.c         |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c          |   3
-rw-r--r--  drivers/scsi/mvsas.c                   |   2
-rw-r--r--  drivers/scsi/pcmcia/aha152x_stub.c     |   2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c        |  26
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c         | 462
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h         |  40
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h         |  33
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c         |   3
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h          | 294
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h         |  10
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c          |   9
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c        | 295
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c        |  12
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c         |  82
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c         |  41
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c         |   4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c          | 139
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c         | 263
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h     |   6
-rw-r--r--  drivers/scsi/raid_class.c              |   3
-rw-r--r--  drivers/scsi/scsi.c                    |   4
-rw-r--r--  drivers/scsi/scsi_debug.c              |   4
-rw-r--r--  drivers/scsi/scsi_error.c              |  24
-rw-r--r--  drivers/scsi/scsi_ioctl.c              |   9
-rw-r--r--  drivers/scsi/scsi_lib.c                | 119
-rw-r--r--  drivers/scsi/scsi_scan.c               |   7
-rw-r--r--  drivers/scsi/scsi_sysfs.c              |  12
-rw-r--r--  drivers/scsi/scsi_transport_fc.c       |  39
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c    |  11
-rw-r--r--  drivers/scsi/scsi_transport_sas.c      |  42
-rw-r--r--  drivers/scsi/scsi_transport_srp.c      |   2
-rw-r--r--  drivers/scsi/sd.c                      |   2
-rw-r--r--  drivers/scsi/sd_dif.c                  |  17
-rw-r--r--  drivers/scsi/ses.c                     |   2
-rw-r--r--  drivers/scsi/sg.c                      |   2
-rw-r--r--  drivers/scsi/sgiwd93.c                 |   3
-rw-r--r--  drivers/scsi/sim710.c                  |   4
-rw-r--r--  drivers/scsi/sni_53c710.c              |   3
-rw-r--r--  drivers/scsi/st.c                      | 492
-rw-r--r--  drivers/scsi/st.h                      |  14
-rw-r--r--  drivers/scsi/zalon.c                   |   4
-rw-r--r--  fs/bio.c                               |  36
-rw-r--r--  include/linux/blkdev.h                 |   2
-rw-r--r--  include/scsi/scsi_transport_fc.h       |   1
63 files changed, 1851 insertions, 859 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index 2990447f45e9..f103729b462f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -42,7 +42,7 @@ static int __blk_rq_unmap_user(struct bio *bio)
42 42
43static int __blk_rq_map_user(struct request_queue *q, struct request *rq, 43static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
44 struct rq_map_data *map_data, void __user *ubuf, 44 struct rq_map_data *map_data, void __user *ubuf,
45 unsigned int len, int null_mapped, gfp_t gfp_mask) 45 unsigned int len, gfp_t gfp_mask)
46{ 46{
47 unsigned long uaddr; 47 unsigned long uaddr;
48 struct bio *bio, *orig_bio; 48 struct bio *bio, *orig_bio;
@@ -63,7 +63,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
63 if (IS_ERR(bio)) 63 if (IS_ERR(bio))
64 return PTR_ERR(bio); 64 return PTR_ERR(bio);
65 65
66 if (null_mapped) 66 if (map_data && map_data->null_mapped)
67 bio->bi_flags |= (1 << BIO_NULL_MAPPED); 67 bio->bi_flags |= (1 << BIO_NULL_MAPPED);
68 68
69 orig_bio = bio; 69 orig_bio = bio;
@@ -114,17 +114,15 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
114{ 114{
115 unsigned long bytes_read = 0; 115 unsigned long bytes_read = 0;
116 struct bio *bio = NULL; 116 struct bio *bio = NULL;
117 int ret, null_mapped = 0; 117 int ret;
118 118
119 if (len > (q->max_hw_sectors << 9)) 119 if (len > (q->max_hw_sectors << 9))
120 return -EINVAL; 120 return -EINVAL;
121 if (!len) 121 if (!len)
122 return -EINVAL; 122 return -EINVAL;
123 if (!ubuf) { 123
124 if (!map_data || rq_data_dir(rq) != READ) 124 if (!ubuf && (!map_data || !map_data->null_mapped))
125 return -EINVAL; 125 return -EINVAL;
126 null_mapped = 1;
127 }
128 126
129 while (bytes_read != len) { 127 while (bytes_read != len) {
130 unsigned long map_len, end, start; 128 unsigned long map_len, end, start;
@@ -143,13 +141,16 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
143 map_len -= PAGE_SIZE; 141 map_len -= PAGE_SIZE;
144 142
145 ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len, 143 ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
146 null_mapped, gfp_mask); 144 gfp_mask);
147 if (ret < 0) 145 if (ret < 0)
148 goto unmap_rq; 146 goto unmap_rq;
149 if (!bio) 147 if (!bio)
150 bio = rq->bio; 148 bio = rq->bio;
151 bytes_read += ret; 149 bytes_read += ret;
152 ubuf += ret; 150 ubuf += ret;
151
152 if (map_data)
153 map_data->offset += ret;
153 } 154 }
154 155
155 if (!bio_flagged(bio, BIO_USER_MAPPED)) 156 if (!bio_flagged(bio, BIO_USER_MAPPED))
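The blk-map.c hunks above drop the separate null_mapped argument, check map_data->null_mapped instead, and advance map_data->offset as each chunk is mapped. Below is a small self-contained userspace C sketch of that flag-in-a-struct pattern; the struct and field names mirror the patch, but this is illustrative code, not the block layer implementation.

/*
 * Illustrative userspace sketch (not the block layer implementation) of
 * the pattern adopted above: the "null mapped" flag and the running
 * offset travel inside the optional map_data structure instead of being
 * passed as extra function arguments.
 */
#include <stdio.h>

struct map_data_sketch {
	int null_mapped;	/* no user buffer: pages allocated, nothing copied */
	unsigned long offset;	/* advanced as each chunk is mapped */
};

/* Map one chunk; returns the number of bytes consumed. */
static long map_one_chunk(struct map_data_sketch *map_data,
			  const void *ubuf, unsigned long len)
{
	if (map_data && map_data->null_mapped)
		printf("mapping %lu bytes with no user copy (null mapped)\n", len);
	else
		printf("mapping %lu bytes from user buffer %p\n", len, (void *)ubuf);
	return (long)len;
}

int main(void)
{
	struct map_data_sketch md = { .null_mapped = 1, .offset = 0 };
	const void *ubuf = NULL;	/* caller supplied no user buffer */
	const unsigned long total = 8192, chunk = 4096;
	unsigned long done = 0;

	/* Mirror of the new validity check: a NULL buffer is only
	 * acceptable when map_data marks the request as null mapped. */
	if (!ubuf && !md.null_mapped)
		return 1;

	while (done != total) {
		long ret = map_one_chunk(&md, ubuf, chunk);
		if (ret < 0)
			return 1;
		done += ret;
		md.offset += ret;	/* mirror of "map_data->offset += ret" */
	}
	printf("mapped %lu bytes, final offset %lu\n", done, md.offset);
	return 0;
}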
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b89f476cd0a9..c63817117c0a 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -308,10 +308,11 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
308{ 308{
309 int rc = 1; 309 int rc = 1;
310 310
311 dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
312 ioctl->ioc->name, ioctl->ioc->id));
313 if (ioctl == NULL) 311 if (ioctl == NULL)
314 return; 312 return;
313 dctlprintk(ioctl->ioc,
314 printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
315 ioctl->ioc->name, ioctl->ioc->id));
315 316
316 ioctl->wait_done = 0; 317 ioctl->wait_done = 0;
317 if (ioctl->reset & MPTCTL_RESET_OK) 318 if (ioctl->reset & MPTCTL_RESET_OK)
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 0736cff9d97a..3cf61ece71d7 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -119,7 +119,7 @@ enclosure_register(struct device *dev, const char *name, int components,
119 edev->edev.class = &enclosure_class; 119 edev->edev.class = &enclosure_class;
120 edev->edev.parent = get_device(dev); 120 edev->edev.parent = get_device(dev);
121 edev->cb = cb; 121 edev->cb = cb;
122 snprintf(edev->edev.bus_id, BUS_ID_SIZE, "%s", name); 122 dev_set_name(&edev->edev, name);
123 err = device_register(&edev->edev); 123 err = device_register(&edev->edev);
124 if (err) 124 if (err)
125 goto err; 125 goto err;
@@ -170,7 +170,7 @@ EXPORT_SYMBOL_GPL(enclosure_unregister);
170static void enclosure_link_name(struct enclosure_component *cdev, char *name) 170static void enclosure_link_name(struct enclosure_component *cdev, char *name)
171{ 171{
172 strcpy(name, "enclosure_device:"); 172 strcpy(name, "enclosure_device:");
173 strcat(name, cdev->cdev.bus_id); 173 strcat(name, dev_name(&cdev->cdev));
174} 174}
175 175
176static void enclosure_remove_links(struct enclosure_component *cdev) 176static void enclosure_remove_links(struct enclosure_component *cdev)
@@ -256,9 +256,9 @@ enclosure_component_register(struct enclosure_device *edev,
256 cdev = &ecomp->cdev; 256 cdev = &ecomp->cdev;
257 cdev->parent = get_device(&edev->edev); 257 cdev->parent = get_device(&edev->edev);
258 if (name) 258 if (name)
259 snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name); 259 dev_set_name(cdev, name);
260 else 260 else
261 snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number); 261 dev_set_name(cdev, "%u", number);
262 262
263 cdev->release = enclosure_component_release; 263 cdev->release = enclosure_component_release;
264 cdev->groups = enclosure_groups; 264 cdev->groups = enclosure_groups;
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 9e64b21ef637..c889d8458684 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -318,7 +318,7 @@ NCR_D700_probe(struct device *dev)
318 return -ENOMEM; 318 return -ENOMEM;
319 319
320 p->dev = dev; 320 p->dev = dev;
321 snprintf(p->name, sizeof(p->name), "D700(%s)", dev->bus_id); 321 snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
322 if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) { 322 if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
323 printk(KERN_ERR "D700: request_irq failed\n"); 323 printk(KERN_ERR "D700: request_irq failed\n");
324 kfree(p); 324 kfree(p);
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 07d572feceed..37dd47136fb1 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -169,10 +169,8 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
169 continue; 169 continue;
170 170
171 instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata)); 171 instance = scsi_register (tpnt, sizeof (struct WD33C93_hostdata));
172 if (instance == NULL) { 172 if (instance == NULL)
173 release_mem_region(address, 256); 173 goto release;
174 continue;
175 }
176 instance->base = ZTWO_VADDR(address); 174 instance->base = ZTWO_VADDR(address);
177 instance->irq = IRQ_AMIGA_PORTS; 175 instance->irq = IRQ_AMIGA_PORTS;
178 instance->unique_id = z->slotaddr; 176 instance->unique_id = z->slotaddr;
@@ -183,10 +181,18 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
183 HDATA(instance)->fast = 0; 181 HDATA(instance)->fast = 0;
184 HDATA(instance)->dma_mode = CTRL_DMA; 182 HDATA(instance)->dma_mode = CTRL_DMA;
185 wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10); 183 wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
186 request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI", 184 if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, "A2091 SCSI",
187 instance); 185 instance))
186 goto unregister;
188 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; 187 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN;
189 num_a2091++; 188 num_a2091++;
189 continue;
190
191unregister:
192 scsi_unregister(instance);
193 wd33c93_release();
194release:
195 release_mem_region(address, 256);
190 } 196 }
191 197
192 return num_a2091; 198 return num_a2091;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 2f602720193e..7507d8bc57a1 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2527,7 +2527,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
2527{ 2527{
2528 struct asc_board *boardp = shost_priv(s); 2528 struct asc_board *boardp = shost_priv(s);
2529 2529
2530 printk("Scsi_Host at addr 0x%p, device %s\n", s, boardp->dev->bus_id); 2530 printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
2531 printk(" host_busy %u, host_no %d, last_reset %d,\n", 2531 printk(" host_busy %u, host_no %d, last_reset %d,\n",
2532 s->host_busy, s->host_no, (unsigned)s->last_reset); 2532 s->host_busy, s->host_no, (unsigned)s->last_reset);
2533 2533
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index d4640ef6d44f..78eb86fc6276 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -189,7 +189,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
189 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); 189 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
190 /* send a hard reset */ 190 /* send a hard reset */
191 ASD_DPRINTK("sending %s reset to %s\n", 191 ASD_DPRINTK("sending %s reset to %s\n",
192 reset_type ? "hard" : "soft", phy->dev.bus_id); 192 reset_type ? "hard" : "soft", dev_name(&phy->dev));
193 res = sas_phy_reset(phy, reset_type); 193 res = sas_phy_reset(phy, reset_type);
194 if (res == TMF_RESP_FUNC_COMPLETE) { 194 if (res == TMF_RESP_FUNC_COMPLETE) {
195 /* wait for the maximum settle time */ 195 /* wait for the maximum settle time */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 1a41f04264f7..08f3a09d9233 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/skbuff.h> 13#include <linux/skbuff.h>
14#include <linux/scatterlist.h>
14 15
15/* from cxgb3 LLD */ 16/* from cxgb3 LLD */
16#include "common.h" 17#include "common.h"
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index ca7363752401..5d1bf7e3d245 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -329,12 +329,16 @@ int __init gvp11_detect(struct scsi_host_template *tpnt)
329 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 329 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
330 : WD33C93_FS_12_15); 330 : WD33C93_FS_12_15);
331 331
332 request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI", 332 if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, "GVP11 SCSI",
333 instance); 333 instance))
334 goto unregister;
334 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; 335 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
335 num_gvp11++; 336 num_gvp11++;
336 continue; 337 continue;
337 338
339unregister:
340 scsi_unregister(instance);
341 wd33c93_release();
338release: 342release:
339 release_mem_region(address, 256); 343 release_mem_region(address, 256);
340 } 344 }
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3fdbb13e80a8..aa670a1d1513 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -388,8 +388,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
388 shost->dma_boundary = 0xffffffff; 388 shost->dma_boundary = 0xffffffff;
389 389
390 device_initialize(&shost->shost_gendev); 390 device_initialize(&shost->shost_gendev);
391 snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d", 391 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
392 shost->host_no);
393#ifndef CONFIG_SYSFS_DEPRECATED 392#ifndef CONFIG_SYSFS_DEPRECATED
394 shost->shost_gendev.bus = &scsi_bus_type; 393 shost->shost_gendev.bus = &scsi_bus_type;
395#endif 394#endif
@@ -398,8 +397,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
398 device_initialize(&shost->shost_dev); 397 device_initialize(&shost->shost_dev);
399 shost->shost_dev.parent = &shost->shost_gendev; 398 shost->shost_dev.parent = &shost->shost_gendev;
400 shost->shost_dev.class = &shost_class; 399 shost->shost_dev.class = &shost_class;
401 snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d", 400 dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
402 shost->host_no);
403 shost->shost_dev.groups = scsi_sysfs_shost_attr_groups; 401 shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
404 402
405 shost->ehandler = kthread_run(scsi_error_handler, shost, 403 shost->ehandler = kthread_run(scsi_error_handler, shost,
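This hunk, like the enclosure, NCR_D700, advansys, aic94xx, ibmvfc, ipr, lasi700 and libsas hunks elsewhere in this merge, replaces direct writes into the fixed-size dev->bus_id buffer with dev_set_name()/dev_name(). The sketch below is a userspace analogue written under the assumption that the point of the conversion is a formatted, dynamically sized device name instead of a truncation-prone BUS_ID_SIZE array; it is not the kernel helper itself.

/*
 * Userspace analogue of the bus_id -> dev_set_name() conversion.
 * The real kernel helpers are dev_set_name(dev, fmt, ...) and
 * dev_name(dev); this sketch only illustrates the idea.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_device {
	char *name;	/* was: char bus_id[BUS_ID_SIZE], a 20-byte fixed buffer */
};

static int fake_dev_set_name(struct fake_device *dev, const char *fmt, ...)
{
	va_list ap;
	int len;
	char *buf;

	va_start(ap, fmt);
	len = vsnprintf(NULL, 0, fmt, ap);	/* measure first ... */
	va_end(ap);
	if (len < 0)
		return -1;

	buf = malloc(len + 1);
	if (!buf)
		return -1;

	va_start(ap, fmt);
	vsnprintf(buf, len + 1, fmt, ap);	/* ... then format exactly */
	va_end(ap);

	free(dev->name);
	dev->name = buf;
	return 0;
}

static const char *fake_dev_name(const struct fake_device *dev)
{
	return dev->name;
}

int main(void)
{
	struct fake_device host = { NULL };

	/* Mirrors dev_set_name(&shost->shost_gendev, "host%d", host_no). */
	fake_dev_set_name(&host, "host%d", 7);
	printf("device name: %s\n", fake_dev_name(&host));
	free(host.name);
	return 0;
}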
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 44f202f33101..ee0739b217b6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -101,7 +101,7 @@ static const struct {
101 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" }, 101 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
102 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" }, 102 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
103 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" }, 103 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
104 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" }, 104 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
105 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" }, 105 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
106 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" }, 106 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
107 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" }, 107 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
@@ -115,11 +115,11 @@ static const struct {
115 115
116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, 116 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
117 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" }, 117 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
118 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" }, 118 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
119 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" }, 119 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
120 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" }, 120 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
121 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" }, 121 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
122 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" }, 122 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
123 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" }, 123 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
124 { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" }, 124 { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
125 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" }, 125 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
@@ -1145,10 +1145,10 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1145 login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs); 1145 login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
1146 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); 1146 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1147 strncpy(login_info->device_name, 1147 strncpy(login_info->device_name,
1148 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME); 1148 dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1149 1149
1150 location = of_get_property(of_node, "ibm,loc-code", NULL); 1150 location = of_get_property(of_node, "ibm,loc-code", NULL);
1151 location = location ? location : vhost->dev->bus_id; 1151 location = location ? location : dev_name(vhost->dev);
1152 strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME); 1152 strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1153} 1153}
1154 1154
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 868d35ea01bb..5c541f7850f9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -89,6 +89,7 @@ static int max_id = 64;
89static int max_channel = 3; 89static int max_channel = 3;
90static int init_timeout = 5; 90static int init_timeout = 5;
91static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; 91static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
92static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
92 93
93static struct scsi_transport_template *ibmvscsi_transport_template; 94static struct scsi_transport_template *ibmvscsi_transport_template;
94 95
@@ -1633,7 +1634,7 @@ static struct scsi_host_template driver_template = {
1633static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev) 1634static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
1634{ 1635{
1635 /* iu_storage data allocated in initialize_event_pool */ 1636 /* iu_storage data allocated in initialize_event_pool */
1636 unsigned long desired_io = max_requests * sizeof(union viosrp_iu); 1637 unsigned long desired_io = max_events * sizeof(union viosrp_iu);
1637 1638
1638 /* add io space for sg data */ 1639 /* add io space for sg data */
1639 desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 * 1640 desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
@@ -1657,7 +1658,6 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1657 1658
1658 vdev->dev.driver_data = NULL; 1659 vdev->dev.driver_data = NULL;
1659 1660
1660 driver_template.can_queue = max_requests - 2;
1661 host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); 1661 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1662 if (!host) { 1662 if (!host) {
1663 dev_err(&vdev->dev, "couldn't allocate host data\n"); 1663 dev_err(&vdev->dev, "couldn't allocate host data\n");
@@ -1673,12 +1673,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1673 atomic_set(&hostdata->request_limit, -1); 1673 atomic_set(&hostdata->request_limit, -1);
1674 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; 1674 hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
1675 1675
1676 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_requests); 1676 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
1677 if (rc != 0 && rc != H_RESOURCE) { 1677 if (rc != 0 && rc != H_RESOURCE) {
1678 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); 1678 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
1679 goto init_crq_failed; 1679 goto init_crq_failed;
1680 } 1680 }
1681 if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) { 1681 if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
1682 dev_err(&vdev->dev, "couldn't initialize event pool\n"); 1682 dev_err(&vdev->dev, "couldn't initialize event pool\n");
1683 goto init_pool_failed; 1683 goto init_pool_failed;
1684 } 1684 }
@@ -1730,7 +1730,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1730 add_host_failed: 1730 add_host_failed:
1731 release_event_pool(&hostdata->pool, hostdata); 1731 release_event_pool(&hostdata->pool, hostdata);
1732 init_pool_failed: 1732 init_pool_failed:
1733 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_requests); 1733 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
1734 init_crq_failed: 1734 init_crq_failed:
1735 scsi_host_put(host); 1735 scsi_host_put(host);
1736 scsi_host_alloc_failed: 1736 scsi_host_alloc_failed:
@@ -1742,7 +1742,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
1742 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; 1742 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
1743 release_event_pool(&hostdata->pool, hostdata); 1743 release_event_pool(&hostdata->pool, hostdata);
1744 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 1744 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
1745 max_requests); 1745 max_events);
1746 1746
1747 srp_remove_host(hostdata->host); 1747 srp_remove_host(hostdata->host);
1748 scsi_remove_host(hostdata->host); 1748 scsi_remove_host(hostdata->host);
@@ -1779,6 +1779,10 @@ int __init ibmvscsi_module_init(void)
1779{ 1779{
1780 int ret; 1780 int ret;
1781 1781
1782 /* Ensure we have two requests to do error recovery */
1783 driver_template.can_queue = max_requests;
1784 max_events = max_requests + 2;
1785
1782 if (firmware_has_feature(FW_FEATURE_ISERIES)) 1786 if (firmware_has_feature(FW_FEATURE_ISERIES))
1783 ibmvscsi_ops = &iseriesvscsi_ops; 1787 ibmvscsi_ops = &iseriesvscsi_ops;
1784 else if (firmware_has_feature(FW_FEATURE_VIO)) 1788 else if (firmware_has_feature(FW_FEATURE_VIO))
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0edfb1fa63ce..841f460edbc4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2184,7 +2184,7 @@ static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2184 sizeof(struct ipr_dump_entry_header); 2184 sizeof(struct ipr_dump_entry_header);
2185 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; 2185 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2186 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; 2186 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2187 strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id); 2187 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2188 driver_dump->hdr.num_entries++; 2188 driver_dump->hdr.num_entries++;
2189} 2189}
2190 2190
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 59459141b437..8f872f816fe4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1272,7 +1272,7 @@ struct ipr_dump_entry_header {
1272 1272
1273struct ipr_dump_location_entry { 1273struct ipr_dump_location_entry {
1274 struct ipr_dump_entry_header hdr; 1274 struct ipr_dump_entry_header hdr;
1275 u8 location[BUS_ID_SIZE]; 1275 u8 location[20];
1276}__attribute__((packed)); 1276}__attribute__((packed));
1277 1277
1278struct ipr_dump_trace_entry { 1278struct ipr_dump_trace_entry {
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 3126824da36d..4a4e6954ec79 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -103,8 +103,7 @@ lasi700_probe(struct parisc_device *dev)
103 103
104 hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); 104 hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
105 if (!hostdata) { 105 if (!hostdata) {
106 printk(KERN_ERR "%s: Failed to allocate host data\n", 106 dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
107 dev->dev.bus_id);
108 return -ENOMEM; 107 return -ENOMEM;
109 } 108 }
110 109
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 709a6f75ca9d..facc5bfcf7db 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -169,7 +169,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
169 if (res) { 169 if (res) {
170 printk("sas: driver on pcidev %s cannot handle " 170 printk("sas: driver on pcidev %s cannot handle "
171 "device %llx, error:%d\n", 171 "device %llx, error:%d\n",
172 sas_ha->dev->bus_id, 172 dev_name(sas_ha->dev),
173 SAS_ADDR(dev->sas_addr), res); 173 SAS_ADDR(dev->sas_addr), res);
174 } 174 }
175 } 175 }
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index bf34a236f946..c17c25030f1c 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -56,7 +56,7 @@ void sas_dprint_phye(int phyid, enum phy_event pe)
56 56
57void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he) 57void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
58{ 58{
59 SAS_DPRINTK("ha %s: %s event\n", sas_ha->dev->bus_id, 59 SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev),
60 sas_hae_str[he]); 60 sas_hae_str[he]);
61} 61}
62 62
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 16f93123271e..d110a366c48a 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -199,8 +199,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
199 break; 199 break;
200 200
201 case SMP_DISCOVER: 201 case SMP_DISCOVER:
202 req->data_len =- 16; 202 req->data_len -= 16;
203 if (req->data_len < 0) { 203 if ((int)req->data_len < 0) {
204 req->data_len = 0; 204 req->data_len = 0;
205 error = -EINVAL; 205 error = -EINVAL;
206 goto out; 206 goto out;
@@ -215,8 +215,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
215 break; 215 break;
216 216
217 case SMP_REPORT_PHY_SATA: 217 case SMP_REPORT_PHY_SATA:
218 req->data_len =- 16; 218 req->data_len -= 16;
219 if (req->data_len < 0) { 219 if ((int)req->data_len < 0) {
220 req->data_len = 0; 220 req->data_len = 0;
221 error = -EINVAL; 221 error = -EINVAL;
222 goto out; 222 goto out;
@@ -238,8 +238,8 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
238 break; 238 break;
239 239
240 case SMP_PHY_CONTROL: 240 case SMP_PHY_CONTROL:
241 req->data_len =- 44; 241 req->data_len -= 44;
242 if (req->data_len < 0) { 242 if ((int)req->data_len < 0) {
243 req->data_len = 0; 243 req->data_len = 0;
244 error = -EINVAL; 244 error = -EINVAL;
245 goto out; 245 goto out;
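The sas_host_smp.c hunks above fix the two related bugs named in the "libsas: fix test for negative unsigned and typos" commit: "req->data_len =- 16" parses as an assignment of -16 rather than a subtraction, and because data_len is unsigned the follow-up "req->data_len < 0" test could never fire, hence the added (int) cast. A minimal, self-contained C demonstration (not libsas code):

/*
 * Demonstration of the two bugs fixed above: "x =- 16" assigns -16,
 * and an unsigned value compared with "< 0" is always false, so
 * underflow has to be detected through a signed cast.
 */
#include <stdio.h>

int main(void)
{
	unsigned int data_len;

	data_len = 100;
	data_len =- 16;			/* typo: assigns (unsigned)-16, not 84 */
	printf("after '=- 16': %u\n", data_len);

	data_len = 8;
	data_len -= 16;			/* intended subtraction; wraps around */
	printf("after '-= 16': %u\n", data_len);

	if (data_len < 0)		/* always false for an unsigned type */
		printf("never printed\n");
	if ((int)data_len < 0)		/* the fix: detect underflow via cast */
		printf("underflow detected, rejecting request\n");

	return 0;
}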
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 139935a121b4..e6ac59c023f1 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -113,7 +113,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
113 sas_port_add_phy(port->port, phy->phy); 113 sas_port_add_phy(port->port, phy->phy);
114 114
115 SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n", 115 SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
116 phy->phy->dev.bus_id,port->port->dev.bus_id, 116 dev_name(&phy->phy->dev), dev_name(&port->port->dev),
117 port->phy_mask, 117 port->phy_mask,
118 SAS_ADDR(port->attached_sas_addr)); 118 SAS_ADDR(port->attached_sas_addr));
119 119
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4c77038c8f1c..6c867311cef1 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1795,12 +1795,13 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
1795int 1795int
1796lpfc_online(struct lpfc_hba *phba) 1796lpfc_online(struct lpfc_hba *phba)
1797{ 1797{
1798 struct lpfc_vport *vport = phba->pport; 1798 struct lpfc_vport *vport;
1799 struct lpfc_vport **vports; 1799 struct lpfc_vport **vports;
1800 int i; 1800 int i;
1801 1801
1802 if (!phba) 1802 if (!phba)
1803 return 0; 1803 return 0;
1804 vport = phba->pport;
1804 1805
1805 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 1806 if (!(vport->fc_flag & FC_OFFLINE_MODE))
1806 return 0; 1807 return 0;
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index 1dd70d7a4947..23e5a876bb10 100644
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -2959,7 +2959,7 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
2959 2959
2960 /* enable auto port detection */ 2960 /* enable auto port detection */
2961 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); 2961 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2962 msleep(100); 2962 msleep(1100);
2963 /* init and reset phys */ 2963 /* init and reset phys */
2964 for (i = 0; i < mvi->chip->n_phy; i++) { 2964 for (i = 0; i < mvi->chip->n_phy; i++) {
2965 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); 2965 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 165ff884f48e..67cde0138061 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -114,7 +114,7 @@ static int aha152x_probe(struct pcmcia_device *link)
114 link->io.NumPorts1 = 0x20; 114 link->io.NumPorts1 = 0x20;
115 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 115 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
116 link->io.IOAddrLines = 10; 116 link->io.IOAddrLines = 10;
117 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 117 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
118 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 118 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
119 link->conf.Attributes = CONF_ENABLE_IRQ; 119 link->conf.Attributes = CONF_ENABLE_IRQ;
120 link->conf.IntType = INT_MEMORY_AND_IO; 120 link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index cd53627cc761..c7acef50d5da 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -303,7 +303,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
303 else if (start == (ha->flt_region_boot * 4) || 303 else if (start == (ha->flt_region_boot * 4) ||
304 start == (ha->flt_region_fw * 4)) 304 start == (ha->flt_region_fw * 4))
305 valid = 1; 305 valid = 1;
306 else if (IS_QLA25XX(ha) && 306 else if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) &&
307 start == (ha->flt_region_vpd_nvram * 4)) 307 start == (ha->flt_region_vpd_nvram * 4))
308 valid = 1; 308 valid = 1;
309 if (!valid) { 309 if (!valid) {
@@ -815,6 +815,21 @@ qla2x00_total_isp_aborts_show(struct device *dev,
815 ha->qla_stats.total_isp_aborts); 815 ha->qla_stats.total_isp_aborts);
816} 816}
817 817
818static ssize_t
819qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
820 char *buf)
821{
822 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
823 struct qla_hw_data *ha = vha->hw;
824
825 if (!IS_QLA81XX(ha))
826 return snprintf(buf, PAGE_SIZE, "\n");
827
828 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x (%x)\n",
829 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
830 ha->mpi_version[3], ha->mpi_capabilities);
831}
832
818static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 833static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
819static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 834static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
820static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 835static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -839,6 +854,7 @@ static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
839 NULL); 854 NULL);
840static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, 855static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
841 NULL); 856 NULL);
857static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
842 858
843struct device_attribute *qla2x00_host_attrs[] = { 859struct device_attribute *qla2x00_host_attrs[] = {
844 &dev_attr_driver_version, 860 &dev_attr_driver_version,
@@ -858,6 +874,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
858 &dev_attr_optrom_fcode_version, 874 &dev_attr_optrom_fcode_version,
859 &dev_attr_optrom_fw_version, 875 &dev_attr_optrom_fw_version,
860 &dev_attr_total_isp_aborts, 876 &dev_attr_total_isp_aborts,
877 &dev_attr_mpi_version,
861 NULL, 878 NULL,
862}; 879};
863 880
@@ -892,6 +909,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
892 case PORT_SPEED_8GB: 909 case PORT_SPEED_8GB:
893 speed = FC_PORTSPEED_8GBIT; 910 speed = FC_PORTSPEED_8GBIT;
894 break; 911 break;
912 case PORT_SPEED_10GB:
913 speed = FC_PORTSPEED_10GBIT;
914 break;
895 } 915 }
896 fc_host_speed(shost) = speed; 916 fc_host_speed(shost) = speed;
897} 917}
@@ -1382,7 +1402,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1382 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; 1402 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1383 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; 1403 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1384 1404
1385 if (IS_QLA25XX(ha)) 1405 if (IS_QLA81XX(ha))
1406 speed = FC_PORTSPEED_10GBIT;
1407 else if (IS_QLA25XX(ha))
1386 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 1408 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
1387 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 1409 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1388 else if (IS_QLA24XX_TYPE(ha)) 1410 else if (IS_QLA24XX_TYPE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 1cf77772623b..34760f8d4f17 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -310,6 +310,76 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
310 *buf++ = htons(RD_REG_WORD(dmp_reg++)); 310 *buf++ = htons(RD_REG_WORD(dmp_reg++));
311} 311}
312 312
313static inline void *
314qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
315{
316 if (!ha->eft)
317 return ptr;
318
319 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
320 return ptr + ntohl(ha->fw_dump->eft_size);
321}
322
323static inline void *
324qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
325{
326 uint32_t cnt;
327 uint32_t *iter_reg;
328 struct qla2xxx_fce_chain *fcec = ptr;
329
330 if (!ha->fce)
331 return ptr;
332
333 *last_chain = &fcec->type;
334 fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
335 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
336 fce_calc_size(ha->fce_bufs));
337 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
338 fcec->addr_l = htonl(LSD(ha->fce_dma));
339 fcec->addr_h = htonl(MSD(ha->fce_dma));
340
341 iter_reg = fcec->eregs;
342 for (cnt = 0; cnt < 8; cnt++)
343 *iter_reg++ = htonl(ha->fce_mb[cnt]);
344
345 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
346
347 return iter_reg;
348}
349
350static inline void *
351qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
352{
353 uint32_t cnt, que_idx;
354 uint8_t req_cnt, rsp_cnt, que_cnt;
355 struct qla2xxx_mq_chain *mq = ptr;
356 struct device_reg_25xxmq __iomem *reg;
357
358 if (!ha->mqenable)
359 return ptr;
360
361 mq = ptr;
362 *last_chain = &mq->type;
363 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
364 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
365
366 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
367 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
368 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
369 mq->count = htonl(que_cnt);
370 for (cnt = 0; cnt < que_cnt; cnt++) {
371 reg = (struct device_reg_25xxmq *) ((void *)
372 ha->mqiobase + cnt * QLA_QUE_PAGE);
373 que_idx = cnt * 4;
374 mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
375 mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
376 mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
377 mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
378 }
379
380 return ptr + sizeof(struct qla2xxx_mq_chain);
381}
382
313/** 383/**
314 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. 384 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
315 * @ha: HA context 385 * @ha: HA context
@@ -913,8 +983,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
913 goto qla24xx_fw_dump_failed_0; 983 goto qla24xx_fw_dump_failed_0;
914 984
915 nxt = qla2xxx_copy_queues(ha, nxt); 985 nxt = qla2xxx_copy_queues(ha, nxt);
916 if (ha->eft) 986
917 memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size)); 987 qla24xx_copy_eft(ha, nxt);
918 988
919qla24xx_fw_dump_failed_0: 989qla24xx_fw_dump_failed_0:
920 if (rval != QLA_SUCCESS) { 990 if (rval != QLA_SUCCESS) {
@@ -942,19 +1012,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
942 uint32_t risc_address; 1012 uint32_t risc_address;
943 struct qla_hw_data *ha = vha->hw; 1013 struct qla_hw_data *ha = vha->hw;
944 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1014 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
945 struct device_reg_25xxmq __iomem *reg25;
946 uint32_t __iomem *dmp_reg; 1015 uint32_t __iomem *dmp_reg;
947 uint32_t *iter_reg; 1016 uint32_t *iter_reg;
948 uint16_t __iomem *mbx_reg; 1017 uint16_t __iomem *mbx_reg;
949 unsigned long flags; 1018 unsigned long flags;
950 struct qla25xx_fw_dump *fw; 1019 struct qla25xx_fw_dump *fw;
951 uint32_t ext_mem_cnt; 1020 uint32_t ext_mem_cnt;
952 void *nxt; 1021 void *nxt, *nxt_chain;
953 struct qla2xxx_fce_chain *fcec; 1022 uint32_t *last_chain = NULL;
954 struct qla2xxx_mq_chain *mq = NULL;
955 uint32_t qreg_size;
956 uint8_t req_cnt, rsp_cnt, que_cnt;
957 uint32_t que_idx;
958 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 1023 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
959 1024
960 risc_address = ext_mem_cnt = 0; 1025 risc_address = ext_mem_cnt = 0;
@@ -1001,28 +1066,6 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1001 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1066 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1002 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); 1067 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1003 1068
1004 /* Multi queue registers */
1005 if (ha->mqenable) {
1006 qreg_size = sizeof(struct qla2xxx_mq_chain);
1007 mq = kzalloc(qreg_size, GFP_KERNEL);
1008 if (!mq)
1009 goto qla25xx_fw_dump_failed_0;
1010 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
1011 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
1012 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
1013 mq->count = htonl(que_cnt);
1014 mq->chain_size = htonl(qreg_size);
1015 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
1016 for (cnt = 0; cnt < que_cnt; cnt++) {
1017 reg25 = (struct device_reg_25xxmq *) ((void *)
1018 ha->mqiobase + cnt * QLA_QUE_PAGE);
1019 que_idx = cnt * 4;
1020 mq->qregs[que_idx] = htonl(reg25->req_q_in);
1021 mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
1022 mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
1023 mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
1024 }
1025 }
1026 WRT_REG_DWORD(&reg->iobase_window, 0x00); 1069 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1027 RD_REG_DWORD(&reg->iobase_window); 1070 RD_REG_DWORD(&reg->iobase_window);
1028 1071
@@ -1240,6 +1283,10 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1240 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg); 1283 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1241 qla24xx_read_window(reg, 0x6F00, 16, iter_reg); 1284 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1242 1285
1286 /* Multi queue registers */
1287 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1288 &last_chain);
1289
1243 rval = qla24xx_soft_reset(ha); 1290 rval = qla24xx_soft_reset(ha);
1244 if (rval != QLA_SUCCESS) 1291 if (rval != QLA_SUCCESS)
1245 goto qla25xx_fw_dump_failed_0; 1292 goto qla25xx_fw_dump_failed_0;
@@ -1249,39 +1296,341 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1249 if (rval != QLA_SUCCESS) 1296 if (rval != QLA_SUCCESS)
1250 goto qla25xx_fw_dump_failed_0; 1297 goto qla25xx_fw_dump_failed_0;
1251 1298
1252 /* Fibre Channel Trace Buffer. */
1253 nxt = qla2xxx_copy_queues(ha, nxt); 1299 nxt = qla2xxx_copy_queues(ha, nxt);
1254 if (ha->eft)
1255 memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
1256 1300
1257 /* Fibre Channel Event Buffer. */ 1301 nxt = qla24xx_copy_eft(ha, nxt);
1258 if (!ha->fce) 1302
1259 goto qla25xx_fw_dump_failed_0; 1303 /* Chain entries -- started with MQ. */
1304 qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1305 if (last_chain) {
1306 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1307 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1308 }
1260 1309
1261 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1310qla25xx_fw_dump_failed_0:
1311 if (rval != QLA_SUCCESS) {
1312 qla_printk(KERN_WARNING, ha,
1313 "Failed to dump firmware (%x)!!!\n", rval);
1314 ha->fw_dumped = 0;
1262 1315
1263 if (ha->mqenable) {
1264 nxt = nxt + ntohl(ha->fw_dump->eft_size);
1265 memcpy(nxt, mq, qreg_size);
1266 kfree(mq);
1267 fcec = nxt + qreg_size;
1268 } else { 1316 } else {
1269 fcec = nxt + ntohl(ha->fw_dump->eft_size); 1317 qla_printk(KERN_INFO, ha,
1318 "Firmware dump saved to temp buffer (%ld/%p).\n",
1319 base_vha->host_no, ha->fw_dump);
1320 ha->fw_dumped = 1;
1270 } 1321 }
1271 fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
1272 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
1273 fce_calc_size(ha->fce_bufs));
1274 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
1275 fcec->addr_l = htonl(LSD(ha->fce_dma));
1276 fcec->addr_h = htonl(MSD(ha->fce_dma));
1277 1322
1278 iter_reg = fcec->eregs; 1323qla25xx_fw_dump_failed:
1279 for (cnt = 0; cnt < 8; cnt++) 1324 if (!hardware_locked)
1280 *iter_reg++ = htonl(ha->fce_mb[cnt]); 1325 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1326}
1281 1327
1282 memcpy(iter_reg, ha->fce, ntohl(fcec->size)); 1328void
1329qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1330{
1331 int rval;
1332 uint32_t cnt;
1333 uint32_t risc_address;
1334 struct qla_hw_data *ha = vha->hw;
1335 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1336 uint32_t __iomem *dmp_reg;
1337 uint32_t *iter_reg;
1338 uint16_t __iomem *mbx_reg;
1339 unsigned long flags;
1340 struct qla81xx_fw_dump *fw;
1341 uint32_t ext_mem_cnt;
1342 void *nxt, *nxt_chain;
1343 uint32_t *last_chain = NULL;
1344 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1283 1345
1284qla25xx_fw_dump_failed_0: 1346 risc_address = ext_mem_cnt = 0;
1347 flags = 0;
1348
1349 if (!hardware_locked)
1350 spin_lock_irqsave(&ha->hardware_lock, flags);
1351
1352 if (!ha->fw_dump) {
1353 qla_printk(KERN_WARNING, ha,
1354 "No buffer available for dump!!!\n");
1355 goto qla81xx_fw_dump_failed;
1356 }
1357
1358 if (ha->fw_dumped) {
1359 qla_printk(KERN_WARNING, ha,
1360 "Firmware has been previously dumped (%p) -- ignoring "
1361 "request...\n", ha->fw_dump);
1362 goto qla81xx_fw_dump_failed;
1363 }
1364 fw = &ha->fw_dump->isp.isp81;
1365 qla2xxx_prep_dump(ha, ha->fw_dump);
1366
1367 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1368
1369 /* Pause RISC. */
1370 rval = qla24xx_pause_risc(reg);
1371 if (rval != QLA_SUCCESS)
1372 goto qla81xx_fw_dump_failed_0;
1373
1374 /* Host/Risc registers. */
1375 iter_reg = fw->host_risc_reg;
1376 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1377 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1378
1379 /* PCIe registers. */
1380 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1381 RD_REG_DWORD(&reg->iobase_addr);
1382 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1383 dmp_reg = &reg->iobase_c4;
1384 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
1385 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
1386 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1387 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1388
1389 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1390 RD_REG_DWORD(&reg->iobase_window);
1391
1392 /* Host interface registers. */
1393 dmp_reg = &reg->flash_addr;
1394 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1395 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1396
1397 /* Disable interrupts. */
1398 WRT_REG_DWORD(&reg->ictrl, 0);
1399 RD_REG_DWORD(&reg->ictrl);
1400
1401 /* Shadow registers. */
1402 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1403 RD_REG_DWORD(&reg->iobase_addr);
1404 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1405 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1406
1407 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1408 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1409
1410 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1411 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1412
1413 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1414 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1415
1416 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1417 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1418
1419 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1420 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1421
1422 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1423 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1424
1425 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1426 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1427
1428 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1429 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1430
1431 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1432 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1433
1434 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1435 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1436
1437 /* RISC I/O register. */
1438 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1439 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1440
1441 /* Mailbox registers. */
1442 mbx_reg = &reg->mailbox0;
1443 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1444 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1445
1446 /* Transfer sequence registers. */
1447 iter_reg = fw->xseq_gp_reg;
1448 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1449 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1450 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1451 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1452 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1453 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1454 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1455 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1456
1457 iter_reg = fw->xseq_0_reg;
1458 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1459 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1460 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1461
1462 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1463
1464 /* Receive sequence registers. */
1465 iter_reg = fw->rseq_gp_reg;
1466 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1467 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1468 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1469 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1470 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1471 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1472 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1473 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1474
1475 iter_reg = fw->rseq_0_reg;
1476 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1477 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1478
1479 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1480 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1481
1482 /* Auxiliary sequence registers. */
1483 iter_reg = fw->aseq_gp_reg;
1484 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1485 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1486 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1487 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1488 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1489 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1490 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1491 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1492
1493 iter_reg = fw->aseq_0_reg;
1494 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1495 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1496
1497 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1498 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1499
1500 /* Command DMA registers. */
1501 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1502
1503 /* Queues. */
1504 iter_reg = fw->req0_dma_reg;
1505 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1506 dmp_reg = &reg->iobase_q;
1507 for (cnt = 0; cnt < 7; cnt++)
1508 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1509
1510 iter_reg = fw->resp0_dma_reg;
1511 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1512 dmp_reg = &reg->iobase_q;
1513 for (cnt = 0; cnt < 7; cnt++)
1514 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1515
1516 iter_reg = fw->req1_dma_reg;
1517 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1518 dmp_reg = &reg->iobase_q;
1519 for (cnt = 0; cnt < 7; cnt++)
1520 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1521
1522 /* Transmit DMA registers. */
1523 iter_reg = fw->xmt0_dma_reg;
1524 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1525 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1526
1527 iter_reg = fw->xmt1_dma_reg;
1528 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1529 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1530
1531 iter_reg = fw->xmt2_dma_reg;
1532 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1533 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1534
1535 iter_reg = fw->xmt3_dma_reg;
1536 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1537 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1538
1539 iter_reg = fw->xmt4_dma_reg;
1540 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1541 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1542
1543 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1544
1545 /* Receive DMA registers. */
1546 iter_reg = fw->rcvt0_data_dma_reg;
1547 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1548 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1549
1550 iter_reg = fw->rcvt1_data_dma_reg;
1551 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1552 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1553
1554 /* RISC registers. */
1555 iter_reg = fw->risc_gp_reg;
1556 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1557 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1558 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1559 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1560 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1561 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1562 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1563 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1564
1565 /* Local memory controller registers. */
1566 iter_reg = fw->lmc_reg;
1567 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1568 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1569 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1570 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1571 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1572 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1573 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1574 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1575
1576 /* Fibre Protocol Module registers. */
1577 iter_reg = fw->fpm_hdw_reg;
1578 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1579 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1580 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1581 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1582 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1583 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1584 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1585 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1586 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1587 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1588 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1589 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1590 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1591 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1592
1593 /* Frame Buffer registers. */
1594 iter_reg = fw->fb_hdw_reg;
1595 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1596 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1597 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1598 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1599 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1600 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1601 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1602 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1603 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1604 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1605 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1606 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1607 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1608
1609 /* Multi queue registers */
1610 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1611 &last_chain);
1612
1613 rval = qla24xx_soft_reset(ha);
1614 if (rval != QLA_SUCCESS)
1615 goto qla81xx_fw_dump_failed_0;
1616
1617 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1618 &nxt);
1619 if (rval != QLA_SUCCESS)
1620 goto qla81xx_fw_dump_failed_0;
1621
1622 nxt = qla2xxx_copy_queues(ha, nxt);
1623
1624 nxt = qla24xx_copy_eft(ha, nxt);
1625
1626 /* Chain entries -- started with MQ. */
1627 qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1628 if (last_chain) {
1629 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1630 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1631 }
1632
1633qla81xx_fw_dump_failed_0:
1285 if (rval != QLA_SUCCESS) { 1634 if (rval != QLA_SUCCESS) {
1286 qla_printk(KERN_WARNING, ha, 1635 qla_printk(KERN_WARNING, ha,
1287 "Failed to dump firmware (%x)!!!\n", rval); 1636 "Failed to dump firmware (%x)!!!\n", rval);
@@ -1294,10 +1643,11 @@ qla25xx_fw_dump_failed_0:
1294 ha->fw_dumped = 1; 1643 ha->fw_dumped = 1;
1295 } 1644 }
1296 1645
1297qla25xx_fw_dump_failed: 1646qla81xx_fw_dump_failed:
1298 if (!hardware_locked) 1647 if (!hardware_locked)
1299 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1648 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1300} 1649}
1650
1301/****************************************************************************/ 1651/****************************************************************************/
1302/* Driver Debug Functions. */ 1652/* Driver Debug Functions. */
1303/****************************************************************************/ 1653/****************************************************************************/
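
The qla81xx_fw_dump() additions above lean on a single idiom throughout: point a window-select register at a 16-dword bank, copy the bank into the dump buffer, and hand back the advanced pointer so the next bank chains on (the iter_reg passed through qla24xx_read_window()). Below is a minimal, self-contained C sketch of that pattern; the struct layout and the demo helpers are stand-ins for illustration, not the driver's real device_reg_24xx definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in register block: a window-select register plus the 16-dword view. */
    struct demo_reg_block {
        uint32_t iobase_addr;       /* selects which bank the window shows */
        uint32_t iobase_window[16]; /* the 16 dwords currently mapped in   */
    };

    /*
     * Select the bank at 'iobase', copy 'count' dwords into 'buf', and return
     * the advanced buffer pointer -- the same iter_reg chaining used above.
     */
    static uint32_t *demo_read_window(struct demo_reg_block *reg, uint32_t iobase,
        uint32_t count, uint32_t *buf)
    {
        volatile uint32_t *src = reg->iobase_window;

        reg->iobase_addr = iobase;      /* on real hardware this is an MMIO write */
        while (count--)
            *buf++ = *src++;            /* dword-by-dword copy into the dump      */
        return buf;
    }

    int main(void)
    {
        struct demo_reg_block reg = { 0 };
        uint32_t dump[32], *iter = dump;
        unsigned int i;

        for (i = 0; i < 16; i++)
            reg.iobase_window[i] = 0x7600 + i;   /* fake register contents */

        /* Two chained 16-dword banks, as in the xmt/rcvt DMA register dumps. */
        iter = demo_read_window(&reg, 0x7620, 16, iter);
        iter = demo_read_window(&reg, 0x7630, 16, iter);

        printf("captured %zu dwords\n", (size_t)(iter - dump));
        return 0;
    }
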
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index c1794a70a45f..f660dd70b72e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -251,6 +251,45 @@ struct qla25xx_fw_dump {
251 uint32_t ext_mem[1]; 251 uint32_t ext_mem[1];
252}; 252};
253 253
254struct qla81xx_fw_dump {
255 uint32_t host_status;
256 uint32_t host_risc_reg[32];
257 uint32_t pcie_regs[4];
258 uint32_t host_reg[32];
259 uint32_t shadow_reg[11];
260 uint32_t risc_io_reg;
261 uint16_t mailbox_reg[32];
262 uint32_t xseq_gp_reg[128];
263 uint32_t xseq_0_reg[48];
264 uint32_t xseq_1_reg[16];
265 uint32_t rseq_gp_reg[128];
266 uint32_t rseq_0_reg[32];
267 uint32_t rseq_1_reg[16];
268 uint32_t rseq_2_reg[16];
269 uint32_t aseq_gp_reg[128];
270 uint32_t aseq_0_reg[32];
271 uint32_t aseq_1_reg[16];
272 uint32_t aseq_2_reg[16];
273 uint32_t cmd_dma_reg[16];
274 uint32_t req0_dma_reg[15];
275 uint32_t resp0_dma_reg[15];
276 uint32_t req1_dma_reg[15];
277 uint32_t xmt0_dma_reg[32];
278 uint32_t xmt1_dma_reg[32];
279 uint32_t xmt2_dma_reg[32];
280 uint32_t xmt3_dma_reg[32];
281 uint32_t xmt4_dma_reg[32];
282 uint32_t xmt_data_dma_reg[16];
283 uint32_t rcvt0_data_dma_reg[32];
284 uint32_t rcvt1_data_dma_reg[32];
285 uint32_t risc_gp_reg[128];
286 uint32_t lmc_reg[128];
287 uint32_t fpm_hdw_reg[224];
288 uint32_t fb_hdw_reg[208];
289 uint32_t code_ram[0x2000];
290 uint32_t ext_mem[1];
291};
292
254#define EFT_NUM_BUFFERS 4 293#define EFT_NUM_BUFFERS 4
255#define EFT_BYTES_PER_BUFFER 0x4000 294#define EFT_BYTES_PER_BUFFER 0x4000
256#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS)) 295#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -313,5 +352,6 @@ struct qla2xxx_fw_dump {
313 struct qla2300_fw_dump isp23; 352 struct qla2300_fw_dump isp23;
314 struct qla24xx_fw_dump isp24; 353 struct qla24xx_fw_dump isp24;
315 struct qla25xx_fw_dump isp25; 354 struct qla25xx_fw_dump isp25;
355 struct qla81xx_fw_dump isp81;
316 } isp; 356 } isp;
317}; 357};
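
As with the 24xx/25xx templates above, everything up to ext_mem in the new qla81xx_fw_dump struct is the fixed register snapshot, and the external-memory image is appended behind it, so the fixed portion of the allocation is simply offsetof(..., ext_mem). A short illustration of that sizing arithmetic, using a trimmed-down stand-in struct and an assumed firmware memory size:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Trimmed stand-in for the dump template: fixed registers + open-ended tail. */
    struct demo_fw_dump {
        uint32_t host_status;
        uint32_t host_risc_reg[32];
        uint32_t mailbox_reg[32];
        uint32_t code_ram[0x2000];
        uint32_t ext_mem[1];        /* flexible tail, sized at allocation time */
    };

    int main(void)
    {
        /* Assumed firmware memory size in dwords, just for the arithmetic. */
        uint32_t fw_memory_dwords = 0x20000;

        size_t fixed_size = offsetof(struct demo_fw_dump, ext_mem);
        size_t mem_size   = (size_t)fw_memory_dwords * sizeof(uint32_t);

        printf("fixed area: %zu bytes, external memory: %zu bytes, total: %zu\n",
            fixed_size, mem_size, fixed_size + mem_size);
        return 0;
    }
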
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a29c95204975..023ee77fb027 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -187,7 +187,6 @@ struct req_que;
187 * SCSI Request Block 187 * SCSI Request Block
188 */ 188 */
189typedef struct srb { 189typedef struct srb {
190 struct scsi_qla_host *vha; /* HA the SP is queued on */
191 struct req_que *que; 190 struct req_que *que;
192 struct fc_port *fcport; 191 struct fc_port *fcport;
193 192
@@ -2136,7 +2135,6 @@ struct qla_msix_entry {
2136/* Work events. */ 2135/* Work events. */
2137enum qla_work_type { 2136enum qla_work_type {
2138 QLA_EVT_AEN, 2137 QLA_EVT_AEN,
2139 QLA_EVT_HWE_LOG,
2140}; 2138};
2141 2139
2142 2140
@@ -2151,10 +2149,6 @@ struct qla_work_evt {
2151 enum fc_host_event_code code; 2149 enum fc_host_event_code code;
2152 u32 data; 2150 u32 data;
2153 } aen; 2151 } aen;
2154 struct {
2155 uint16_t code;
2156 uint16_t d1, d2, d3;
2157 } hwe;
2158 } u; 2152 } u;
2159}; 2153};
2160 2154
@@ -2309,6 +2303,7 @@ struct qla_hw_data {
2309#define PORT_SPEED_2GB 0x01 2303#define PORT_SPEED_2GB 0x01
2310#define PORT_SPEED_4GB 0x03 2304#define PORT_SPEED_4GB 0x03
2311#define PORT_SPEED_8GB 0x04 2305#define PORT_SPEED_8GB 0x04
2306#define PORT_SPEED_10GB 0x13
2312 uint16_t link_data_rate; /* F/W operating speed */ 2307 uint16_t link_data_rate; /* F/W operating speed */
2313 2308
2314 uint8_t current_topology; 2309 uint8_t current_topology;
@@ -2328,6 +2323,7 @@ struct qla_hw_data {
2328 2323
2329#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 2324#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2330#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 2325#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
2326#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
2331 uint32_t device_type; 2327 uint32_t device_type;
2332#define DT_ISP2100 BIT_0 2328#define DT_ISP2100 BIT_0
2333#define DT_ISP2200 BIT_1 2329#define DT_ISP2200 BIT_1
@@ -2342,7 +2338,8 @@ struct qla_hw_data {
2342#define DT_ISP5432 BIT_10 2338#define DT_ISP5432 BIT_10
2343#define DT_ISP2532 BIT_11 2339#define DT_ISP2532 BIT_11
2344#define DT_ISP8432 BIT_12 2340#define DT_ISP8432 BIT_12
2345#define DT_ISP_LAST (DT_ISP8432 << 1) 2341#define DT_ISP8001 BIT_13
2342#define DT_ISP_LAST (DT_ISP8001 << 1)
2346 2343
2347#define DT_IIDMA BIT_26 2344#define DT_IIDMA BIT_26
2348#define DT_FWI2 BIT_27 2345#define DT_FWI2 BIT_27
@@ -2364,6 +2361,7 @@ struct qla_hw_data {
2364#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432) 2361#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2365#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532) 2362#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2366#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432) 2363#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
2364#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
2367 2365
2368#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ 2366#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2369 IS_QLA6312(ha) || IS_QLA6322(ha)) 2367 IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2373,8 +2371,11 @@ struct qla_hw_data {
2373#define IS_QLA84XX(ha) (IS_QLA8432(ha)) 2371#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2374#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ 2372#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2375 IS_QLA84XX(ha)) 2373 IS_QLA84XX(ha))
2374#define IS_QLA81XX(ha) (IS_QLA8001(ha))
2376#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ 2375#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2377 IS_QLA25XX(ha)) 2376 IS_QLA25XX(ha) || IS_QLA81XX(ha))
2377#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
2378 (ha)->flags.msix_enabled)
2378 2379
2379#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA) 2380#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2380#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2) 2381#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
@@ -2472,6 +2473,9 @@ struct qla_hw_data {
2472 uint8_t fw_seriallink_options[4]; 2473 uint8_t fw_seriallink_options[4];
2473 uint16_t fw_seriallink_options24[4]; 2474 uint16_t fw_seriallink_options24[4];
2474 2475
2476 uint8_t mpi_version[4];
2477 uint32_t mpi_capabilities;
2478
2475 /* Firmware dump information. */ 2479 /* Firmware dump information. */
2476 struct qla2xxx_fw_dump *fw_dump; 2480 struct qla2xxx_fw_dump *fw_dump;
2477 uint32_t fw_dump_len; 2481 uint32_t fw_dump_len;
@@ -2480,6 +2484,7 @@ struct qla_hw_data {
2480 dma_addr_t eft_dma; 2484 dma_addr_t eft_dma;
2481 void *eft; 2485 void *eft;
2482 2486
2487 uint32_t chain_offset;
2483 struct dentry *dfs_dir; 2488 struct dentry *dfs_dir;
2484 struct dentry *dfs_fce; 2489 struct dentry *dfs_fce;
2485 dma_addr_t fce_dma; 2490 dma_addr_t fce_dma;
@@ -2489,10 +2494,6 @@ struct qla_hw_data {
2489 uint64_t fce_wr, fce_rd; 2494 uint64_t fce_wr, fce_rd;
2490 struct mutex fce_mutex; 2495 struct mutex fce_mutex;
2491 2496
2492 uint32_t hw_event_start;
2493 uint32_t hw_event_ptr;
2494 uint32_t hw_event_pause_errors;
2495
2496 uint32_t pci_attr; 2497 uint32_t pci_attr;
2497 uint16_t chip_revision; 2498 uint16_t chip_revision;
2498 2499
@@ -2522,6 +2523,12 @@ struct qla_hw_data {
2522 uint8_t fcode_revision[16]; 2523 uint8_t fcode_revision[16];
2523 uint32_t fw_revision[4]; 2524 uint32_t fw_revision[4];
2524 2525
2526 /* Offsets for flash/nvram access (set to ~0 if not used). */
2527 uint32_t flash_conf_off;
2528 uint32_t flash_data_off;
2529 uint32_t nvram_conf_off;
2530 uint32_t nvram_data_off;
2531
2525 uint32_t fdt_wrt_disable; 2532 uint32_t fdt_wrt_disable;
2526 uint32_t fdt_erase_cmd; 2533 uint32_t fdt_erase_cmd;
2527 uint32_t fdt_block_size; 2534 uint32_t fdt_block_size;
@@ -2533,7 +2540,6 @@ struct qla_hw_data {
2533 uint32_t flt_region_boot; 2540 uint32_t flt_region_boot;
2534 uint32_t flt_region_fw; 2541 uint32_t flt_region_fw;
2535 uint32_t flt_region_vpd_nvram; 2542 uint32_t flt_region_vpd_nvram;
2536 uint32_t flt_region_hw_event;
2537 uint32_t flt_region_npiv_conf; 2543 uint32_t flt_region_npiv_conf;
2538 2544
2539 /* Needed for BEACON */ 2545 /* Needed for BEACON */
@@ -2737,6 +2743,7 @@ typedef struct scsi_qla_host {
2737#define OPTROM_SIZE_2322 0x100000 2743#define OPTROM_SIZE_2322 0x100000
2738#define OPTROM_SIZE_24XX 0x100000 2744#define OPTROM_SIZE_24XX 0x100000
2739#define OPTROM_SIZE_25XX 0x200000 2745#define OPTROM_SIZE_25XX 0x200000
2746#define OPTROM_SIZE_81XX 0x400000
2740 2747
2741#include "qla_gbl.h" 2748#include "qla_gbl.h"
2742#include "qla_dbg.h" 2749#include "qla_dbg.h"
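
The ISP81XX hooks in qla_def.h follow the driver's existing classification scheme: each ASIC gets one DT_* bit in device_type, an IS_QLAxxxx() macro tests that bit, and family macros such as IS_QLA81XX() are built from the per-chip tests. A compact sketch of that scheme follows; the bit positions and the simplified DT_MASK() are illustrative stand-ins, not the driver's actual values.

    #include <stdio.h>
    #include <stdint.h>

    /* One bit per ASIC, mirroring the DT_ISPxxxx convention (values are made up). */
    #define DT_ISP2532      (1u << 11)
    #define DT_ISP8432      (1u << 12)
    #define DT_ISP8001      (1u << 13)
    #define DT_MASK(dt)     ((dt) & 0x0000ffffu)   /* low bits hold the chip id   */

    #define IS_QLA2532(dt)  (DT_MASK(dt) & DT_ISP2532)
    #define IS_QLA8001(dt)  (DT_MASK(dt) & DT_ISP8001)
    #define IS_QLA81XX(dt)  IS_QLA8001(dt)          /* family wraps the member chip */

    int main(void)
    {
        uint32_t device_type = DT_ISP8001;      /* pretend probe found an 8001 */

        if (IS_QLA81XX(device_type))
            printf("treating adapter as ISP81XX class\n");
        return 0;
    }
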
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0e366a1b44b3..c66036da7d2b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -113,7 +113,8 @@ int
113qla2x00_dfs_setup(scsi_qla_host_t *vha) 113qla2x00_dfs_setup(scsi_qla_host_t *vha)
114{ 114{
115 struct qla_hw_data *ha = vha->hw; 115 struct qla_hw_data *ha = vha->hw;
116 if (!IS_QLA25XX(ha)) 116
117 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
117 goto out; 118 goto out;
118 if (!ha->fce) 119 if (!ha->fce)
119 goto out; 120 goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index ee1f1e794c2d..7abb045a0410 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1215,9 +1215,10 @@ struct qla_fdt_layout {
1215 1215
1216struct qla_flt_location { 1216struct qla_flt_location {
1217 uint8_t sig[4]; 1217 uint8_t sig[4];
1218 uint32_t start_lo; 1218 uint16_t start_lo;
1219 uint32_t start_hi; 1219 uint16_t start_hi;
1220 uint16_t unused; 1220 uint8_t version;
1221 uint8_t unused[5];
1221 uint16_t checksum; 1222 uint16_t checksum;
1222}; 1223};
1223 1224
@@ -1390,4 +1391,291 @@ struct access_chip_rsp_84xx {
1390 1391
1391 uint32_t reserved[12]; 1392 uint32_t reserved[12];
1392}; 1393};
1394
1395/* 81XX Support **************************************************************/
1396
1397#define MBA_DCBX_START 0x8016
1398#define MBA_DCBX_COMPLETE 0x8030
1399#define MBA_FCF_CONF_ERR 0x8031
1400#define MBA_DCBX_PARAM_UPDATE 0x8032
1401#define MBA_IDC_COMPLETE 0x8100
1402#define MBA_IDC_NOTIFY 0x8101
1403#define MBA_IDC_TIME_EXT 0x8102
1404
1405struct nvram_81xx {
1406 /* NVRAM header. */
1407 uint8_t id[4];
1408 uint16_t nvram_version;
1409 uint16_t reserved_0;
1410
1411 /* Firmware Initialization Control Block. */
1412 uint16_t version;
1413 uint16_t reserved_1;
1414 uint16_t frame_payload_size;
1415 uint16_t execution_throttle;
1416 uint16_t exchange_count;
1417 uint16_t reserved_2;
1418
1419 uint8_t port_name[WWN_SIZE];
1420 uint8_t node_name[WWN_SIZE];
1421
1422 uint16_t login_retry_count;
1423 uint16_t reserved_3;
1424 uint16_t interrupt_delay_timer;
1425 uint16_t login_timeout;
1426
1427 uint32_t firmware_options_1;
1428 uint32_t firmware_options_2;
1429 uint32_t firmware_options_3;
1430
1431 uint16_t reserved_4[4];
1432
1433 /* Offset 64. */
1434 uint8_t enode_mac[6];
1435 uint16_t reserved_5[5];
1436
1437 /* Offset 80. */
1438 uint16_t reserved_6[24];
1439
1440 /* Offset 128. */
1441 uint16_t reserved_7[64];
1442
1443 /*
1444 * BIT 0 = Enable spinup delay
1445 * BIT 1 = Disable BIOS
1446 * BIT 2 = Enable Memory Map BIOS
1447 * BIT 3 = Enable Selectable Boot
1448 * BIT 4 = Disable RISC code load
1449 * BIT 5 = Disable Serdes
1450 * BIT 6 = Opt boot mode
1451 * BIT 7 = Interrupt enable
1452 *
1453 * BIT 8 = EV Control enable
1454 * BIT 9 = Enable lip reset
1455 * BIT 10 = Enable lip full login
1456 * BIT 11 = Enable target reset
1457 * BIT 12 = Stop firmware
1458 * BIT 13 = Enable nodename option
1459 * BIT 14 = Default WWPN valid
1460 * BIT 15 = Enable alternate WWN
1461 *
1462 * BIT 16 = CLP LUN string
1463 * BIT 17 = CLP Target string
1464 * BIT 18 = CLP BIOS enable string
1465 * BIT 19 = CLP Serdes string
1466 * BIT 20 = CLP WWPN string
1467 * BIT 21 = CLP WWNN string
1468 * BIT 22 =
1469 * BIT 23 =
1470 * BIT 24 = Keep WWPN
1471 * BIT 25 = Temp WWPN
1472 * BIT 26-31 =
1473 */
1474 uint32_t host_p;
1475
1476 uint8_t alternate_port_name[WWN_SIZE];
1477 uint8_t alternate_node_name[WWN_SIZE];
1478
1479 uint8_t boot_port_name[WWN_SIZE];
1480 uint16_t boot_lun_number;
1481 uint16_t reserved_8;
1482
1483 uint8_t alt1_boot_port_name[WWN_SIZE];
1484 uint16_t alt1_boot_lun_number;
1485 uint16_t reserved_9;
1486
1487 uint8_t alt2_boot_port_name[WWN_SIZE];
1488 uint16_t alt2_boot_lun_number;
1489 uint16_t reserved_10;
1490
1491 uint8_t alt3_boot_port_name[WWN_SIZE];
1492 uint16_t alt3_boot_lun_number;
1493 uint16_t reserved_11;
1494
1495 /*
1496 * BIT 0 = Selective Login
1497 * BIT 1 = Alt-Boot Enable
1498 * BIT 2 = Reserved
1499 * BIT 3 = Boot Order List
1500 * BIT 4 = Reserved
1501 * BIT 5 = Selective LUN
1502 * BIT 6 = Reserved
1503 * BIT 7-31 =
1504 */
1505 uint32_t efi_parameters;
1506
1507 uint8_t reset_delay;
1508 uint8_t reserved_12;
1509 uint16_t reserved_13;
1510
1511 uint16_t boot_id_number;
1512 uint16_t reserved_14;
1513
1514 uint16_t max_luns_per_target;
1515 uint16_t reserved_15;
1516
1517 uint16_t port_down_retry_count;
1518 uint16_t link_down_timeout;
1519
1520 /* FCode parameters. */
1521 uint16_t fcode_parameter;
1522
1523 uint16_t reserved_16[3];
1524
1525 /* Offset 352. */
1526 uint8_t reserved_17[4];
1527 uint16_t reserved_18[5];
1528 uint8_t reserved_19[2];
1529 uint16_t reserved_20[8];
1530
1531 /* Offset 384. */
1532 uint8_t reserved_21[16];
1533 uint16_t reserved_22[8];
1534
1535 /* Offset 416. */
1536 uint16_t reserved_23[32];
1537
1538 /* Offset 480. */
1539 uint8_t model_name[16];
1540
1541 /* Offset 496. */
1542 uint16_t feature_mask_l;
1543 uint16_t feature_mask_h;
1544 uint16_t reserved_24[2];
1545
1546 uint16_t subsystem_vendor_id;
1547 uint16_t subsystem_device_id;
1548
1549 uint32_t checksum;
1550};
1551
1552/*
1553 * ISP Initialization Control Block.
1554 * Little endian except where noted.
1555 */
1556#define ICB_VERSION 1
1557struct init_cb_81xx {
1558 uint16_t version;
1559 uint16_t reserved_1;
1560
1561 uint16_t frame_payload_size;
1562 uint16_t execution_throttle;
1563 uint16_t exchange_count;
1564
1565 uint16_t reserved_2;
1566
1567 uint8_t port_name[WWN_SIZE]; /* Big endian. */
1568 uint8_t node_name[WWN_SIZE]; /* Big endian. */
1569
1570 uint16_t response_q_inpointer;
1571 uint16_t request_q_outpointer;
1572
1573 uint16_t login_retry_count;
1574
1575 uint16_t prio_request_q_outpointer;
1576
1577 uint16_t response_q_length;
1578 uint16_t request_q_length;
1579
1580 uint16_t reserved_3;
1581
1582 uint16_t prio_request_q_length;
1583
1584 uint32_t request_q_address[2];
1585 uint32_t response_q_address[2];
1586 uint32_t prio_request_q_address[2];
1587
1588 uint8_t reserved_4[8];
1589
1590 uint16_t atio_q_inpointer;
1591 uint16_t atio_q_length;
1592 uint32_t atio_q_address[2];
1593
1594 uint16_t interrupt_delay_timer; /* 100us increments. */
1595 uint16_t login_timeout;
1596
1597 /*
1598 * BIT 0-3 = Reserved
1599 * BIT 4 = Enable Target Mode
1600 * BIT 5 = Disable Initiator Mode
1601 * BIT 6 = Reserved
1602 * BIT 7 = Reserved
1603 *
1604 * BIT 8-13 = Reserved
1605 * BIT 14 = Node Name Option
1606 * BIT 15-31 = Reserved
1607 */
1608 uint32_t firmware_options_1;
1609
1610 /*
1611 * BIT 0 = Operation Mode bit 0
1612 * BIT 1 = Operation Mode bit 1
1613 * BIT 2 = Operation Mode bit 2
1614 * BIT 3 = Operation Mode bit 3
1615 * BIT 4-7 = Reserved
1616 *
1617 * BIT 8 = Enable Class 2
1618 * BIT 9 = Enable ACK0
1619 * BIT 10 = Reserved
1620 * BIT 11 = Enable FC-SP Security
1621 * BIT 12 = FC Tape Enable
1622 * BIT 13 = Reserved
1623 * BIT 14 = Enable Target PRLI Control
1624 * BIT 15-31 = Reserved
1625 */
1626 uint32_t firmware_options_2;
1627
1628 /*
1629 * BIT 0-3 = Reserved
1630 * BIT 4 = FCP RSP Payload bit 0
1631 * BIT 5 = FCP RSP Payload bit 1
1632 * BIT 6 = Enable Receive Out-of-Order data frame handling
1633 * BIT 7 = Reserved
1634 *
1635 * BIT 8 = Reserved
1636 * BIT 9 = Enable Out-of-Order FCP_XFER_RDY relative offset handling
1637 * BIT 10-16 = Reserved
1638 * BIT 17 = Enable multiple FCFs
1639 * BIT 18-20 = MAC addressing mode
1640 * BIT 21-25 = Ethernet data rate
1641 * BIT 26 = Enable ethernet header rx IOCB for ATIO q
1642 * BIT 27 = Enable ethernet header rx IOCB for response q
1643 * BIT 28 = SPMA selection bit 0
1644 * BIT 29 = SPMA selection bit 1
1645 * BIT 30-31 = Reserved
1646 */
1647 uint32_t firmware_options_3;
1648
1649 uint8_t reserved_5[8];
1650
1651 uint8_t enode_mac[6];
1652
1653 uint8_t reserved_6[10];
1654};
1655
1656struct mid_init_cb_81xx {
1657 struct init_cb_81xx init_cb;
1658
1659 uint16_t count;
1660 uint16_t options;
1661
1662 struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
1663};
1664
1665#define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
1666#define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
1667
1668/* 81XX Flash locations -- occupies second 2MB region. */
1669#define FA_BOOT_CODE_ADDR_81 0x80000
1670#define FA_RISC_CODE_ADDR_81 0xA0000
1671#define FA_FW_AREA_ADDR_81 0xC0000
1672#define FA_VPD_NVRAM_ADDR_81 0xD0000
1673#define FA_FEATURE_ADDR_81 0xD4000
1674#define FA_FLASH_DESCR_ADDR_81 0xD8000
1675#define FA_FLASH_LAYOUT_ADDR_81 0xD8400
1676#define FA_HW_EVENT0_ADDR_81 0xDC000
1677#define FA_HW_EVENT1_ADDR_81 0xDC400
1678#define FA_NPIV_CONF0_ADDR_81 0xD1000
1679#define FA_NPIV_CONF1_ADDR_81 0xD2000
1680
1393#endif 1681#endif
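
The reworked qla_flt_location now carries the flash-layout-table pointer as two 16-bit halves plus a version byte. A tiny sketch of reassembling a 32-bit value from such halves is below; the signature bytes and sample values are invented, and the real driver may apply additional byte-swapping or scaling when it consumes these fields.

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the reworked qla_flt_location shape: the address is split in halves. */
    struct flt_location_demo {
        uint8_t  sig[4];
        uint16_t start_lo;   /* low 16 bits of the layout-table address  */
        uint16_t start_hi;   /* high 16 bits                             */
        uint8_t  version;
        uint8_t  unused[5];
        uint16_t checksum;
    };

    int main(void)
    {
        struct flt_location_demo loc = {
            .sig = { 'Q', 'F', 'L', 'T' },   /* invented signature value */
            .start_lo = 0x8400,
            .start_hi = 0x000D,
        };
        uint32_t flt_addr = ((uint32_t)loc.start_hi << 16) | loc.start_lo;

        printf("flash layout table at 0x%x\n", (unsigned)flt_addr);
        return 0;
    }
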
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0011e31205db..ba4913353752 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -28,8 +28,10 @@ extern void qla2x00_reset_adapter(struct scsi_qla_host *);
28extern void qla24xx_reset_adapter(struct scsi_qla_host *); 28extern void qla24xx_reset_adapter(struct scsi_qla_host *);
29extern int qla2x00_nvram_config(struct scsi_qla_host *); 29extern int qla2x00_nvram_config(struct scsi_qla_host *);
30extern int qla24xx_nvram_config(struct scsi_qla_host *); 30extern int qla24xx_nvram_config(struct scsi_qla_host *);
31extern int qla81xx_nvram_config(struct scsi_qla_host *);
31extern void qla2x00_update_fw_options(struct scsi_qla_host *); 32extern void qla2x00_update_fw_options(struct scsi_qla_host *);
32extern void qla24xx_update_fw_options(scsi_qla_host_t *); 33extern void qla24xx_update_fw_options(scsi_qla_host_t *);
34extern void qla81xx_update_fw_options(scsi_qla_host_t *);
33extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); 35extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *);
34extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); 36extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
35 37
@@ -69,8 +71,6 @@ extern int qla2x00_loop_reset(scsi_qla_host_t *);
69extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 71extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
70extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum 72extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
71 fc_host_event_code, u32); 73 fc_host_event_code, u32);
72extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
73 uint16_t, uint16_t);
74 74
75extern void qla2x00_abort_fcport_cmds(fc_port_t *); 75extern void qla2x00_abort_fcport_cmds(fc_port_t *);
76extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *, 76extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
@@ -143,7 +143,7 @@ qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
143 143
144extern void 144extern void
145qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, 145qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *,
146 uint16_t *, uint16_t *, uint16_t *, uint32_t *); 146 uint16_t *, uint16_t *, uint16_t *, uint32_t *, uint8_t *, uint32_t *);
147 147
148extern int 148extern int
149qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *); 149qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
@@ -317,9 +317,6 @@ extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
317extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); 317extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
318extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *); 318extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
319 319
320extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
321 uint16_t, uint16_t);
322
323extern int qla2xxx_get_flash_info(scsi_qla_host_t *); 320extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
324extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); 321extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
325 322
@@ -332,6 +329,7 @@ extern void qla2100_fw_dump(scsi_qla_host_t *, int);
332extern void qla2300_fw_dump(scsi_qla_host_t *, int); 329extern void qla2300_fw_dump(scsi_qla_host_t *, int);
333extern void qla24xx_fw_dump(scsi_qla_host_t *, int); 330extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
334extern void qla25xx_fw_dump(scsi_qla_host_t *, int); 331extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
332extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
335extern void qla2x00_dump_regs(scsi_qla_host_t *); 333extern void qla2x00_dump_regs(scsi_qla_host_t *);
336extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 334extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
337 335
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 0a6f72973996..557f58d5bf88 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1535,7 +1535,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1535 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1535 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1536 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); 1536 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1537 eiter->len = __constant_cpu_to_be16(4 + 4); 1537 eiter->len = __constant_cpu_to_be16(4 + 4);
1538 if (IS_QLA25XX(ha)) 1538 if (IS_QLA81XX(ha))
1539 eiter->a.sup_speed = __constant_cpu_to_be32(
1540 FDMI_PORT_SPEED_10GB);
1541 else if (IS_QLA25XX(ha))
1539 eiter->a.sup_speed = __constant_cpu_to_be32( 1542 eiter->a.sup_speed = __constant_cpu_to_be32(
1540 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| 1543 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
1541 FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB); 1544 FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
@@ -1575,6 +1578,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1575 eiter->a.cur_speed = 1578 eiter->a.cur_speed =
1576 __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB); 1579 __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
1577 break; 1580 break;
1581 case PORT_SPEED_10GB:
1582 eiter->a.cur_speed =
1583 __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
1584 break;
1578 default: 1585 default:
1579 eiter->a.cur_speed = 1586 eiter->a.cur_speed =
1580 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); 1587 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52ed56ecf195..2d4f32b4df5c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -552,10 +552,6 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
552 d2 = RD_REG_DWORD(&reg->ctrl_status); 552 d2 = RD_REG_DWORD(&reg->ctrl_status);
553 barrier(); 553 barrier();
554 } 554 }
555 if (cnt == 0 || hw_evt)
556 qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
557 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
558 RD_REG_WORD(&reg->mailbox3));
559 555
560 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET); 556 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
561 RD_REG_DWORD(&reg->hccr); 557 RD_REG_DWORD(&reg->hccr);
@@ -574,6 +570,9 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
574 } 570 }
575 571
576 spin_unlock_irqrestore(&ha->hardware_lock, flags); 572 spin_unlock_irqrestore(&ha->hardware_lock, flags);
573
574 if (IS_NOPOLLING_TYPE(ha))
575 ha->isp_ops->enable_intrs(ha);
577} 576}
578 577
579/** 578/**
@@ -779,16 +778,19 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
779 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 778 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
780 sizeof(uint16_t); 779 sizeof(uint16_t);
781 } else if (IS_FWI2_CAPABLE(ha)) { 780 } else if (IS_FWI2_CAPABLE(ha)) {
782 fixed_size = IS_QLA25XX(ha) ? 781 if (IS_QLA81XX(ha))
783 offsetof(struct qla25xx_fw_dump, ext_mem) : 782 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
784 offsetof(struct qla24xx_fw_dump, ext_mem); 783 else if (IS_QLA25XX(ha))
784 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
785 else
786 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
785 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 787 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
786 sizeof(uint32_t); 788 sizeof(uint32_t);
787 if (ha->mqenable) 789 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain); 790 mq_size = sizeof(struct qla2xxx_mq_chain);
789 791
790 /* Allocate memory for Fibre Channel Event Buffer. */ 792 /* Allocate memory for Fibre Channel Event Buffer. */
791 if (!IS_QLA25XX(ha)) 793 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
792 goto try_eft; 794 goto try_eft;
793 795
794 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 796 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
@@ -851,7 +853,9 @@ cont_alloc:
851 853
852 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 854 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 855 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
854 mq_size + eft_size + fce_size; 856 eft_size;
857 ha->chain_offset = dump_size;
858 dump_size += mq_size + fce_size;
855 859
856 ha->fw_dump = vmalloc(dump_size); 860 ha->fw_dump = vmalloc(dump_size);
857 if (!ha->fw_dump) { 861 if (!ha->fw_dump) {
@@ -987,7 +991,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
987 &ha->fw_major_version, 991 &ha->fw_major_version,
988 &ha->fw_minor_version, 992 &ha->fw_minor_version,
989 &ha->fw_subminor_version, 993 &ha->fw_subminor_version,
990 &ha->fw_attributes, &ha->fw_memory_size); 994 &ha->fw_attributes, &ha->fw_memory_size,
995 ha->mpi_version, &ha->mpi_capabilities);
991 ha->flags.npiv_supported = 0; 996 ha->flags.npiv_supported = 0;
992 if (IS_QLA2XXX_MIDTYPE(ha) && 997 if (IS_QLA2XXX_MIDTYPE(ha) &&
993 (ha->fw_attributes & BIT_2)) { 998 (ha->fw_attributes & BIT_2)) {
@@ -1665,10 +1670,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
1665 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet " 1670 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
1666 "invalid -- WWPN) defaults.\n"); 1671 "invalid -- WWPN) defaults.\n");
1667 1672
1668 if (chksum)
1669 qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
1670 MSW(chksum), LSW(chksum));
1671
1672 /* 1673 /*
1673 * Set default initialization control block. 1674 * Set default initialization control block.
1674 */ 1675 */
@@ -4255,3 +4256,269 @@ qla84xx_init_chip(scsi_qla_host_t *vha)
4255 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED: 4256 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED:
4256 QLA_SUCCESS; 4257 QLA_SUCCESS;
4257} 4258}
4259
4260/* 81XX Support **************************************************************/
4261
4262int
4263qla81xx_nvram_config(scsi_qla_host_t *vha)
4264{
4265 int rval;
4266 struct init_cb_81xx *icb;
4267 struct nvram_81xx *nv;
4268 uint32_t *dptr;
4269 uint8_t *dptr1, *dptr2;
4270 uint32_t chksum;
4271 uint16_t cnt;
4272 struct qla_hw_data *ha = vha->hw;
4273
4274 rval = QLA_SUCCESS;
4275 icb = (struct init_cb_81xx *)ha->init_cb;
4276 nv = ha->nvram;
4277
4278 /* Determine NVRAM starting address. */
4279 ha->nvram_size = sizeof(struct nvram_81xx);
4280 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4281 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4282 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4283 if (PCI_FUNC(ha->pdev->devfn) & 1) {
4284 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4285 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4286 }
4287
4288 /* Get VPD data into cache */
4289 ha->vpd = ha->nvram + VPD_OFFSET;
4290 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
4291 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
4292
4293 /* Get NVRAM data into cache and calculate checksum. */
4294 dptr = (uint32_t *)nv;
4295 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
4296 ha->nvram_size);
4297 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
4298 chksum += le32_to_cpu(*dptr++);
4299
4300 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
4301 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
4302
4303 /* Bad NVRAM data, set defaults parameters. */
4304 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
4305 || nv->id[3] != ' ' ||
4306 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4307 /* Reset NVRAM data. */
4308 qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
4309 "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
4310 le16_to_cpu(nv->nvram_version));
4311 qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
4312 "invalid -- WWPN) defaults.\n");
4313
4314 /*
4315 * Set default initialization control block.
4316 */
4317 memset(nv, 0, ha->nvram_size);
4318 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4319 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4320 nv->frame_payload_size = __constant_cpu_to_le16(2048);
4321 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4322 nv->exchange_count = __constant_cpu_to_le16(0);
4323 nv->port_name[0] = 0x21;
4324 nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
4325 nv->port_name[2] = 0x00;
4326 nv->port_name[3] = 0xe0;
4327 nv->port_name[4] = 0x8b;
4328 nv->port_name[5] = 0x1c;
4329 nv->port_name[6] = 0x55;
4330 nv->port_name[7] = 0x86;
4331 nv->node_name[0] = 0x20;
4332 nv->node_name[1] = 0x00;
4333 nv->node_name[2] = 0x00;
4334 nv->node_name[3] = 0xe0;
4335 nv->node_name[4] = 0x8b;
4336 nv->node_name[5] = 0x1c;
4337 nv->node_name[6] = 0x55;
4338 nv->node_name[7] = 0x86;
4339 nv->login_retry_count = __constant_cpu_to_le16(8);
4340 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
4341 nv->login_timeout = __constant_cpu_to_le16(0);
4342 nv->firmware_options_1 =
4343 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
4344 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
4345 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4346 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
4347 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
4348 nv->efi_parameters = __constant_cpu_to_le32(0);
4349 nv->reset_delay = 5;
4350 nv->max_luns_per_target = __constant_cpu_to_le16(128);
4351 nv->port_down_retry_count = __constant_cpu_to_le16(30);
4352 nv->link_down_timeout = __constant_cpu_to_le16(30);
4353 nv->enode_mac[0] = 0x01;
4354 nv->enode_mac[1] = 0x02;
4355 nv->enode_mac[2] = 0x03;
4356 nv->enode_mac[3] = 0x04;
4357 nv->enode_mac[4] = 0x05;
4358 nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
4359
4360 rval = 1;
4361 }
4362
4363 /* Reset Initialization control block */
4364 memset(icb, 0, sizeof(struct init_cb_81xx));
4365
4366 /* Copy 1st segment. */
4367 dptr1 = (uint8_t *)icb;
4368 dptr2 = (uint8_t *)&nv->version;
4369 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
4370 while (cnt--)
4371 *dptr1++ = *dptr2++;
4372
4373 icb->login_retry_count = nv->login_retry_count;
4374
4375 /* Copy 2nd segment. */
4376 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
4377 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
4378 cnt = (uint8_t *)&icb->reserved_5 -
4379 (uint8_t *)&icb->interrupt_delay_timer;
4380 while (cnt--)
4381 *dptr1++ = *dptr2++;
4382
4383 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
4384 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
4385 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
4386 icb->enode_mac[0] = 0x01;
4387 icb->enode_mac[1] = 0x02;
4388 icb->enode_mac[2] = 0x03;
4389 icb->enode_mac[3] = 0x04;
4390 icb->enode_mac[4] = 0x05;
4391 icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
4392 }
4393
4394 /*
4395 * Setup driver NVRAM options.
4396 */
4397 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4398 "QLE81XX");
4399
4400 /* Use alternate WWN? */
4401 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4402 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4403 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4404 }
4405
4406 /* Prepare nodename */
4407 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
4408 /*
4409 * Firmware will apply the following mask if the nodename was
4410 * not provided.
4411 */
4412 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
4413 icb->node_name[0] &= 0xF0;
4414 }
4415
4416 /* Set host adapter parameters. */
4417 ha->flags.disable_risc_code_load = 0;
4418 ha->flags.enable_lip_reset = 0;
4419 ha->flags.enable_lip_full_login =
4420 le32_to_cpu(nv->host_p) & BIT_10 ? 1: 0;
4421 ha->flags.enable_target_reset =
4422 le32_to_cpu(nv->host_p) & BIT_11 ? 1: 0;
4423 ha->flags.enable_led_scheme = 0;
4424 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1: 0;
4425
4426 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
4427 (BIT_6 | BIT_5 | BIT_4)) >> 4;
4428
4429 /* save HBA serial number */
4430 ha->serial0 = icb->port_name[5];
4431 ha->serial1 = icb->port_name[6];
4432 ha->serial2 = icb->port_name[7];
4433 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
4434 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
4435
4436 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4437
4438 ha->retry_count = le16_to_cpu(nv->login_retry_count);
4439
4440 /* Set minimum login_timeout to 4 seconds. */
4441 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
4442 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
4443 if (le16_to_cpu(nv->login_timeout) < 4)
4444 nv->login_timeout = __constant_cpu_to_le16(4);
4445 ha->login_timeout = le16_to_cpu(nv->login_timeout);
4446 icb->login_timeout = nv->login_timeout;
4447
4448 /* Set minimum RATOV to 100 tenths of a second. */
4449 ha->r_a_tov = 100;
4450
4451 ha->loop_reset_delay = nv->reset_delay;
4452
4453 /* Link Down Timeout = 0:
4454 *
4455 * When Port Down timer expires we will start returning
4456 * I/O's to OS with "DID_NO_CONNECT".
4457 *
4458 * Link Down Timeout != 0:
4459 *
4460 * The driver waits for the link to come up after link down
4461 * before returning I/Os to OS with "DID_NO_CONNECT".
4462 */
4463 if (le16_to_cpu(nv->link_down_timeout) == 0) {
4464 ha->loop_down_abort_time =
4465 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
4466 } else {
4467 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
4468 ha->loop_down_abort_time =
4469 (LOOP_DOWN_TIME - ha->link_down_timeout);
4470 }
4471
4472 /* Need enough time to try and get the port back. */
4473 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
4474 if (qlport_down_retry)
4475 ha->port_down_retry_count = qlport_down_retry;
4476
4477 /* Set login_retry_count */
4478 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
4479 if (ha->port_down_retry_count ==
4480 le16_to_cpu(nv->port_down_retry_count) &&
4481 ha->port_down_retry_count > 3)
4482 ha->login_retry_count = ha->port_down_retry_count;
4483 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
4484 ha->login_retry_count = ha->port_down_retry_count;
4485 if (ql2xloginretrycount)
4486 ha->login_retry_count = ql2xloginretrycount;
4487
4488 /* Enable ZIO. */
4489 if (!vha->flags.init_done) {
4490 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
4491 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4492 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
4493 le16_to_cpu(icb->interrupt_delay_timer): 2;
4494 }
4495 icb->firmware_options_2 &= __constant_cpu_to_le32(
4496 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
4497 vha->flags.process_response_queue = 0;
4498 if (ha->zio_mode != QLA_ZIO_DISABLED) {
4499 ha->zio_mode = QLA_ZIO_MODE_6;
4500
4501 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
4502 "(%d us).\n", vha->host_no, ha->zio_mode,
4503 ha->zio_timer * 100));
4504 qla_printk(KERN_INFO, ha,
4505 "ZIO mode %d enabled; timer delay (%d us).\n",
4506 ha->zio_mode, ha->zio_timer * 100);
4507
4508 icb->firmware_options_2 |= cpu_to_le32(
4509 (uint32_t)ha->zio_mode);
4510 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
4511 vha->flags.process_response_queue = 1;
4512 }
4513
4514 if (rval) {
4515 DEBUG2_3(printk(KERN_WARNING
4516 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
4517 }
4518 return (rval);
4519}
4520
4521void
4522qla81xx_update_fw_options(scsi_qla_host_t *ha)
4523{
4524}
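
qla81xx_nvram_config() above validates NVRAM the same way the earlier FWI2 code does: sum every little-endian 32-bit word of the image and treat a non-zero total as a bad image, since the stored checksum word is chosen to cancel the rest. A self-contained sketch of that check on a small fake image (the image size and contents are invented):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Sum all 32-bit words of an NVRAM image; a valid image sums to zero. */
    static uint32_t nvram_sum(const uint8_t *img, size_t len_bytes)
    {
        uint32_t sum = 0, word;
        size_t i;

        for (i = 0; i + 4 <= len_bytes; i += 4) {
            /* Little-endian load, independent of host byte order. */
            word = (uint32_t)img[i] | ((uint32_t)img[i + 1] << 8) |
                ((uint32_t)img[i + 2] << 16) | ((uint32_t)img[i + 3] << 24);
            sum += word;
        }
        return sum;
    }

    int main(void)
    {
        uint8_t img[32];
        uint32_t fixup;

        memset(img, 0xAB, sizeof(img) - 4);      /* fake payload               */
        memset(img + sizeof(img) - 4, 0, 4);     /* checksum slot, zeroed first */

        /* Choose the trailing word so the whole image sums to zero. */
        fixup = (uint32_t)(0u - nvram_sum(img, sizeof(img)));
        img[28] = fixup & 0xFF;
        img[29] = (fixup >> 8) & 0xFF;
        img[30] = (fixup >> 16) & 0xFF;
        img[31] = (fixup >> 24) & 0xFF;

        printf("image checksum %s\n",
            nvram_sum(img, sizeof(img)) == 0 ? "ok" : "bad");
        return 0;
    }
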
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 5bedc9d05942..2258152b1f41 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -173,7 +173,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
173 return; 173 return;
174 } 174 }
175 175
176 vha = sp->vha; 176 vha = sp->fcport->vha;
177 req = sp->que; 177 req = sp->que;
178 178
179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -234,7 +234,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
234 return; 234 return;
235 } 235 }
236 236
237 vha = sp->vha; 237 vha = sp->fcport->vha;
238 req = sp->que; 238 req = sp->que;
239 239
240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -294,7 +294,7 @@ qla2x00_start_scsi(srb_t *sp)
294 294
295 /* Setup device pointers. */ 295 /* Setup device pointers. */
296 ret = 0; 296 ret = 0;
297 vha = sp->vha; 297 vha = sp->fcport->vha;
298 ha = vha->hw; 298 ha = vha->hw;
299 reg = &ha->iobase->isp; 299 reg = &ha->iobase->isp;
300 cmd = sp->cmd; 300 cmd = sp->cmd;
@@ -353,7 +353,6 @@ qla2x00_start_scsi(srb_t *sp)
353 /* Build command packet */ 353 /* Build command packet */
354 req->current_outstanding_cmd = handle; 354 req->current_outstanding_cmd = handle;
355 req->outstanding_cmds[handle] = sp; 355 req->outstanding_cmds[handle] = sp;
356 sp->vha = vha;
357 sp->que = req; 356 sp->que = req;
358 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 357 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
359 req->cnt -= req_cnt; 358 req->cnt -= req_cnt;
@@ -656,7 +655,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
656 return; 655 return;
657 } 656 }
658 657
659 vha = sp->vha; 658 vha = sp->fcport->vha;
660 req = sp->que; 659 req = sp->que;
661 660
662 /* Set transfer direction */ 661 /* Set transfer direction */
@@ -723,7 +722,7 @@ qla24xx_start_scsi(srb_t *sp)
723 struct req_que *req = NULL; 722 struct req_que *req = NULL;
724 struct rsp_que *rsp = NULL; 723 struct rsp_que *rsp = NULL;
725 struct scsi_cmnd *cmd = sp->cmd; 724 struct scsi_cmnd *cmd = sp->cmd;
726 struct scsi_qla_host *vha = sp->vha; 725 struct scsi_qla_host *vha = sp->fcport->vha;
727 struct qla_hw_data *ha = vha->hw; 726 struct qla_hw_data *ha = vha->hw;
728 uint16_t que_id; 727 uint16_t que_id;
729 728
@@ -791,7 +790,6 @@ qla24xx_start_scsi(srb_t *sp)
791 /* Build command packet. */ 790 /* Build command packet. */
792 req->current_outstanding_cmd = handle; 791 req->current_outstanding_cmd = handle;
793 req->outstanding_cmds[handle] = sp; 792 req->outstanding_cmds[handle] = sp;
794 sp->vha = vha;
795 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 793 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
796 req->cnt -= req_cnt; 794 req->cnt -= req_cnt;
797 795
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d5fb79a88001..789fc576f222 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -275,7 +275,7 @@ void
275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
276{ 276{
277#define LS_UNKNOWN 2 277#define LS_UNKNOWN 2
278 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 278 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
279 char *link_speed; 279 char *link_speed;
280 uint16_t handle_cnt; 280 uint16_t handle_cnt;
281 uint16_t cnt; 281 uint16_t cnt;
@@ -288,6 +288,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
288 288
289 /* Setup to process RIO completion. */ 289 /* Setup to process RIO completion. */
290 handle_cnt = 0; 290 handle_cnt = 0;
291 if (IS_QLA81XX(ha))
292 goto skip_rio;
291 switch (mb[0]) { 293 switch (mb[0]) {
292 case MBA_SCSI_COMPLETION: 294 case MBA_SCSI_COMPLETION:
293 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 295 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
@@ -339,7 +341,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
339 default: 341 default:
340 break; 342 break;
341 } 343 }
342 344skip_rio:
343 switch (mb[0]) { 345 switch (mb[0]) {
344 case MBA_SCSI_COMPLETION: /* Fast Post */ 346 case MBA_SCSI_COMPLETION: /* Fast Post */
345 if (!vha->flags.online) 347 if (!vha->flags.online)
@@ -362,7 +364,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
362 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 364 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
363 mb[1], mb[2], mb[3]); 365 mb[1], mb[2], mb[3]);
364 366
365 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
366 ha->isp_ops->fw_dump(vha, 1); 367 ha->isp_ops->fw_dump(vha, 1);
367 368
368 if (IS_FWI2_CAPABLE(ha)) { 369 if (IS_FWI2_CAPABLE(ha)) {
@@ -387,7 +388,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
387 vha->host_no)); 388 vha->host_no));
388 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); 389 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
389 390
390 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
392 break; 392 break;
393 393
@@ -396,7 +396,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
396 vha->host_no)); 396 vha->host_no));
397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); 397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
398 398
399 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
400 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 399 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
401 break; 400 break;
402 401
@@ -436,6 +435,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
436 link_speed = link_speeds[LS_UNKNOWN]; 435 link_speed = link_speeds[LS_UNKNOWN];
437 if (mb[1] < 5) 436 if (mb[1] < 5)
438 link_speed = link_speeds[mb[1]]; 437 link_speed = link_speeds[mb[1]];
438 else if (mb[1] == 0x13)
439 link_speed = link_speeds[5];
439 ha->link_data_rate = mb[1]; 440 ha->link_data_rate = mb[1];
440 } 441 }
441 442
@@ -495,12 +496,17 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
495 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 496 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
496 break; 497 break;
497 498
499 /* case MBA_DCBX_COMPLETE: */
498 case MBA_POINT_TO_POINT: /* Point-to-Point */ 500 case MBA_POINT_TO_POINT: /* Point-to-Point */
499 if (IS_QLA2100(ha)) 501 if (IS_QLA2100(ha))
500 break; 502 break;
501 503
502 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", 504 if (IS_QLA81XX(ha))
503 vha->host_no)); 505 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
506 "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
507 else
508 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
509 "received.\n", vha->host_no));
504 510
505 /* 511 /*
506 * Until there's a transition from loop down to loop up, treat 512 * Until there's a transition from loop down to loop up, treat
@@ -641,10 +647,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
641 647
642 /* case MBA_RIO_RESPONSE: */ 648 /* case MBA_RIO_RESPONSE: */
643 case MBA_ZIO_RESPONSE: 649 case MBA_ZIO_RESPONSE:
644 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", 650 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
645 vha->host_no));
646 DEBUG(printk(KERN_INFO
647 "scsi(%ld): [R|Z]IO update completion.\n",
648 vha->host_no)); 651 vha->host_no));
649 652
650 if (IS_FWI2_CAPABLE(ha)) 653 if (IS_FWI2_CAPABLE(ha))
@@ -698,6 +701,35 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
698 } 701 }
699 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 702 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
700 break; 703 break;
704 case MBA_DCBX_START:
705 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
706 vha->host_no, mb[1], mb[2], mb[3]));
707 break;
708 case MBA_DCBX_PARAM_UPDATE:
709 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
710 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
711 break;
712 case MBA_FCF_CONF_ERR:
713 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
714 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
715 break;
716 case MBA_IDC_COMPLETE:
 717 DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
718 "Complete -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2],
719 mb[3]));
720 break;
721 case MBA_IDC_NOTIFY:
 722 DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
723 "Request Notification -- %04x %04x %04x\n", vha->host_no,
724 mb[1], mb[2], mb[3]));
725 /**** Mailbox registers 4 - 7 valid!!! */
726 break;
727 case MBA_IDC_TIME_EXT:
 728 DEBUG2(printk("scsi(%ld): Inter-Driver Communication "
729 "Time Extension -- %04x %04x %04x\n", vha->host_no, mb[1],
730 mb[2], mb[3]));
731 /**** Mailbox registers 4 - 7 valid!!! */
732 break;
701 } 733 }
702 734
703 if (!vha->vp_idx && ha->num_vhosts) 735 if (!vha->vp_idx && ha->num_vhosts)
@@ -1510,7 +1542,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1510 struct qla_hw_data *ha = vha->hw; 1542 struct qla_hw_data *ha = vha->hw;
1511 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1543 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1512 1544
1513 if (!IS_QLA25XX(ha)) 1545 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1514 return; 1546 return;
1515 1547
1516 rval = QLA_SUCCESS; 1548 rval = QLA_SUCCESS;
@@ -1590,12 +1622,6 @@ qla24xx_intr_handler(int irq, void *dev_id)
1590 if (pci_channel_offline(ha->pdev)) 1622 if (pci_channel_offline(ha->pdev))
1591 break; 1623 break;
1592 1624
1593 if (ha->hw_event_pause_errors == 0)
1594 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1595 0, MSW(stat), LSW(stat));
1596 else if (ha->hw_event_pause_errors < 0xffffffff)
1597 ha->hw_event_pause_errors++;
1598
1599 hccr = RD_REG_DWORD(&reg->hccr); 1625 hccr = RD_REG_DWORD(&reg->hccr);
1600 1626
1601 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1627 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
@@ -1740,12 +1766,6 @@ qla24xx_msix_default(int irq, void *dev_id)
1740 if (pci_channel_offline(ha->pdev)) 1766 if (pci_channel_offline(ha->pdev))
1741 break; 1767 break;
1742 1768
1743 if (ha->hw_event_pause_errors == 0)
1744 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1745 0, MSW(stat), LSW(stat));
1746 else if (ha->hw_event_pause_errors < 0xffffffff)
1747 ha->hw_event_pause_errors++;
1748
1749 hccr = RD_REG_DWORD(&reg->hccr); 1769 hccr = RD_REG_DWORD(&reg->hccr);
1750 1770
1751 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1771 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
@@ -1944,7 +1964,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1944 device_reg_t __iomem *reg = ha->iobase; 1964 device_reg_t __iomem *reg = ha->iobase;
1945 1965
1946 /* If possible, enable MSI-X. */ 1966 /* If possible, enable MSI-X. */
1947 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) 1967 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
1968 !IS_QLA8432(ha) && !IS_QLA8001(ha))
1948 goto skip_msix; 1969 goto skip_msix;
1949 1970
1950 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || 1971 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
@@ -1979,7 +2000,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1979 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 2000 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1980skip_msix: 2001skip_msix:
1981 2002
1982 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) 2003 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2004 !IS_QLA8001(ha))
1983 goto skip_msi; 2005 goto skip_msi;
1984 2006
1985 ret = pci_enable_msi(ha->pdev); 2007 ret = pci_enable_msi(ha->pdev);
@@ -2000,6 +2022,12 @@ skip_msi:
2000 ha->flags.inta_enabled = 1; 2022 ha->flags.inta_enabled = 1;
2001clear_risc_ints: 2023clear_risc_ints:
2002 2024
2025 /*
2026 * FIXME: Noted that 8014s were being dropped during NK testing.
2027 * Timing deltas during MSI-X/INTa transitions?
2028 */
2029 if (IS_QLA81XX(ha))
2030 goto fail;
2003 spin_lock_irq(&ha->hardware_lock); 2031 spin_lock_irq(&ha->hardware_lock);
2004 if (IS_FWI2_CAPABLE(ha)) { 2032 if (IS_FWI2_CAPABLE(ha)) {
2005 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 2033 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -2044,7 +2072,7 @@ qla2x00_get_rsp_host(struct rsp_que *rsp)
2044 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) { 2072 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2045 sp = req->outstanding_cmds[pkt->handle]; 2073 sp = req->outstanding_cmds[pkt->handle];
2046 if (sp) 2074 if (sp)
2047 vha = sp->vha; 2075 vha = sp->fcport->vha;
2048 } 2076 }
2049 } 2077 }
2050 if (!vha) 2078 if (!vha)
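
The async-event change above grows the link_speeds[] table and maps the out-of-band 10GB rate code (0x13) onto the new last entry instead of indexing the table directly. A small sketch of that decode, mirroring the bounds check plus special case; the rate codes used here are only the ones visible in the hunk, anything else is assumed unknown.

    #include <stdio.h>

    #define LS_UNKNOWN 2

    /* Human-readable link rates; index 5 is the 10GB entry added for ISP81XX. */
    static const char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };

    /* Translate the firmware rate code in mb1 into a printable speed string. */
    static const char *decode_link_speed(unsigned int mb1)
    {
        const char *speed = link_speeds[LS_UNKNOWN];    /* default: unknown */

        if (mb1 < 5)
            speed = link_speeds[mb1];       /* 1/2/?/4/8 Gb by direct index */
        else if (mb1 == 0x13)
            speed = link_speeds[5];         /* 10GB uses its own rate code  */
        return speed;
    }

    int main(void)
    {
        unsigned int codes[] = { 0, 3, 4, 0x13, 0x7f };
        size_t i;

        for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
            printf("code 0x%02x -> %s Gb/s\n", codes[i],
                decode_link_speed(codes[i]));
        return 0;
    }
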
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a99976f5fabd..db4df45234a5 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -123,8 +123,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
123 123
124 /* Wait for mbx cmd completion until timeout */ 124 /* Wait for mbx cmd completion until timeout */
125 125
126 if (!abort_active && io_lock_on) { 126 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
127
128 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 127 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
129 128
130 if (IS_FWI2_CAPABLE(ha)) 129 if (IS_FWI2_CAPABLE(ha))
@@ -218,7 +217,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
218 /* Clean up */ 217 /* Clean up */
219 ha->mcp = NULL; 218 ha->mcp = NULL;
220 219
221 if (abort_active || !io_lock_on) { 220 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
222 DEBUG11(printk("%s(%ld): checking for additional resp " 221 DEBUG11(printk("%s(%ld): checking for additional resp "
223 "interrupt.\n", __func__, base_vha->host_no)); 222 "interrupt.\n", __func__, base_vha->host_no));
224 223
@@ -412,7 +411,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
412 */ 411 */
413void 412void
414qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, 413qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
415 uint16_t *subminor, uint16_t *attributes, uint32_t *memory) 414 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
415 uint32_t *mpi_caps)
416{ 416{
417 int rval; 417 int rval;
418 mbx_cmd_t mc; 418 mbx_cmd_t mc;
@@ -423,6 +423,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
423 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 423 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
424 mcp->out_mb = MBX_0; 424 mcp->out_mb = MBX_0;
425 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 425 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
426 if (IS_QLA81XX(vha->hw))
427 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
426 mcp->flags = 0; 428 mcp->flags = 0;
427 mcp->tov = MBX_TOV_SECONDS; 429 mcp->tov = MBX_TOV_SECONDS;
428 rval = qla2x00_mailbox_command(vha, mcp); 430 rval = qla2x00_mailbox_command(vha, mcp);
@@ -436,6 +438,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
436 *memory = 0x1FFFF; /* Defaults to 128KB. */ 438 *memory = 0x1FFFF; /* Defaults to 128KB. */
437 else 439 else
438 *memory = (mcp->mb[5] << 16) | mcp->mb[4]; 440 *memory = (mcp->mb[5] << 16) | mcp->mb[4];
441 if (IS_QLA81XX(vha->hw)) {
442 mpi[0] = mcp->mb[10] >> 8;
443 mpi[1] = mcp->mb[10] & 0xff;
444 mpi[2] = mcp->mb[11] >> 8;
445 mpi[3] = mcp->mb[11] & 0xff;
446 *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
447 }
439 448
440 if (rval != QLA_SUCCESS) { 449 if (rval != QLA_SUCCESS) {
441 /*EMPTY*/ 450 /*EMPTY*/
@@ -568,7 +577,6 @@ int
568qla2x00_mbx_reg_test(scsi_qla_host_t *vha) 577qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
569{ 578{
570 int rval; 579 int rval;
571 struct qla_hw_data *ha = vha->hw;
572 mbx_cmd_t mc; 580 mbx_cmd_t mc;
573 mbx_cmd_t *mcp = &mc; 581 mbx_cmd_t *mcp = &mc;
574 582
@@ -595,14 +603,6 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
595 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || 603 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
596 mcp->mb[7] != 0x2525) 604 mcp->mb[7] != 0x2525)
597 rval = QLA_FUNCTION_FAILED; 605 rval = QLA_FUNCTION_FAILED;
598 if (rval == QLA_FUNCTION_FAILED) {
599 struct device_reg_24xx __iomem *reg =
600 &ha->iobase->isp24;
601
602 qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
603 LSW(RD_REG_DWORD(&reg->hccr)),
604 LSW(RD_REG_DWORD(&reg->istatus)));
605 }
606 } 606 }
607 607
608 if (rval != QLA_SUCCESS) { 608 if (rval != QLA_SUCCESS) {
@@ -1363,7 +1363,13 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1363 1363
1364 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1364 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1365 1365
1366 if (IS_FWI2_CAPABLE(vha->hw)) { 1366 if (IS_QLA81XX(vha->hw)) {
1367 /* Logout across all FCFs. */
1368 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1369 mcp->mb[1] = BIT_1;
1370 mcp->mb[2] = 0;
1371 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1372 } else if (IS_FWI2_CAPABLE(vha->hw)) {
1367 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1373 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1368 mcp->mb[1] = BIT_6; 1374 mcp->mb[1] = BIT_6;
1369 mcp->mb[2] = 0; 1375 mcp->mb[2] = 0;
@@ -1853,6 +1859,9 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
1853 mbx_cmd_t mc; 1859 mbx_cmd_t mc;
1854 mbx_cmd_t *mcp = &mc; 1860 mbx_cmd_t *mcp = &mc;
1855 1861
1862 if (IS_QLA81XX(vha->hw))
1863 return QLA_SUCCESS;
1864
1856 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1865 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1857 vha->host_no)); 1866 vha->host_no));
1858 1867
@@ -2512,7 +2521,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2512 mbx_cmd_t mc; 2521 mbx_cmd_t mc;
2513 mbx_cmd_t *mcp = &mc; 2522 mbx_cmd_t *mcp = &mc;
2514 2523
2515 if (!IS_QLA25XX(vha->hw)) 2524 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2516 return QLA_FUNCTION_FAILED; 2525 return QLA_FUNCTION_FAILED;
2517 2526
2518 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2527 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
@@ -3155,7 +3164,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp,
3155 mcp->mb[7] = LSW(MSD(rsp->dma)); 3164 mcp->mb[7] = LSW(MSD(rsp->dma));
3156 mcp->mb[5] = rsp->length; 3165 mcp->mb[5] = rsp->length;
3157 mcp->mb[11] = rsp->vp_idx; 3166 mcp->mb[11] = rsp->vp_idx;
3158 mcp->mb[14] = rsp->msix->vector; 3167 mcp->mb[14] = rsp->msix->entry;
3159 mcp->mb[13] = rsp->rid; 3168 mcp->mb[13] = rsp->rid;
3160 3169
3161 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) + 3170 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
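For orientation only (this sketch is not in the patch): the qla_mbx.c hunks above extend MBC_GET_FIRMWARE_VERSION so ISP81XX parts also report the MPI firmware version and capabilities in 16-bit mailbox registers 10-13. The stand-alone C program below demonstrates just that unpacking step; struct mbx_regs and the sample register values are invented for the example and are not the driver's types.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's 16-bit mailbox register file. */
struct mbx_regs {
        uint16_t mb[16];
};

/* Unpack an MPI version carried as major/minor in mb[10] and
 * patch/build in mb[11], plus a 32-bit capability word split across
 * mb[12] (high) and mb[13] (low) -- the layout the new code reads. */
static void unpack_mpi_version(const struct mbx_regs *r,
                               uint8_t mpi[4], uint32_t *mpi_caps)
{
        mpi[0] = r->mb[10] >> 8;        /* major */
        mpi[1] = r->mb[10] & 0xff;      /* minor */
        mpi[2] = r->mb[11] >> 8;        /* patch */
        mpi[3] = r->mb[11] & 0xff;      /* build */
        *mpi_caps = ((uint32_t)r->mb[12] << 16) | r->mb[13];
}

int main(void)
{
        struct mbx_regs r = { .mb = { [10] = 0x0102, [11] = 0x0304,
                                      [12] = 0x00ab, [13] = 0xcdef } };
        uint8_t mpi[4];
        uint32_t caps;

        unpack_mpi_version(&r, mpi, &caps);
        printf("MPI %u.%02u.%02u (%u), caps=0x%08x\n",
               (unsigned)mpi[0], (unsigned)mpi[1], (unsigned)mpi[2],
               (unsigned)mpi[3], (unsigned)caps);
        return 0;
}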
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 386ffeae5b5a..886323130fcc 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -614,8 +614,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
614 req->vp_idx = vp_idx; 614 req->vp_idx = vp_idx;
615 req->qos = qos; 615 req->qos = qos;
616 616
617 if (ha->rsp_q_map[rsp_que]) 617 if (ha->rsp_q_map[rsp_que]) {
618 req->rsp = ha->rsp_q_map[rsp_que]; 618 req->rsp = ha->rsp_q_map[rsp_que];
619 req->rsp->req = req;
620 }
619 /* Use alternate PCI bus number */ 621 /* Use alternate PCI bus number */
620 if (MSB(req->rid)) 622 if (MSB(req->rid))
621 options |= BIT_4; 623 options |= BIT_4;
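As a side note, not taken from the patch: the qla_mid.c hunk adds the missing back-pointer so a newly created request queue and its response queue reference each other. A minimal sketch of that cross-linking with generic stand-in types rather than the driver's req_que/rsp_que:

#include <stdio.h>

struct rsp_que;

struct req_que {
        struct rsp_que *rsp;
};

struct rsp_que {
        struct req_que *req;
};

/* Cross-link a request queue with its response queue, mirroring the
 * back-pointer the hunk adds when rsp_q_map[rsp_que] exists. */
static void link_queue_pair(struct req_que *req, struct rsp_que *rsp)
{
        req->rsp = rsp;
        rsp->req = req;
}

int main(void)
{
        struct req_que req = { 0 };
        struct rsp_que rsp = { 0 };

        link_queue_pair(&req, &rsp);
        printf("linked both ways: %d\n", req.rsp == &rsp && rsp.req == &req);
        return 0;
}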
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8ea927788b3f..4a71f522f925 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -404,26 +404,9 @@ static char *
404qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str) 404qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
405{ 405{
406 struct qla_hw_data *ha = vha->hw; 406 struct qla_hw_data *ha = vha->hw;
407 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
408 ha->fw_minor_version,
409 ha->fw_subminor_version);
410 407
411 if (ha->fw_attributes & BIT_0) 408 sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
412 strcat(str, "[Class 2] "); 409 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
413 if (ha->fw_attributes & BIT_1)
414 strcat(str, "[IP] ");
415 if (ha->fw_attributes & BIT_2)
416 strcat(str, "[Multi-ID] ");
417 if (ha->fw_attributes & BIT_3)
418 strcat(str, "[SB-2] ");
419 if (ha->fw_attributes & BIT_4)
420 strcat(str, "[T10 CRC] ");
421 if (ha->fw_attributes & BIT_5)
422 strcat(str, "[VI] ");
423 if (ha->fw_attributes & BIT_10)
424 strcat(str, "[84XX] ");
425 if (ha->fw_attributes & BIT_13)
426 strcat(str, "[Experimental]");
427 return str; 410 return str;
428} 411}
429 412
@@ -438,7 +421,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
438 if (!sp) 421 if (!sp)
439 return sp; 422 return sp;
440 423
441 sp->vha = vha;
442 sp->fcport = fcport; 424 sp->fcport = fcport;
443 sp->cmd = cmd; 425 sp->cmd = cmd;
444 sp->que = ha->req_q_map[0]; 426 sp->que = ha->req_q_map[0];
@@ -1182,7 +1164,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1182 continue; 1164 continue;
1183 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1165 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1184 sp = req->outstanding_cmds[cnt]; 1166 sp = req->outstanding_cmds[cnt];
1185 if (sp && sp->vha == vha) { 1167 if (sp && sp->fcport->vha == vha) {
1186 req->outstanding_cmds[cnt] = NULL; 1168 req->outstanding_cmds[cnt] = NULL;
1187 sp->cmd->result = res; 1169 sp->cmd->result = res;
1188 qla2x00_sp_compl(ha, sp); 1170 qla2x00_sp_compl(ha, sp);
@@ -1329,6 +1311,8 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
1329 unsigned long flags = 0; 1311 unsigned long flags = 0;
1330 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1312 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1331 1313
1314 if (IS_NOPOLLING_TYPE(ha))
1315 return;
1332 spin_lock_irqsave(&ha->hardware_lock, flags); 1316 spin_lock_irqsave(&ha->hardware_lock, flags);
1333 ha->interrupts_on = 0; 1317 ha->interrupts_on = 0;
1334 WRT_REG_DWORD(&reg->ictrl, 0); 1318 WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1488,6 +1472,44 @@ static struct isp_operations qla25xx_isp_ops = {
1488 .rd_req_reg = qla24xx_rd_req_reg, 1472 .rd_req_reg = qla24xx_rd_req_reg,
1489}; 1473};
1490 1474
1475static struct isp_operations qla81xx_isp_ops = {
1476 .pci_config = qla25xx_pci_config,
1477 .reset_chip = qla24xx_reset_chip,
1478 .chip_diag = qla24xx_chip_diag,
1479 .config_rings = qla24xx_config_rings,
1480 .reset_adapter = qla24xx_reset_adapter,
1481 .nvram_config = qla81xx_nvram_config,
1482 .update_fw_options = qla81xx_update_fw_options,
1483 .load_risc = qla24xx_load_risc,
1484 .pci_info_str = qla24xx_pci_info_str,
1485 .fw_version_str = qla24xx_fw_version_str,
1486 .intr_handler = qla24xx_intr_handler,
1487 .enable_intrs = qla24xx_enable_intrs,
1488 .disable_intrs = qla24xx_disable_intrs,
1489 .abort_command = qla24xx_abort_command,
1490 .target_reset = qla24xx_abort_target,
1491 .lun_reset = qla24xx_lun_reset,
1492 .fabric_login = qla24xx_login_fabric,
1493 .fabric_logout = qla24xx_fabric_logout,
1494 .calc_req_entries = NULL,
1495 .build_iocbs = NULL,
1496 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1497 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1498 .read_nvram = qla25xx_read_nvram_data,
1499 .write_nvram = qla25xx_write_nvram_data,
1500 .fw_dump = qla81xx_fw_dump,
1501 .beacon_on = qla24xx_beacon_on,
1502 .beacon_off = qla24xx_beacon_off,
1503 .beacon_blink = qla24xx_beacon_blink,
1504 .read_optrom = qla25xx_read_optrom_data,
1505 .write_optrom = qla24xx_write_optrom_data,
1506 .get_flash_version = qla24xx_get_flash_version,
1507 .start_scsi = qla24xx_start_scsi,
1508 .wrt_req_reg = qla24xx_wrt_req_reg,
1509 .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
1510 .rd_req_reg = qla24xx_rd_req_reg,
1511};
1512
1491static inline void 1513static inline void
1492qla2x00_set_isp_flags(struct qla_hw_data *ha) 1514qla2x00_set_isp_flags(struct qla_hw_data *ha)
1493{ 1515{
@@ -1567,6 +1589,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1567 ha->device_type |= DT_IIDMA; 1589 ha->device_type |= DT_IIDMA;
1568 ha->fw_srisc_address = RISC_START_ADDRESS_2400; 1590 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1569 break; 1591 break;
1592 case PCI_DEVICE_ID_QLOGIC_ISP8001:
1593 ha->device_type |= DT_ISP8001;
1594 ha->device_type |= DT_ZIO_SUPPORTED;
1595 ha->device_type |= DT_FWI2;
1596 ha->device_type |= DT_IIDMA;
1597 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1598 break;
1570 } 1599 }
1571} 1600}
1572 1601
@@ -1629,7 +1658,7 @@ skip_pio:
1629 1658
1630 /* Determine queue resources */ 1659 /* Determine queue resources */
1631 ha->max_queues = 1; 1660 ha->max_queues = 1;
1632 if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha)) 1661 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1633 goto mqiobase_exit; 1662 goto mqiobase_exit;
1634 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1663 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1635 pci_resource_len(ha->pdev, 3)); 1664 pci_resource_len(ha->pdev, 3));
@@ -1706,7 +1735,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1706 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 || 1735 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
1707 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 || 1736 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
1708 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 1737 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1709 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532) { 1738 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
1739 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
1710 bars = pci_select_bars(pdev, IORESOURCE_MEM); 1740 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1711 sht = &qla24xx_driver_template; 1741 sht = &qla24xx_driver_template;
1712 mem_only = 1; 1742 mem_only = 1;
@@ -1760,6 +1790,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1760 rsp_length = RESPONSE_ENTRY_CNT_2100; 1790 rsp_length = RESPONSE_ENTRY_CNT_2100;
1761 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 1791 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1762 ha->gid_list_info_size = 4; 1792 ha->gid_list_info_size = 4;
1793 ha->flash_conf_off = ~0;
1794 ha->flash_data_off = ~0;
1795 ha->nvram_conf_off = ~0;
1796 ha->nvram_data_off = ~0;
1763 ha->isp_ops = &qla2100_isp_ops; 1797 ha->isp_ops = &qla2100_isp_ops;
1764 } else if (IS_QLA2200(ha)) { 1798 } else if (IS_QLA2200(ha)) {
1765 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1799 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1767,6 +1801,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1767 rsp_length = RESPONSE_ENTRY_CNT_2100; 1801 rsp_length = RESPONSE_ENTRY_CNT_2100;
1768 ha->max_loop_id = SNS_LAST_LOOP_ID_2100; 1802 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1769 ha->gid_list_info_size = 4; 1803 ha->gid_list_info_size = 4;
1804 ha->flash_conf_off = ~0;
1805 ha->flash_data_off = ~0;
1806 ha->nvram_conf_off = ~0;
1807 ha->nvram_data_off = ~0;
1770 ha->isp_ops = &qla2100_isp_ops; 1808 ha->isp_ops = &qla2100_isp_ops;
1771 } else if (IS_QLA23XX(ha)) { 1809 } else if (IS_QLA23XX(ha)) {
1772 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1810 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1776,6 +1814,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1776 ha->gid_list_info_size = 6; 1814 ha->gid_list_info_size = 6;
1777 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1815 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1778 ha->optrom_size = OPTROM_SIZE_2322; 1816 ha->optrom_size = OPTROM_SIZE_2322;
1817 ha->flash_conf_off = ~0;
1818 ha->flash_data_off = ~0;
1819 ha->nvram_conf_off = ~0;
1820 ha->nvram_data_off = ~0;
1779 ha->isp_ops = &qla2300_isp_ops; 1821 ha->isp_ops = &qla2300_isp_ops;
1780 } else if (IS_QLA24XX_TYPE(ha)) { 1822 } else if (IS_QLA24XX_TYPE(ha)) {
1781 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1823 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1787,6 +1829,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1787 ha->optrom_size = OPTROM_SIZE_24XX; 1829 ha->optrom_size = OPTROM_SIZE_24XX;
1788 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; 1830 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
1789 ha->isp_ops = &qla24xx_isp_ops; 1831 ha->isp_ops = &qla24xx_isp_ops;
1832 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1833 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1834 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1835 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1790 } else if (IS_QLA25XX(ha)) { 1836 } else if (IS_QLA25XX(ha)) {
1791 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1837 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1792 req_length = REQUEST_ENTRY_CNT_24XX; 1838 req_length = REQUEST_ENTRY_CNT_24XX;
@@ -1797,6 +1843,23 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1797 ha->optrom_size = OPTROM_SIZE_25XX; 1843 ha->optrom_size = OPTROM_SIZE_25XX;
1798 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; 1844 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1799 ha->isp_ops = &qla25xx_isp_ops; 1845 ha->isp_ops = &qla25xx_isp_ops;
1846 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1847 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1848 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1849 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1850 } else if (IS_QLA81XX(ha)) {
1851 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1852 req_length = REQUEST_ENTRY_CNT_24XX;
1853 rsp_length = RESPONSE_ENTRY_CNT_2300;
1854 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1855 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1856 ha->gid_list_info_size = 8;
1857 ha->optrom_size = OPTROM_SIZE_81XX;
1858 ha->isp_ops = &qla81xx_isp_ops;
1859 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1860 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
1861 ha->nvram_conf_off = ~0;
1862 ha->nvram_data_off = ~0;
1800 } 1863 }
1801 1864
1802 mutex_init(&ha->vport_lock); 1865 mutex_init(&ha->vport_lock);
@@ -2458,23 +2521,6 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2458 return qla2x00_post_work(vha, e, 1); 2521 return qla2x00_post_work(vha, e, 1);
2459} 2522}
2460 2523
2461int
2462qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
2463 uint16_t d2, uint16_t d3)
2464{
2465 struct qla_work_evt *e;
2466
2467 e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
2468 if (!e)
2469 return QLA_FUNCTION_FAILED;
2470
2471 e->u.hwe.code = code;
2472 e->u.hwe.d1 = d1;
2473 e->u.hwe.d2 = d2;
2474 e->u.hwe.d3 = d3;
2475 return qla2x00_post_work(vha, e, 1);
2476}
2477
2478static void 2524static void
2479qla2x00_do_work(struct scsi_qla_host *vha) 2525qla2x00_do_work(struct scsi_qla_host *vha)
2480{ 2526{
@@ -2492,10 +2538,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
2492 fc_host_post_event(vha->host, fc_get_event_number(), 2538 fc_host_post_event(vha->host, fc_get_event_number(),
2493 e->u.aen.code, e->u.aen.data); 2539 e->u.aen.code, e->u.aen.data);
2494 break; 2540 break;
2495 case QLA_EVT_HWE_LOG:
2496 qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
2497 e->u.hwe.d2, e->u.hwe.d3);
2498 break;
2499 } 2541 }
2500 if (e->flags & QLA_EVT_FLAG_FREE) 2542 if (e->flags & QLA_EVT_FLAG_FREE)
2501 kfree(e); 2543 kfree(e);
@@ -2914,13 +2956,14 @@ qla2x00_timer(scsi_qla_host_t *vha)
2914 2956
2915/* Firmware interface routines. */ 2957/* Firmware interface routines. */
2916 2958
2917#define FW_BLOBS 6 2959#define FW_BLOBS 7
2918#define FW_ISP21XX 0 2960#define FW_ISP21XX 0
2919#define FW_ISP22XX 1 2961#define FW_ISP22XX 1
2920#define FW_ISP2300 2 2962#define FW_ISP2300 2
2921#define FW_ISP2322 3 2963#define FW_ISP2322 3
2922#define FW_ISP24XX 4 2964#define FW_ISP24XX 4
2923#define FW_ISP25XX 5 2965#define FW_ISP25XX 5
2966#define FW_ISP81XX 6
2924 2967
2925#define FW_FILE_ISP21XX "ql2100_fw.bin" 2968#define FW_FILE_ISP21XX "ql2100_fw.bin"
2926#define FW_FILE_ISP22XX "ql2200_fw.bin" 2969#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -2928,6 +2971,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
2928#define FW_FILE_ISP2322 "ql2322_fw.bin" 2971#define FW_FILE_ISP2322 "ql2322_fw.bin"
2929#define FW_FILE_ISP24XX "ql2400_fw.bin" 2972#define FW_FILE_ISP24XX "ql2400_fw.bin"
2930#define FW_FILE_ISP25XX "ql2500_fw.bin" 2973#define FW_FILE_ISP25XX "ql2500_fw.bin"
2974#define FW_FILE_ISP81XX "ql8100_fw.bin"
2931 2975
2932static DEFINE_MUTEX(qla_fw_lock); 2976static DEFINE_MUTEX(qla_fw_lock);
2933 2977
@@ -2938,6 +2982,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2938 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, }, 2982 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
2939 { .name = FW_FILE_ISP24XX, }, 2983 { .name = FW_FILE_ISP24XX, },
2940 { .name = FW_FILE_ISP25XX, }, 2984 { .name = FW_FILE_ISP25XX, },
2985 { .name = FW_FILE_ISP81XX, },
2941}; 2986};
2942 2987
2943struct fw_blob * 2988struct fw_blob *
@@ -2959,6 +3004,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
2959 blob = &qla_fw_blobs[FW_ISP24XX]; 3004 blob = &qla_fw_blobs[FW_ISP24XX];
2960 } else if (IS_QLA25XX(ha)) { 3005 } else if (IS_QLA25XX(ha)) {
2961 blob = &qla_fw_blobs[FW_ISP25XX]; 3006 blob = &qla_fw_blobs[FW_ISP25XX];
3007 } else if (IS_QLA81XX(ha)) {
3008 blob = &qla_fw_blobs[FW_ISP81XX];
2962 } 3009 }
2963 3010
2964 mutex_lock(&qla_fw_lock); 3011 mutex_lock(&qla_fw_lock);
@@ -3112,6 +3159,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
3112 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 3159 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
3113 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 3160 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
3114 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 3161 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
3162 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
3115 { 0 }, 3163 { 0 },
3116}; 3164};
3117MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 3165MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -3200,3 +3248,4 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
3200MODULE_FIRMWARE(FW_FILE_ISP2322); 3248MODULE_FIRMWARE(FW_FILE_ISP2322);
3201MODULE_FIRMWARE(FW_FILE_ISP24XX); 3249MODULE_FIRMWARE(FW_FILE_ISP24XX);
3202MODULE_FIRMWARE(FW_FILE_ISP25XX); 3250MODULE_FIRMWARE(FW_FILE_ISP25XX);
3251MODULE_FIRMWARE(FW_FILE_ISP81XX);
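By way of illustration (nothing below appears in the patch): the qla_os.c changes add a dedicated qla81xx_isp_ops table and select it, together with ISP81XX-specific flash offsets and the ql8100_fw.bin blob, once probe recognises the ISP8001 PCI ID. The sketch shows only the general ops-table-selection pattern; the structure, the function names and the literal 0x8001 ID are placeholders for the example, not the driver's real API.

#include <stdint.h>
#include <stdio.h>

/* A per-chip operations table: probe picks one, the rest of the
 * driver calls through it without caring which ASIC is fitted. */
struct isp_ops {
        const char *name;
        int (*load_risc)(void);
        int (*fw_dump)(void);
};

static int generic_load_risc(void) { return 0; }
static int fake_25xx_fw_dump(void) { return 0; }
static int fake_81xx_fw_dump(void) { return 0; }

static const struct isp_ops ops_25xx = {
        .name = "ISP25xx", .load_risc = generic_load_risc,
        .fw_dump = fake_25xx_fw_dump,
};
static const struct isp_ops ops_81xx = {
        .name = "ISP81xx", .load_risc = generic_load_risc,
        .fw_dump = fake_81xx_fw_dump,
};

/* Probe-time selection keyed on the PCI device ID, analogous to the
 * IS_QLA81XX() branch added to qla2x00_probe_one(). */
static const struct isp_ops *select_ops(uint16_t pci_device_id)
{
        switch (pci_device_id) {
        case 0x8001: return &ops_81xx;  /* placeholder ISP8001 ID */
        default:     return &ops_25xx;
        }
}

int main(void)
{
        const struct isp_ops *ops = select_ops(0x8001);

        printf("bound %s, load_risc=%d\n", ops->name, ops->load_risc());
        return 0;
}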
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index c538ee1b1a31..303f8ee11f25 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -425,27 +425,27 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
425#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) 425#define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
426 426
427static inline uint32_t 427static inline uint32_t
428flash_conf_to_access_addr(uint32_t faddr) 428flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr)
429{ 429{
430 return FARX_ACCESS_FLASH_CONF | faddr; 430 return ha->flash_conf_off | faddr;
431} 431}
432 432
433static inline uint32_t 433static inline uint32_t
434flash_data_to_access_addr(uint32_t faddr) 434flash_data_addr(struct qla_hw_data *ha, uint32_t faddr)
435{ 435{
436 return FARX_ACCESS_FLASH_DATA | faddr; 436 return ha->flash_data_off | faddr;
437} 437}
438 438
439static inline uint32_t 439static inline uint32_t
440nvram_conf_to_access_addr(uint32_t naddr) 440nvram_conf_addr(struct qla_hw_data *ha, uint32_t naddr)
441{ 441{
442 return FARX_ACCESS_NVRAM_CONF | naddr; 442 return ha->nvram_conf_off | naddr;
443} 443}
444 444
445static inline uint32_t 445static inline uint32_t
446nvram_data_to_access_addr(uint32_t naddr) 446nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr)
447{ 447{
448 return FARX_ACCESS_NVRAM_DATA | naddr; 448 return ha->nvram_data_off | naddr;
449} 449}
450 450
451static uint32_t 451static uint32_t
@@ -481,10 +481,12 @@ qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
481 uint32_t dwords) 481 uint32_t dwords)
482{ 482{
483 uint32_t i; 483 uint32_t i;
484 struct qla_hw_data *ha = vha->hw;
485
484 /* Dword reads to flash. */ 486 /* Dword reads to flash. */
485 for (i = 0; i < dwords; i++, faddr++) 487 for (i = 0; i < dwords; i++, faddr++)
486 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw, 488 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
487 flash_data_to_access_addr(faddr))); 489 flash_data_addr(ha, faddr)));
488 490
489 return dwptr; 491 return dwptr;
490} 492}
@@ -518,7 +520,7 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
518{ 520{
519 uint32_t ids; 521 uint32_t ids;
520 522
521 ids = qla24xx_read_flash_dword(ha, flash_data_to_access_addr(0xd03ab)); 523 ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab));
522 *man_id = LSB(ids); 524 *man_id = LSB(ids);
523 *flash_id = MSB(ids); 525 *flash_id = MSB(ids);
524 526
@@ -530,8 +532,7 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
530 * Example: ATMEL 0x00 01 45 1F 532 * Example: ATMEL 0x00 01 45 1F
531 * Extract MFG and Dev ID from last two bytes. 533 * Extract MFG and Dev ID from last two bytes.
532 */ 534 */
533 ids = qla24xx_read_flash_dword(ha, 535 ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f));
534 flash_data_to_access_addr(0xd009f));
535 *man_id = LSB(ids); 536 *man_id = LSB(ids);
536 *flash_id = MSB(ids); 537 *flash_id = MSB(ids);
537 } 538 }
@@ -555,9 +556,13 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
555 556
556 /* Begin with sane defaults. */ 557 /* Begin with sane defaults. */
557 loc = locations[0]; 558 loc = locations[0];
558 *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24: 559 *start = 0;
559 FA_FLASH_LAYOUT_ADDR; 560 if (IS_QLA24XX_TYPE(ha))
560 561 *start = FA_FLASH_LAYOUT_ADDR_24;
562 else if (IS_QLA25XX(ha))
563 *start = FA_FLASH_LAYOUT_ADDR;
564 else if (IS_QLA81XX(ha))
565 *start = FA_FLASH_LAYOUT_ADDR_81;
561 /* Begin with first PCI expansion ROM header. */ 566 /* Begin with first PCI expansion ROM header. */
562 buf = (uint8_t *)req->ring; 567 buf = (uint8_t *)req->ring;
563 dcode = (uint32_t *)req->ring; 568 dcode = (uint32_t *)req->ring;
@@ -618,6 +623,22 @@ static void
618qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) 623qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
619{ 624{
620 const char *loc, *locations[] = { "DEF", "FLT" }; 625 const char *loc, *locations[] = { "DEF", "FLT" };
626 const uint32_t def_fw[] =
627 { FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 };
628 const uint32_t def_boot[] =
629 { FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR, FA_BOOT_CODE_ADDR_81 };
630 const uint32_t def_vpd_nvram[] =
631 { FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR, FA_VPD_NVRAM_ADDR_81 };
632 const uint32_t def_fdt[] =
633 { FA_FLASH_DESCR_ADDR_24, FA_FLASH_DESCR_ADDR,
634 FA_FLASH_DESCR_ADDR_81 };
635 const uint32_t def_npiv_conf0[] =
636 { FA_NPIV_CONF0_ADDR_24, FA_NPIV_CONF0_ADDR,
637 FA_NPIV_CONF0_ADDR_81 };
638 const uint32_t def_npiv_conf1[] =
639 { FA_NPIV_CONF1_ADDR_24, FA_NPIV_CONF1_ADDR,
640 FA_NPIV_CONF1_ADDR_81 };
641 uint32_t def;
621 uint16_t *wptr; 642 uint16_t *wptr;
622 uint16_t cnt, chksum; 643 uint16_t cnt, chksum;
623 uint32_t start; 644 uint32_t start;
@@ -676,20 +697,12 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
676 case FLT_REG_FDT: 697 case FLT_REG_FDT:
677 ha->flt_region_fdt = start; 698 ha->flt_region_fdt = start;
678 break; 699 break;
679 case FLT_REG_HW_EVENT_0:
680 if (!PCI_FUNC(ha->pdev->devfn))
681 ha->flt_region_hw_event = start;
682 break;
683 case FLT_REG_HW_EVENT_1:
684 if (PCI_FUNC(ha->pdev->devfn))
685 ha->flt_region_hw_event = start;
686 break;
687 case FLT_REG_NPIV_CONF_0: 700 case FLT_REG_NPIV_CONF_0:
688 if (!PCI_FUNC(ha->pdev->devfn)) 701 if (!(PCI_FUNC(ha->pdev->devfn) & 1))
689 ha->flt_region_npiv_conf = start; 702 ha->flt_region_npiv_conf = start;
690 break; 703 break;
691 case FLT_REG_NPIV_CONF_1: 704 case FLT_REG_NPIV_CONF_1:
692 if (PCI_FUNC(ha->pdev->devfn)) 705 if (PCI_FUNC(ha->pdev->devfn) & 1)
693 ha->flt_region_npiv_conf = start; 706 ha->flt_region_npiv_conf = start;
694 break; 707 break;
695 } 708 }
@@ -699,22 +712,24 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
699no_flash_data: 712no_flash_data:
700 /* Use hardcoded defaults. */ 713 /* Use hardcoded defaults. */
701 loc = locations[0]; 714 loc = locations[0];
702 ha->flt_region_fw = FA_RISC_CODE_ADDR; 715 def = 0;
703 ha->flt_region_boot = FA_BOOT_CODE_ADDR; 716 if (IS_QLA24XX_TYPE(ha))
704 ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR; 717 def = 0;
705 ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24: 718 else if (IS_QLA25XX(ha))
706 FA_FLASH_DESCR_ADDR; 719 def = 1;
707 ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ? 720 else if (IS_QLA81XX(ha))
708 FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR; 721 def = 2;
709 ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ? 722 ha->flt_region_fw = def_fw[def];
710 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR): 723 ha->flt_region_boot = def_boot[def];
711 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR); 724 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
725 ha->flt_region_fdt = def_fdt[def];
726 ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
727 def_npiv_conf0[def]: def_npiv_conf1[def];
712done: 728done:
713 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " 729 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
714 "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc, 730 "vpd_nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x.\n", loc,
715 ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram, 731 ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
716 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event, 732 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf));
717 ha->flt_region_npiv_conf));
718} 733}
719 734
720static void 735static void
@@ -757,14 +772,14 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
757 mid = le16_to_cpu(fdt->man_id); 772 mid = le16_to_cpu(fdt->man_id);
758 fid = le16_to_cpu(fdt->id); 773 fid = le16_to_cpu(fdt->id);
759 ha->fdt_wrt_disable = fdt->wrt_disable_bits; 774 ha->fdt_wrt_disable = fdt->wrt_disable_bits;
760 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); 775 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
761 ha->fdt_block_size = le32_to_cpu(fdt->block_size); 776 ha->fdt_block_size = le32_to_cpu(fdt->block_size);
762 if (fdt->unprotect_sec_cmd) { 777 if (fdt->unprotect_sec_cmd) {
763 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0300 | 778 ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
764 fdt->unprotect_sec_cmd); 779 fdt->unprotect_sec_cmd);
765 ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ? 780 ha->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
766 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): 781 flash_conf_addr(ha, 0x0300 | fdt->protect_sec_cmd):
767 flash_conf_to_access_addr(0x0336); 782 flash_conf_addr(ha, 0x0336);
768 } 783 }
769 goto done; 784 goto done;
770no_flash_data: 785no_flash_data:
@@ -773,7 +788,7 @@ no_flash_data:
773 mid = man_id; 788 mid = man_id;
774 fid = flash_id; 789 fid = flash_id;
775 ha->fdt_wrt_disable = 0x9c; 790 ha->fdt_wrt_disable = 0x9c;
776 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); 791 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x03d8);
777 switch (man_id) { 792 switch (man_id) {
778 case 0xbf: /* STT flash. */ 793 case 0xbf: /* STT flash. */
779 if (flash_id == 0x8e) 794 if (flash_id == 0x8e)
@@ -782,16 +797,16 @@ no_flash_data:
782 ha->fdt_block_size = FLASH_BLK_SIZE_32K; 797 ha->fdt_block_size = FLASH_BLK_SIZE_32K;
783 798
784 if (flash_id == 0x80) 799 if (flash_id == 0x80)
785 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0352); 800 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0352);
786 break; 801 break;
787 case 0x13: /* ST M25P80. */ 802 case 0x13: /* ST M25P80. */
788 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 803 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
789 break; 804 break;
790 case 0x1f: /* Atmel 26DF081A. */ 805 case 0x1f: /* Atmel 26DF081A. */
791 ha->fdt_block_size = FLASH_BLK_SIZE_4K; 806 ha->fdt_block_size = FLASH_BLK_SIZE_4K;
792 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320); 807 ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0320);
793 ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339); 808 ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0339);
794 ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336); 809 ha->fdt_protect_sec_cmd = flash_conf_addr(ha, 0x0336);
795 break; 810 break;
796 default: 811 default:
797 /* Default to 64 kb sector size. */ 812 /* Default to 64 kb sector size. */
@@ -813,7 +828,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
813 uint32_t flt_addr; 828 uint32_t flt_addr;
814 struct qla_hw_data *ha = vha->hw; 829 struct qla_hw_data *ha = vha->hw;
815 830
816 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 831 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
817 return QLA_SUCCESS; 832 return QLA_SUCCESS;
818 833
819 ret = qla2xxx_find_flt_start(vha, &flt_addr); 834 ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -838,7 +853,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
838 struct qla_npiv_entry *entry; 853 struct qla_npiv_entry *entry;
839 struct qla_hw_data *ha = vha->hw; 854 struct qla_hw_data *ha = vha->hw;
840 855
841 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 856 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA81XX(ha))
842 return; 857 return;
843 858
844 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 859 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -930,9 +945,9 @@ qla24xx_unprotect_flash(struct qla_hw_data *ha)
930 return; 945 return;
931 946
932 /* Disable flash write-protection. */ 947 /* Disable flash write-protection. */
933 qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0); 948 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
934 /* Some flash parts need an additional zero-write to clear bits.*/ 949 /* Some flash parts need an additional zero-write to clear bits.*/
935 qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 0); 950 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), 0);
936} 951}
937 952
938static void 953static void
@@ -945,11 +960,10 @@ qla24xx_protect_flash(struct qla_hw_data *ha)
945 goto skip_wrt_protect; 960 goto skip_wrt_protect;
946 961
947 /* Enable flash write-protection and wait for completion. */ 962 /* Enable flash write-protection and wait for completion. */
948 qla24xx_write_flash_dword(ha, flash_conf_to_access_addr(0x101), 963 qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101),
949 ha->fdt_wrt_disable); 964 ha->fdt_wrt_disable);
950 for (cnt = 300; cnt && 965 for (cnt = 300; cnt &&
951 qla24xx_read_flash_dword(ha, 966 qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0;
952 flash_conf_to_access_addr(0x005)) & BIT_0;
953 cnt--) { 967 cnt--) {
954 udelay(10); 968 udelay(10);
955 } 969 }
@@ -977,7 +991,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
977 ret = QLA_SUCCESS; 991 ret = QLA_SUCCESS;
978 992
979 /* Prepare burst-capable write on supported ISPs. */ 993 /* Prepare burst-capable write on supported ISPs. */
980 if (IS_QLA25XX(ha) && !(faddr & 0xfff) && 994 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) &&
981 dwords > OPTROM_BURST_DWORDS) { 995 dwords > OPTROM_BURST_DWORDS) {
982 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 996 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
983 &optrom_dma, GFP_KERNEL); 997 &optrom_dma, GFP_KERNEL);
@@ -989,7 +1003,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
989 } 1003 }
990 1004
991 rest_addr = (ha->fdt_block_size >> 2) - 1; 1005 rest_addr = (ha->fdt_block_size >> 2) - 1;
992 sec_mask = 0x80000 - (ha->fdt_block_size >> 2); 1006 sec_mask = (ha->optrom_size >> 2) - (ha->fdt_block_size >> 2);
993 1007
994 qla24xx_unprotect_flash(ha); 1008 qla24xx_unprotect_flash(ha);
995 1009
@@ -1024,13 +1038,13 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1024 *s = cpu_to_le32(*d); 1038 *s = cpu_to_le32(*d);
1025 1039
1026 ret = qla2x00_load_ram(vha, optrom_dma, 1040 ret = qla2x00_load_ram(vha, optrom_dma,
1027 flash_data_to_access_addr(faddr), 1041 flash_data_addr(ha, faddr),
1028 OPTROM_BURST_DWORDS); 1042 OPTROM_BURST_DWORDS);
1029 if (ret != QLA_SUCCESS) { 1043 if (ret != QLA_SUCCESS) {
1030 qla_printk(KERN_WARNING, ha, 1044 qla_printk(KERN_WARNING, ha,
1031 "Unable to burst-write optrom segment " 1045 "Unable to burst-write optrom segment "
1032 "(%x/%x/%llx).\n", ret, 1046 "(%x/%x/%llx).\n", ret,
1033 flash_data_to_access_addr(faddr), 1047 flash_data_addr(ha, faddr),
1034 (unsigned long long)optrom_dma); 1048 (unsigned long long)optrom_dma);
1035 qla_printk(KERN_WARNING, ha, 1049 qla_printk(KERN_WARNING, ha,
1036 "Reverting to slow-write.\n"); 1050 "Reverting to slow-write.\n");
@@ -1047,7 +1061,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1047 } 1061 }
1048 1062
1049 ret = qla24xx_write_flash_dword(ha, 1063 ret = qla24xx_write_flash_dword(ha,
1050 flash_data_to_access_addr(faddr), cpu_to_le32(*dwptr)); 1064 flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
1051 if (ret != QLA_SUCCESS) { 1065 if (ret != QLA_SUCCESS) {
1052 DEBUG9(printk("%s(%ld) Unable to program flash " 1066 DEBUG9(printk("%s(%ld) Unable to program flash "
1053 "address=%x data=%x.\n", __func__, 1067 "address=%x data=%x.\n", __func__,
@@ -1098,12 +1112,13 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1098{ 1112{
1099 uint32_t i; 1113 uint32_t i;
1100 uint32_t *dwptr; 1114 uint32_t *dwptr;
1115 struct qla_hw_data *ha = vha->hw;
1101 1116
1102 /* Dword reads to flash. */ 1117 /* Dword reads to flash. */
1103 dwptr = (uint32_t *)buf; 1118 dwptr = (uint32_t *)buf;
1104 for (i = 0; i < bytes >> 2; i++, naddr++) 1119 for (i = 0; i < bytes >> 2; i++, naddr++)
1105 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw, 1120 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
1106 nvram_data_to_access_addr(naddr))); 1121 nvram_data_addr(ha, naddr)));
1107 1122
1108 return buf; 1123 return buf;
1109} 1124}
@@ -1160,17 +1175,14 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1160 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ 1175 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
1161 1176
1162 /* Disable NVRAM write-protection. */ 1177 /* Disable NVRAM write-protection. */
1163 qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101), 1178 qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
1164 0); 1179 qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
1165 qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101),
1166 0);
1167 1180
1168 /* Dword writes to flash. */ 1181 /* Dword writes to flash. */
1169 dwptr = (uint32_t *)buf; 1182 dwptr = (uint32_t *)buf;
1170 for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) { 1183 for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) {
1171 ret = qla24xx_write_flash_dword(ha, 1184 ret = qla24xx_write_flash_dword(ha,
1172 nvram_data_to_access_addr(naddr), 1185 nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
1173 cpu_to_le32(*dwptr));
1174 if (ret != QLA_SUCCESS) { 1186 if (ret != QLA_SUCCESS) {
1175 DEBUG9(qla_printk("Unable to program nvram address=%x " 1187 DEBUG9(qla_printk("Unable to program nvram address=%x "
1176 "data=%x.\n", naddr, *dwptr)); 1188 "data=%x.\n", naddr, *dwptr));
@@ -1179,8 +1191,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1179 } 1191 }
1180 1192
1181 /* Enable NVRAM write-protection. */ 1193 /* Enable NVRAM write-protection. */
1182 qla24xx_write_flash_dword(ha, nvram_conf_to_access_addr(0x101), 1194 qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
1183 0x8c);
1184 1195
1185 /* Disable flash write. */ 1196 /* Disable flash write. */
1186 WRT_REG_DWORD(&reg->ctrl_status, 1197 WRT_REG_DWORD(&reg->ctrl_status,
@@ -1202,8 +1213,7 @@ qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1202 dwptr = (uint32_t *)buf; 1213 dwptr = (uint32_t *)buf;
1203 for (i = 0; i < bytes >> 2; i++, naddr++) 1214 for (i = 0; i < bytes >> 2; i++, naddr++)
1204 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1215 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
1205 flash_data_to_access_addr(ha->flt_region_vpd_nvram | 1216 flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr)));
1206 naddr)));
1207 1217
1208 return buf; 1218 return buf;
1209} 1219}
@@ -2246,12 +2256,12 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2246 burst = left; 2256 burst = left;
2247 2257
2248 rval = qla2x00_dump_ram(vha, optrom_dma, 2258 rval = qla2x00_dump_ram(vha, optrom_dma,
2249 flash_data_to_access_addr(faddr), burst); 2259 flash_data_addr(ha, faddr), burst);
2250 if (rval) { 2260 if (rval) {
2251 qla_printk(KERN_WARNING, ha, 2261 qla_printk(KERN_WARNING, ha,
2252 "Unable to burst-read optrom segment " 2262 "Unable to burst-read optrom segment "
2253 "(%x/%x/%llx).\n", rval, 2263 "(%x/%x/%llx).\n", rval,
2254 flash_data_to_access_addr(faddr), 2264 flash_data_addr(ha, faddr),
2255 (unsigned long long)optrom_dma); 2265 (unsigned long long)optrom_dma);
2256 qla_printk(KERN_WARNING, ha, 2266 qla_printk(KERN_WARNING, ha,
2257 "Reverting to slow-read.\n"); 2267 "Reverting to slow-read.\n");
@@ -2648,108 +2658,3 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
2648 2658
2649 return 0; 2659 return 0;
2650} 2660}
2651
2652static int
2653qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
2654{
2655 uint32_t d[2], faddr;
2656 struct qla_hw_data *ha = vha->hw;
2657
2658 /* Locate first empty entry. */
2659 for (;;) {
2660 if (ha->hw_event_ptr >=
2661 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2662 DEBUG2(qla_printk(KERN_WARNING, ha,
2663 "HW event -- Log Full!\n"));
2664 return QLA_MEMORY_ALLOC_FAILED;
2665 }
2666
2667 qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
2668 faddr = flash_data_to_access_addr(ha->hw_event_ptr);
2669 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2670 if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
2671 d[1] == __constant_cpu_to_le32(0xffffffff)) {
2672 qla24xx_unprotect_flash(ha);
2673
2674 qla24xx_write_flash_dword(ha, faddr++,
2675 cpu_to_le32(jiffies));
2676 qla24xx_write_flash_dword(ha, faddr++, 0);
2677 qla24xx_write_flash_dword(ha, faddr++, *fdata++);
2678 qla24xx_write_flash_dword(ha, faddr++, *fdata);
2679
2680 qla24xx_protect_flash(ha);
2681 break;
2682 }
2683 }
2684 return QLA_SUCCESS;
2685}
2686
2687int
2688qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
2689 uint16_t d2, uint16_t d3)
2690{
2691#define QMARK(a, b, c, d) \
2692 cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
2693 struct qla_hw_data *ha = vha->hw;
2694 int rval;
2695 uint32_t marker[2], fdata[4];
2696
2697 if (ha->flt_region_hw_event == 0)
2698 return QLA_FUNCTION_FAILED;
2699
2700 DEBUG2(qla_printk(KERN_WARNING, ha,
2701 "HW event -- code=%x, d1=%x, d2=%x, d3=%x.\n", code, d1, d2, d3));
2702
2703 /* If marker not already found, locate or write. */
2704 if (!ha->flags.hw_event_marker_found) {
2705 /* Create marker. */
2706 marker[0] = QMARK('L', ha->fw_major_version,
2707 ha->fw_minor_version, ha->fw_subminor_version);
2708 marker[1] = QMARK(QLA_DRIVER_MAJOR_VER, QLA_DRIVER_MINOR_VER,
2709 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
2710
2711 /* Locate marker. */
2712 ha->hw_event_ptr = ha->flt_region_hw_event;
2713 for (;;) {
2714 qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
2715 4);
2716 if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
2717 fdata[1] == __constant_cpu_to_le32(0xffffffff))
2718 break;
2719 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2720 if (ha->hw_event_ptr >=
2721 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2722 DEBUG2(qla_printk(KERN_WARNING, ha,
2723 "HW event -- Log Full!\n"));
2724 return QLA_MEMORY_ALLOC_FAILED;
2725 }
2726 if (fdata[2] == marker[0] && fdata[3] == marker[1]) {
2727 ha->flags.hw_event_marker_found = 1;
2728 break;
2729 }
2730 }
2731 /* No marker, write it. */
2732 if (!ha->flags.hw_event_marker_found) {
2733 rval = qla2xxx_hw_event_store(vha, marker);
2734 if (rval != QLA_SUCCESS) {
2735 DEBUG2(qla_printk(KERN_WARNING, ha,
2736 "HW event -- Failed marker write=%x.!\n",
2737 rval));
2738 return rval;
2739 }
2740 ha->flags.hw_event_marker_found = 1;
2741 }
2742 }
2743
2744 /* Store error. */
2745 fdata[0] = cpu_to_le32(code << 16 | d1);
2746 fdata[1] = cpu_to_le32(d2 << 16 | d3);
2747 rval = qla2xxx_hw_event_store(vha, fdata);
2748 if (rval != QLA_SUCCESS) {
2749 DEBUG2(qla_printk(KERN_WARNING, ha,
2750 "HW event -- Failed error write=%x.!\n",
2751 rval));
2752 }
2753
2754 return rval;
2755}
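A brief aside rather than part of the change: the qla_sup.c conversion swaps the fixed FARX_ACCESS_* constants for per-adapter window offsets kept in qla_hw_data, so ISP81XX can address a different flash/NVRAM layout through the same helpers, and the now-unused hardware-event-log code goes away. A compact userspace model of that parameterization, with simplified structure names and made-up window bases:

#include <stdint.h>
#include <stdio.h>

/* Per-adapter register-window bases, filled in at probe time. */
struct hw_data {
        uint32_t flash_conf_off;
        uint32_t flash_data_off;
};

/* Same idea as the reworked flash_conf_addr()/flash_data_addr():
 * OR the per-chip window base into the flash-relative address. */
static inline uint32_t flash_conf_addr(const struct hw_data *ha,
                                       uint32_t faddr)
{
        return ha->flash_conf_off | faddr;
}

static inline uint32_t flash_data_addr(const struct hw_data *ha,
                                       uint32_t faddr)
{
        return ha->flash_data_off | faddr;
}

int main(void)
{
        /* Example window bases only; the real values come from the
         * FARX_ACCESS_* constants chosen during probe. */
        struct hw_data ha = {
                .flash_conf_off = 0x7ffd0000,
                .flash_data_off = 0x7ff00000,
        };

        printf("conf 0x%08x data 0x%08x\n",
               (unsigned)flash_conf_addr(&ha, 0x0101),
               (unsigned)flash_data_addr(&ha, 0x2000));
        return 0;
}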
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index be22f3a09f8d..808bab6ef06b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.03-k1" 10#define QLA2XXX_VERSION "8.03.00-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 3 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 913a931176ef..8e5c169b03fb 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -237,8 +237,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev,
237 rc->dev.parent = get_device(component_dev); 237 rc->dev.parent = get_device(component_dev);
238 rc->num = rd->component_count++; 238 rc->num = rd->component_count++;
239 239
240 snprintf(rc->dev.bus_id, sizeof(rc->dev.bus_id), 240 dev_set_name(&rc->dev, "component-%d", rc->num);
241 "component-%d", rc->num);
242 list_add_tail(&rc->node, &rd->component_list); 241 list_add_tail(&rc->node, &rd->component_list);
243 rc->dev.class = &raid_class.class; 242 rc->dev.class = &raid_class.class;
244 err = device_add(&rc->dev); 243 err = device_add(&rc->dev);
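One more illustrative sketch, not from the patch: raid_class.c above, and scsi_debug.c, scsi_scan.c and scsi_sysfs.c further down, switch from sprintf()/snprintf() into the fixed-size dev->bus_id buffer to dev_set_name(), which formats and stores the name for the driver core. The userspace analogue below only shows why a measured, allocated copy beats a fixed 20-byte buffer; fake_dev_set_name() is invented for the example.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in: instead of formatting into a fixed buffer,
 * measure the result, allocate to fit, then format for real. */
struct fake_device {
        char *name;
};

static int fake_dev_set_name(struct fake_device *dev, const char *fmt, ...)
{
        va_list ap;
        int len;

        va_start(ap, fmt);
        len = vsnprintf(NULL, 0, fmt, ap);      /* measure */
        va_end(ap);
        if (len < 0)
                return -1;

        free(dev->name);
        dev->name = malloc((size_t)len + 1);
        if (!dev->name)
                return -1;

        va_start(ap, fmt);
        vsnprintf(dev->name, (size_t)len + 1, fmt, ap);
        va_end(ap);
        return 0;
}

int main(void)
{
        struct fake_device dev = { NULL };

        /* Mirrors dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d", ...) */
        if (fake_dev_set_name(&dev, "%d:%d:%d:%d", 0, 0, 3, 12) == 0)
                printf("device name: %s\n", dev.name);
        free(dev.name);
        return 0;
}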
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index f8b79d401d58..42e72a2c1f98 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -651,10 +651,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
651 unsigned long timeout; 651 unsigned long timeout;
652 int rtn = 0; 652 int rtn = 0;
653 653
654 /*
655 * We will use a queued command if possible, otherwise we will
656 * emulate the queuing and calling of completion function ourselves.
657 */
658 atomic_inc(&cmd->device->iorequest_cnt); 654 atomic_inc(&cmd->device->iorequest_cnt);
659 655
660 /* check if the device is still usable */ 656 /* check if the device is still usable */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 27c633f55794..6eebd0bbe8a8 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2508,7 +2508,7 @@ static void pseudo_0_release(struct device *dev)
2508} 2508}
2509 2509
2510static struct device pseudo_primary = { 2510static struct device pseudo_primary = {
2511 .bus_id = "pseudo_0", 2511 .init_name = "pseudo_0",
2512 .release = pseudo_0_release, 2512 .release = pseudo_0_release,
2513}; 2513};
2514 2514
@@ -2680,7 +2680,7 @@ static int sdebug_add_adapter(void)
2680 sdbg_host->dev.bus = &pseudo_lld_bus; 2680 sdbg_host->dev.bus = &pseudo_lld_bus;
2681 sdbg_host->dev.parent = &pseudo_primary; 2681 sdbg_host->dev.parent = &pseudo_primary;
2682 sdbg_host->dev.release = &sdebug_release_adapter; 2682 sdbg_host->dev.release = &sdebug_release_adapter;
2683 sprintf(sdbg_host->dev.bus_id, "adapter%d", scsi_debug_add_host); 2683 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
2684 2684
2685 error = device_register(&sdbg_host->dev); 2685 error = device_register(&sdbg_host->dev);
2686 2686
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d86ebea9350a..ad6a1370761e 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -124,34 +124,22 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
124enum blk_eh_timer_return scsi_times_out(struct request *req) 124enum blk_eh_timer_return scsi_times_out(struct request *req)
125{ 125{
126 struct scsi_cmnd *scmd = req->special; 126 struct scsi_cmnd *scmd = req->special;
127 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
128 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED; 127 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
129 128
130 scsi_log_completion(scmd, TIMEOUT_ERROR); 129 scsi_log_completion(scmd, TIMEOUT_ERROR);
131 130
132 if (scmd->device->host->transportt->eh_timed_out) 131 if (scmd->device->host->transportt->eh_timed_out)
133 eh_timed_out = scmd->device->host->transportt->eh_timed_out; 132 rtn = scmd->device->host->transportt->eh_timed_out(scmd);
134 else if (scmd->device->host->hostt->eh_timed_out) 133 else if (scmd->device->host->hostt->eh_timed_out)
135 eh_timed_out = scmd->device->host->hostt->eh_timed_out; 134 rtn = scmd->device->host->hostt->eh_timed_out(scmd);
136 else
137 eh_timed_out = NULL;
138 135
139 if (eh_timed_out) { 136 if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
140 rtn = eh_timed_out(scmd); 137 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
141 switch (rtn) {
142 case BLK_EH_NOT_HANDLED:
143 break;
144 default:
145 return rtn;
146 }
147 }
148
149 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
150 scmd->result |= DID_TIME_OUT << 16; 138 scmd->result |= DID_TIME_OUT << 16;
151 return BLK_EH_HANDLED; 139 rtn = BLK_EH_HANDLED;
152 } 140 }
153 141
154 return BLK_EH_NOT_HANDLED; 142 return rtn;
155} 143}
156 144
157/** 145/**
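To spell out the scsi_error.c change (the code below is a paraphrase, not the kernel source): the hunk calls the transport or host eh_timed_out handler directly and folds the fallback into one conditional -- if neither handled the timeout and the command cannot be queued to the error handler, it is failed as DID_TIME_OUT. A simplified restatement with placeholder types:

#include <stdio.h>

enum eh_timer_return { EH_NOT_HANDLED, EH_HANDLED, EH_RESET_TIMER };

/* Simplified shape of the reworked scsi_times_out(): prefer the
 * transport handler, then the host handler, then the default path. */
struct fake_host {
        enum eh_timer_return (*transport_eh)(void *cmd);
        enum eh_timer_return (*host_eh)(void *cmd);
};

static enum eh_timer_return times_out(struct fake_host *h, void *cmd,
                                      int can_queue_to_eh, int *timed_out)
{
        enum eh_timer_return rtn = EH_NOT_HANDLED;

        if (h->transport_eh)
                rtn = h->transport_eh(cmd);
        else if (h->host_eh)
                rtn = h->host_eh(cmd);

        /* Unhandled and the error handler could not take the command:
         * fail it ourselves (DID_TIME_OUT in the kernel). */
        if (rtn == EH_NOT_HANDLED && !can_queue_to_eh) {
                *timed_out = 1;
                rtn = EH_HANDLED;
        }
        return rtn;
}

static enum eh_timer_return dummy_transport_eh(void *cmd)
{
        (void)cmd;
        return EH_RESET_TIMER;          /* e.g. give the command more time */
}

int main(void)
{
        struct fake_host h = { .transport_eh = dummy_transport_eh };
        int timed_out = 0;

        printf("rtn=%d timed_out=%d\n",
               times_out(&h, NULL, 1, &timed_out), timed_out);
        return 0;
}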
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 2ae4f8fc5831..b98f763931c5 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -167,10 +167,17 @@ EXPORT_SYMBOL(scsi_set_medium_removal);
167static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) 167static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
168{ 168{
169 struct device *dev = scsi_get_device(sdev->host); 169 struct device *dev = scsi_get_device(sdev->host);
170 const char *name;
170 171
171 if (!dev) 172 if (!dev)
172 return -ENXIO; 173 return -ENXIO;
173 return copy_to_user(arg, dev->bus_id, sizeof(dev->bus_id))? -EFAULT: 0; 174
175 name = dev_name(dev);
176
177 /* compatibility with old ioctl which only returned
178 * 20 characters */
179 return copy_to_user(arg, name, min(strlen(name), (size_t)20))
180 ? -EFAULT: 0;
174} 181}
175 182
176 183
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f2f51e0333eb..940dc32ff0dc 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -91,26 +91,19 @@ static void scsi_unprep_request(struct request *req)
91 scsi_put_command(cmd); 91 scsi_put_command(cmd);
92} 92}
93 93
94/* 94/**
95 * Function: scsi_queue_insert() 95 * __scsi_queue_insert - private queue insertion
96 * 96 * @cmd: The SCSI command being requeued
97 * Purpose: Insert a command in the midlevel queue. 97 * @reason: The reason for the requeue
98 * 98 * @unbusy: Whether the queue should be unbusied
99 * Arguments: cmd - command that we are adding to queue. 99 *
100 * reason - why we are inserting command to queue. 100 * This is a private queue insertion. The public interface
101 * 101 * scsi_queue_insert() always assumes the queue should be unbusied
102 * Lock status: Assumed that lock is not held upon entry. 102 * because it's always called before the completion. This function is
103 * 103 * for a requeue after completion, which should only occur in this
104 * Returns: Nothing. 104 * file.
105 *
106 * Notes: We do this for one of two cases. Either the host is busy
107 * and it cannot accept any more commands for the time being,
108 * or the device returned QUEUE_FULL and can accept no more
109 * commands.
110 * Notes: This could be called either from an interrupt context or a
111 * normal process context.
112 */ 105 */
113int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 106static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
114{ 107{
115 struct Scsi_Host *host = cmd->device->host; 108 struct Scsi_Host *host = cmd->device->host;
116 struct scsi_device *device = cmd->device; 109 struct scsi_device *device = cmd->device;
@@ -150,7 +143,8 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
150 * Decrement the counters, since these commands are no longer 143 * Decrement the counters, since these commands are no longer
151 * active on the host/device. 144 * active on the host/device.
152 */ 145 */
153 scsi_device_unbusy(device); 146 if (unbusy)
147 scsi_device_unbusy(device);
154 148
155 /* 149 /*
156 * Requeue this command. It will go before all other commands 150 * Requeue this command. It will go before all other commands
@@ -172,6 +166,29 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
172 return 0; 166 return 0;
173} 167}
174 168
169/*
170 * Function: scsi_queue_insert()
171 *
172 * Purpose: Insert a command in the midlevel queue.
173 *
174 * Arguments: cmd - command that we are adding to queue.
175 * reason - why we are inserting command to queue.
176 *
177 * Lock status: Assumed that lock is not held upon entry.
178 *
179 * Returns: Nothing.
180 *
181 * Notes: We do this for one of two cases. Either the host is busy
182 * and it cannot accept any more commands for the time being,
183 * or the device returned QUEUE_FULL and can accept no more
184 * commands.
185 * Notes: This could be called either from an interrupt context or a
186 * normal process context.
187 */
188int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
189{
190 return __scsi_queue_insert(cmd, reason, 1);
191}
175/** 192/**
176 * scsi_execute - insert request and wait for the result 193 * scsi_execute - insert request and wait for the result
177 * @sdev: scsi device 194 * @sdev: scsi device
@@ -684,6 +701,8 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
684 scsi_run_queue(sdev->request_queue); 701 scsi_run_queue(sdev->request_queue);
685} 702}
686 703
704static void __scsi_release_buffers(struct scsi_cmnd *, int);
705
687/* 706/*
688 * Function: scsi_end_request() 707 * Function: scsi_end_request()
689 * 708 *
@@ -732,6 +751,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
732 * leftovers in the front of the 751 * leftovers in the front of the
733 * queue, and goose the queue again. 752 * queue, and goose the queue again.
734 */ 753 */
754 scsi_release_buffers(cmd);
735 scsi_requeue_command(q, cmd); 755 scsi_requeue_command(q, cmd);
736 cmd = NULL; 756 cmd = NULL;
737 } 757 }
@@ -743,6 +763,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
743 * This will goose the queue request function at the end, so we don't 763 * This will goose the queue request function at the end, so we don't
744 * need to worry about launching another command. 764 * need to worry about launching another command.
745 */ 765 */
766 __scsi_release_buffers(cmd, 0);
746 scsi_next_command(cmd); 767 scsi_next_command(cmd);
747 return NULL; 768 return NULL;
748} 769}
@@ -798,6 +819,26 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
798 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); 819 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
799} 820}
800 821
822static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
823{
824
825 if (cmd->sdb.table.nents)
826 scsi_free_sgtable(&cmd->sdb);
827
828 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
829
830 if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
831 struct scsi_data_buffer *bidi_sdb =
832 cmd->request->next_rq->special;
833 scsi_free_sgtable(bidi_sdb);
834 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
835 cmd->request->next_rq->special = NULL;
836 }
837
838 if (scsi_prot_sg_count(cmd))
839 scsi_free_sgtable(cmd->prot_sdb);
840}
841
801/* 842/*
802 * Function: scsi_release_buffers() 843 * Function: scsi_release_buffers()
803 * 844 *
@@ -817,21 +858,7 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
817 */ 858 */
818void scsi_release_buffers(struct scsi_cmnd *cmd) 859void scsi_release_buffers(struct scsi_cmnd *cmd)
819{ 860{
820 if (cmd->sdb.table.nents) 861 __scsi_release_buffers(cmd, 1);
821 scsi_free_sgtable(&cmd->sdb);
822
823 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
824
825 if (scsi_bidi_cmnd(cmd)) {
826 struct scsi_data_buffer *bidi_sdb =
827 cmd->request->next_rq->special;
828 scsi_free_sgtable(bidi_sdb);
829 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
830 cmd->request->next_rq->special = NULL;
831 }
832
833 if (scsi_prot_sg_count(cmd))
834 scsi_free_sgtable(cmd->prot_sdb);
835} 862}
836EXPORT_SYMBOL(scsi_release_buffers); 863EXPORT_SYMBOL(scsi_release_buffers);
837 864
@@ -945,7 +972,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
945 } 972 }
946 973
947 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 974 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
948 scsi_release_buffers(cmd);
949 975
950 /* 976 /*
951 * Next deal with any sectors which we were able to correctly 977 * Next deal with any sectors which we were able to correctly
@@ -963,6 +989,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
963 return; 989 return;
964 this_count = blk_rq_bytes(req); 990 this_count = blk_rq_bytes(req);
965 991
992 error = -EIO;
993
966 if (host_byte(result) == DID_RESET) { 994 if (host_byte(result) == DID_RESET) {
967 /* Third party bus reset or reset for error recovery 995 /* Third party bus reset or reset for error recovery
968 * reasons. Just retry the command and see what 996 * reasons. Just retry the command and see what
@@ -1004,13 +1032,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1004 /* This will issue a new 6-byte command. */ 1032 /* This will issue a new 6-byte command. */
1005 cmd->device->use_10_for_rw = 0; 1033 cmd->device->use_10_for_rw = 0;
1006 action = ACTION_REPREP; 1034 action = ACTION_REPREP;
1035 } else if (sshdr.asc == 0x10) /* DIX */ {
1036 description = "Host Data Integrity Failure";
1037 action = ACTION_FAIL;
1038 error = -EILSEQ;
1007 } else 1039 } else
1008 action = ACTION_FAIL; 1040 action = ACTION_FAIL;
1009 break; 1041 break;
1010 case ABORTED_COMMAND: 1042 case ABORTED_COMMAND:
1011 if (sshdr.asc == 0x10) { /* DIF */ 1043 if (sshdr.asc == 0x10) { /* DIF */
1044 description = "Target Data Integrity Failure";
1012 action = ACTION_FAIL; 1045 action = ACTION_FAIL;
1013 description = "Data Integrity Failure"; 1046 error = -EILSEQ;
1014 } else 1047 } else
1015 action = ACTION_RETRY; 1048 action = ACTION_RETRY;
1016 break; 1049 break;
@@ -1029,6 +1062,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1029 case 0x09: /* self test in progress */ 1062 case 0x09: /* self test in progress */
1030 action = ACTION_DELAYED_RETRY; 1063 action = ACTION_DELAYED_RETRY;
1031 break; 1064 break;
1065 default:
1066 description = "Device not ready";
1067 action = ACTION_FAIL;
1068 break;
1032 } 1069 }
1033 } else { 1070 } else {
1034 description = "Device not ready"; 1071 description = "Device not ready";
@@ -1052,9 +1089,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1052 switch (action) { 1089 switch (action) {
1053 case ACTION_FAIL: 1090 case ACTION_FAIL:
1054 /* Give up and fail the remainder of the request */ 1091 /* Give up and fail the remainder of the request */
1092 scsi_release_buffers(cmd);
1055 if (!(req->cmd_flags & REQ_QUIET)) { 1093 if (!(req->cmd_flags & REQ_QUIET)) {
1056 if (description) 1094 if (description)
1057 scmd_printk(KERN_INFO, cmd, "%s", 1095 scmd_printk(KERN_INFO, cmd, "%s\n",
1058 description); 1096 description);
1059 scsi_print_result(cmd); 1097 scsi_print_result(cmd);
1060 if (driver_byte(result) & DRIVER_SENSE) 1098 if (driver_byte(result) & DRIVER_SENSE)
@@ -1067,15 +1105,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
1067 /* Unprep the request and put it back at the head of the queue. 1105 /* Unprep the request and put it back at the head of the queue.
1068 * A new command will be prepared and issued. 1106 * A new command will be prepared and issued.
1069 */ 1107 */
1108 scsi_release_buffers(cmd);
1070 scsi_requeue_command(q, cmd); 1109 scsi_requeue_command(q, cmd);
1071 break; 1110 break;
1072 case ACTION_RETRY: 1111 case ACTION_RETRY:
1073 /* Retry the same command immediately */ 1112 /* Retry the same command immediately */
1074 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); 1113 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
1075 break; 1114 break;
1076 case ACTION_DELAYED_RETRY: 1115 case ACTION_DELAYED_RETRY:
1077 /* Retry the same command after a delay */ 1116 /* Retry the same command after a delay */
1078 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 1117 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
1079 break; 1118 break;
1080 } 1119 }
1081} 1120}
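Finally, an illustrative model that is not part of the patch: the scsi_lib.c rework makes the requeue paths explicit about busy accounting -- the public scsi_queue_insert() still releases the device's busy slot, while completion-time retries go through the new private __scsi_queue_insert() with unbusy=0 so the counters are not decremented for commands that have already completed. A toy version of that split (counter and names are illustrative only):

#include <stdio.h>

struct fake_device {
        int device_busy;        /* commands currently owned by the LLD */
};

/* Private helper: requeue a command, optionally releasing its busy
 * slot -- the completion path has already given its slot back, so it
 * passes unbusy=0. */
static void __queue_insert(struct fake_device *dev, int unbusy)
{
        if (unbusy && dev->device_busy > 0)
                dev->device_busy--;
        /* ...requeue the command at the head of the queue... */
}

/* Public interface keeps the historical behaviour. */
static void queue_insert(struct fake_device *dev)
{
        __queue_insert(dev, 1);
}

int main(void)
{
        struct fake_device dev = { .device_busy = 2 };

        queue_insert(&dev);             /* pre-dispatch requeue: 2 -> 1 */
        __queue_insert(&dev, 0);        /* completion-time retry: stays 1 */
        printf("device_busy=%d\n", dev.device_busy);
        return 0;
}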
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 17914a346f71..66505bb79410 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -414,8 +414,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
414 device_initialize(dev); 414 device_initialize(dev);
415 starget->reap_ref = 1; 415 starget->reap_ref = 1;
416 dev->parent = get_device(parent); 416 dev->parent = get_device(parent);
417 sprintf(dev->bus_id, "target%d:%d:%d", 417 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
418 shost->host_no, channel, id);
419#ifndef CONFIG_SYSFS_DEPRECATED 418#ifndef CONFIG_SYSFS_DEPRECATED
420 dev->bus = &scsi_bus_type; 419 dev->bus = &scsi_bus_type;
421#endif 420#endif
@@ -1024,7 +1023,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1024 if (rescan || !scsi_device_created(sdev)) { 1023 if (rescan || !scsi_device_created(sdev)) {
1025 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1024 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
1026 "scsi scan: device exists on %s\n", 1025 "scsi scan: device exists on %s\n",
1027 sdev->sdev_gendev.bus_id)); 1026 dev_name(&sdev->sdev_gendev)));
1028 if (sdevp) 1027 if (sdevp)
1029 *sdevp = sdev; 1028 *sdevp = sdev;
1030 else 1029 else
@@ -1163,7 +1162,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
1163 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1162 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1164 1163
1165 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of" 1164 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of"
1166 "%s\n", starget->dev.bus_id)); 1165 "%s\n", dev_name(&starget->dev)));
1167 1166
1168 max_dev_lun = min(max_scsi_luns, shost->max_lun); 1167 max_dev_lun = min(max_scsi_luns, shost->max_lun);
1169 /* 1168 /*
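
The bus_id conversion in scsi_scan.c is the same pattern repeated through scsi_sysfs.c and the FC/iSCSI/SAS/SRP transport classes below: the fixed-size dev->bus_id array is going away, so names are set with dev_set_name() and read back with dev_name(). A sketch of the shape, reusing the target format string from the hunk purely as an illustration:

    #include <linux/device.h>

    /* illustrative helper, not taken from the patch */
    static void sketch_name_target(struct device *dev, int host, int channel, int id)
    {
        /* was: sprintf(dev->bus_id, "target%d:%d:%d", host, channel, id); */
        dev_set_name(dev, "target%d:%d:%d", host, channel, id);
        pr_info("scsi scan: named %s\n", dev_name(dev));
    }
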
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 93c28f30bbd7..da63802cbf9d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1079,16 +1079,14 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1079 device_initialize(&sdev->sdev_gendev); 1079 device_initialize(&sdev->sdev_gendev);
1080 sdev->sdev_gendev.bus = &scsi_bus_type; 1080 sdev->sdev_gendev.bus = &scsi_bus_type;
1081 sdev->sdev_gendev.type = &scsi_dev_type; 1081 sdev->sdev_gendev.type = &scsi_dev_type;
1082 sprintf(sdev->sdev_gendev.bus_id,"%d:%d:%d:%d", 1082 dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
1083 sdev->host->host_no, sdev->channel, sdev->id, 1083 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1084 sdev->lun); 1084
1085
1086 device_initialize(&sdev->sdev_dev); 1085 device_initialize(&sdev->sdev_dev);
1087 sdev->sdev_dev.parent = &sdev->sdev_gendev; 1086 sdev->sdev_dev.parent = &sdev->sdev_gendev;
1088 sdev->sdev_dev.class = &sdev_class; 1087 sdev->sdev_dev.class = &sdev_class;
1089 snprintf(sdev->sdev_dev.bus_id, BUS_ID_SIZE, 1088 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
1090 "%d:%d:%d:%d", sdev->host->host_no, 1089 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1091 sdev->channel, sdev->id, sdev->lun);
1092 sdev->scsi_level = starget->scsi_level; 1090 sdev->scsi_level = starget->scsi_level;
1093 transport_setup_device(&sdev->sdev_gendev); 1091 transport_setup_device(&sdev->sdev_gendev);
1094 spin_lock_irqsave(shost->host_lock, flags); 1092 spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 062304de4854..5f77417ed585 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2407,8 +2407,12 @@ fc_rport_final_delete(struct work_struct *work)
2407 /* 2407 /*
2408 * Notify the driver that the rport is now dead. The LLDD will 2408 * Notify the driver that the rport is now dead. The LLDD will
2409 * also guarantee that any communication to the rport is terminated 2409 * also guarantee that any communication to the rport is terminated
2410 *
2411 * Avoid this call if we already called it when we preserved the
2412 * rport for the binding.
2410 */ 2413 */
2411 if (i->f->dev_loss_tmo_callbk) 2414 if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
2415 (i->f->dev_loss_tmo_callbk))
2412 i->f->dev_loss_tmo_callbk(rport); 2416 i->f->dev_loss_tmo_callbk(rport);
2413 2417
2414 transport_remove_device(dev); 2418 transport_remove_device(dev);
@@ -2486,8 +2490,8 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
2486 device_initialize(dev); /* takes self reference */ 2490 device_initialize(dev); /* takes self reference */
2487 dev->parent = get_device(&shost->shost_gendev); /* parent reference */ 2491 dev->parent = get_device(&shost->shost_gendev); /* parent reference */
2488 dev->release = fc_rport_dev_release; 2492 dev->release = fc_rport_dev_release;
2489 sprintf(dev->bus_id, "rport-%d:%d-%d", 2493 dev_set_name(dev, "rport-%d:%d-%d",
2490 shost->host_no, channel, rport->number); 2494 shost->host_no, channel, rport->number);
2491 transport_setup_device(dev); 2495 transport_setup_device(dev);
2492 2496
2493 error = device_add(dev); 2497 error = device_add(dev);
@@ -2647,7 +2651,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2647 spin_lock_irqsave(shost->host_lock, flags); 2651 spin_lock_irqsave(shost->host_lock, flags);
2648 2652
2649 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT | 2653 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2650 FC_RPORT_DEVLOSS_PENDING); 2654 FC_RPORT_DEVLOSS_PENDING |
2655 FC_RPORT_DEVLOSS_CALLBK_DONE);
2651 2656
2652 /* if target, initiate a scan */ 2657 /* if target, initiate a scan */
2653 if (rport->scsi_target_id != -1) { 2658 if (rport->scsi_target_id != -1) {
@@ -2944,6 +2949,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
2944 struct fc_rport *rport = 2949 struct fc_rport *rport =
2945 container_of(work, struct fc_rport, dev_loss_work.work); 2950 container_of(work, struct fc_rport, dev_loss_work.work);
2946 struct Scsi_Host *shost = rport_to_shost(rport); 2951 struct Scsi_Host *shost = rport_to_shost(rport);
2952 struct fc_internal *i = to_fc_internal(shost->transportt);
2947 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2953 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2948 unsigned long flags; 2954 unsigned long flags;
2949 2955
@@ -3011,6 +3017,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
3011 rport->roles = FC_PORT_ROLE_UNKNOWN; 3017 rport->roles = FC_PORT_ROLE_UNKNOWN;
3012 rport->port_state = FC_PORTSTATE_NOTPRESENT; 3018 rport->port_state = FC_PORTSTATE_NOTPRESENT;
3013 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; 3019 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3020 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3014 3021
3015 /* 3022 /*
3016 * Pre-emptively kill I/O rather than waiting for the work queue 3023 * Pre-emptively kill I/O rather than waiting for the work queue
@@ -3046,8 +3053,18 @@ fc_timeout_deleted_rport(struct work_struct *work)
3046 * all attached scsi devices. 3053 * all attached scsi devices.
3047 */ 3054 */
3048 fc_queue_work(shost, &rport->stgt_delete_work); 3055 fc_queue_work(shost, &rport->stgt_delete_work);
3056
3057 /*
3058 * Notify the driver that the rport is now dead. The LLDD will
3059 * also guarantee that any communication to the rport is terminated
3060 *
3061 * Note: we set the CALLBK_DONE flag above to correspond
3062 */
3063 if (i->f->dev_loss_tmo_callbk)
3064 i->f->dev_loss_tmo_callbk(rport);
3049} 3065}
3050 3066
3067
3051/** 3068/**
3052 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target. 3069 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
3053 * @work: rport to terminate io on. 3070 * @work: rport to terminate io on.
@@ -3164,8 +3181,8 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3164 device_initialize(dev); /* takes self reference */ 3181 device_initialize(dev); /* takes self reference */
3165 dev->parent = get_device(pdev); /* takes parent reference */ 3182 dev->parent = get_device(pdev); /* takes parent reference */
3166 dev->release = fc_vport_dev_release; 3183 dev->release = fc_vport_dev_release;
3167 sprintf(dev->bus_id, "vport-%d:%d-%d", 3184 dev_set_name(dev, "vport-%d:%d-%d",
3168 shost->host_no, channel, vport->number); 3185 shost->host_no, channel, vport->number);
3169 transport_setup_device(dev); 3186 transport_setup_device(dev);
3170 3187
3171 error = device_add(dev); 3188 error = device_add(dev);
@@ -3188,19 +3205,19 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3188 */ 3205 */
3189 if (pdev != &shost->shost_gendev) { 3206 if (pdev != &shost->shost_gendev) {
3190 error = sysfs_create_link(&shost->shost_gendev.kobj, 3207 error = sysfs_create_link(&shost->shost_gendev.kobj,
3191 &dev->kobj, dev->bus_id); 3208 &dev->kobj, dev_name(dev));
3192 if (error) 3209 if (error)
3193 printk(KERN_ERR 3210 printk(KERN_ERR
3194 "%s: Cannot create vport symlinks for " 3211 "%s: Cannot create vport symlinks for "
3195 "%s, err=%d\n", 3212 "%s, err=%d\n",
3196 __func__, dev->bus_id, error); 3213 __func__, dev_name(dev), error);
3197 } 3214 }
3198 spin_lock_irqsave(shost->host_lock, flags); 3215 spin_lock_irqsave(shost->host_lock, flags);
3199 vport->flags &= ~FC_VPORT_CREATING; 3216 vport->flags &= ~FC_VPORT_CREATING;
3200 spin_unlock_irqrestore(shost->host_lock, flags); 3217 spin_unlock_irqrestore(shost->host_lock, flags);
3201 3218
3202 dev_printk(KERN_NOTICE, pdev, 3219 dev_printk(KERN_NOTICE, pdev,
3203 "%s created via shost%d channel %d\n", dev->bus_id, 3220 "%s created via shost%d channel %d\n", dev_name(dev),
3204 shost->host_no, channel); 3221 shost->host_no, channel);
3205 3222
3206 *ret_vport = vport; 3223 *ret_vport = vport;
@@ -3297,7 +3314,7 @@ fc_vport_terminate(struct fc_vport *vport)
3297 return stat; 3314 return stat;
3298 3315
3299 if (dev->parent != &shost->shost_gendev) 3316 if (dev->parent != &shost->shost_gendev)
3300 sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id); 3317 sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
3301 transport_remove_device(dev); 3318 transport_remove_device(dev);
3302 device_del(dev); 3319 device_del(dev);
3303 transport_destroy_device(dev); 3320 transport_destroy_device(dev);
@@ -3329,7 +3346,7 @@ fc_vport_sched_delete(struct work_struct *work)
3329 dev_printk(KERN_ERR, vport->dev.parent, 3346 dev_printk(KERN_ERR, vport->dev.parent,
3330 "%s: %s could not be deleted created via " 3347 "%s: %s could not be deleted created via "
3331 "shost%d channel %d - error %d\n", __func__, 3348 "shost%d channel %d - error %d\n", __func__,
3332 vport->dev.bus_id, vport->shost->host_no, 3349 dev_name(&vport->dev), vport->shost->host_no,
3333 vport->channel, stat); 3350 vport->channel, stat);
3334} 3351}
3335 3352
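
The FC transport change above is about issuing the LLDD's dev_loss_tmo callback exactly once per loss cycle: the timeout handler now makes the call itself and records it in FC_RPORT_DEVLOSS_CALLBK_DONE, and fc_rport_final_delete() skips the notification while that flag is still set. The guard reduces to the shape below (sketch; names as in the hunks above):

    static void sketch_notify_devloss(struct fc_rport *rport, struct fc_internal *i)
    {
        /* skip the callback if the timeout path already delivered it */
        if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
            i->f->dev_loss_tmo_callbk)
            i->f->dev_loss_tmo_callbk(rport);
    }

fc_remote_port_add() clearing the flag on rebinding is what re-arms the callback for the next loss.
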
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 4a803ebaf508..75c9297694cb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -187,8 +187,7 @@ iscsi_create_endpoint(int dd_size)
187 187
188 ep->id = id; 188 ep->id = id;
189 ep->dev.class = &iscsi_endpoint_class; 189 ep->dev.class = &iscsi_endpoint_class;
190 snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu", 190 dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
191 (unsigned long long) id);
192 err = device_register(&ep->dev); 191 err = device_register(&ep->dev);
193 if (err) 192 if (err)
194 goto free_ep; 193 goto free_ep;
@@ -724,8 +723,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
724 } 723 }
725 session->target_id = id; 724 session->target_id = id;
726 725
727 snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u", 726 dev_set_name(&session->dev, "session%u", session->sid);
728 session->sid);
729 err = device_add(&session->dev); 727 err = device_add(&session->dev);
730 if (err) { 728 if (err) {
731 iscsi_cls_session_printk(KERN_ERR, session, 729 iscsi_cls_session_printk(KERN_ERR, session,
@@ -898,8 +896,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
898 if (!get_device(&session->dev)) 896 if (!get_device(&session->dev))
899 goto free_conn; 897 goto free_conn;
900 898
901 snprintf(conn->dev.bus_id, BUS_ID_SIZE, "connection%d:%u", 899 dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
902 session->sid, cid);
903 conn->dev.parent = &session->dev; 900 conn->dev.parent = &session->dev;
904 conn->dev.release = iscsi_conn_release; 901 conn->dev.release = iscsi_conn_release;
905 err = device_register(&conn->dev); 902 err = device_register(&conn->dev);
@@ -1816,7 +1813,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1816 priv->t.create_work_queue = 1; 1813 priv->t.create_work_queue = 1;
1817 1814
1818 priv->dev.class = &iscsi_transport_class; 1815 priv->dev.class = &iscsi_transport_class;
1819 snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name); 1816 dev_set_name(&priv->dev, "%s", tt->name);
1820 err = device_register(&priv->dev); 1817 err = device_register(&priv->dev);
1821 if (err) 1818 if (err)
1822 goto free_priv; 1819 goto free_priv;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 366609386be1..50988cbf7b2d 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -207,7 +207,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
207 struct request_queue *q; 207 struct request_queue *q;
208 int error; 208 int error;
209 struct device *dev; 209 struct device *dev;
210 char namebuf[BUS_ID_SIZE]; 210 char namebuf[20];
211 const char *name; 211 const char *name;
212 void (*release)(struct device *); 212 void (*release)(struct device *);
213 213
@@ -219,7 +219,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
219 if (rphy) { 219 if (rphy) {
220 q = blk_init_queue(sas_non_host_smp_request, NULL); 220 q = blk_init_queue(sas_non_host_smp_request, NULL);
221 dev = &rphy->dev; 221 dev = &rphy->dev;
222 name = dev->bus_id; 222 name = dev_name(dev);
223 release = NULL; 223 release = NULL;
224 } else { 224 } else {
225 q = blk_init_queue(sas_host_smp_request, NULL); 225 q = blk_init_queue(sas_host_smp_request, NULL);
@@ -629,10 +629,10 @@ struct sas_phy *sas_phy_alloc(struct device *parent, int number)
629 INIT_LIST_HEAD(&phy->port_siblings); 629 INIT_LIST_HEAD(&phy->port_siblings);
630 if (scsi_is_sas_expander_device(parent)) { 630 if (scsi_is_sas_expander_device(parent)) {
631 struct sas_rphy *rphy = dev_to_rphy(parent); 631 struct sas_rphy *rphy = dev_to_rphy(parent);
632 sprintf(phy->dev.bus_id, "phy-%d:%d:%d", shost->host_no, 632 dev_set_name(&phy->dev, "phy-%d:%d:%d", shost->host_no,
633 rphy->scsi_target_id, number); 633 rphy->scsi_target_id, number);
634 } else 634 } else
635 sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number); 635 dev_set_name(&phy->dev, "phy-%d:%d", shost->host_no, number);
636 636
637 transport_setup_device(&phy->dev); 637 transport_setup_device(&phy->dev);
638 638
@@ -770,7 +770,7 @@ static void sas_port_create_link(struct sas_port *port,
770 int res; 770 int res;
771 771
772 res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, 772 res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj,
773 phy->dev.bus_id); 773 dev_name(&phy->dev));
774 if (res) 774 if (res)
775 goto err; 775 goto err;
776 res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port"); 776 res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port");
@@ -785,7 +785,7 @@ err:
785static void sas_port_delete_link(struct sas_port *port, 785static void sas_port_delete_link(struct sas_port *port,
786 struct sas_phy *phy) 786 struct sas_phy *phy)
787{ 787{
788 sysfs_remove_link(&port->dev.kobj, phy->dev.bus_id); 788 sysfs_remove_link(&port->dev.kobj, dev_name(&phy->dev));
789 sysfs_remove_link(&phy->dev.kobj, "port"); 789 sysfs_remove_link(&phy->dev.kobj, "port");
790} 790}
791 791
@@ -821,11 +821,11 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
821 821
822 if (scsi_is_sas_expander_device(parent)) { 822 if (scsi_is_sas_expander_device(parent)) {
823 struct sas_rphy *rphy = dev_to_rphy(parent); 823 struct sas_rphy *rphy = dev_to_rphy(parent);
824 sprintf(port->dev.bus_id, "port-%d:%d:%d", shost->host_no, 824 dev_set_name(&port->dev, "port-%d:%d:%d", shost->host_no,
825 rphy->scsi_target_id, port->port_identifier); 825 rphy->scsi_target_id, port->port_identifier);
826 } else 826 } else
827 sprintf(port->dev.bus_id, "port-%d:%d", shost->host_no, 827 dev_set_name(&port->dev, "port-%d:%d", shost->host_no,
828 port->port_identifier); 828 port->port_identifier);
829 829
830 transport_setup_device(&port->dev); 830 transport_setup_device(&port->dev);
831 831
@@ -935,7 +935,7 @@ void sas_port_delete(struct sas_port *port)
935 if (port->is_backlink) { 935 if (port->is_backlink) {
936 struct device *parent = port->dev.parent; 936 struct device *parent = port->dev.parent;
937 937
938 sysfs_remove_link(&port->dev.kobj, parent->bus_id); 938 sysfs_remove_link(&port->dev.kobj, dev_name(parent));
939 port->is_backlink = 0; 939 port->is_backlink = 0;
940 } 940 }
941 941
@@ -984,7 +984,8 @@ void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy)
984 /* If this trips, you added a phy that was already 984 /* If this trips, you added a phy that was already
985 * part of a different port */ 985 * part of a different port */
986 if (unlikely(tmp != phy)) { 986 if (unlikely(tmp != phy)) {
987 dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", phy->dev.bus_id); 987 dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n",
988 dev_name(&phy->dev));
988 BUG(); 989 BUG();
989 } 990 }
990 } else { 991 } else {
@@ -1023,7 +1024,7 @@ void sas_port_mark_backlink(struct sas_port *port)
1023 return; 1024 return;
1024 port->is_backlink = 1; 1025 port->is_backlink = 1;
1025 res = sysfs_create_link(&port->dev.kobj, &parent->kobj, 1026 res = sysfs_create_link(&port->dev.kobj, &parent->kobj,
1026 parent->bus_id); 1027 dev_name(parent));
1027 if (res) 1028 if (res)
1028 goto err; 1029 goto err;
1029 return; 1030 return;
@@ -1367,11 +1368,12 @@ struct sas_rphy *sas_end_device_alloc(struct sas_port *parent)
1367 rdev->rphy.dev.release = sas_end_device_release; 1368 rdev->rphy.dev.release = sas_end_device_release;
1368 if (scsi_is_sas_expander_device(parent->dev.parent)) { 1369 if (scsi_is_sas_expander_device(parent->dev.parent)) {
1369 struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent); 1370 struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent);
1370 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d:%d", 1371 dev_set_name(&rdev->rphy.dev, "end_device-%d:%d:%d",
1371 shost->host_no, rphy->scsi_target_id, parent->port_identifier); 1372 shost->host_no, rphy->scsi_target_id,
1373 parent->port_identifier);
1372 } else 1374 } else
1373 sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d", 1375 dev_set_name(&rdev->rphy.dev, "end_device-%d:%d",
1374 shost->host_no, parent->port_identifier); 1376 shost->host_no, parent->port_identifier);
1375 rdev->rphy.identify.device_type = SAS_END_DEVICE; 1377 rdev->rphy.identify.device_type = SAS_END_DEVICE;
1376 sas_rphy_initialize(&rdev->rphy); 1378 sas_rphy_initialize(&rdev->rphy);
1377 transport_setup_device(&rdev->rphy.dev); 1379 transport_setup_device(&rdev->rphy.dev);
@@ -1411,8 +1413,8 @@ struct sas_rphy *sas_expander_alloc(struct sas_port *parent,
1411 mutex_lock(&sas_host->lock); 1413 mutex_lock(&sas_host->lock);
1412 rdev->rphy.scsi_target_id = sas_host->next_expander_id++; 1414 rdev->rphy.scsi_target_id = sas_host->next_expander_id++;
1413 mutex_unlock(&sas_host->lock); 1415 mutex_unlock(&sas_host->lock);
1414 sprintf(rdev->rphy.dev.bus_id, "expander-%d:%d", 1416 dev_set_name(&rdev->rphy.dev, "expander-%d:%d",
1415 shost->host_no, rdev->rphy.scsi_target_id); 1417 shost->host_no, rdev->rphy.scsi_target_id);
1416 rdev->rphy.identify.device_type = type; 1418 rdev->rphy.identify.device_type = type;
1417 sas_rphy_initialize(&rdev->rphy); 1419 sas_rphy_initialize(&rdev->rphy);
1418 transport_setup_device(&rdev->rphy.dev); 1420 transport_setup_device(&rdev->rphy.dev);
@@ -1445,7 +1447,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
1445 transport_add_device(&rphy->dev); 1447 transport_add_device(&rphy->dev);
1446 transport_configure_device(&rphy->dev); 1448 transport_configure_device(&rphy->dev);
1447 if (sas_bsg_initialize(shost, rphy)) 1449 if (sas_bsg_initialize(shost, rphy))
1448 printk("fail to a bsg device %s\n", rphy->dev.bus_id); 1450 printk("fail to a bsg device %s\n", dev_name(&rphy->dev));
1449 1451
1450 1452
1451 mutex_lock(&sas_host->lock); 1453 mutex_lock(&sas_host->lock);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 8a7af951d98a..21a045e0559f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -212,7 +212,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
212 rport->roles = ids->roles; 212 rport->roles = ids->roles;
213 213
214 id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); 214 id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
215 sprintf(rport->dev.bus_id, "port-%d:%d", shost->host_no, id); 215 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
216 216
217 transport_setup_device(&rport->dev); 217 transport_setup_device(&rport->dev);
218 218
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e035c1114010..d57566b8be0a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1830,7 +1830,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1830 device_initialize(&sdkp->dev); 1830 device_initialize(&sdkp->dev);
1831 sdkp->dev.parent = &sdp->sdev_gendev; 1831 sdkp->dev.parent = &sdp->sdev_gendev;
1832 sdkp->dev.class = &sd_disk_class; 1832 sdkp->dev.class = &sd_disk_class;
1833 strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE); 1833 dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
1834 1834
1835 if (device_add(&sdkp->dev)) 1835 if (device_add(&sdkp->dev))
1836 goto out_free_index; 1836 goto out_free_index;
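
One aside on the sd.c hunk above, not part of the patch: the copied gendev name is handed to dev_set_name() as the format string itself. A defensive variant would route it through an explicit "%s" so a stray '%' in the source name can never be read as a conversion:

    /* hypothetical hardening, not what the patch does */
    dev_set_name(&sdkp->dev, "%s", dev_name(&sdp->sdev_gendev));
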
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 3ebb1f289490..184dff492797 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -142,7 +142,7 @@ static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
142static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors) 142static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
143{ 143{
144 struct sd_dif_tuple *sdt = prot; 144 struct sd_dif_tuple *sdt = prot;
145 char *tag = tag_buf; 145 u8 *tag = tag_buf;
146 unsigned int i, j; 146 unsigned int i, j;
147 147
148 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) { 148 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -154,7 +154,7 @@ static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors
154static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors) 154static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
155{ 155{
156 struct sd_dif_tuple *sdt = prot; 156 struct sd_dif_tuple *sdt = prot;
157 char *tag = tag_buf; 157 u8 *tag = tag_buf;
158 unsigned int i, j; 158 unsigned int i, j;
159 159
160 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) { 160 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -256,7 +256,7 @@ static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
256static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors) 256static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
257{ 257{
258 struct sd_dif_tuple *sdt = prot; 258 struct sd_dif_tuple *sdt = prot;
259 char *tag = tag_buf; 259 u8 *tag = tag_buf;
260 unsigned int i, j; 260 unsigned int i, j;
261 261
262 for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) { 262 for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
@@ -269,7 +269,7 @@ static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors
269static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors) 269static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
270{ 270{
271 struct sd_dif_tuple *sdt = prot; 271 struct sd_dif_tuple *sdt = prot;
272 char *tag = tag_buf; 272 u8 *tag = tag_buf;
273 unsigned int i, j; 273 unsigned int i, j;
274 274
275 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) { 275 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
@@ -374,7 +374,10 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsig
374 else 374 else
375 csum_convert = 0; 375 csum_convert = 0;
376 376
377 BUG_ON(dif && (scmd->cmnd[0] == READ_6 || scmd->cmnd[0] == WRITE_6));
378
377 switch (scmd->cmnd[0]) { 379 switch (scmd->cmnd[0]) {
380 case READ_6:
378 case READ_10: 381 case READ_10:
379 case READ_12: 382 case READ_12:
380 case READ_16: 383 case READ_16:
@@ -390,6 +393,7 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsig
390 393
391 break; 394 break;
392 395
396 case WRITE_6:
393 case WRITE_10: 397 case WRITE_10:
394 case WRITE_12: 398 case WRITE_12:
395 case WRITE_16: 399 case WRITE_16:
@@ -475,8 +479,9 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
475 479
476error: 480error:
477 kunmap_atomic(sdt, KM_USER0); 481 kunmap_atomic(sdt, KM_USER0);
478 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n", 482 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
479 __func__, virt, phys, be32_to_cpu(sdt->ref_tag)); 483 __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
484 be16_to_cpu(sdt->app_tag));
480 485
481 return -EIO; 486 return -EIO;
482} 487}
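
The char to u8 switch in the sd_dif tag helpers is the substantive fix here: on ABIs where plain char is signed (x86, for one), an application-tag byte of 0x80 or above sign-extends when promoted, and the OR that assembles the 16-bit tag then wipes out the high byte. A standalone illustration in ordinary userspace C, not kernel code:

    #include <stdio.h>

    int main(void)
    {
        char          s_hi = 0x12, s_lo = (char)0x80;  /* plain char, signed here */
        unsigned char u_hi = 0x12, u_lo = 0x80;        /* the u8 variant */

        unsigned short bad  = s_hi << 8 | s_lo;  /* s_lo sign-extends, clobbering the high byte */
        unsigned short good = u_hi << 8 | u_lo;

        printf("signed char tag: 0x%04x\n", bad);   /* prints 0xff80 */
        printf("u8 tag:          0x%04x\n", good);  /* prints 0x1280 */
        return 0;
    }

The READ_6/WRITE_6 cases and the BUG_ON are a separate tightening in the same file: 6-byte commands carry no protection fields of their own, so they are only expected when target protection (DIF) is not in effect.
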
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 7f0df29f3a64..e946e05db7f7 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -526,7 +526,7 @@ static int ses_intf_add(struct device *cdev,
526 if (!scomp) 526 if (!scomp)
527 goto err_free; 527 goto err_free;
528 528
529 edev = enclosure_register(cdev->parent, sdev->sdev_gendev.bus_id, 529 edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
530 components, &ses_enclosure_callbacks); 530 components, &ses_enclosure_callbacks);
531 if (IS_ERR(edev)) { 531 if (IS_ERR(edev)) {
532 err = PTR_ERR(edev); 532 err = PTR_ERR(edev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5103855242ae..8f0bd3f7a59f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1669,6 +1669,8 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1669 md->pages = req_schp->pages; 1669 md->pages = req_schp->pages;
1670 md->page_order = req_schp->page_order; 1670 md->page_order = req_schp->page_order;
1671 md->nr_entries = req_schp->k_use_sg; 1671 md->nr_entries = req_schp->k_use_sg;
1672 md->offset = 0;
1673 md->null_mapped = hp->dxferp ? 0 : 1;
1672 } 1674 }
1673 1675
1674 if (iov_count) 1676 if (iov_count)
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 31fe6051c799..0807b260268b 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -297,7 +297,7 @@ out:
297 return err; 297 return err;
298} 298}
299 299
300static void __exit sgiwd93_remove(struct platform_device *pdev) 300static int __exit sgiwd93_remove(struct platform_device *pdev)
301{ 301{
302 struct Scsi_Host *host = platform_get_drvdata(pdev); 302 struct Scsi_Host *host = platform_get_drvdata(pdev);
303 struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata; 303 struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata;
@@ -307,6 +307,7 @@ static void __exit sgiwd93_remove(struct platform_device *pdev)
307 free_irq(pd->irq, host); 307 free_irq(pd->irq, host);
308 dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma); 308 dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
309 scsi_host_put(host); 309 scsi_host_put(host);
310 return 0;
310} 311}
311 312
312static struct platform_driver sgiwd93_driver = { 313static struct platform_driver sgiwd93_driver = {
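
The sgiwd93 fix is a signature mismatch: platform_driver expects its .remove hook to return int, and the handler had been declared void, which the compiler flags when the function is wired into the driver structure. The expected shape is simply:

    /* minimal sketch of a platform .remove hook of that era */
    static int __exit sketch_remove(struct platform_device *pdev)
    {
        /* driver teardown goes here */
        return 0;   /* .remove reports success or failure as an int */
    }
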
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index d63d229e2323..6dc8b846c112 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -102,7 +102,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
102 struct NCR_700_Host_Parameters *hostdata = 102 struct NCR_700_Host_Parameters *hostdata =
103 kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); 103 kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
104 104
105 printk(KERN_NOTICE "sim710: %s\n", dev->bus_id); 105 printk(KERN_NOTICE "sim710: %s\n", dev_name(dev));
106 printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n", 106 printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n",
107 irq, clock, base_addr, scsi_id); 107 irq, clock, base_addr, scsi_id);
108 108
@@ -305,7 +305,7 @@ sim710_eisa_probe(struct device *dev)
305 scsi_id = ffs(val) - 1; 305 scsi_id = ffs(val) - 1;
306 306
307 if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) { 307 if(scsi_id > 7 || (val & ~(1<<scsi_id)) != 0) {
308 printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev->bus_id); 308 printk(KERN_ERR "sim710.c, EISA card %s has incorrect scsi_id, setting to 7\n", dev_name(dev));
309 scsi_id = 7; 309 scsi_id = 7;
310 } 310 }
311 } else { 311 } else {
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 2bbef4c45a0d..77f0b2cdaa94 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -78,8 +78,7 @@ static int __init snirm710_probe(struct platform_device *dev)
78 base = res->start; 78 base = res->start;
79 hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); 79 hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
80 if (!hostdata) { 80 if (!hostdata) {
81 printk(KERN_ERR "%s: Failed to allocate host data\n", 81 dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
82 dev->dev.bus_id);
83 return -ENOMEM; 82 return -ENOMEM;
84 } 83 }
85 84
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7f3f317ee6ca..c6f19ee8f2cb 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20080504"; 20static const char *verstr = "20081215";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -182,18 +182,16 @@ static struct scsi_tape **scsi_tapes = NULL;
182 182
183static int modes_defined; 183static int modes_defined;
184 184
185static struct st_buffer *new_tape_buffer(int, int, int);
186static int enlarge_buffer(struct st_buffer *, int, int); 185static int enlarge_buffer(struct st_buffer *, int, int);
187static void clear_buffer(struct st_buffer *); 186static void clear_buffer(struct st_buffer *);
188static void normalize_buffer(struct st_buffer *); 187static void normalize_buffer(struct st_buffer *);
189static int append_to_buffer(const char __user *, struct st_buffer *, int); 188static int append_to_buffer(const char __user *, struct st_buffer *, int);
190static int from_buffer(struct st_buffer *, char __user *, int); 189static int from_buffer(struct st_buffer *, char __user *, int);
191static void move_buffer_data(struct st_buffer *, int); 190static void move_buffer_data(struct st_buffer *, int);
192static void buf_to_sg(struct st_buffer *, unsigned int);
193 191
194static int sgl_map_user_pages(struct scatterlist *, const unsigned int, 192static int sgl_map_user_pages(struct st_buffer *, const unsigned int,
195 unsigned long, size_t, int); 193 unsigned long, size_t, int);
196static int sgl_unmap_user_pages(struct scatterlist *, const unsigned int, int); 194static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int);
197 195
198static int st_probe(struct device *); 196static int st_probe(struct device *);
199static int st_remove(struct device *); 197static int st_remove(struct device *);
@@ -435,22 +433,6 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
435 return (-EIO); 433 return (-EIO);
436} 434}
437 435
438
439/* Wakeup from interrupt */
440static void st_sleep_done(void *data, char *sense, int result, int resid)
441{
442 struct st_request *SRpnt = data;
443 struct scsi_tape *STp = SRpnt->stp;
444
445 memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE);
446 (STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result;
447 (STp->buffer)->cmdstat.residual = resid;
448 DEB( STp->write_pending = 0; )
449
450 if (SRpnt->waiting)
451 complete(SRpnt->waiting);
452}
453
454static struct st_request *st_allocate_request(struct scsi_tape *stp) 436static struct st_request *st_allocate_request(struct scsi_tape *stp)
455{ 437{
456 struct st_request *streq; 438 struct st_request *streq;
@@ -475,6 +457,63 @@ static void st_release_request(struct st_request *streq)
475 kfree(streq); 457 kfree(streq);
476} 458}
477 459
460static void st_scsi_execute_end(struct request *req, int uptodate)
461{
462 struct st_request *SRpnt = req->end_io_data;
463 struct scsi_tape *STp = SRpnt->stp;
464
465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
466 STp->buffer->cmdstat.residual = req->data_len;
467
468 if (SRpnt->waiting)
469 complete(SRpnt->waiting);
470
471 blk_rq_unmap_user(SRpnt->bio);
472 __blk_put_request(req->q, req);
473}
474
475static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
476 int data_direction, void *buffer, unsigned bufflen,
477 int timeout, int retries)
478{
479 struct request *req;
480 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
481 int err = 0;
482 int write = (data_direction == DMA_TO_DEVICE);
483
484 req = blk_get_request(SRpnt->stp->device->request_queue, write,
485 GFP_KERNEL);
486 if (!req)
487 return DRIVER_ERROR << 24;
488
489 req->cmd_type = REQ_TYPE_BLOCK_PC;
490 req->cmd_flags |= REQ_QUIET;
491
492 mdata->null_mapped = 1;
493
494 if (bufflen) {
495 err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
496 GFP_KERNEL);
497 if (err) {
498 blk_put_request(req);
499 return DRIVER_ERROR << 24;
500 }
501 }
502
503 SRpnt->bio = req->bio;
504 req->cmd_len = COMMAND_SIZE(cmd[0]);
505 memset(req->cmd, 0, BLK_MAX_CDB);
506 memcpy(req->cmd, cmd, req->cmd_len);
507 req->sense = SRpnt->sense;
508 req->sense_len = 0;
509 req->timeout = timeout;
510 req->retries = retries;
511 req->end_io_data = SRpnt;
512
513 blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
514 return 0;
515}
516
 478/* Do the scsi command. Waits until command performed if do_wait is true. 517/* Do the scsi command. Waits until command performed if do_wait is true.
 479 Otherwise write_behind_check() is used to check that the command 518 Otherwise write_behind_check() is used to check that the command
 480 has finished. */ 519 has finished. */
@@ -483,6 +522,8 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
483 int bytes, int direction, int timeout, int retries, int do_wait) 522 int bytes, int direction, int timeout, int retries, int do_wait)
484{ 523{
485 struct completion *waiting; 524 struct completion *waiting;
525 struct rq_map_data *mdata = &STp->buffer->map_data;
526 int ret;
486 527
487 /* if async, make sure there's no command outstanding */ 528 /* if async, make sure there's no command outstanding */
488 if (!do_wait && ((STp->buffer)->last_SRpnt)) { 529 if (!do_wait && ((STp->buffer)->last_SRpnt)) {
@@ -510,21 +551,27 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
510 init_completion(waiting); 551 init_completion(waiting);
511 SRpnt->waiting = waiting; 552 SRpnt->waiting = waiting;
512 553
513 if (!STp->buffer->do_dio) 554 if (STp->buffer->do_dio) {
514 buf_to_sg(STp->buffer, bytes); 555 mdata->nr_entries = STp->buffer->sg_segs;
556 mdata->pages = STp->buffer->mapped_pages;
557 } else {
558 mdata->nr_entries =
559 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
560 STp->buffer->map_data.pages = STp->buffer->reserved_pages;
561 STp->buffer->map_data.offset = 0;
562 }
515 563
516 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); 564 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
517 STp->buffer->cmdstat.have_sense = 0; 565 STp->buffer->cmdstat.have_sense = 0;
518 STp->buffer->syscall_result = 0; 566 STp->buffer->syscall_result = 0;
519 567
520 if (scsi_execute_async(STp->device, cmd, COMMAND_SIZE(cmd[0]), direction, 568 ret = st_scsi_execute(SRpnt, cmd, direction, NULL, bytes, timeout,
521 &((STp->buffer)->sg[0]), bytes, (STp->buffer)->sg_segs, 569 retries);
522 timeout, retries, SRpnt, st_sleep_done, GFP_KERNEL)) { 570 if (ret) {
523 /* could not allocate the buffer or request was too large */ 571 /* could not allocate the buffer or request was too large */
524 (STp->buffer)->syscall_result = (-EBUSY); 572 (STp->buffer)->syscall_result = (-EBUSY);
525 (STp->buffer)->last_SRpnt = NULL; 573 (STp->buffer)->last_SRpnt = NULL;
526 } 574 } else if (do_wait) {
527 else if (do_wait) {
528 wait_for_completion(waiting); 575 wait_for_completion(waiting);
529 SRpnt->waiting = NULL; 576 SRpnt->waiting = NULL;
530 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt); 577 (STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
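
With st_sleep_done() and scsi_execute_async() gone, every tape command is now issued through the block layer: st_do_scsi() describes the data area in buffer->map_data (either the pages pinned for direct I/O or the driver's reserved pages) and hands it to st_scsi_execute(), whose completion handler wakes the waiter and unmaps the bio. Condensed from the new helper above, error handling trimmed:

    req = blk_get_request(q, write, GFP_KERNEL);
    req->cmd_type   = REQ_TYPE_BLOCK_PC;
    req->cmd_flags |= REQ_QUIET;
    if (bufflen)
        blk_rq_map_user(q, req, mdata, NULL, bufflen, GFP_KERNEL);
    memcpy(req->cmd, cmd, COMMAND_SIZE(cmd[0]));
    req->sense       = SRpnt->sense;
    req->timeout     = timeout;
    req->retries     = retries;
    req->end_io_data = SRpnt;
    blk_execute_rq_nowait(q, NULL, req, 1, st_scsi_execute_end);
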
@@ -533,28 +580,6 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
533 return SRpnt; 580 return SRpnt;
534} 581}
535 582
536static int st_scsi_kern_execute(struct st_request *streq,
537 const unsigned char *cmd, int data_direction,
538 void *buffer, unsigned bufflen, int timeout,
539 int retries)
540{
541 struct scsi_tape *stp = streq->stp;
542 int ret, resid;
543
544 stp->buffer->cmdstat.have_sense = 0;
545 memcpy(streq->cmd, cmd, sizeof(streq->cmd));
546
547 ret = scsi_execute(stp->device, cmd, data_direction, buffer, bufflen,
548 streq->sense, timeout, retries, 0, &resid);
549 if (driver_byte(ret) & DRIVER_ERROR)
550 return -EBUSY;
551
552 stp->buffer->cmdstat.midlevel_result = streq->result = ret;
553 stp->buffer->cmdstat.residual = resid;
554 stp->buffer->syscall_result = st_chk_result(stp, streq);
555
556 return 0;
557}
558 583
559/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if 584/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
560 write has been correct but EOM early warning reached, -EIO if write ended in 585 write has been correct but EOM early warning reached, -EIO if write ended in
@@ -627,7 +652,6 @@ static int cross_eof(struct scsi_tape * STp, int forward)
627{ 652{
628 struct st_request *SRpnt; 653 struct st_request *SRpnt;
629 unsigned char cmd[MAX_COMMAND_SIZE]; 654 unsigned char cmd[MAX_COMMAND_SIZE];
630 int ret;
631 655
632 cmd[0] = SPACE; 656 cmd[0] = SPACE;
633 cmd[1] = 0x01; /* Space FileMarks */ 657 cmd[1] = 0x01; /* Space FileMarks */
@@ -641,26 +665,20 @@ static int cross_eof(struct scsi_tape * STp, int forward)
641 DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n", 665 DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
642 tape_name(STp), forward ? "forward" : "backward")); 666 tape_name(STp), forward ? "forward" : "backward"));
643 667
644 SRpnt = st_allocate_request(STp); 668 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
669 STp->device->request_queue->rq_timeout,
670 MAX_RETRIES, 1);
645 if (!SRpnt) 671 if (!SRpnt)
646 return STp->buffer->syscall_result; 672 return (STp->buffer)->syscall_result;
647
648 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
649 STp->device->request_queue->rq_timeout,
650 MAX_RETRIES);
651 if (ret)
652 goto out;
653 673
654 ret = STp->buffer->syscall_result; 674 st_release_request(SRpnt);
675 SRpnt = NULL;
655 676
656 if ((STp->buffer)->cmdstat.midlevel_result != 0) 677 if ((STp->buffer)->cmdstat.midlevel_result != 0)
657 printk(KERN_ERR "%s: Stepping over filemark %s failed.\n", 678 printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
658 tape_name(STp), forward ? "forward" : "backward"); 679 tape_name(STp), forward ? "forward" : "backward");
659 680
660out: 681 return (STp->buffer)->syscall_result;
661 st_release_request(SRpnt);
662
663 return ret;
664} 682}
665 683
666 684
@@ -881,24 +899,21 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
881 int attentions, waits, max_wait, scode; 899 int attentions, waits, max_wait, scode;
882 int retval = CHKRES_READY, new_session = 0; 900 int retval = CHKRES_READY, new_session = 0;
883 unsigned char cmd[MAX_COMMAND_SIZE]; 901 unsigned char cmd[MAX_COMMAND_SIZE];
884 struct st_request *SRpnt; 902 struct st_request *SRpnt = NULL;
885 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 903 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
886 904
887 SRpnt = st_allocate_request(STp);
888 if (!SRpnt)
889 return STp->buffer->syscall_result;
890
891 max_wait = do_wait ? ST_BLOCK_SECONDS : 0; 905 max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
892 906
893 for (attentions=waits=0; ; ) { 907 for (attentions=waits=0; ; ) {
894 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); 908 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
895 cmd[0] = TEST_UNIT_READY; 909 cmd[0] = TEST_UNIT_READY;
910 SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
911 STp->long_timeout, MAX_READY_RETRIES, 1);
896 912
897 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, 913 if (!SRpnt) {
898 STp->long_timeout, 914 retval = (STp->buffer)->syscall_result;
899 MAX_READY_RETRIES);
900 if (retval)
901 break; 915 break;
916 }
902 917
903 if (cmdstatp->have_sense) { 918 if (cmdstatp->have_sense) {
904 919
@@ -942,8 +957,8 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
942 break; 957 break;
943 } 958 }
944 959
945 st_release_request(SRpnt); 960 if (SRpnt != NULL)
946 961 st_release_request(SRpnt);
947 return retval; 962 return retval;
948} 963}
949 964
@@ -1020,24 +1035,17 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
1020 } 1035 }
1021 } 1036 }
1022 1037
1023 SRpnt = st_allocate_request(STp);
1024 if (!SRpnt) {
1025 retval = STp->buffer->syscall_result;
1026 goto err_out;
1027 }
1028
1029 if (STp->omit_blklims) 1038 if (STp->omit_blklims)
1030 STp->min_block = STp->max_block = (-1); 1039 STp->min_block = STp->max_block = (-1);
1031 else { 1040 else {
1032 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); 1041 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
1033 cmd[0] = READ_BLOCK_LIMITS; 1042 cmd[0] = READ_BLOCK_LIMITS;
1034 1043
1035 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE, 1044 SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
1036 STp->buffer->b_data, 6, 1045 STp->device->request_queue->rq_timeout,
1037 STp->device->request_queue->rq_timeout, 1046 MAX_READY_RETRIES, 1);
1038 MAX_READY_RETRIES); 1047 if (!SRpnt) {
1039 if (retval) { 1048 retval = (STp->buffer)->syscall_result;
1040 st_release_request(SRpnt);
1041 goto err_out; 1049 goto err_out;
1042 } 1050 }
1043 1051
@@ -1061,12 +1069,11 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
1061 cmd[0] = MODE_SENSE; 1069 cmd[0] = MODE_SENSE;
1062 cmd[4] = 12; 1070 cmd[4] = 12;
1063 1071
1064 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE, 1072 SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
1065 STp->buffer->b_data, 12, 1073 STp->device->request_queue->rq_timeout,
1066 STp->device->request_queue->rq_timeout, 1074 MAX_READY_RETRIES, 1);
1067 MAX_READY_RETRIES); 1075 if (!SRpnt) {
1068 if (retval) { 1076 retval = (STp->buffer)->syscall_result;
1069 st_release_request(SRpnt);
1070 goto err_out; 1077 goto err_out;
1071 } 1078 }
1072 1079
@@ -1296,17 +1303,11 @@ static int st_flush(struct file *filp, fl_owner_t id)
1296 cmd[0] = WRITE_FILEMARKS; 1303 cmd[0] = WRITE_FILEMARKS;
1297 cmd[4] = 1 + STp->two_fm; 1304 cmd[4] = 1 + STp->two_fm;
1298 1305
1299 SRpnt = st_allocate_request(STp); 1306 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
1307 STp->device->request_queue->rq_timeout,
1308 MAX_WRITE_RETRIES, 1);
1300 if (!SRpnt) { 1309 if (!SRpnt) {
1301 result = STp->buffer->syscall_result; 1310 result = (STp->buffer)->syscall_result;
1302 goto out;
1303 }
1304
1305 result = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
1306 STp->device->request_queue->rq_timeout,
1307 MAX_WRITE_RETRIES);
1308 if (result) {
1309 st_release_request(SRpnt);
1310 goto out; 1311 goto out;
1311 } 1312 }
1312 1313
@@ -1471,8 +1472,8 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
1471 1472
1472 if (i && ((unsigned long)buf & queue_dma_alignment( 1473 if (i && ((unsigned long)buf & queue_dma_alignment(
1473 STp->device->request_queue)) == 0) { 1474 STp->device->request_queue)) == 0) {
1474 i = sgl_map_user_pages(&(STbp->sg[0]), STbp->use_sg, 1475 i = sgl_map_user_pages(STbp, STbp->use_sg, (unsigned long)buf,
1475 (unsigned long)buf, count, (is_read ? READ : WRITE)); 1476 count, (is_read ? READ : WRITE));
1476 if (i > 0) { 1477 if (i > 0) {
1477 STbp->do_dio = i; 1478 STbp->do_dio = i;
1478 STbp->buffer_bytes = 0; /* can be used as transfer counter */ 1479 STbp->buffer_bytes = 0; /* can be used as transfer counter */
@@ -1480,7 +1481,6 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
1480 else 1481 else
1481 STbp->do_dio = 0; /* fall back to buffering with any error */ 1482 STbp->do_dio = 0; /* fall back to buffering with any error */
1482 STbp->sg_segs = STbp->do_dio; 1483 STbp->sg_segs = STbp->do_dio;
1483 STbp->frp_sg_current = 0;
1484 DEB( 1484 DEB(
1485 if (STbp->do_dio) { 1485 if (STbp->do_dio) {
1486 STp->nbr_dio++; 1486 STp->nbr_dio++;
@@ -1526,7 +1526,7 @@ static void release_buffering(struct scsi_tape *STp, int is_read)
1526 1526
1527 STbp = STp->buffer; 1527 STbp = STp->buffer;
1528 if (STbp->do_dio) { 1528 if (STbp->do_dio) {
1529 sgl_unmap_user_pages(&(STbp->sg[0]), STbp->do_dio, is_read); 1529 sgl_unmap_user_pages(STbp, STbp->do_dio, is_read);
1530 STbp->do_dio = 0; 1530 STbp->do_dio = 0;
1531 STbp->sg_segs = 0; 1531 STbp->sg_segs = 0;
1532 } 1532 }
@@ -2372,7 +2372,6 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2372{ 2372{
2373 unsigned char cmd[MAX_COMMAND_SIZE]; 2373 unsigned char cmd[MAX_COMMAND_SIZE];
2374 struct st_request *SRpnt; 2374 struct st_request *SRpnt;
2375 int ret;
2376 2375
2377 memset(cmd, 0, MAX_COMMAND_SIZE); 2376 memset(cmd, 0, MAX_COMMAND_SIZE);
2378 cmd[0] = MODE_SENSE; 2377 cmd[0] = MODE_SENSE;
@@ -2381,17 +2380,14 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2381 cmd[2] = page; 2380 cmd[2] = page;
2382 cmd[4] = 255; 2381 cmd[4] = 255;
2383 2382
2384 SRpnt = st_allocate_request(STp); 2383 SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_FROM_DEVICE,
2385 if (!SRpnt) 2384 STp->device->request_queue->rq_timeout, 0, 1);
2386 return STp->buffer->syscall_result; 2385 if (SRpnt == NULL)
2386 return (STp->buffer)->syscall_result;
2387 2387
2388 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
2389 STp->buffer->b_data, cmd[4],
2390 STp->device->request_queue->rq_timeout,
2391 MAX_RETRIES);
2392 st_release_request(SRpnt); 2388 st_release_request(SRpnt);
2393 2389
2394 return ret ? : STp->buffer->syscall_result; 2390 return STp->buffer->syscall_result;
2395} 2391}
2396 2392
2397 2393
@@ -2399,9 +2395,10 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2399 in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */ 2395 in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
2400static int write_mode_page(struct scsi_tape *STp, int page, int slow) 2396static int write_mode_page(struct scsi_tape *STp, int page, int slow)
2401{ 2397{
2402 int pgo, timeout, ret = 0; 2398 int pgo;
2403 unsigned char cmd[MAX_COMMAND_SIZE]; 2399 unsigned char cmd[MAX_COMMAND_SIZE];
2404 struct st_request *SRpnt; 2400 struct st_request *SRpnt;
2401 int timeout;
2405 2402
2406 memset(cmd, 0, MAX_COMMAND_SIZE); 2403 memset(cmd, 0, MAX_COMMAND_SIZE);
2407 cmd[0] = MODE_SELECT; 2404 cmd[0] = MODE_SELECT;
@@ -2415,21 +2412,16 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
2415 (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP; 2412 (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
2416 (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR; 2413 (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
2417 2414
2418 SRpnt = st_allocate_request(STp); 2415 timeout = slow ?
2419 if (!SRpnt) 2416 STp->long_timeout : STp->device->request_queue->rq_timeout;
2420 return ret; 2417 SRpnt = st_do_scsi(NULL, STp, cmd, cmd[4], DMA_TO_DEVICE,
2421 2418 timeout, 0, 1);
2422 timeout = slow ? STp->long_timeout : 2419 if (SRpnt == NULL)
2423 STp->device->request_queue->rq_timeout; 2420 return (STp->buffer)->syscall_result;
2424
2425 ret = st_scsi_kern_execute(SRpnt, cmd, DMA_TO_DEVICE,
2426 STp->buffer->b_data, cmd[4], timeout, 0);
2427 if (!ret)
2428 ret = STp->buffer->syscall_result;
2429 2421
2430 st_release_request(SRpnt); 2422 st_release_request(SRpnt);
2431 2423
2432 return ret; 2424 return STp->buffer->syscall_result;
2433} 2425}
2434 2426
2435 2427
@@ -2547,16 +2539,13 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
2547 printk(ST_DEB_MSG "%s: Loading tape.\n", name); 2539 printk(ST_DEB_MSG "%s: Loading tape.\n", name);
2548 ); 2540 );
2549 2541
2550 SRpnt = st_allocate_request(STp); 2542 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
2543 timeout, MAX_RETRIES, 1);
2551 if (!SRpnt) 2544 if (!SRpnt)
2552 return STp->buffer->syscall_result; 2545 return (STp->buffer)->syscall_result;
2553
2554 retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout,
2555 MAX_RETRIES);
2556 if (retval)
2557 goto out;
2558 2546
2559 retval = (STp->buffer)->syscall_result; 2547 retval = (STp->buffer)->syscall_result;
2548 st_release_request(SRpnt);
2560 2549
2561 if (!retval) { /* SCSI command successful */ 2550 if (!retval) { /* SCSI command successful */
2562 2551
@@ -2575,8 +2564,6 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
2575 STps = &(STp->ps[STp->partition]); 2564 STps = &(STp->ps[STp->partition]);
2576 STps->drv_file = STps->drv_block = (-1); 2565 STps->drv_file = STps->drv_block = (-1);
2577 } 2566 }
2578out:
2579 st_release_request(SRpnt);
2580 2567
2581 return retval; 2568 return retval;
2582} 2569}
@@ -2852,15 +2839,12 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2852 return (-ENOSYS); 2839 return (-ENOSYS);
2853 } 2840 }
2854 2841
2855 SRpnt = st_allocate_request(STp); 2842 SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
2843 timeout, MAX_RETRIES, 1);
2856 if (!SRpnt) 2844 if (!SRpnt)
2857 return (STp->buffer)->syscall_result; 2845 return (STp->buffer)->syscall_result;
2858 2846
2859 ioctl_result = st_scsi_kern_execute(SRpnt, cmd, direction, 2847 ioctl_result = (STp->buffer)->syscall_result;
2860 STp->buffer->b_data, datalen,
2861 timeout, MAX_RETRIES);
2862 if (!ioctl_result)
2863 ioctl_result = (STp->buffer)->syscall_result;
2864 2848
2865 if (!ioctl_result) { /* SCSI command successful */ 2849 if (!ioctl_result) { /* SCSI command successful */
2866 st_release_request(SRpnt); 2850 st_release_request(SRpnt);
@@ -3022,17 +3006,11 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
3022 if (!logical && !STp->scsi2_logical) 3006 if (!logical && !STp->scsi2_logical)
3023 scmd[1] = 1; 3007 scmd[1] = 1;
3024 } 3008 }
3025 3009 SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
3026 SRpnt = st_allocate_request(STp); 3010 STp->device->request_queue->rq_timeout,
3011 MAX_READY_RETRIES, 1);
3027 if (!SRpnt) 3012 if (!SRpnt)
3028 return STp->buffer->syscall_result; 3013 return (STp->buffer)->syscall_result;
3029
3030 result = st_scsi_kern_execute(SRpnt, scmd, DMA_FROM_DEVICE,
3031 STp->buffer->b_data, 20,
3032 STp->device->request_queue->rq_timeout,
3033 MAX_READY_RETRIES);
3034 if (result)
3035 goto out;
3036 3014
3037 if ((STp->buffer)->syscall_result != 0 || 3015 if ((STp->buffer)->syscall_result != 0 ||
3038 (STp->device->scsi_level >= SCSI_2 && 3016 (STp->device->scsi_level >= SCSI_2 &&
@@ -3060,7 +3038,6 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
3060 DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name, 3038 DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
3061 *block, *partition)); 3039 *block, *partition));
3062 } 3040 }
3063out:
3064 st_release_request(SRpnt); 3041 st_release_request(SRpnt);
3065 SRpnt = NULL; 3042 SRpnt = NULL;
3066 3043
@@ -3135,14 +3112,10 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
3135 timeout = STp->device->request_queue->rq_timeout; 3112 timeout = STp->device->request_queue->rq_timeout;
3136 } 3113 }
3137 3114
3138 SRpnt = st_allocate_request(STp); 3115 SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
3116 timeout, MAX_READY_RETRIES, 1);
3139 if (!SRpnt) 3117 if (!SRpnt)
3140 return STp->buffer->syscall_result; 3118 return (STp->buffer)->syscall_result;
3141
3142 result = st_scsi_kern_execute(SRpnt, scmd, DMA_NONE, NULL, 0,
3143 timeout, MAX_READY_RETRIES);
3144 if (result)
3145 goto out;
3146 3119
3147 STps->drv_block = STps->drv_file = (-1); 3120 STps->drv_block = STps->drv_file = (-1);
3148 STps->eof = ST_NOEOF; 3121 STps->eof = ST_NOEOF;
@@ -3167,7 +3140,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
3167 STps->drv_block = STps->drv_file = 0; 3140 STps->drv_block = STps->drv_file = 0;
3168 result = 0; 3141 result = 0;
3169 } 3142 }
3170out: 3143
3171 st_release_request(SRpnt); 3144 st_release_request(SRpnt);
3172 SRpnt = NULL; 3145 SRpnt = NULL;
3173 3146
@@ -3696,38 +3669,34 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a
3696 3669
3697/* Try to allocate a new tape buffer. Calling function must not hold 3670/* Try to allocate a new tape buffer. Calling function must not hold
3698 dev_arr_lock. */ 3671 dev_arr_lock. */
3699static struct st_buffer * 3672static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
3700 new_tape_buffer(int from_initialization, int need_dma, int max_sg)
3701{ 3673{
3702 int i, got = 0;
3703 gfp_t priority;
3704 struct st_buffer *tb; 3674 struct st_buffer *tb;
3705 3675
3706 if (from_initialization) 3676 tb = kzalloc(sizeof(struct st_buffer), GFP_ATOMIC);
3707 priority = GFP_ATOMIC;
3708 else
3709 priority = GFP_KERNEL;
3710
3711 i = sizeof(struct st_buffer) + (max_sg - 1) * sizeof(struct scatterlist) +
3712 max_sg * sizeof(struct st_buf_fragment);
3713 tb = kzalloc(i, priority);
3714 if (!tb) { 3677 if (!tb) {
3715 printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n"); 3678 printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
3716 return NULL; 3679 return NULL;
3717 } 3680 }
3718 tb->frp_segs = tb->orig_frp_segs = 0; 3681 tb->frp_segs = 0;
3719 tb->use_sg = max_sg; 3682 tb->use_sg = max_sg;
3720 tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);
3721
3722 tb->dma = need_dma; 3683 tb->dma = need_dma;
3723 tb->buffer_size = got; 3684 tb->buffer_size = 0;
3724 sg_init_table(tb->sg, max_sg); 3685
3686 tb->reserved_pages = kzalloc(max_sg * sizeof(struct page *),
3687 GFP_ATOMIC);
3688 if (!tb->reserved_pages) {
3689 kfree(tb);
3690 return NULL;
3691 }
3725 3692
3726 return tb; 3693 return tb;
3727} 3694}
3728 3695
3729 3696
3730/* Try to allocate enough space in the tape buffer */ 3697/* Try to allocate enough space in the tape buffer */
3698#define ST_MAX_ORDER 6
3699
3731static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma) 3700static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
3732{ 3701{
3733 int segs, nbr, max_segs, b_size, order, got; 3702 int segs, nbr, max_segs, b_size, order, got;
@@ -3747,33 +3716,45 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3747 priority = GFP_KERNEL | __GFP_NOWARN; 3716 priority = GFP_KERNEL | __GFP_NOWARN;
3748 if (need_dma) 3717 if (need_dma)
3749 priority |= GFP_DMA; 3718 priority |= GFP_DMA;
3750 for (b_size = PAGE_SIZE, order=0; order <= 6 && 3719
3751 b_size < new_size - STbuffer->buffer_size; 3720 if (STbuffer->cleared)
3752 order++, b_size *= 2) 3721 priority |= __GFP_ZERO;
3753 ; /* empty */ 3722
3723 if (STbuffer->frp_segs) {
3724 order = STbuffer->map_data.page_order;
3725 b_size = PAGE_SIZE << order;
3726 } else {
3727 for (b_size = PAGE_SIZE, order = 0;
3728 order < ST_MAX_ORDER && b_size < new_size;
3729 order++, b_size *= 2)
3730 ; /* empty */
3731 }
3732 if (max_segs * (PAGE_SIZE << order) < new_size) {
3733 if (order == ST_MAX_ORDER)
3734 return 0;
3735 normalize_buffer(STbuffer);
3736 return enlarge_buffer(STbuffer, new_size, need_dma);
3737 }
3754 3738
3755 for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size; 3739 for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
3756 segs < max_segs && got < new_size;) { 3740 segs < max_segs && got < new_size;) {
3757 STbuffer->frp[segs].page = alloc_pages(priority, order); 3741 struct page *page;
3758 if (STbuffer->frp[segs].page == NULL) { 3742
3759 if (new_size - got <= (max_segs - segs) * b_size / 2) { 3743 page = alloc_pages(priority, order);
3760 b_size /= 2; /* Large enough for the rest of the buffers */ 3744 if (!page) {
3761 order--;
3762 continue;
3763 }
3764 DEB(STbuffer->buffer_size = got); 3745 DEB(STbuffer->buffer_size = got);
3765 normalize_buffer(STbuffer); 3746 normalize_buffer(STbuffer);
3766 return 0; 3747 return 0;
3767 } 3748 }
3768 STbuffer->frp[segs].length = b_size; 3749
3769 STbuffer->frp_segs += 1; 3750 STbuffer->frp_segs += 1;
3770 got += b_size; 3751 got += b_size;
3771 STbuffer->buffer_size = got; 3752 STbuffer->buffer_size = got;
3772 if (STbuffer->cleared) 3753 STbuffer->reserved_pages[segs] = page;
3773 memset(page_address(STbuffer->frp[segs].page), 0, b_size);
3774 segs++; 3754 segs++;
3775 } 3755 }
3776 STbuffer->b_data = page_address(STbuffer->frp[0].page); 3756 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
3757 STbuffer->map_data.page_order = order;
3777 3758
3778 return 1; 3759 return 1;
3779} 3760}
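
The rewritten enlarge_buffer() gives every segment the same page order: the order is taken from the existing segments if there are any, otherwise it is the smallest order (capped at ST_MAX_ORDER) whose block size reaches new_size, and if max_segs blocks of that order still cannot hold the request the buffer is dropped with normalize_buffer() and the allocation retried (or fails outright once ST_MAX_ORDER is reached). A standalone sketch of that order selection, assuming a 4 KB page size purely for illustration:

    #include <stdio.h>

    #define PAGE_SIZE    4096        /* illustrative assumption */
    #define ST_MAX_ORDER 6

    /* Smallest order whose block size covers new_size, capped at ST_MAX_ORDER. */
    static int pick_order(int new_size)
    {
            int order, b_size;

            for (b_size = PAGE_SIZE, order = 0;
                 order < ST_MAX_ORDER && b_size < new_size;
                 order++, b_size *= 2)
                    ;                /* empty: just count up the order */
            return order;
    }

    int main(void)
    {
            int new_size = 600 * 1024, max_segs = 64;
            int order = pick_order(new_size);

            printf("order %d -> %d KB blocks\n",
                   order, (PAGE_SIZE << order) / 1024);
            /* Same test as enlarge_buffer(): can max_segs blocks of this
             * order cover new_size at all? */
            printf("coverage ok: %d\n",
                   (long)max_segs * (PAGE_SIZE << order) >= new_size);
            return 0;
    }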
@@ -3785,7 +3766,8 @@ static void clear_buffer(struct st_buffer * st_bp)
3785 int i; 3766 int i;
3786 3767
3787 for (i=0; i < st_bp->frp_segs; i++) 3768 for (i=0; i < st_bp->frp_segs; i++)
3788 memset(page_address(st_bp->frp[i].page), 0, st_bp->frp[i].length); 3769 memset(page_address(st_bp->reserved_pages[i]), 0,
3770 PAGE_SIZE << st_bp->map_data.page_order);
3789 st_bp->cleared = 1; 3771 st_bp->cleared = 1;
3790} 3772}
3791 3773
@@ -3793,16 +3775,16 @@ static void clear_buffer(struct st_buffer * st_bp)
3793/* Release the extra buffer */ 3775/* Release the extra buffer */
3794static void normalize_buffer(struct st_buffer * STbuffer) 3776static void normalize_buffer(struct st_buffer * STbuffer)
3795{ 3777{
3796 int i, order; 3778 int i, order = STbuffer->map_data.page_order;
3797 3779
3798 for (i = STbuffer->orig_frp_segs; i < STbuffer->frp_segs; i++) { 3780 for (i = 0; i < STbuffer->frp_segs; i++) {
3799 order = get_order(STbuffer->frp[i].length); 3781 __free_pages(STbuffer->reserved_pages[i], order);
3800 __free_pages(STbuffer->frp[i].page, order); 3782 STbuffer->buffer_size -= (PAGE_SIZE << order);
3801 STbuffer->buffer_size -= STbuffer->frp[i].length;
3802 } 3783 }
3803 STbuffer->frp_segs = STbuffer->orig_frp_segs; 3784 STbuffer->frp_segs = 0;
3804 STbuffer->frp_sg_current = 0;
3805 STbuffer->sg_segs = 0; 3785 STbuffer->sg_segs = 0;
3786 STbuffer->map_data.page_order = 0;
3787 STbuffer->map_data.offset = 0;
3806} 3788}
3807 3789
3808 3790
@@ -3811,18 +3793,19 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3811static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) 3793static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
3812{ 3794{
3813 int i, cnt, res, offset; 3795 int i, cnt, res, offset;
3796 int length = PAGE_SIZE << st_bp->map_data.page_order;
3814 3797
3815 for (i = 0, offset = st_bp->buffer_bytes; 3798 for (i = 0, offset = st_bp->buffer_bytes;
3816 i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++) 3799 i < st_bp->frp_segs && offset >= length; i++)
3817 offset -= st_bp->frp[i].length; 3800 offset -= length;
3818 if (i == st_bp->frp_segs) { /* Should never happen */ 3801 if (i == st_bp->frp_segs) { /* Should never happen */
3819 printk(KERN_WARNING "st: append_to_buffer offset overflow.\n"); 3802 printk(KERN_WARNING "st: append_to_buffer offset overflow.\n");
3820 return (-EIO); 3803 return (-EIO);
3821 } 3804 }
3822 for (; i < st_bp->frp_segs && do_count > 0; i++) { 3805 for (; i < st_bp->frp_segs && do_count > 0; i++) {
3823 cnt = st_bp->frp[i].length - offset < do_count ? 3806 struct page *page = st_bp->reserved_pages[i];
3824 st_bp->frp[i].length - offset : do_count; 3807 cnt = length - offset < do_count ? length - offset : do_count;
3825 res = copy_from_user(page_address(st_bp->frp[i].page) + offset, ubp, cnt); 3808 res = copy_from_user(page_address(page) + offset, ubp, cnt);
3826 if (res) 3809 if (res)
3827 return (-EFAULT); 3810 return (-EFAULT);
3828 do_count -= cnt; 3811 do_count -= cnt;
@@ -3842,18 +3825,19 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
3842static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) 3825static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
3843{ 3826{
3844 int i, cnt, res, offset; 3827 int i, cnt, res, offset;
3828 int length = PAGE_SIZE << st_bp->map_data.page_order;
3845 3829
3846 for (i = 0, offset = st_bp->read_pointer; 3830 for (i = 0, offset = st_bp->read_pointer;
3847 i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++) 3831 i < st_bp->frp_segs && offset >= length; i++)
3848 offset -= st_bp->frp[i].length; 3832 offset -= length;
3849 if (i == st_bp->frp_segs) { /* Should never happen */ 3833 if (i == st_bp->frp_segs) { /* Should never happen */
3850 printk(KERN_WARNING "st: from_buffer offset overflow.\n"); 3834 printk(KERN_WARNING "st: from_buffer offset overflow.\n");
3851 return (-EIO); 3835 return (-EIO);
3852 } 3836 }
3853 for (; i < st_bp->frp_segs && do_count > 0; i++) { 3837 for (; i < st_bp->frp_segs && do_count > 0; i++) {
3854 cnt = st_bp->frp[i].length - offset < do_count ? 3838 struct page *page = st_bp->reserved_pages[i];
3855 st_bp->frp[i].length - offset : do_count; 3839 cnt = length - offset < do_count ? length - offset : do_count;
3856 res = copy_to_user(ubp, page_address(st_bp->frp[i].page) + offset, cnt); 3840 res = copy_to_user(ubp, page_address(page) + offset, cnt);
3857 if (res) 3841 if (res)
3858 return (-EFAULT); 3842 return (-EFAULT);
3859 do_count -= cnt; 3843 do_count -= cnt;
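
Since all segments now share the length PAGE_SIZE << page_order, append_to_buffer() and from_buffer() (and move_buffer_data() below) no longer walk per-fragment lengths; finding the segment that holds a given byte offset is plain division and remainder. A self-contained illustration of that step, with page size and order chosen only for the example:

    #include <stdio.h>

    #define PAGE_SIZE 4096                  /* illustrative assumption */

    int main(void)
    {
            int page_order = 2;             /* illustrative: 16 KB segments */
            int length = PAGE_SIZE << page_order;
            int offset = 40000;             /* byte position in the buffer */

            /* Equivalent of the skip loops above once every segment is
             * the same size. */
            int seg = offset / length;
            int within = offset % length;

            printf("byte %d -> segment %d, offset %d (segment size %d)\n",
                   offset, seg, within, length);
            return 0;
    }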
@@ -3874,6 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3874{ 3858{
3875 int src_seg, dst_seg, src_offset = 0, dst_offset; 3859 int src_seg, dst_seg, src_offset = 0, dst_offset;
3876 int count, total; 3860 int count, total;
3861 int length = PAGE_SIZE << st_bp->map_data.page_order;
3877 3862
3878 if (offset == 0) 3863 if (offset == 0)
3879 return; 3864 return;
@@ -3881,24 +3866,26 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3881 total=st_bp->buffer_bytes - offset; 3866 total=st_bp->buffer_bytes - offset;
3882 for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) { 3867 for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) {
3883 src_offset = offset; 3868 src_offset = offset;
3884 if (src_offset < st_bp->frp[src_seg].length) 3869 if (src_offset < length)
3885 break; 3870 break;
3886 offset -= st_bp->frp[src_seg].length; 3871 offset -= length;
3887 } 3872 }
3888 3873
3889 st_bp->buffer_bytes = st_bp->read_pointer = total; 3874 st_bp->buffer_bytes = st_bp->read_pointer = total;
3890 for (dst_seg=dst_offset=0; total > 0; ) { 3875 for (dst_seg=dst_offset=0; total > 0; ) {
3891 count = min(st_bp->frp[dst_seg].length - dst_offset, 3876 struct page *dpage = st_bp->reserved_pages[dst_seg];
3892 st_bp->frp[src_seg].length - src_offset); 3877 struct page *spage = st_bp->reserved_pages[src_seg];
3893 memmove(page_address(st_bp->frp[dst_seg].page) + dst_offset, 3878
3894 page_address(st_bp->frp[src_seg].page) + src_offset, count); 3879 count = min(length - dst_offset, length - src_offset);
3880 memmove(page_address(dpage) + dst_offset,
3881 page_address(spage) + src_offset, count);
3895 src_offset += count; 3882 src_offset += count;
3896 if (src_offset >= st_bp->frp[src_seg].length) { 3883 if (src_offset >= length) {
3897 src_seg++; 3884 src_seg++;
3898 src_offset = 0; 3885 src_offset = 0;
3899 } 3886 }
3900 dst_offset += count; 3887 dst_offset += count;
3901 if (dst_offset >= st_bp->frp[dst_seg].length) { 3888 if (dst_offset >= length) {
3902 dst_seg++; 3889 dst_seg++;
3903 dst_offset = 0; 3890 dst_offset = 0;
3904 } 3891 }
@@ -3906,32 +3893,6 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3906 } 3893 }
3907} 3894}
3908 3895
3909
3910/* Fill the s/g list up to the length required for this transfer */
3911static void buf_to_sg(struct st_buffer *STbp, unsigned int length)
3912{
3913 int i;
3914 unsigned int count;
3915 struct scatterlist *sg;
3916 struct st_buf_fragment *frp;
3917
3918 if (length == STbp->frp_sg_current)
3919 return; /* work already done */
3920
3921 sg = &(STbp->sg[0]);
3922 frp = STbp->frp;
3923 for (i=count=0; count < length; i++) {
3924 if (length - count > frp[i].length)
3925 sg_set_page(&sg[i], frp[i].page, frp[i].length, 0);
3926 else
3927 sg_set_page(&sg[i], frp[i].page, length - count, 0);
3928 count += sg[i].length;
3929 }
3930 STbp->sg_segs = i;
3931 STbp->frp_sg_current = length;
3932}
3933
3934
3935/* Validate the options from command line or module parameters */ 3896/* Validate the options from command line or module parameters */
3936static void validate_options(void) 3897static void validate_options(void)
3937{ 3898{
@@ -4026,7 +3987,7 @@ static int st_probe(struct device *dev)
4026 SDp->request_queue->max_phys_segments); 3987 SDp->request_queue->max_phys_segments);
4027 if (st_max_sg_segs < i) 3988 if (st_max_sg_segs < i)
4028 i = st_max_sg_segs; 3989 i = st_max_sg_segs;
4029 buffer = new_tape_buffer(1, (SDp->host)->unchecked_isa_dma, i); 3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
4030 if (buffer == NULL) { 3991 if (buffer == NULL) {
4031 printk(KERN_ERR 3992 printk(KERN_ERR
4032 "st: Can't allocate new tape buffer. Device not attached.\n"); 3993 "st: Can't allocate new tape buffer. Device not attached.\n");
@@ -4280,8 +4241,8 @@ static void scsi_tape_release(struct kref *kref)
4280 tpnt->device = NULL; 4241 tpnt->device = NULL;
4281 4242
4282 if (tpnt->buffer) { 4243 if (tpnt->buffer) {
4283 tpnt->buffer->orig_frp_segs = 0;
4284 normalize_buffer(tpnt->buffer); 4244 normalize_buffer(tpnt->buffer);
4245 kfree(tpnt->buffer->reserved_pages);
4285 kfree(tpnt->buffer); 4246 kfree(tpnt->buffer);
4286 } 4247 }
4287 4248
@@ -4567,14 +4528,16 @@ out:
4567} 4528}
4568 4529
4569/* The following functions may be useful for a larger audience. */ 4530/* The following functions may be useful for a larger audience. */
4570static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, 4531static int sgl_map_user_pages(struct st_buffer *STbp,
4571 unsigned long uaddr, size_t count, int rw) 4532 const unsigned int max_pages, unsigned long uaddr,
4533 size_t count, int rw)
4572{ 4534{
4573 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT; 4535 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
4574 unsigned long start = uaddr >> PAGE_SHIFT; 4536 unsigned long start = uaddr >> PAGE_SHIFT;
4575 const int nr_pages = end - start; 4537 const int nr_pages = end - start;
4576 int res, i, j; 4538 int res, i, j;
4577 struct page **pages; 4539 struct page **pages;
4540 struct rq_map_data *mdata = &STbp->map_data;
4578 4541
4579 /* User attempted Overflow! */ 4542 /* User attempted Overflow! */
4580 if ((uaddr + count) < uaddr) 4543 if ((uaddr + count) < uaddr)
@@ -4616,24 +4579,11 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa
4616 flush_dcache_page(pages[i]); 4579 flush_dcache_page(pages[i]);
4617 } 4580 }
4618 4581
4619 /* Populate the scatter/gather list */ 4582 mdata->offset = uaddr & ~PAGE_MASK;
4620 sg_set_page(&sgl[0], pages[0], 0, uaddr & ~PAGE_MASK); 4583 mdata->page_order = 0;
4621 if (nr_pages > 1) { 4584 STbp->mapped_pages = pages;
4622 sgl[0].length = PAGE_SIZE - sgl[0].offset;
4623 count -= sgl[0].length;
4624 for (i=1; i < nr_pages ; i++) {
4625 sg_set_page(&sgl[i], pages[i],
4626 count < PAGE_SIZE ? count : PAGE_SIZE, 0);;
4627 count -= PAGE_SIZE;
4628 }
4629 }
4630 else {
4631 sgl[0].length = count;
4632 }
4633 4585
4634 kfree(pages);
4635 return nr_pages; 4586 return nr_pages;
4636
4637 out_unmap: 4587 out_unmap:
4638 if (res > 0) { 4588 if (res > 0) {
4639 for (j=0; j < res; j++) 4589 for (j=0; j < res; j++)
@@ -4646,13 +4596,13 @@ static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pa
4646 4596
4647 4597
4648/* And unmap them... */ 4598/* And unmap them... */
4649static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages, 4599static int sgl_unmap_user_pages(struct st_buffer *STbp,
4650 int dirtied) 4600 const unsigned int nr_pages, int dirtied)
4651{ 4601{
4652 int i; 4602 int i;
4653 4603
4654 for (i=0; i < nr_pages; i++) { 4604 for (i=0; i < nr_pages; i++) {
4655 struct page *page = sg_page(&sgl[i]); 4605 struct page *page = STbp->mapped_pages[i];
4656 4606
4657 if (dirtied) 4607 if (dirtied)
4658 SetPageDirty(page); 4608 SetPageDirty(page);
@@ -4661,6 +4611,8 @@ static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_p
4661 */ 4611 */
4662 page_cache_release(page); 4612 page_cache_release(page);
4663 } 4613 }
4614 kfree(STbp->mapped_pages);
4615 STbp->mapped_pages = NULL;
4664 4616
4665 return 0; 4617 return 0;
4666} 4618}
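
sgl_map_user_pages() now only pins the user pages, stores them in STbp->mapped_pages and records the first-page offset in STbp->map_data; the scatterlist population is gone because the block layer builds the request mapping from map_data instead, and sgl_unmap_user_pages() releases and then frees the same array. The page count is still derived from a possibly unaligned user address and byte count; a small standalone check of that arithmetic, assuming 4 KB pages for the example:

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* illustrative assumption */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long uaddr = 0x10003040UL;  /* unaligned user address */
            unsigned long count = 10000;         /* bytes to transfer */

            unsigned long start = uaddr >> PAGE_SHIFT;
            unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
            unsigned long nr_pages = end - start;
            unsigned long offset = uaddr & ~PAGE_MASK;  /* -> map_data.offset */

            printf("%lu pages to pin, first-page offset %lu\n",
                   nr_pages, offset);
            return 0;
    }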
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index b92712f95931..544dc6b1f548 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -29,6 +29,7 @@ struct st_request {
29 int result; 29 int result;
30 struct scsi_tape *stp; 30 struct scsi_tape *stp;
31 struct completion *waiting; 31 struct completion *waiting;
32 struct bio *bio;
32}; 33};
33 34
34/* The tape buffer descriptor. */ 35/* The tape buffer descriptor. */
@@ -44,20 +45,13 @@ struct st_buffer {
44 int syscall_result; 45 int syscall_result;
45 struct st_request *last_SRpnt; 46 struct st_request *last_SRpnt;
46 struct st_cmdstatus cmdstat; 47 struct st_cmdstatus cmdstat;
48 struct page **reserved_pages;
49 struct page **mapped_pages;
50 struct rq_map_data map_data;
47 unsigned char *b_data; 51 unsigned char *b_data;
48 unsigned short use_sg; /* zero or max number of s/g segments for this adapter */ 52 unsigned short use_sg; /* zero or max number of s/g segments for this adapter */
49 unsigned short sg_segs; /* number of segments in s/g list */ 53 unsigned short sg_segs; /* number of segments in s/g list */
50 unsigned short orig_frp_segs; /* number of segments allocated at first try */
51 unsigned short frp_segs; /* number of buffer segments */ 54 unsigned short frp_segs; /* number of buffer segments */
52 unsigned int frp_sg_current; /* driver buffer length currently in s/g list */
53 struct st_buf_fragment *frp; /* the allocated buffer fragment list */
54 struct scatterlist sg[1]; /* MUST BE last item */
55};
56
57/* The tape buffer fragment descriptor */
58struct st_buf_fragment {
59 struct page *page;
60 unsigned int length;
61}; 55};
62 56
63/* The tape mode definition */ 57/* The tape mode definition */
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 3c4a300494a4..a8d61a62522e 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -137,8 +137,8 @@ zalon_probe(struct parisc_device *dev)
137 goto fail; 137 goto fail;
138 138
139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) { 139 if (request_irq(dev->irq, ncr53c8xx_intr, IRQF_SHARED, "zalon", host)) {
140 printk(KERN_ERR "%s: irq problem with %d, detaching\n ", 140 dev_printk(KERN_ERR, dev, "irq problem with %d, detaching\n ",
141 dev->dev.bus_id, dev->irq); 141 dev->irq);
142 goto fail; 142 goto fail;
143 } 143 }
144 144
diff --git a/fs/bio.c b/fs/bio.c
index 711cee103602..062299acbccd 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -788,6 +788,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
788 int i, ret; 788 int i, ret;
789 int nr_pages = 0; 789 int nr_pages = 0;
790 unsigned int len = 0; 790 unsigned int len = 0;
791 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
791 792
792 for (i = 0; i < iov_count; i++) { 793 for (i = 0; i < iov_count; i++) {
793 unsigned long uaddr; 794 unsigned long uaddr;
@@ -814,35 +815,42 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
814 bio->bi_rw |= (!write_to_vm << BIO_RW); 815 bio->bi_rw |= (!write_to_vm << BIO_RW);
815 816
816 ret = 0; 817 ret = 0;
817 i = 0; 818
819 if (map_data) {
820 nr_pages = 1 << map_data->page_order;
821 i = map_data->offset / PAGE_SIZE;
822 }
818 while (len) { 823 while (len) {
819 unsigned int bytes; 824 unsigned int bytes = PAGE_SIZE;
820 825
821 if (map_data) 826 bytes -= offset;
822 bytes = 1U << (PAGE_SHIFT + map_data->page_order);
823 else
824 bytes = PAGE_SIZE;
825 827
826 if (bytes > len) 828 if (bytes > len)
827 bytes = len; 829 bytes = len;
828 830
829 if (map_data) { 831 if (map_data) {
830 if (i == map_data->nr_entries) { 832 if (i == map_data->nr_entries * nr_pages) {
831 ret = -ENOMEM; 833 ret = -ENOMEM;
832 break; 834 break;
833 } 835 }
834 page = map_data->pages[i++]; 836
835 } else 837 page = map_data->pages[i / nr_pages];
838 page += (i % nr_pages);
839
840 i++;
841 } else {
836 page = alloc_page(q->bounce_gfp | gfp_mask); 842 page = alloc_page(q->bounce_gfp | gfp_mask);
837 if (!page) { 843 if (!page) {
838 ret = -ENOMEM; 844 ret = -ENOMEM;
839 break; 845 break;
846 }
840 } 847 }
841 848
842 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) 849 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
843 break; 850 break;
844 851
845 len -= bytes; 852 len -= bytes;
853 offset = 0;
846 } 854 }
847 855
848 if (ret) 856 if (ret)
@@ -851,7 +859,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
851 /* 859 /*
852 * success 860 * success
853 */ 861 */
854 if (!write_to_vm) { 862 if (!write_to_vm && (!map_data || !map_data->null_mapped)) {
855 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0); 863 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
856 if (ret) 864 if (ret)
857 goto cleanup; 865 goto cleanup;
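
The bio_copy_user_iov() changes let a caller hand pre-allocated high-order pages in through map_data: with nr_pages = 1 << page_order sub-pages per pages[] entry, the copy starts at map_data->offset and chunk i lands in entry i / nr_pages at sub-page i % nr_pages, while null_mapped skips the initial copy-in from user space when the caller only wants the buffer set up. A small demonstration of just that index arithmetic (all values illustrative):

    #include <stdio.h>

    int main(void)
    {
            int page_order = 2;              /* each pages[] entry: 4-page block */
            int nr_pages = 1 << page_order;  /* sub-pages per entry */
            int page_size = 4096;            /* illustrative assumption */
            unsigned long offset = 6144;     /* map_data->offset, 1.5 pages in */

            /* First sub-page index, as in "i = map_data->offset / PAGE_SIZE". */
            int i = offset / page_size;
            int chunk;

            for (chunk = 0; chunk < 6; chunk++, i++)
                    printf("chunk %d -> pages[%d], sub-page %d\n",
                           chunk, i / nr_pages, i % nr_pages);
            return 0;
    }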
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7035cec583b6..044467ef7b11 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -690,6 +690,8 @@ struct rq_map_data {
690 struct page **pages; 690 struct page **pages;
691 int page_order; 691 int page_order;
692 int nr_entries; 692 int nr_entries;
693 unsigned long offset;
694 int null_mapped;
693}; 695};
694 696
695struct req_iterator { 697struct req_iterator {
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 6e04e6fe79c7..c9184f756cad 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -358,6 +358,7 @@ struct fc_rport { /* aka fc_starget_attrs */
358#define FC_RPORT_DEVLOSS_PENDING 0x01 358#define FC_RPORT_DEVLOSS_PENDING 0x01
359#define FC_RPORT_SCAN_PENDING 0x02 359#define FC_RPORT_SCAN_PENDING 0x02
360#define FC_RPORT_FAST_FAIL_TIMEDOUT 0x04 360#define FC_RPORT_FAST_FAIL_TIMEDOUT 0x04
361#define FC_RPORT_DEVLOSS_CALLBK_DONE 0x08
361 362
362#define dev_to_rport(d) \ 363#define dev_to_rport(d) \
363 container_of(d, struct fc_rport, dev) 364 container_of(d, struct fc_rport, dev)