author    Paul Mackerras <paulus@samba.org>  2005-09-25 08:51:50 -0400
committer Paul Mackerras <paulus@samba.org>  2005-09-25 08:51:50 -0400
commit    e5baa396af7560382d2cf3f0871d616b61fc284c (patch)
tree      6afc166894b8c8b3b2cf6add72a726be14ae2443 /drivers
parent    d6a4c847e43c851cc0ddf73087a730227223f989 (diff)
parent    ef6bd6eb90ad72ee8ee7ba8b271f27102e9a90c1 (diff)
Merge from Linus' tree.
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acorn/char/pcf8583.c  3
-rw-r--r--  drivers/base/class.c  13
-rw-r--r--  drivers/base/dd.c  3
-rw-r--r--  drivers/block/cciss.c  5
-rw-r--r--  drivers/block/ll_rw_blk.c  38
-rw-r--r--  drivers/block/ub.c  55
-rw-r--r--  drivers/char/hpet.c  1
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  6
-rw-r--r--  drivers/hwmon/Kconfig  9
-rw-r--r--  drivers/hwmon/hdaps.c  21
-rw-r--r--  drivers/i2c/busses/Kconfig  12
-rw-r--r--  drivers/i2c/busses/Makefile  1
-rw-r--r--  drivers/i2c/busses/i2c-pmac-smu.c  316
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c  19
-rw-r--r--  drivers/infiniband/core/user_mad.c  5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c  16
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  51
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c  25
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c  4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c  2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  13
-rw-r--r--  drivers/input/input.c  1
-rw-r--r--  drivers/isdn/hisax/st5481_b.c  2
-rw-r--r--  drivers/isdn/hisax/st5481_usb.c  2
-rw-r--r--  drivers/macintosh/smu.c  1030
-rw-r--r--  drivers/macintosh/therm_adt746x.c  2
-rw-r--r--  drivers/macintosh/therm_pm72.c  2
-rw-r--r--  drivers/macintosh/therm_windtunnel.c  2
-rw-r--r--  drivers/media/video/bttv-driver.c  14
-rw-r--r--  drivers/media/video/bttvp.h  2
-rw-r--r--  drivers/message/fusion/Kconfig  17
-rw-r--r--  drivers/message/fusion/Makefile  1
-rw-r--r--  drivers/message/fusion/mptbase.c  963
-rw-r--r--  drivers/message/fusion/mptbase.h  56
-rw-r--r--  drivers/message/fusion/mptctl.c  4
-rw-r--r--  drivers/message/fusion/mptfc.c  2
-rw-r--r--  drivers/message/fusion/mptlan.c  7
-rw-r--r--  drivers/message/fusion/mptsas.c  1235
-rw-r--r--  drivers/message/fusion/mptscsih.c  463
-rw-r--r--  drivers/message/fusion/mptscsih.h  7
-rw-r--r--  drivers/message/fusion/mptspi.c  2
-rw-r--r--  drivers/message/i2o/config-osm.c  5
-rw-r--r--  drivers/mfd/ucb1x00-ts.c  4
-rw-r--r--  drivers/mtd/devices/docecc.c  8
-rw-r--r--  drivers/net/8390.c  2
-rw-r--r--  drivers/net/bonding/bond_main.c  3
-rw-r--r--  drivers/net/r8169.c  4
-rw-r--r--  drivers/net/skge.c  216
-rw-r--r--  drivers/net/skge.h  2
-rw-r--r--  drivers/net/wan/hdlc_cisco.c  2
-rw-r--r--  drivers/pci/hotplug.c  4
-rw-r--r--  drivers/pci/hotplug/rpadlpar_sysfs.c  4
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c  6
-rw-r--r--  drivers/pci/pci-sysfs.c  2
-rw-r--r--  drivers/pci/probe.c  22
-rw-r--r--  drivers/s390/cio/ccwgroup.c  2
-rw-r--r--  drivers/s390/scsi/Makefile  2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c  184
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c  10
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c  995
-rw-r--r--  drivers/s390/scsi/zfcp_def.h  307
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c  135
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h  30
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c  769
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h  54
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c  30
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c  297
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs_adapter.c  14
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c  9
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h  2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c  8
-rw-r--r--  drivers/scsi/ata_piix.c  1
-rw-r--r--  drivers/scsi/atp870u.c  6
-rw-r--r--  drivers/scsi/atp870u.h  5
-rw-r--r--  drivers/scsi/fd_mcs.c  2
-rw-r--r--  drivers/scsi/hosts.c  35
-rw-r--r--  drivers/scsi/ibmmca.c  2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  10
-rw-r--r--  drivers/scsi/libata-core.c  81
-rw-r--r--  drivers/scsi/mesh.c  29
-rw-r--r--  drivers/scsi/sata_nv.c  2
-rw-r--r--  drivers/scsi/scsi.c  5
-rw-r--r--  drivers/scsi/scsi_devinfo.c  1
-rw-r--r--  drivers/scsi/scsi_error.c  78
-rw-r--r--  drivers/scsi/scsi_ioctl.c  2
-rw-r--r--  drivers/scsi/scsi_lib.c  12
-rw-r--r--  drivers/scsi/scsi_scan.c  20
-rw-r--r--  drivers/scsi/scsi_sysfs.c  17
-rw-r--r--  drivers/scsi/sd.c  1
-rw-r--r--  drivers/scsi/sg.c  2
-rw-r--r--  drivers/scsi/sr.c  1
-rw-r--r--  drivers/scsi/st.c  1
-rw-r--r--  drivers/serial/clps711x.c  2
-rw-r--r--  drivers/usb/core/message.c  2
-rw-r--r--  drivers/usb/core/usb.c  6
-rw-r--r--  drivers/usb/gadget/pxa2xx_udc.c  4
-rw-r--r--  drivers/usb/gadget/pxa2xx_udc.h  8
-rw-r--r--  drivers/usb/host/sl811-hcd.c  16
-rw-r--r--  drivers/usb/net/pegasus.c  29
-rw-r--r--  drivers/usb/serial/airprime.c  3
-rw-r--r--  drivers/usb/serial/ftdi_sio.c  8
-rw-r--r--  drivers/usb/serial/option.c  11
-rw-r--r--  drivers/video/Kconfig  1
-rw-r--r--  drivers/video/aty/xlinit.c  11
-rw-r--r--  drivers/video/fbcvt.c  8
-rw-r--r--  drivers/video/nvidia/nvidia.c  5
107 files changed, 5888 insertions(+), 2105 deletions(-)
diff --git a/drivers/acorn/char/pcf8583.c b/drivers/acorn/char/pcf8583.c
index 141b4c237a50..2b850e5860a0 100644
--- a/drivers/acorn/char/pcf8583.c
+++ b/drivers/acorn/char/pcf8583.c
@@ -23,12 +23,13 @@ static struct i2c_driver pcf8583_driver;
23 23
24static unsigned short ignore[] = { I2C_CLIENT_END }; 24static unsigned short ignore[] = { I2C_CLIENT_END };
25static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END }; 25static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END };
26static unsigned short *forces[] = { NULL };
26 27
27static struct i2c_client_address_data addr_data = { 28static struct i2c_client_address_data addr_data = {
28 .normal_i2c = normal_addr, 29 .normal_i2c = normal_addr,
29 .probe = ignore, 30 .probe = ignore,
30 .ignore = ignore, 31 .ignore = ignore,
31 .force = ignore, 32 .forces = forces,
32}; 33};
33 34
34#define DAT(x) ((unsigned int)(x->dev.driver_data)) 35#define DAT(x) ((unsigned int)(x->dev.driver_data))
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 3b112e3542f8..ce23dc8c18c5 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -669,6 +669,7 @@ void class_device_destroy(struct class *cls, dev_t devt)
669int class_device_rename(struct class_device *class_dev, char *new_name) 669int class_device_rename(struct class_device *class_dev, char *new_name)
670{ 670{
671 int error = 0; 671 int error = 0;
672 char *old_class_name = NULL, *new_class_name = NULL;
672 673
673 class_dev = class_device_get(class_dev); 674 class_dev = class_device_get(class_dev);
674 if (!class_dev) 675 if (!class_dev)
@@ -677,12 +678,24 @@ int class_device_rename(struct class_device *class_dev, char *new_name)
677 pr_debug("CLASS: renaming '%s' to '%s'\n", class_dev->class_id, 678 pr_debug("CLASS: renaming '%s' to '%s'\n", class_dev->class_id,
678 new_name); 679 new_name);
679 680
681 if (class_dev->dev)
682 old_class_name = make_class_name(class_dev);
683
680 strlcpy(class_dev->class_id, new_name, KOBJ_NAME_LEN); 684 strlcpy(class_dev->class_id, new_name, KOBJ_NAME_LEN);
681 685
682 error = kobject_rename(&class_dev->kobj, new_name); 686 error = kobject_rename(&class_dev->kobj, new_name);
683 687
688 if (class_dev->dev) {
689 new_class_name = make_class_name(class_dev);
690 sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj,
691 new_class_name);
692 sysfs_remove_link(&class_dev->dev->kobj, old_class_name);
693 }
684 class_device_put(class_dev); 694 class_device_put(class_dev);
685 695
696 kfree(old_class_name);
697 kfree(new_class_name);
698
686 return error; 699 return error;
687} 700}
688 701
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d5bbce38282f..3565e9795301 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -40,6 +40,9 @@
40 */ 40 */
41void device_bind_driver(struct device * dev) 41void device_bind_driver(struct device * dev)
42{ 42{
43 if (klist_node_attached(&dev->knode_driver))
44 return;
45
43 pr_debug("bound device '%s' to driver '%s'\n", 46 pr_debug("bound device '%s' to driver '%s'\n",
44 dev->bus_id, dev->driver->name); 47 dev->bus_id, dev->driver->name);
45 klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices); 48 klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c56f995aadad..486b6e1c7dfb 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -483,9 +483,6 @@ static int cciss_open(struct inode *inode, struct file *filep)
483 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name); 483 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
484#endif /* CCISS_DEBUG */ 484#endif /* CCISS_DEBUG */
485 485
486 if (host->busy_initializing)
487 return -EBUSY;
488
489 if (host->busy_initializing || drv->busy_configuring) 486 if (host->busy_initializing || drv->busy_configuring)
490 return -EBUSY; 487 return -EBUSY;
491 /* 488 /*
@@ -2991,6 +2988,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
2991 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); 2988 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
2992 2989
2993 cciss_procinit(i); 2990 cciss_procinit(i);
2991 hba[i]->busy_initializing = 0;
2994 2992
2995 for(j=0; j < NWD; j++) { /* mfm */ 2993 for(j=0; j < NWD; j++) { /* mfm */
2996 drive_info_struct *drv = &(hba[i]->drv[j]); 2994 drive_info_struct *drv = &(hba[i]->drv[j]);
@@ -3033,7 +3031,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3033 add_disk(disk); 3031 add_disk(disk);
3034 } 3032 }
3035 3033
3036 hba[i]->busy_initializing = 0;
3037 return(1); 3034 return(1);
3038 3035
3039clean4: 3036clean4:
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 483d71b10cf9..baedac522945 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -2373,44 +2373,6 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2373 2373
2374EXPORT_SYMBOL(blkdev_issue_flush); 2374EXPORT_SYMBOL(blkdev_issue_flush);
2375 2375
2376/**
2377 * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
2378 * @q: device queue
2379 * @disk: gendisk
2380 * @error_sector: error offset
2381 *
2382 * Description:
2383 * Devices understanding the SCSI command set, can use this function as
2384 * a helper for issuing a cache flush. Note: driver is required to store
2385 * the error offset (in case of error flushing) in ->sector of struct
2386 * request.
2387 */
2388int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
2389 sector_t *error_sector)
2390{
2391 struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
2392 int ret;
2393
2394 rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
2395 rq->sector = 0;
2396 memset(rq->cmd, 0, sizeof(rq->cmd));
2397 rq->cmd[0] = 0x35;
2398 rq->cmd_len = 12;
2399 rq->data = NULL;
2400 rq->data_len = 0;
2401 rq->timeout = 60 * HZ;
2402
2403 ret = blk_execute_rq(q, disk, rq, 0);
2404
2405 if (ret && error_sector)
2406 *error_sector = rq->sector;
2407
2408 blk_put_request(rq);
2409 return ret;
2410}
2411
2412EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
2413
2414static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) 2376static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2415{ 2377{
2416 int rw = rq_data_dir(rq); 2378 int rw = rq_data_dir(rq);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index aa0bf7ee008d..ed4d5006fe62 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -172,7 +172,7 @@ struct bulk_cs_wrap {
172 */ 172 */
173struct ub_dev; 173struct ub_dev;
174 174
175#define UB_MAX_REQ_SG 4 175#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
176#define UB_MAX_SECTORS 64 176#define UB_MAX_SECTORS 64
177 177
178/* 178/*
@@ -387,7 +387,7 @@ struct ub_dev {
387 struct bulk_cs_wrap work_bcs; 387 struct bulk_cs_wrap work_bcs;
388 struct usb_ctrlrequest work_cr; 388 struct usb_ctrlrequest work_cr;
389 389
390 int sg_stat[UB_MAX_REQ_SG+1]; 390 int sg_stat[6];
391 struct ub_scsi_trace tr; 391 struct ub_scsi_trace tr;
392}; 392};
393 393
@@ -525,12 +525,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
525 "qlen %d qmax %d\n", 525 "qlen %d qmax %d\n",
526 sc->cmd_queue.qlen, sc->cmd_queue.qmax); 526 sc->cmd_queue.qlen, sc->cmd_queue.qmax);
527 cnt += sprintf(page + cnt, 527 cnt += sprintf(page + cnt,
528 "sg %d %d %d %d %d\n", 528 "sg %d %d %d %d %d .. %d\n",
529 sc->sg_stat[0], 529 sc->sg_stat[0],
530 sc->sg_stat[1], 530 sc->sg_stat[1],
531 sc->sg_stat[2], 531 sc->sg_stat[2],
532 sc->sg_stat[3], 532 sc->sg_stat[3],
533 sc->sg_stat[4]); 533 sc->sg_stat[4],
534 sc->sg_stat[5]);
534 535
535 list_for_each (p, &sc->luns) { 536 list_for_each (p, &sc->luns) {
536 lun = list_entry(p, struct ub_lun, link); 537 lun = list_entry(p, struct ub_lun, link);
@@ -835,7 +836,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
835 return -1; 836 return -1;
836 } 837 }
837 cmd->nsg = n_elem; 838 cmd->nsg = n_elem;
838 sc->sg_stat[n_elem]++; 839 sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
839 840
840 /* 841 /*
841 * build the command 842 * build the command
@@ -891,7 +892,7 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
891 return -1; 892 return -1;
892 } 893 }
893 cmd->nsg = n_elem; 894 cmd->nsg = n_elem;
894 sc->sg_stat[n_elem]++; 895 sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
895 896
896 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 897 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
897 cmd->cdb_len = rq->cmd_len; 898 cmd->cdb_len = rq->cmd_len;
@@ -1010,7 +1011,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1010 sc->last_pipe = sc->send_bulk_pipe; 1011 sc->last_pipe = sc->send_bulk_pipe;
1011 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, 1012 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
1012 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); 1013 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
1013 sc->work_urb.transfer_flags = 0;
1014 1014
1015 /* Fill what we shouldn't be filling, because usb-storage did so. */ 1015 /* Fill what we shouldn't be filling, because usb-storage did so. */
1016 sc->work_urb.actual_length = 0; 1016 sc->work_urb.actual_length = 0;
@@ -1019,7 +1019,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1019 1019
1020 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1020 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1021 /* XXX Clear stalls */ 1021 /* XXX Clear stalls */
1022 printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
1023 ub_complete(&sc->work_done); 1022 ub_complete(&sc->work_done);
1024 return rc; 1023 return rc;
1025 } 1024 }
@@ -1190,11 +1189,9 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1190 return; 1189 return;
1191 } 1190 }
1192 if (urb->status != 0) { 1191 if (urb->status != 0) {
1193 printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */
1194 goto Bad_End; 1192 goto Bad_End;
1195 } 1193 }
1196 if (urb->actual_length != US_BULK_CB_WRAP_LEN) { 1194 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1197 printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */
1198 /* XXX Must do reset here to unconfuse the device */ 1195 /* XXX Must do reset here to unconfuse the device */
1199 goto Bad_End; 1196 goto Bad_End;
1200 } 1197 }
@@ -1395,14 +1392,12 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1395 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, 1392 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
1396 page_address(sg->page) + sg->offset, sg->length, 1393 page_address(sg->page) + sg->offset, sg->length,
1397 ub_urb_complete, sc); 1394 ub_urb_complete, sc);
1398 sc->work_urb.transfer_flags = 0;
1399 sc->work_urb.actual_length = 0; 1395 sc->work_urb.actual_length = 0;
1400 sc->work_urb.error_count = 0; 1396 sc->work_urb.error_count = 0;
1401 sc->work_urb.status = 0; 1397 sc->work_urb.status = 0;
1402 1398
1403 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1399 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1404 /* XXX Clear stalls */ 1400 /* XXX Clear stalls */
1405 printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
1406 ub_complete(&sc->work_done); 1401 ub_complete(&sc->work_done);
1407 ub_state_done(sc, cmd, rc); 1402 ub_state_done(sc, cmd, rc);
1408 return; 1403 return;
@@ -1442,7 +1437,6 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1442 sc->last_pipe = sc->recv_bulk_pipe; 1437 sc->last_pipe = sc->recv_bulk_pipe;
1443 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, 1438 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1444 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); 1439 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1445 sc->work_urb.transfer_flags = 0;
1446 sc->work_urb.actual_length = 0; 1440 sc->work_urb.actual_length = 0;
1447 sc->work_urb.error_count = 0; 1441 sc->work_urb.error_count = 0;
1448 sc->work_urb.status = 0; 1442 sc->work_urb.status = 0;
@@ -1563,7 +1557,6 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1563 1557
1564 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 1558 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1565 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); 1559 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1566 sc->work_urb.transfer_flags = 0;
1567 sc->work_urb.actual_length = 0; 1560 sc->work_urb.actual_length = 0;
1568 sc->work_urb.error_count = 0; 1561 sc->work_urb.error_count = 0;
1569 sc->work_urb.status = 0; 1562 sc->work_urb.status = 0;
@@ -2000,17 +1993,16 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
2000 1993
2001 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, 1994 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2002 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); 1995 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2003 sc->work_urb.transfer_flags = 0;
2004 sc->work_urb.actual_length = 0; 1996 sc->work_urb.actual_length = 0;
2005 sc->work_urb.error_count = 0; 1997 sc->work_urb.error_count = 0;
2006 sc->work_urb.status = 0; 1998 sc->work_urb.status = 0;
2007 1999
2008 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { 2000 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2009 if (rc == -EPIPE) { 2001 if (rc == -EPIPE) {
2010 printk("%s: Stall at GetMaxLUN, using 1 LUN\n", 2002 printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n",
2011 sc->name); /* P3 */ 2003 sc->name); /* P3 */
2012 } else { 2004 } else {
2013 printk(KERN_WARNING 2005 printk(KERN_NOTICE
2014 "%s: Unable to submit GetMaxLUN (%d)\n", 2006 "%s: Unable to submit GetMaxLUN (%d)\n",
2015 sc->name, rc); 2007 sc->name, rc);
2016 } 2008 }
@@ -2028,6 +2020,18 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
2028 del_timer_sync(&timer); 2020 del_timer_sync(&timer);
2029 usb_kill_urb(&sc->work_urb); 2021 usb_kill_urb(&sc->work_urb);
2030 2022
2023 if ((rc = sc->work_urb.status) < 0) {
2024 if (rc == -EPIPE) {
2025 printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
2026 sc->name); /* P3 */
2027 } else {
2028 printk(KERN_NOTICE
2029 "%s: Error at GetMaxLUN (%d)\n",
2030 sc->name, rc);
2031 }
2032 goto err_io;
2033 }
2034
2031 if (sc->work_urb.actual_length != 1) { 2035 if (sc->work_urb.actual_length != 1) {
2032 printk("%s: GetMaxLUN returned %d bytes\n", sc->name, 2036 printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
2033 sc->work_urb.actual_length); /* P3 */ 2037 sc->work_urb.actual_length); /* P3 */
@@ -2048,6 +2052,7 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
2048 kfree(p); 2052 kfree(p);
2049 return nluns; 2053 return nluns;
2050 2054
2055err_io:
2051err_submit: 2056err_submit:
2052 kfree(p); 2057 kfree(p);
2053err_alloc: 2058err_alloc:
@@ -2080,7 +2085,6 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2080 2085
2081 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 2086 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2082 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); 2087 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2083 sc->work_urb.transfer_flags = 0;
2084 sc->work_urb.actual_length = 0; 2088 sc->work_urb.actual_length = 0;
2085 sc->work_urb.error_count = 0; 2089 sc->work_urb.error_count = 0;
2086 sc->work_urb.status = 0; 2090 sc->work_urb.status = 0;
@@ -2213,8 +2217,10 @@ static int ub_probe(struct usb_interface *intf,
2213 * This is needed to clear toggles. It is a problem only if we do 2217 * This is needed to clear toggles. It is a problem only if we do
2214 * `rmmod ub && modprobe ub` without disconnects, but we like that. 2218 * `rmmod ub && modprobe ub` without disconnects, but we like that.
2215 */ 2219 */
2220#if 0 /* iPod Mini fails if we do this (big white iPod works) */
2216 ub_probe_clear_stall(sc, sc->recv_bulk_pipe); 2221 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2217 ub_probe_clear_stall(sc, sc->send_bulk_pipe); 2222 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2223#endif
2218 2224
2219 /* 2225 /*
2220 * The way this is used by the startup code is a little specific. 2226 * The way this is used by the startup code is a little specific.
@@ -2241,10 +2247,10 @@ static int ub_probe(struct usb_interface *intf,
2241 for (i = 0; i < 3; i++) { 2247 for (i = 0; i < 3; i++) {
2242 if ((rc = ub_sync_getmaxlun(sc)) < 0) { 2248 if ((rc = ub_sync_getmaxlun(sc)) < 0) {
2243 /* 2249 /*
2244 * Some devices (i.e. Iomega Zip100) need this -- 2250 * This segment is taken from usb-storage. They say
2245 * apparently the bulk pipes get STALLed when the 2251 * that ZIP-100 needs this, but my own ZIP-100 works
2246 * GetMaxLUN request is processed. 2252 * fine without this.
2247 * XXX I have a ZIP-100, verify it does this. 2253 * Still, it does not seem to hurt anything.
2248 */ 2254 */
2249 if (rc == -EPIPE) { 2255 if (rc == -EPIPE) {
2250 ub_probe_clear_stall(sc, sc->recv_bulk_pipe); 2256 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
@@ -2313,7 +2319,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
2313 disk->first_minor = lun->id * UB_MINORS_PER_MAJOR; 2319 disk->first_minor = lun->id * UB_MINORS_PER_MAJOR;
2314 disk->fops = &ub_bd_fops; 2320 disk->fops = &ub_bd_fops;
2315 disk->private_data = lun; 2321 disk->private_data = lun;
2316 disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */ 2322 disk->driverfs_dev = &sc->intf->dev;
2317 2323
2318 rc = -ENOMEM; 2324 rc = -ENOMEM;
2319 if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL) 2325 if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
@@ -2466,9 +2472,6 @@ static int __init ub_init(void)
2466{ 2472{
2467 int rc; 2473 int rc;
2468 2474
2469 /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n",
2470 sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun));
2471
2472 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) 2475 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2473 goto err_regblkdev; 2476 goto err_regblkdev;
2474 devfs_mk_dir(DEVFS_NAME); 2477 devfs_mk_dir(DEVFS_NAME);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index de0379b6d502..c055bb630ffc 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -273,7 +273,6 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
273 273
274 vma->vm_flags |= VM_IO; 274 vma->vm_flags |= VM_IO;
275 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 275 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
276 addr = __pa(addr);
277 276
278 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, 277 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
279 PAGE_SIZE, vma->vm_page_prot)) { 278 PAGE_SIZE, vma->vm_page_prot)) {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 463351d4f942..32fa82c78c73 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2620,7 +2620,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
2620 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); 2620 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2621 if (!list_empty(&(intf->waiting_msgs))) { 2621 if (!list_empty(&(intf->waiting_msgs))) {
2622 list_add_tail(&(msg->link), &(intf->waiting_msgs)); 2622 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2623 spin_unlock(&(intf->waiting_msgs_lock)); 2623 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2624 goto out_unlock; 2624 goto out_unlock;
2625 } 2625 }
2626 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); 2626 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
@@ -2629,9 +2629,9 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
2629 if (rv > 0) { 2629 if (rv > 0) {
2630 /* Could not handle the message now, just add it to a 2630 /* Could not handle the message now, just add it to a
2631 list to handle later. */ 2631 list to handle later. */
2632 spin_lock(&(intf->waiting_msgs_lock)); 2632 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2633 list_add_tail(&(msg->link), &(intf->waiting_msgs)); 2633 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2634 spin_unlock(&(intf->waiting_msgs_lock)); 2634 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2635 } else if (rv == 0) { 2635 } else if (rv == 0) {
2636 ipmi_free_smi_msg(msg); 2636 ipmi_free_smi_msg(msg);
2637 } 2637 }
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7e72e922b41c..db358cfa7cbf 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -418,12 +418,11 @@ config SENSORS_HDAPS
418 help 418 help
419 This driver provides support for the IBM Hard Drive Active Protection 419 This driver provides support for the IBM Hard Drive Active Protection
420 System (hdaps), which provides an accelerometer and other misc. data. 420 System (hdaps), which provides an accelerometer and other misc. data.
421 Supported laptops include the IBM ThinkPad T41, T42, T43, and R51. 421 ThinkPads starting with the R50, T41, and X40 are supported. The
422 The accelerometer data is readable via sysfs. 422 accelerometer data is readable via sysfs.
423 423
424 This driver also provides an input class device, allowing the 424 This driver also provides an absolute input class device, allowing
425 laptop to act as a pinball machine-esque mouse. This is off by 425 the laptop to act as a pinball machine-esque joystick.
426 default but enabled via sysfs or the module parameter "mousedev".
427 426
428 Say Y here if you have an applicable laptop and want to experience 427 Say Y here if you have an applicable laptop and want to experience
429 the awesome power of hdaps. 428 the awesome power of hdaps.
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index 4c56411f3993..7f0107613827 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -4,9 +4,9 @@
4 * Copyright (C) 2005 Robert Love <rml@novell.com> 4 * Copyright (C) 2005 Robert Love <rml@novell.com>
5 * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com> 5 * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
6 * 6 *
7 * The HardDisk Active Protection System (hdaps) is present in the IBM ThinkPad 7 * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
8 * T41, T42, T43, R50, R50p, R51, and X40, at least. It provides a basic 8 * starting with the R40, T41, and X40. It provides a basic two-axis
9 * two-axis accelerometer and other data, such as the device's temperature. 9 * accelerometer and other data, such as the device's temperature.
10 * 10 *
11 * This driver is based on the document by Mark A. Smith available at 11 * This driver is based on the document by Mark A. Smith available at
12 * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial 12 * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
@@ -487,24 +487,19 @@ static struct attribute_group hdaps_attribute_group = {
487 487
488/* Module stuff */ 488/* Module stuff */
489 489
490/* 490/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
491 * XXX: We should be able to return nonzero and halt the detection process.
492 * But there is a bug in dmi_check_system() where a nonzero return from the
493 * first match will result in a return of failure from dmi_check_system().
494 * I fixed this; the patch is 2.6-git. Once in a released tree, we can make
495 * hdaps_dmi_match_invert() return hdaps_dmi_match(), which in turn returns 1.
496 */
497static int hdaps_dmi_match(struct dmi_system_id *id) 491static int hdaps_dmi_match(struct dmi_system_id *id)
498{ 492{
499 printk(KERN_INFO "hdaps: %s detected.\n", id->ident); 493 printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
500 return 0; 494 return 1;
501} 495}
502 496
497/* hdaps_dmi_match_invert - found an inverted match. */
503static int hdaps_dmi_match_invert(struct dmi_system_id *id) 498static int hdaps_dmi_match_invert(struct dmi_system_id *id)
504{ 499{
505 hdaps_invert = 1; 500 hdaps_invert = 1;
506 printk(KERN_INFO "hdaps: inverting axis readings.\n"); 501 printk(KERN_INFO "hdaps: inverting axis readings.\n");
507 return 0; 502 return hdaps_dmi_match(id);
508} 503}
509 504
510#define HDAPS_DMI_MATCH_NORMAL(model) { \ 505#define HDAPS_DMI_MATCH_NORMAL(model) { \
@@ -534,6 +529,7 @@ static int __init hdaps_init(void)
534 HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"), 529 HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"),
535 HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"), 530 HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"),
536 HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"), 531 HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"),
532 HDAPS_DMI_MATCH_NORMAL("ThinkPad R52"),
537 HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"), 533 HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"),
538 HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"), 534 HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"),
539 HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"), 535 HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"),
@@ -541,6 +537,7 @@ static int __init hdaps_init(void)
541 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"), 537 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"),
542 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"), 538 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"),
543 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"), 539 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"),
540 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41"),
544 { .ident = NULL } 541 { .ident = NULL }
545 }; 542 };
546 543
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 8334496a7e0a..3badfec75b1c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -245,6 +245,18 @@ config I2C_KEYWEST
245 This support is also available as a module. If so, the module 245 This support is also available as a module. If so, the module
246 will be called i2c-keywest. 246 will be called i2c-keywest.
247 247
248config I2C_PMAC_SMU
249 tristate "Powermac SMU I2C interface"
250 depends on I2C && PMAC_SMU
251 help
252 This supports the use of the I2C interface in the SMU
253 chip on recent Apple machines like the iMac G5. It is used
254 among others by the thermal control driver for those machines.
255 Say Y if you have such a machine.
256
257 This support is also available as a module. If so, the module
258 will be called i2c-pmac-smu.
259
248config I2C_MPC 260config I2C_MPC
249 tristate "MPC107/824x/85xx/52xx" 261 tristate "MPC107/824x/85xx/52xx"
250 depends on I2C && PPC32 262 depends on I2C && PPC32
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 980b3e983670..f1df00f66c6c 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_I2C_ITE) += i2c-ite.o
20obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o 20obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
21obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o 21obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o
22obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o 22obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o
23obj-$(CONFIG_I2C_PMAC_SMU) += i2c-pmac-smu.o
23obj-$(CONFIG_I2C_MPC) += i2c-mpc.o 24obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
24obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o 25obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
25obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o 26obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
diff --git a/drivers/i2c/busses/i2c-pmac-smu.c b/drivers/i2c/busses/i2c-pmac-smu.c
new file mode 100644
index 000000000000..8a9f5648a23d
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pmac-smu.c
@@ -0,0 +1,316 @@
1/*
2 i2c Support for Apple SMU Controller
3
4 Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp.
5 <benh@kernel.crashing.org>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20
21*/
22
23#include <linux/config.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/i2c.h>
28#include <linux/init.h>
29#include <linux/completion.h>
30#include <linux/device.h>
31#include <asm/prom.h>
32#include <asm/of_device.h>
33#include <asm/smu.h>
34
35static int probe;
36
37MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
38MODULE_DESCRIPTION("I2C driver for Apple's SMU");
39MODULE_LICENSE("GPL");
40module_param(probe, bool, 0);
41
42
43/* Physical interface */
44struct smu_iface
45{
46 struct i2c_adapter adapter;
47 struct completion complete;
48 u32 busid;
49};
50
51static void smu_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
52{
53 struct smu_iface *iface = misc;
54 complete(&iface->complete);
55}
56
57/*
58 * SMBUS-type transfer entrypoint
59 */
60static s32 smu_smbus_xfer( struct i2c_adapter* adap,
61 u16 addr,
62 unsigned short flags,
63 char read_write,
64 u8 command,
65 int size,
66 union i2c_smbus_data* data)
67{
68 struct smu_iface *iface = i2c_get_adapdata(adap);
69 struct smu_i2c_cmd cmd;
70 int rc = 0;
71 int read = (read_write == I2C_SMBUS_READ);
72
73 cmd.info.bus = iface->busid;
74 cmd.info.devaddr = (addr << 1) | (read ? 0x01 : 0x00);
75
76 /* Prepare datas & select mode */
77 switch (size) {
78 case I2C_SMBUS_QUICK:
79 cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
80 cmd.info.datalen = 0;
81 break;
82 case I2C_SMBUS_BYTE:
83 cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
84 cmd.info.datalen = 1;
85 if (!read)
86 cmd.info.data[0] = data->byte;
87 break;
88 case I2C_SMBUS_BYTE_DATA:
89 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
90 cmd.info.datalen = 1;
91 cmd.info.sublen = 1;
92 cmd.info.subaddr[0] = command;
93 cmd.info.subaddr[1] = 0;
94 cmd.info.subaddr[2] = 0;
95 if (!read)
96 cmd.info.data[0] = data->byte;
97 break;
98 case I2C_SMBUS_WORD_DATA:
99 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
100 cmd.info.datalen = 2;
101 cmd.info.sublen = 1;
102 cmd.info.subaddr[0] = command;
103 cmd.info.subaddr[1] = 0;
104 cmd.info.subaddr[2] = 0;
105 if (!read) {
106 cmd.info.data[0] = data->byte & 0xff;
107 cmd.info.data[1] = (data->byte >> 8) & 0xff;
108 }
109 break;
110 /* Note that these are broken vs. the expected smbus API where
111 * on reads, the lenght is actually returned from the function,
112 * but I think the current API makes no sense and I don't want
113 * any driver that I haven't verified for correctness to go
114 * anywhere near a pmac i2c bus anyway ...
115 */
116 case I2C_SMBUS_BLOCK_DATA:
117 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
118 cmd.info.datalen = data->block[0] + 1;
119 if (cmd.info.datalen > 6)
120 return -EINVAL;
121 if (!read)
122 memcpy(cmd.info.data, data->block, cmd.info.datalen);
123 cmd.info.sublen = 1;
124 cmd.info.subaddr[0] = command;
125 cmd.info.subaddr[1] = 0;
126 cmd.info.subaddr[2] = 0;
127 break;
128 case I2C_SMBUS_I2C_BLOCK_DATA:
129 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
130 cmd.info.datalen = data->block[0];
131 if (cmd.info.datalen > 7)
132 return -EINVAL;
133 if (!read)
134 memcpy(cmd.info.data, &data->block[1],
135 cmd.info.datalen);
136 cmd.info.sublen = 1;
137 cmd.info.subaddr[0] = command;
138 cmd.info.subaddr[1] = 0;
139 cmd.info.subaddr[2] = 0;
140 break;
141
142 default:
143 return -EINVAL;
144 }
145
146 /* Turn a standardsub read into a combined mode access */
147 if (read_write == I2C_SMBUS_READ &&
148 cmd.info.type == SMU_I2C_TRANSFER_STDSUB)
149 cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
150
151 /* Finish filling command and submit it */
152 cmd.done = smu_i2c_done;
153 cmd.misc = iface;
154 rc = smu_queue_i2c(&cmd);
155 if (rc < 0)
156 return rc;
157 wait_for_completion(&iface->complete);
158 rc = cmd.status;
159
160 if (!read || rc < 0)
161 return rc;
162
163 switch (size) {
164 case I2C_SMBUS_BYTE:
165 case I2C_SMBUS_BYTE_DATA:
166 data->byte = cmd.info.data[0];
167 break;
168 case I2C_SMBUS_WORD_DATA:
169 data->word = ((u16)cmd.info.data[1]) << 8;
170 data->word |= cmd.info.data[0];
171 break;
172 /* Note that these are broken vs. the expected smbus API where
173 * on reads, the lenght is actually returned from the function,
174 * but I think the current API makes no sense and I don't want
175 * any driver that I haven't verified for correctness to go
176 * anywhere near a pmac i2c bus anyway ...
177 */
178 case I2C_SMBUS_BLOCK_DATA:
179 case I2C_SMBUS_I2C_BLOCK_DATA:
180 memcpy(&data->block[0], cmd.info.data, cmd.info.datalen);
181 break;
182 }
183
184 return rc;
185}
186
187static u32
188smu_smbus_func(struct i2c_adapter * adapter)
189{
190 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
191 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
192 I2C_FUNC_SMBUS_BLOCK_DATA;
193}
194
195/* For now, we only handle combined mode (smbus) */
196static struct i2c_algorithm smu_algorithm = {
197 .smbus_xfer = smu_smbus_xfer,
198 .functionality = smu_smbus_func,
199};
200
201static int create_iface(struct device_node *np, struct device *dev)
202{
203 struct smu_iface* iface;
204 u32 *reg, busid;
205 int rc;
206
207 reg = (u32 *)get_property(np, "reg", NULL);
208 if (reg == NULL) {
209 printk(KERN_ERR "i2c-pmac-smu: can't find bus number !\n");
210 return -ENXIO;
211 }
212 busid = *reg;
213
214 iface = kmalloc(sizeof(struct smu_iface), GFP_KERNEL);
215 if (iface == NULL) {
216 printk(KERN_ERR "i2c-pmac-smu: can't allocate inteface !\n");
217 return -ENOMEM;
218 }
219 memset(iface, 0, sizeof(struct smu_iface));
220 init_completion(&iface->complete);
221 iface->busid = busid;
222
223 dev_set_drvdata(dev, iface);
224
225 sprintf(iface->adapter.name, "smu-i2c-%02x", busid);
226 iface->adapter.algo = &smu_algorithm;
227 iface->adapter.algo_data = NULL;
228 iface->adapter.client_register = NULL;
229 iface->adapter.client_unregister = NULL;
230 i2c_set_adapdata(&iface->adapter, iface);
231 iface->adapter.dev.parent = dev;
232
233 rc = i2c_add_adapter(&iface->adapter);
234 if (rc) {
235 printk(KERN_ERR "i2c-pamc-smu.c: Adapter %s registration "
236 "failed\n", iface->adapter.name);
237 i2c_set_adapdata(&iface->adapter, NULL);
238 }
239
240 if (probe) {
241 unsigned char addr;
242 printk("Probe: ");
243 for (addr = 0x00; addr <= 0x7f; addr++) {
244 if (i2c_smbus_xfer(&iface->adapter,addr,
245 0,0,0,I2C_SMBUS_QUICK,NULL) >= 0)
246 printk("%02x ", addr);
247 }
248 printk("\n");
249 }
250
251 printk(KERN_INFO "SMU i2c bus %x registered\n", busid);
252
253 return 0;
254}
255
256static int dispose_iface(struct device *dev)
257{
258 struct smu_iface *iface = dev_get_drvdata(dev);
259 int rc;
260
261 rc = i2c_del_adapter(&iface->adapter);
262 i2c_set_adapdata(&iface->adapter, NULL);
263 /* We aren't that prepared to deal with this... */
264 if (rc)
265 printk("i2c-pmac-smu.c: Failed to remove bus %s !\n",
266 iface->adapter.name);
267 dev_set_drvdata(dev, NULL);
268 kfree(iface);
269
270 return 0;
271}
272
273
274static int create_iface_of_platform(struct of_device* dev,
275 const struct of_device_id *match)
276{
277 return create_iface(dev->node, &dev->dev);
278}
279
280
281static int dispose_iface_of_platform(struct of_device* dev)
282{
283 return dispose_iface(&dev->dev);
284}
285
286
287static struct of_device_id i2c_smu_match[] =
288{
289 {
290 .compatible = "smu-i2c",
291 },
292 {},
293};
294static struct of_platform_driver i2c_smu_of_platform_driver =
295{
296 .name = "i2c-smu",
297 .match_table = i2c_smu_match,
298 .probe = create_iface_of_platform,
299 .remove = dispose_iface_of_platform
300};
301
302
303static int __init i2c_pmac_smu_init(void)
304{
305 of_register_driver(&i2c_smu_of_platform_driver);
306 return 0;
307}
308
309
310static void __exit i2c_pmac_smu_cleanup(void)
311{
312 of_unregister_driver(&i2c_smu_of_platform_driver);
313}
314
315module_init(i2c_pmac_smu_init);
316module_exit(i2c_pmac_smu_cleanup);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 2bd8b1cc57c4..e23836d0e21b 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -412,8 +412,8 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
412 412
413 hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class); 413 hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
414 data_size = sizeof(struct ib_rmpp_mad) - hdr_size; 414 data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
415 pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 415 pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
416 if (pad > data_size || pad < 0) 416 if (pad > IB_MGMT_RMPP_DATA || pad < 0)
417 pad = 0; 417 pad = 0;
418 418
419 return hdr_size + rmpp_recv->seg_num * data_size - pad; 419 return hdr_size + rmpp_recv->seg_num * data_size - pad;
@@ -583,6 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
583{ 583{
584 struct ib_rmpp_mad *rmpp_mad; 584 struct ib_rmpp_mad *rmpp_mad;
585 int timeout; 585 int timeout;
586 u32 paylen;
586 587
587 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; 588 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
588 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 589 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
@@ -590,11 +591,9 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
590 591
591 if (mad_send_wr->seg_num == 1) { 592 if (mad_send_wr->seg_num == 1) {
592 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST; 593 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
593 rmpp_mad->rmpp_hdr.paylen_newwin = 594 paylen = mad_send_wr->total_seg * IB_MGMT_RMPP_DATA -
594 cpu_to_be32(mad_send_wr->total_seg * 595 mad_send_wr->pad;
595 (sizeof(struct ib_rmpp_mad) - 596 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
596 offsetof(struct ib_rmpp_mad, data)) -
597 mad_send_wr->pad);
598 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad); 597 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
599 } else { 598 } else {
600 mad_send_wr->send_wr.num_sge = 2; 599 mad_send_wr->send_wr.num_sge = 2;
@@ -608,10 +607,8 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
608 607
609 if (mad_send_wr->seg_num == mad_send_wr->total_seg) { 608 if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
610 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST; 609 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
611 rmpp_mad->rmpp_hdr.paylen_newwin = 610 paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
612 cpu_to_be32(sizeof(struct ib_rmpp_mad) - 611 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
613 offsetof(struct ib_rmpp_mad, data) -
614 mad_send_wr->pad);
615 } 612 }
616 613
617 /* 2 seconds for an ACK until we can find the packet lifetime */ 614 /* 2 seconds for an ACK until we can find the packet lifetime */
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7c2f03057ddb..a64d6b4dcc16 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -334,10 +334,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
334 ret = -EINVAL; 334 ret = -EINVAL;
335 goto err_ah; 335 goto err_ah;
336 } 336 }
337 /* Validate that management class can support RMPP */ 337
338 /* Validate that the management class can support RMPP */
338 if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { 339 if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
339 hdr_len = offsetof(struct ib_sa_mad, data); 340 hdr_len = offsetof(struct ib_sa_mad, data);
340 data_len = length; 341 data_len = length - hdr_len;
341 } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 342 } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
342 (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) { 343 (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
343 hdr_len = offsetof(struct ib_vendor_mad, data); 344 hdr_len = offsetof(struct ib_vendor_mad, data);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 18f0981eb0c1..78152a8ad17d 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -476,12 +476,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
476 int i; 476 int i;
477 u8 status; 477 u8 status;
478 478
479 /* Make sure EQ size is aligned to a power of 2 size. */ 479 eq->dev = dev;
480 for (i = 1; i < nent; i <<= 1) 480 eq->nent = roundup_pow_of_two(max(nent, 2));
481 ; /* nothing */
482 nent = i;
483
484 eq->dev = dev;
485 481
486 eq->page_list = kmalloc(npages * sizeof *eq->page_list, 482 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
487 GFP_KERNEL); 483 GFP_KERNEL);
@@ -512,7 +508,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
512 memset(eq->page_list[i].buf, 0, PAGE_SIZE); 508 memset(eq->page_list[i].buf, 0, PAGE_SIZE);
513 } 509 }
514 510
515 for (i = 0; i < nent; ++i) 511 for (i = 0; i < eq->nent; ++i)
516 set_eqe_hw(get_eqe(eq, i)); 512 set_eqe_hw(get_eqe(eq, i));
517 513
518 eq->eqn = mthca_alloc(&dev->eq_table.alloc); 514 eq->eqn = mthca_alloc(&dev->eq_table.alloc);
@@ -528,8 +524,6 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
528 if (err) 524 if (err)
529 goto err_out_free_eq; 525 goto err_out_free_eq;
530 526
531 eq->nent = nent;
532
533 memset(eq_context, 0, sizeof *eq_context); 527 memset(eq_context, 0, sizeof *eq_context);
534 eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK | 528 eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK |
535 MTHCA_EQ_OWNER_HW | 529 MTHCA_EQ_OWNER_HW |
@@ -538,7 +532,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
538 if (mthca_is_memfree(dev)) 532 if (mthca_is_memfree(dev))
539 eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL); 533 eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
540 534
541 eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); 535 eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
542 if (mthca_is_memfree(dev)) { 536 if (mthca_is_memfree(dev)) {
543 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num); 537 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
544 } else { 538 } else {
@@ -569,7 +563,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
569 dev->eq_table.arm_mask |= eq->eqn_mask; 563 dev->eq_table.arm_mask |= eq->eqn_mask;
570 564
571 mthca_dbg(dev, "Allocated EQ %d with %d entries\n", 565 mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
572 eq->eqn, nent); 566 eq->eqn, eq->nent);
573 567
574 return err; 568 return err;
575 569
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index bcef06bf15e7..5fa00669f9b8 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
227 wq->last_comp = wq->max - 1; 227 wq->last_comp = wq->max - 1;
228 wq->head = 0; 228 wq->head = 0;
229 wq->tail = 0; 229 wq->tail = 0;
230 wq->last = NULL;
231} 230}
232 231
233void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 232void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -687,7 +686,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
687 } 686 }
688 687
689 if (attr_mask & IB_QP_TIMEOUT) { 688 if (attr_mask & IB_QP_TIMEOUT) {
690 qp_context->pri_path.ackto = attr->timeout; 689 qp_context->pri_path.ackto = attr->timeout << 3;
691 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); 690 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
692 } 691 }
693 692
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1103 } 1102 }
1104 } 1103 }
1105 1104
1105 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
1106 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
1107
1106 return 0; 1108 return 0;
1107} 1109}
1108 1110
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1583 goto out; 1585 goto out;
1584 } 1586 }
1585 1587
1586 if (prev_wqe) { 1588 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1587 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1589 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1588 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1590 qp->send_wqe_offset) |
1589 qp->send_wqe_offset) | 1591 mthca_opcode[wr->opcode]);
1590 mthca_opcode[wr->opcode]); 1592 wmb();
1591 wmb(); 1593 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1592 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1594 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
1593 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
1594 }
1595 1595
1596 if (!size0) { 1596 if (!size0) {
1597 size0 = size; 1597 size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1688 1688
1689 qp->wrid[ind] = wr->wr_id; 1689 qp->wrid[ind] = wr->wr_id;
1690 1690
1691 if (likely(prev_wqe)) { 1691 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1692 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1692 cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
1693 cpu_to_be32((ind << qp->rq.wqe_shift) | 1); 1693 wmb();
1694 wmb(); 1694 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1695 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1695 cpu_to_be32(MTHCA_NEXT_DBD | size);
1696 cpu_to_be32(MTHCA_NEXT_DBD | size);
1697 }
1698 1696
1699 if (!size0) 1697 if (!size0)
1700 size0 = size; 1698 size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1905 goto out; 1903 goto out;
1906 } 1904 }
1907 1905
1908 if (likely(prev_wqe)) { 1906 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1909 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1907 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1910 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1908 qp->send_wqe_offset) |
1911 qp->send_wqe_offset) | 1909 mthca_opcode[wr->opcode]);
1912 mthca_opcode[wr->opcode]); 1910 wmb();
1913 wmb(); 1911 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1914 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1912 cpu_to_be32(MTHCA_NEXT_DBD | size);
1915 cpu_to_be32(MTHCA_NEXT_DBD | size);
1916 }
1917 1913
1918 if (!size0) { 1914 if (!size0) {
1919 size0 = size; 1915 size0 = size;
@@ -2127,5 +2123,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
2127 for (i = 0; i < 2; ++i) 2123 for (i = 0; i < 2; ++i)
2128 mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2124 mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
2129 2125
2126 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2130 mthca_alloc_cleanup(&dev->qp_table.alloc); 2127 mthca_alloc_cleanup(&dev->qp_table.alloc);
2131} 2128}
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 75cd2d84ef12..18998d48c53e 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -172,6 +172,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
172 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 172 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
173 } 173 }
174 174
175 srq->last = get_wqe(srq, srq->max - 1);
176
175 return 0; 177 return 0;
176} 178}
177 179
@@ -189,7 +191,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
189 191
190 srq->max = attr->max_wr; 192 srq->max = attr->max_wr;
191 srq->max_gs = attr->max_sge; 193 srq->max_gs = attr->max_sge;
192 srq->last = NULL;
193 srq->counter = 0; 194 srq->counter = 0;
194 195
195 if (mthca_is_memfree(dev)) 196 if (mthca_is_memfree(dev))
@@ -409,7 +410,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
409 mthca_err(dev, "SRQ %06x full\n", srq->srqn); 410 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
410 err = -ENOMEM; 411 err = -ENOMEM;
411 *bad_wr = wr; 412 *bad_wr = wr;
412 return nreq; 413 break;
413 } 414 }
414 415
415 wqe = get_wqe(srq, ind); 416 wqe = get_wqe(srq, ind);
@@ -427,7 +428,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
427 err = -EINVAL; 428 err = -EINVAL;
428 *bad_wr = wr; 429 *bad_wr = wr;
429 srq->last = prev_wqe; 430 srq->last = prev_wqe;
430 return nreq; 431 break;
431 } 432 }
432 433
433 for (i = 0; i < wr->num_sge; ++i) { 434 for (i = 0; i < wr->num_sge; ++i) {
@@ -446,20 +447,16 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
446 ((struct mthca_data_seg *) wqe)->addr = 0; 447 ((struct mthca_data_seg *) wqe)->addr = 0;
447 } 448 }
448 449
449 if (likely(prev_wqe)) { 450 ((struct mthca_next_seg *) prev_wqe)->nda_op =
450 ((struct mthca_next_seg *) prev_wqe)->nda_op = 451 cpu_to_be32((ind << srq->wqe_shift) | 1);
451 cpu_to_be32((ind << srq->wqe_shift) | 1); 452 wmb();
452 wmb(); 453 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
453 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 454 cpu_to_be32(MTHCA_NEXT_DBD);
454 cpu_to_be32(MTHCA_NEXT_DBD);
455 }
456 455
457 srq->wrid[ind] = wr->wr_id; 456 srq->wrid[ind] = wr->wr_id;
458 srq->first_free = next_ind; 457 srq->first_free = next_ind;
459 } 458 }
460 459
461 return nreq;
462
463 if (likely(nreq)) { 460 if (likely(nreq)) {
464 __be32 doorbell[2]; 461 __be32 doorbell[2];
465 462
@@ -503,7 +500,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
503 mthca_err(dev, "SRQ %06x full\n", srq->srqn); 500 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
504 err = -ENOMEM; 501 err = -ENOMEM;
505 *bad_wr = wr; 502 *bad_wr = wr;
506 return nreq; 503 break;
507 } 504 }
508 505
509 wqe = get_wqe(srq, ind); 506 wqe = get_wqe(srq, ind);
@@ -519,7 +516,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
519 if (unlikely(wr->num_sge > srq->max_gs)) { 516 if (unlikely(wr->num_sge > srq->max_gs)) {
520 err = -EINVAL; 517 err = -EINVAL;
521 *bad_wr = wr; 518 *bad_wr = wr;
522 return nreq; 519 break;
523 } 520 }
524 521
525 for (i = 0; i < wr->num_sge; ++i) { 522 for (i = 0; i < wr->num_sge; ++i) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bea960b8191f..4ea1c1ca85bc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -257,7 +257,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
257 257
258void ipoib_mcast_restart_task(void *dev_ptr); 258void ipoib_mcast_restart_task(void *dev_ptr);
259int ipoib_mcast_start_thread(struct net_device *dev); 259int ipoib_mcast_start_thread(struct net_device *dev);
260int ipoib_mcast_stop_thread(struct net_device *dev); 260int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
261 261
262void ipoib_mcast_dev_down(struct net_device *dev); 262void ipoib_mcast_dev_down(struct net_device *dev);
263void ipoib_mcast_dev_flush(struct net_device *dev); 263void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ef0e3894863c..f7440096b5ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -432,7 +432,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
432 flush_workqueue(ipoib_workqueue); 432 flush_workqueue(ipoib_workqueue);
433 } 433 }
434 434
435 ipoib_mcast_stop_thread(dev); 435 ipoib_mcast_stop_thread(dev, 1);
436 436
437 /* 437 /*
438 * Flush the multicast groups first so we stop any multicast joins. The 438 * Flush the multicast groups first so we stop any multicast joins. The
@@ -599,7 +599,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
599 599
600 ipoib_dbg(priv, "cleaning up ib_dev\n"); 600 ipoib_dbg(priv, "cleaning up ib_dev\n");
601 601
602 ipoib_mcast_stop_thread(dev); 602 ipoib_mcast_stop_thread(dev, 1);
603 603
604 /* Delete the broadcast address and the local address */ 604 /* Delete the broadcast address and the local address */
605 ipoib_mcast_dev_down(dev); 605 ipoib_mcast_dev_down(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 49d120d2b92c..704f48e0b6a7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1005,6 +1005,7 @@ debug_failed:
1005 1005
1006register_failed: 1006register_failed:
1007 ib_unregister_event_handler(&priv->event_handler); 1007 ib_unregister_event_handler(&priv->event_handler);
1008 flush_scheduled_work();
1008 1009
1009event_failed: 1010event_failed:
1010 ipoib_dev_cleanup(priv->dev); 1011 ipoib_dev_cleanup(priv->dev);
@@ -1057,6 +1058,7 @@ static void ipoib_remove_one(struct ib_device *device)
1057 1058
1058 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1059 list_for_each_entry_safe(priv, tmp, dev_list, list) {
1059 ib_unregister_event_handler(&priv->event_handler); 1060 ib_unregister_event_handler(&priv->event_handler);
1061 flush_scheduled_work();
1060 1062
1061 unregister_netdev(priv->dev); 1063 unregister_netdev(priv->dev);
1062 ipoib_dev_cleanup(priv->dev); 1064 ipoib_dev_cleanup(priv->dev);
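Both ipoib_main.c hunks add a flush_scheduled_work() call immediately after ib_unregister_event_handler(), once on the register_failed error path and once in ipoib_remove_one(), so that any event-handler work already queued on the shared workqueue has finished before the netdev is unregistered and its private data freed. The resulting teardown order, assuming the ipoib private data from the hunks:

	ib_unregister_event_handler(&priv->event_handler);
	flush_scheduled_work();         /* queued event work must not outlive the device */
	unregister_netdev(priv->dev);
	ipoib_dev_cleanup(priv->dev);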
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aca7aea18a69..36ce29836bf2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -145,7 +145,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
145 145
146 mcast->dev = dev; 146 mcast->dev = dev;
147 mcast->created = jiffies; 147 mcast->created = jiffies;
148 mcast->backoff = HZ; 148 mcast->backoff = 1;
149 mcast->logcount = 0; 149 mcast->logcount = 0;
150 150
151 INIT_LIST_HEAD(&mcast->list); 151 INIT_LIST_HEAD(&mcast->list);
@@ -396,7 +396,7 @@ static void ipoib_mcast_join_complete(int status,
396 IPOIB_GID_ARG(mcast->mcmember.mgid), status); 396 IPOIB_GID_ARG(mcast->mcmember.mgid), status);
397 397
398 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) { 398 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
399 mcast->backoff = HZ; 399 mcast->backoff = 1;
400 down(&mcast_mutex); 400 down(&mcast_mutex);
401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
402 queue_work(ipoib_workqueue, &priv->mcast_task); 402 queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -496,7 +496,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
496 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 496 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
497 queue_delayed_work(ipoib_workqueue, 497 queue_delayed_work(ipoib_workqueue,
498 &priv->mcast_task, 498 &priv->mcast_task,
499 mcast->backoff); 499 mcast->backoff * HZ);
500 up(&mcast_mutex); 500 up(&mcast_mutex);
501 } else 501 } else
502 mcast->query_id = ret; 502 mcast->query_id = ret;
@@ -598,7 +598,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
598 return 0; 598 return 0;
599} 599}
600 600
601int ipoib_mcast_stop_thread(struct net_device *dev) 601int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
602{ 602{
603 struct ipoib_dev_priv *priv = netdev_priv(dev); 603 struct ipoib_dev_priv *priv = netdev_priv(dev);
604 struct ipoib_mcast *mcast; 604 struct ipoib_mcast *mcast;
@@ -610,7 +610,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
610 cancel_delayed_work(&priv->mcast_task); 610 cancel_delayed_work(&priv->mcast_task);
611 up(&mcast_mutex); 611 up(&mcast_mutex);
612 612
613 flush_workqueue(ipoib_workqueue); 613 if (flush)
614 flush_workqueue(ipoib_workqueue);
614 615
615 if (priv->broadcast && priv->broadcast->query) { 616 if (priv->broadcast && priv->broadcast->query) {
616 ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query); 617 ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
@@ -832,7 +833,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
832 833
833 ipoib_dbg_mcast(priv, "restarting multicast task\n"); 834 ipoib_dbg_mcast(priv, "restarting multicast task\n");
834 835
835 ipoib_mcast_stop_thread(dev); 836 ipoib_mcast_stop_thread(dev, 0);
836 837
837 spin_lock_irqsave(&priv->lock, flags); 838 spin_lock_irqsave(&priv->lock, flags);
838 839
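Two threads of change run through ipoib_multicast.c. First, mcast->backoff is now kept in seconds (initialised to 1 rather than HZ) and converted to jiffies only when the delayed join work is armed, so the backoff interval no longer scales with the tick rate. Second, ipoib_mcast_stop_thread() grows a flush argument: the callers in ipoib_ib.c keep flushing the workqueue, while ipoib_mcast_restart_task() passes 0 because it runs on that same workqueue and flushing it from within would deadlock. A sketch of the backoff handling under the new units; the doubling and the 16-second cap are assumptions about the surrounding driver code, not part of these hunks:

	/* backoff is in seconds; convert only when arming the delayed work */
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);

	/* on a failed join attempt, back off exponentially (cap assumed) */
	mcast->backoff *= 2;
	if (mcast->backoff > 16)
		mcast->backoff = 16;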
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 88636a204525..14ae5583e198 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -308,6 +308,7 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
308 MATCH_BIT(ledbit, LED_MAX); 308 MATCH_BIT(ledbit, LED_MAX);
309 MATCH_BIT(sndbit, SND_MAX); 309 MATCH_BIT(sndbit, SND_MAX);
310 MATCH_BIT(ffbit, FF_MAX); 310 MATCH_BIT(ffbit, FF_MAX);
311 MATCH_BIT(swbit, SW_MAX);
311 312
312 return id; 313 return id;
313 } 314 }
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 0a2536d62402..657817a591fe 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -209,9 +209,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
209 bcs->mode = mode; 209 bcs->mode = mode;
210 210
211 // Cancel all USB transfers on this B channel 211 // Cancel all USB transfers on this B channel
212 b_out->urb[0]->transfer_flags |= URB_ASYNC_UNLINK;
213 usb_unlink_urb(b_out->urb[0]); 212 usb_unlink_urb(b_out->urb[0]);
214 b_out->urb[1]->transfer_flags |= URB_ASYNC_UNLINK;
215 usb_unlink_urb(b_out->urb[1]); 213 usb_unlink_urb(b_out->urb[1]);
216 b_out->busy = 0; 214 b_out->busy = 0;
217 215
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ffd5b2d45552..89fbeb58485d 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -645,9 +645,7 @@ void st5481_in_mode(struct st5481_in *in, int mode)
645 645
646 in->mode = mode; 646 in->mode = mode;
647 647
648 in->urb[0]->transfer_flags |= URB_ASYNC_UNLINK;
649 usb_unlink_urb(in->urb[0]); 648 usb_unlink_urb(in->urb[0]);
650 in->urb[1]->transfer_flags |= URB_ASYNC_UNLINK;
651 usb_unlink_urb(in->urb[1]); 649 usb_unlink_urb(in->urb[1]);
652 650
653 if (in->mode != L1_MODE_NULL) { 651 if (in->mode != L1_MODE_NULL) {
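Both st5481 hunks delete the statements that set URB_ASYNC_UNLINK before calling usb_unlink_urb(). This follows the USB core change of the same period in which usb_unlink_urb() became unconditionally asynchronous and the flag was retired; a driver that must wait for the completion handler calls usb_kill_urb() instead. A hedged sketch of the two idioms, assuming <linux/usb.h>:

	static void cancel_channel_urbs(struct urb *urb[2], int can_sleep)
	{
		if (can_sleep) {
			usb_kill_urb(urb[0]);   /* synchronous: completion has run on return */
			usb_kill_urb(urb[1]);
		} else {
			usb_unlink_urb(urb[0]); /* asynchronous: completion fires later,
						 * typically with -ECONNRESET */
			usb_unlink_urb(urb[1]);
		}
	}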
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index fb535737d17d..a85ac18dd21d 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -8,21 +8,15 @@
8 */ 8 */
9 9
10/* 10/*
11 * For now, this driver includes:
12 * - RTC get & set
13 * - reboot & shutdown commands
14 * all synchronous with IRQ disabled (ugh)
15 *
16 * TODO: 11 * TODO:
17 * rework in a way the PMU driver works, that is asynchronous 12 * - maybe add timeout to commands ?
18 * with a queue of commands. I'll do that as soon as I have an 13 * - blocking version of time functions
19 * SMU based machine at hand. Some more cleanup is needed too, 14 * - polling version of i2c commands (including timer that works with
20 * like maybe fitting it into a platform device, etc... 15 * interrupts off)
21 * Also check what's up with cache coherency, and if we really 16 * - maybe avoid some data copies with i2c by directly using the smu cmd
22 * can't do better than flushing the cache, maybe build a table 17 * buffer and a lower level internal interface
23 * of command len/reply len like the PMU driver to only flush 18 * - understand SMU -> CPU events and implement reception of them via
24 * what is actually necessary. 19 * the userland interface
25 * --BenH.
26 */ 20 */
27 21
28#include <linux/config.h> 22#include <linux/config.h>
@@ -36,6 +30,11 @@
36#include <linux/jiffies.h> 30#include <linux/jiffies.h>
37#include <linux/interrupt.h> 31#include <linux/interrupt.h>
38#include <linux/rtc.h> 32#include <linux/rtc.h>
33#include <linux/completion.h>
34#include <linux/miscdevice.h>
35#include <linux/delay.h>
36#include <linux/sysdev.h>
37#include <linux/poll.h>
39 38
40#include <asm/byteorder.h> 39#include <asm/byteorder.h>
41#include <asm/io.h> 40#include <asm/io.h>
@@ -45,8 +44,13 @@
45#include <asm/smu.h> 44#include <asm/smu.h>
46#include <asm/sections.h> 45#include <asm/sections.h>
47#include <asm/abs_addr.h> 46#include <asm/abs_addr.h>
47#include <asm/uaccess.h>
48#include <asm/of_device.h>
49
50#define VERSION "0.6"
51#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
48 52
49#define DEBUG_SMU 1 53#undef DEBUG_SMU
50 54
51#ifdef DEBUG_SMU 55#ifdef DEBUG_SMU
52#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0) 56#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
@@ -57,20 +61,30 @@
57/* 61/*
58 * This is the command buffer passed to the SMU hardware 62 * This is the command buffer passed to the SMU hardware
59 */ 63 */
64#define SMU_MAX_DATA 254
65
60struct smu_cmd_buf { 66struct smu_cmd_buf {
61 u8 cmd; 67 u8 cmd;
62 u8 length; 68 u8 length;
63 u8 data[0x0FFE]; 69 u8 data[SMU_MAX_DATA];
64}; 70};
65 71
66struct smu_device { 72struct smu_device {
67 spinlock_t lock; 73 spinlock_t lock;
68 struct device_node *of_node; 74 struct device_node *of_node;
69 int db_ack; /* doorbell ack GPIO */ 75 struct of_device *of_dev;
70 int db_req; /* doorbell req GPIO */ 76 int doorbell; /* doorbell gpio */
71 u32 __iomem *db_buf; /* doorbell buffer */ 77 u32 __iomem *db_buf; /* doorbell buffer */
78 int db_irq;
79 int msg;
80 int msg_irq;
72 struct smu_cmd_buf *cmd_buf; /* command buffer virtual */ 81 struct smu_cmd_buf *cmd_buf; /* command buffer virtual */
73 u32 cmd_buf_abs; /* command buffer absolute */ 82 u32 cmd_buf_abs; /* command buffer absolute */
83 struct list_head cmd_list;
84 struct smu_cmd *cmd_cur; /* pending command */
85 struct list_head cmd_i2c_list;
86 struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */
87 struct timer_list i2c_timer;
74}; 88};
75 89
76/* 90/*
@@ -79,113 +93,243 @@ struct smu_device {
79 */ 93 */
80static struct smu_device *smu; 94static struct smu_device *smu;
81 95
96
82/* 97/*
83 * SMU low level communication stuff 98 * SMU driver low level stuff
84 */ 99 */
85static inline int smu_cmd_stat(struct smu_cmd_buf *cmd_buf, u8 cmd_ack)
86{
87 rmb();
88 return cmd_buf->cmd == cmd_ack && cmd_buf->length != 0;
89}
90 100
91static inline u8 smu_save_ack_cmd(struct smu_cmd_buf *cmd_buf) 101static void smu_start_cmd(void)
92{ 102{
93 return (~cmd_buf->cmd) & 0xff; 103 unsigned long faddr, fend;
94} 104 struct smu_cmd *cmd;
95 105
96static void smu_send_cmd(struct smu_device *dev) 106 if (list_empty(&smu->cmd_list))
97{ 107 return;
98 /* SMU command buf is currently cacheable, we need a physical 108
99 * address. This isn't exactly a DMA mapping here, I suspect 109 /* Fetch first command in queue */
110 cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link);
111 smu->cmd_cur = cmd;
112 list_del(&cmd->link);
113
114 DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
115 cmd->data_len);
116 DPRINTK("SMU: data buffer: %02x %02x %02x %02x ...\n",
117 ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
118 ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3]);
119
120 /* Fill the SMU command buffer */
121 smu->cmd_buf->cmd = cmd->cmd;
122 smu->cmd_buf->length = cmd->data_len;
123 memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
124
125 /* Flush command and data to RAM */
126 faddr = (unsigned long)smu->cmd_buf;
127 fend = faddr + smu->cmd_buf->length + 2;
128 flush_inval_dcache_range(faddr, fend);
129
130 /* This isn't exactly a DMA mapping here, I suspect
100 * the SMU is actually communicating with us via i2c to the 131 * the SMU is actually communicating with us via i2c to the
101 * northbridge or the CPU to access RAM. 132 * northbridge or the CPU to access RAM.
102 */ 133 */
103 writel(dev->cmd_buf_abs, dev->db_buf); 134 writel(smu->cmd_buf_abs, smu->db_buf);
104 135
105 /* Ring the SMU doorbell */ 136 /* Ring the SMU doorbell */
106 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, dev->db_req, 4); 137 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4);
107 pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, dev->db_req, 4);
108} 138}
109 139
110static int smu_cmd_done(struct smu_device *dev) 140
141static irqreturn_t smu_db_intr(int irq, void *arg, struct pt_regs *regs)
111{ 142{
112 unsigned long wait = 0; 143 unsigned long flags;
113 int gpio; 144 struct smu_cmd *cmd;
145 void (*done)(struct smu_cmd *cmd, void *misc) = NULL;
146 void *misc = NULL;
147 u8 gpio;
148 int rc = 0;
114 149
115 /* Check the SMU doorbell */ 150 /* SMU completed the command, well, we hope, let's make sure
116 do { 151 * of it
117 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, 152 */
118 NULL, dev->db_ack); 153 spin_lock_irqsave(&smu->lock, flags);
119 if ((gpio & 7) == 7)
120 return 0;
121 udelay(100);
122 } while(++wait < 10000);
123 154
124 printk(KERN_ERR "SMU timeout !\n"); 155 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
125 return -ENXIO; 156 if ((gpio & 7) != 7)
157 return IRQ_HANDLED;
158
159 cmd = smu->cmd_cur;
160 smu->cmd_cur = NULL;
161 if (cmd == NULL)
162 goto bail;
163
164 if (rc == 0) {
165 unsigned long faddr;
166 int reply_len;
167 u8 ack;
168
169 /* CPU might have brought back the cache line, so we need
170 * to flush again before peeking at the SMU response. We
171 * flush the entire buffer for now as we haven't read the
172 * reply lenght (it's only 2 cache lines anyway)
173 */
174 faddr = (unsigned long)smu->cmd_buf;
175 flush_inval_dcache_range(faddr, faddr + 256);
176
177 /* Now check ack */
178 ack = (~cmd->cmd) & 0xff;
179 if (ack != smu->cmd_buf->cmd) {
180 DPRINTK("SMU: incorrect ack, want %x got %x\n",
181 ack, smu->cmd_buf->cmd);
182 rc = -EIO;
183 }
184 reply_len = rc == 0 ? smu->cmd_buf->length : 0;
185 DPRINTK("SMU: reply len: %d\n", reply_len);
186 if (reply_len > cmd->reply_len) {
187 printk(KERN_WARNING "SMU: reply buffer too small,"
188 "got %d bytes for a %d bytes buffer\n",
189 reply_len, cmd->reply_len);
190 reply_len = cmd->reply_len;
191 }
192 cmd->reply_len = reply_len;
193 if (cmd->reply_buf && reply_len)
194 memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len);
195 }
196
197 /* Now complete the command. Write status last, as we lose
198 * ownership of the command structure as soon as it's no longer 1
199 */
200 done = cmd->done;
201 misc = cmd->misc;
202 mb();
203 cmd->status = rc;
204 bail:
205 /* Start next command if any */
206 smu_start_cmd();
207 spin_unlock_irqrestore(&smu->lock, flags);
208
209 /* Call command completion handler if any */
210 if (done)
211 done(cmd, misc);
212
213 /* It's an edge interrupt, nothing to do */
214 return IRQ_HANDLED;
126} 215}
127 216
128static int smu_do_cmd(struct smu_device *dev) 217
218static irqreturn_t smu_msg_intr(int irq, void *arg, struct pt_regs *regs)
129{ 219{
130 int rc; 220 /* I don't quite know what to do with this one, we seem to never
131 u8 cmd_ack; 221 * receive it, so I suspect we have to arm it someway in the SMU
222 * to start getting events that way.
223 */
224
225 printk(KERN_INFO "SMU: message interrupt !\n");
132 226
133 DPRINTK("SMU do_cmd %02x len=%d %02x\n", 227 /* It's an edge interrupt, nothing to do */
134 dev->cmd_buf->cmd, dev->cmd_buf->length, 228 return IRQ_HANDLED;
135 dev->cmd_buf->data[0]); 229}
136 230
137 cmd_ack = smu_save_ack_cmd(dev->cmd_buf);
138 231
139 /* Clear cmd_buf cache lines */ 232/*
140 flush_inval_dcache_range((unsigned long)dev->cmd_buf, 233 * Queued command management.
141 ((unsigned long)dev->cmd_buf) + 234 *
142 sizeof(struct smu_cmd_buf)); 235 */
143 smu_send_cmd(dev);
144 rc = smu_cmd_done(dev);
145 if (rc == 0)
146 rc = smu_cmd_stat(dev->cmd_buf, cmd_ack) ? 0 : -1;
147 236
148 DPRINTK("SMU do_cmd %02x len=%d %02x => %d (%02x)\n", 237int smu_queue_cmd(struct smu_cmd *cmd)
149 dev->cmd_buf->cmd, dev->cmd_buf->length, 238{
150 dev->cmd_buf->data[0], rc, cmd_ack); 239 unsigned long flags;
151 240
152 return rc; 241 if (smu == NULL)
242 return -ENODEV;
243 if (cmd->data_len > SMU_MAX_DATA ||
244 cmd->reply_len > SMU_MAX_DATA)
245 return -EINVAL;
246
247 cmd->status = 1;
248 spin_lock_irqsave(&smu->lock, flags);
249 list_add_tail(&cmd->link, &smu->cmd_list);
250 if (smu->cmd_cur == NULL)
251 smu_start_cmd();
252 spin_unlock_irqrestore(&smu->lock, flags);
253
254 return 0;
153} 255}
256EXPORT_SYMBOL(smu_queue_cmd);
154 257
155/* RTC low level commands */ 258
156static inline int bcd2hex (int n) 259int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
260 unsigned int data_len,
261 void (*done)(struct smu_cmd *cmd, void *misc),
262 void *misc, ...)
157{ 263{
158 return (((n & 0xf0) >> 4) * 10) + (n & 0xf); 264 struct smu_cmd *cmd = &scmd->cmd;
265 va_list list;
266 int i;
267
268 if (data_len > sizeof(scmd->buffer))
269 return -EINVAL;
270
271 memset(scmd, 0, sizeof(*scmd));
272 cmd->cmd = command;
273 cmd->data_len = data_len;
274 cmd->data_buf = scmd->buffer;
275 cmd->reply_len = sizeof(scmd->buffer);
276 cmd->reply_buf = scmd->buffer;
277 cmd->done = done;
278 cmd->misc = misc;
279
280 va_start(list, misc);
281 for (i = 0; i < data_len; ++i)
282 scmd->buffer[i] = (u8)va_arg(list, int);
283 va_end(list);
284
285 return smu_queue_cmd(cmd);
159} 286}
287EXPORT_SYMBOL(smu_queue_simple);
160 288
161static inline int hex2bcd (int n) 289
290void smu_poll(void)
162{ 291{
163 return ((n / 10) << 4) + (n % 10); 292 u8 gpio;
293
294 if (smu == NULL)
295 return;
296
297 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
298 if ((gpio & 7) == 7)
299 smu_db_intr(smu->db_irq, smu, NULL);
164} 300}
301EXPORT_SYMBOL(smu_poll);
302
165 303
166#if 0 304void smu_done_complete(struct smu_cmd *cmd, void *misc)
167static inline void smu_fill_set_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf)
168{ 305{
169 cmd_buf->cmd = 0x8e; 306 struct completion *comp = misc;
170 cmd_buf->length = 8; 307
171 cmd_buf->data[0] = 0x00; 308 complete(comp);
172 memset(cmd_buf->data + 1, 0, 7);
173} 309}
310EXPORT_SYMBOL(smu_done_complete);
311
174 312
175static inline void smu_fill_get_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 313void smu_spinwait_cmd(struct smu_cmd *cmd)
176{ 314{
177 cmd_buf->cmd = 0x8e; 315 while(cmd->status == 1)
178 cmd_buf->length = 1; 316 smu_poll();
179 cmd_buf->data[0] = 0x01; 317}
318EXPORT_SYMBOL(smu_spinwait_cmd);
319
320
321/* RTC low level commands */
322static inline int bcd2hex (int n)
323{
324 return (((n & 0xf0) >> 4) * 10) + (n & 0xf);
180} 325}
181 326
182static inline void smu_fill_dis_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 327
328static inline int hex2bcd (int n)
183{ 329{
184 cmd_buf->cmd = 0x8e; 330 return ((n / 10) << 4) + (n % 10);
185 cmd_buf->length = 1;
186 cmd_buf->data[0] = 0x02;
187} 331}
188#endif 332
189 333
190static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf, 334static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
191 struct rtc_time *time) 335 struct rtc_time *time)
@@ -202,100 +346,96 @@ static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
202 cmd_buf->data[7] = hex2bcd(time->tm_year - 100); 346 cmd_buf->data[7] = hex2bcd(time->tm_year - 100);
203} 347}
204 348
205static inline void smu_fill_get_rtc_cmd(struct smu_cmd_buf *cmd_buf)
206{
207 cmd_buf->cmd = 0x8e;
208 cmd_buf->length = 1;
209 cmd_buf->data[0] = 0x81;
210}
211 349
212static void smu_parse_get_rtc_reply(struct smu_cmd_buf *cmd_buf, 350int smu_get_rtc_time(struct rtc_time *time, int spinwait)
213 struct rtc_time *time)
214{ 351{
215 time->tm_sec = bcd2hex(cmd_buf->data[0]); 352 struct smu_simple_cmd cmd;
216 time->tm_min = bcd2hex(cmd_buf->data[1]);
217 time->tm_hour = bcd2hex(cmd_buf->data[2]);
218 time->tm_wday = bcd2hex(cmd_buf->data[3]);
219 time->tm_mday = bcd2hex(cmd_buf->data[4]);
220 time->tm_mon = bcd2hex(cmd_buf->data[5]) - 1;
221 time->tm_year = bcd2hex(cmd_buf->data[6]) + 100;
222}
223
224int smu_get_rtc_time(struct rtc_time *time)
225{
226 unsigned long flags;
227 int rc; 353 int rc;
228 354
229 if (smu == NULL) 355 if (smu == NULL)
230 return -ENODEV; 356 return -ENODEV;
231 357
232 memset(time, 0, sizeof(struct rtc_time)); 358 memset(time, 0, sizeof(struct rtc_time));
233 spin_lock_irqsave(&smu->lock, flags); 359 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
234 smu_fill_get_rtc_cmd(smu->cmd_buf); 360 SMU_CMD_RTC_GET_DATETIME);
235 rc = smu_do_cmd(smu); 361 if (rc)
236 if (rc == 0) 362 return rc;
237 smu_parse_get_rtc_reply(smu->cmd_buf, time); 363 smu_spinwait_simple(&cmd);
238 spin_unlock_irqrestore(&smu->lock, flags);
239 364
240 return rc; 365 time->tm_sec = bcd2hex(cmd.buffer[0]);
366 time->tm_min = bcd2hex(cmd.buffer[1]);
367 time->tm_hour = bcd2hex(cmd.buffer[2]);
368 time->tm_wday = bcd2hex(cmd.buffer[3]);
369 time->tm_mday = bcd2hex(cmd.buffer[4]);
370 time->tm_mon = bcd2hex(cmd.buffer[5]) - 1;
371 time->tm_year = bcd2hex(cmd.buffer[6]) + 100;
372
373 return 0;
241} 374}
242 375
243int smu_set_rtc_time(struct rtc_time *time) 376
377int smu_set_rtc_time(struct rtc_time *time, int spinwait)
244{ 378{
245 unsigned long flags; 379 struct smu_simple_cmd cmd;
246 int rc; 380 int rc;
247 381
248 if (smu == NULL) 382 if (smu == NULL)
249 return -ENODEV; 383 return -ENODEV;
250 384
251 spin_lock_irqsave(&smu->lock, flags); 385 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL,
252 smu_fill_set_rtc_cmd(smu->cmd_buf, time); 386 SMU_CMD_RTC_SET_DATETIME,
253 rc = smu_do_cmd(smu); 387 hex2bcd(time->tm_sec),
254 spin_unlock_irqrestore(&smu->lock, flags); 388 hex2bcd(time->tm_min),
389 hex2bcd(time->tm_hour),
390 time->tm_wday,
391 hex2bcd(time->tm_mday),
392 hex2bcd(time->tm_mon) + 1,
393 hex2bcd(time->tm_year - 100));
394 if (rc)
395 return rc;
396 smu_spinwait_simple(&cmd);
255 397
256 return rc; 398 return 0;
257} 399}
258 400
401
259void smu_shutdown(void) 402void smu_shutdown(void)
260{ 403{
261 const unsigned char *command = "SHUTDOWN"; 404 struct smu_simple_cmd cmd;
262 unsigned long flags;
263 405
264 if (smu == NULL) 406 if (smu == NULL)
265 return; 407 return;
266 408
267 spin_lock_irqsave(&smu->lock, flags); 409 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL,
268 smu->cmd_buf->cmd = 0xaa; 410 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0))
269 smu->cmd_buf->length = strlen(command); 411 return;
270 strcpy(smu->cmd_buf->data, command); 412 smu_spinwait_simple(&cmd);
271 smu_do_cmd(smu);
272 for (;;) 413 for (;;)
273 ; 414 ;
274 spin_unlock_irqrestore(&smu->lock, flags);
275} 415}
276 416
417
277void smu_restart(void) 418void smu_restart(void)
278{ 419{
279 const unsigned char *command = "RESTART"; 420 struct smu_simple_cmd cmd;
280 unsigned long flags;
281 421
282 if (smu == NULL) 422 if (smu == NULL)
283 return; 423 return;
284 424
285 spin_lock_irqsave(&smu->lock, flags); 425 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL,
286 smu->cmd_buf->cmd = 0xaa; 426 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0))
287 smu->cmd_buf->length = strlen(command); 427 return;
288 strcpy(smu->cmd_buf->data, command); 428 smu_spinwait_simple(&cmd);
289 smu_do_cmd(smu);
290 for (;;) 429 for (;;)
291 ; 430 ;
292 spin_unlock_irqrestore(&smu->lock, flags);
293} 431}
294 432
433
295int smu_present(void) 434int smu_present(void)
296{ 435{
297 return smu != NULL; 436 return smu != NULL;
298} 437}
438EXPORT_SYMBOL(smu_present);
299 439
300 440
301int smu_init (void) 441int smu_init (void)
@@ -307,6 +447,8 @@ int smu_init (void)
307 if (np == NULL) 447 if (np == NULL)
308 return -ENODEV; 448 return -ENODEV;
309 449
450 printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR);
451
310 if (smu_cmdbuf_abs == 0) { 452 if (smu_cmdbuf_abs == 0) {
311 printk(KERN_ERR "SMU: Command buffer not allocated !\n"); 453 printk(KERN_ERR "SMU: Command buffer not allocated !\n");
312 return -EINVAL; 454 return -EINVAL;
@@ -318,7 +460,13 @@ int smu_init (void)
318 memset(smu, 0, sizeof(*smu)); 460 memset(smu, 0, sizeof(*smu));
319 461
320 spin_lock_init(&smu->lock); 462 spin_lock_init(&smu->lock);
463 INIT_LIST_HEAD(&smu->cmd_list);
464 INIT_LIST_HEAD(&smu->cmd_i2c_list);
321 smu->of_node = np; 465 smu->of_node = np;
466 smu->db_irq = NO_IRQ;
467 smu->msg_irq = NO_IRQ;
468 init_timer(&smu->i2c_timer);
469
322 /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a 470 /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
323 * 32 bits value safely 471 * 32 bits value safely
324 */ 472 */
@@ -331,8 +479,8 @@ int smu_init (void)
331 goto fail; 479 goto fail;
332 } 480 }
333 data = (u32 *)get_property(np, "reg", NULL); 481 data = (u32 *)get_property(np, "reg", NULL);
334 of_node_put(np);
335 if (data == NULL) { 482 if (data == NULL) {
483 of_node_put(np);
336 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n"); 484 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
337 goto fail; 485 goto fail;
338 } 486 }
@@ -341,8 +489,31 @@ int smu_init (void)
341 * and ack. GPIOs are at 0x50, best would be to find that out 489 * and ack. GPIOs are at 0x50, best would be to find that out
342 * in the device-tree though. 490 * in the device-tree though.
343 */ 491 */
344 smu->db_req = 0x50 + *data; 492 smu->doorbell = *data;
345 smu->db_ack = 0x50 + *data; 493 if (smu->doorbell < 0x50)
494 smu->doorbell += 0x50;
495 if (np->n_intrs > 0)
496 smu->db_irq = np->intrs[0].line;
497
498 of_node_put(np);
499
500 /* Now look for the smu-interrupt GPIO */
501 do {
502 np = of_find_node_by_name(NULL, "smu-interrupt");
503 if (np == NULL)
504 break;
505 data = (u32 *)get_property(np, "reg", NULL);
506 if (data == NULL) {
507 of_node_put(np);
508 break;
509 }
510 smu->msg = *data;
511 if (smu->msg < 0x50)
512 smu->msg += 0x50;
513 if (np->n_intrs > 0)
514 smu->msg_irq = np->intrs[0].line;
515 of_node_put(np);
516 } while(0);
346 517
347 /* Doorbell buffer is currently hard-coded, I didn't find a proper 518 /* Doorbell buffer is currently hard-coded, I didn't find a proper
348 * device-tree entry giving the address. Best would probably to use 519 * device-tree entry giving the address. Best would probably to use
@@ -362,3 +533,584 @@ int smu_init (void)
362 return -ENXIO; 533 return -ENXIO;
363 534
364} 535}
536
537
538static int smu_late_init(void)
539{
540 if (!smu)
541 return 0;
542
543 /*
544 * Try to request the interrupts
545 */
546
547 if (smu->db_irq != NO_IRQ) {
548 if (request_irq(smu->db_irq, smu_db_intr,
549 SA_SHIRQ, "SMU doorbell", smu) < 0) {
550 printk(KERN_WARNING "SMU: can't "
551 "request interrupt %d\n",
552 smu->db_irq);
553 smu->db_irq = NO_IRQ;
554 }
555 }
556
557 if (smu->msg_irq != NO_IRQ) {
558 if (request_irq(smu->msg_irq, smu_msg_intr,
559 SA_SHIRQ, "SMU message", smu) < 0) {
560 printk(KERN_WARNING "SMU: can't "
561 "request interrupt %d\n",
562 smu->msg_irq);
563 smu->msg_irq = NO_IRQ;
564 }
565 }
566
567 return 0;
568}
569arch_initcall(smu_late_init);
570
571/*
572 * sysfs visibility
573 */
574
575static void smu_expose_childs(void *unused)
576{
577 struct device_node *np;
578
579 for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) {
580 if (device_is_compatible(np, "smu-i2c")) {
581 char name[32];
582 u32 *reg = (u32 *)get_property(np, "reg", NULL);
583
584 if (reg == NULL)
585 continue;
586 sprintf(name, "smu-i2c-%02x", *reg);
587 of_platform_device_create(np, name, &smu->of_dev->dev);
588 }
589 }
590
591}
592
593static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
594
595static int smu_platform_probe(struct of_device* dev,
596 const struct of_device_id *match)
597{
598 if (!smu)
599 return -ENODEV;
600 smu->of_dev = dev;
601
602 /*
603 * Ok, we are matched, now expose all i2c busses. We have to defer
604 * that unfortunately or it would deadlock inside the device model
605 */
606 schedule_work(&smu_expose_childs_work);
607
608 return 0;
609}
610
611static struct of_device_id smu_platform_match[] =
612{
613 {
614 .type = "smu",
615 },
616 {},
617};
618
619static struct of_platform_driver smu_of_platform_driver =
620{
621 .name = "smu",
622 .match_table = smu_platform_match,
623 .probe = smu_platform_probe,
624};
625
626static int __init smu_init_sysfs(void)
627{
628 int rc;
629
630 /*
631 * Due to sysfs bogosity, a sysdev is not a real device, so
632 * we should in fact create both if we want sysdev semantics
633 * for power management.
634 * For now, we don't power manage machines with an SMU chip,
635 * I'm a bit too far from figuring out how that works with those
636 * new chipsets, but that will come back and bite us
637 */
638 rc = of_register_driver(&smu_of_platform_driver);
639 return 0;
640}
641
642device_initcall(smu_init_sysfs);
643
644struct of_device *smu_get_ofdev(void)
645{
646 if (!smu)
647 return NULL;
648 return smu->of_dev;
649}
650
651EXPORT_SYMBOL_GPL(smu_get_ofdev);
652
653/*
654 * i2c interface
655 */
656
657static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
658{
659 void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done;
660 void *misc = cmd->misc;
661 unsigned long flags;
662
663 /* Check for read case */
664 if (!fail && cmd->read) {
665 if (cmd->pdata[0] < 1)
666 fail = 1;
667 else
668 memcpy(cmd->info.data, &cmd->pdata[1],
669 cmd->info.datalen);
670 }
671
672 DPRINTK("SMU: completing, success: %d\n", !fail);
673
674 /* Update status and mark no pending i2c command with lock
675 * held so nobody comes in while we dequeue a possibly
676 * pending next i2c command
677 */
678 spin_lock_irqsave(&smu->lock, flags);
679 smu->cmd_i2c_cur = NULL;
680 wmb();
681 cmd->status = fail ? -EIO : 0;
682
683 /* Is there another i2c command waiting ? */
684 if (!list_empty(&smu->cmd_i2c_list)) {
685 struct smu_i2c_cmd *newcmd;
686
687 /* Fetch it, new current, remove from list */
688 newcmd = list_entry(smu->cmd_i2c_list.next,
689 struct smu_i2c_cmd, link);
690 smu->cmd_i2c_cur = newcmd;
691 list_del(&cmd->link);
692
693 /* Queue with low level smu */
694 list_add_tail(&cmd->scmd.link, &smu->cmd_list);
695 if (smu->cmd_cur == NULL)
696 smu_start_cmd();
697 }
698 spin_unlock_irqrestore(&smu->lock, flags);
699
700 /* Call command completion handler if any */
701 if (done)
702 done(cmd, misc);
703
704}
705
706
707static void smu_i2c_retry(unsigned long data)
708{
709 struct smu_i2c_cmd *cmd = (struct smu_i2c_cmd *)data;
710
711 DPRINTK("SMU: i2c failure, requeuing...\n");
712
713 /* requeue command simply by resetting reply_len */
714 cmd->pdata[0] = 0xff;
715 cmd->scmd.reply_len = 0x10;
716 smu_queue_cmd(&cmd->scmd);
717}
718
719
720static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
721{
722 struct smu_i2c_cmd *cmd = misc;
723 int fail = 0;
724
725 DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n",
726 cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len);
727
728 /* Check for possible status */
729 if (scmd->status < 0)
730 fail = 1;
731 else if (cmd->read) {
732 if (cmd->stage == 0)
733 fail = cmd->pdata[0] != 0;
734 else
735 fail = cmd->pdata[0] >= 0x80;
736 } else {
737 fail = cmd->pdata[0] != 0;
738 }
739
740 /* Handle failures by requeuing command, after 5ms interval
741 */
742 if (fail && --cmd->retries > 0) {
743 DPRINTK("SMU: i2c failure, starting timer...\n");
744 smu->i2c_timer.function = smu_i2c_retry;
745 smu->i2c_timer.data = (unsigned long)cmd;
746 smu->i2c_timer.expires = jiffies + msecs_to_jiffies(5);
747 add_timer(&smu->i2c_timer);
748 return;
749 }
750
751 /* If failure or stage 1, command is complete */
752 if (fail || cmd->stage != 0) {
753 smu_i2c_complete_command(cmd, fail);
754 return;
755 }
756
757 DPRINTK("SMU: going to stage 1\n");
758
759 /* Ok, initial command complete, now poll status */
760 scmd->reply_buf = cmd->pdata;
761 scmd->reply_len = 0x10;
762 scmd->data_buf = cmd->pdata;
763 scmd->data_len = 1;
764 cmd->pdata[0] = 0;
765 cmd->stage = 1;
766 cmd->retries = 20;
767 smu_queue_cmd(scmd);
768}
769
770
771int smu_queue_i2c(struct smu_i2c_cmd *cmd)
772{
773 unsigned long flags;
774
775 if (smu == NULL)
776 return -ENODEV;
777
778 /* Fill most fields of scmd */
779 cmd->scmd.cmd = SMU_CMD_I2C_COMMAND;
780 cmd->scmd.done = smu_i2c_low_completion;
781 cmd->scmd.misc = cmd;
782 cmd->scmd.reply_buf = cmd->pdata;
783 cmd->scmd.reply_len = 0x10;
784 cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
785 cmd->scmd.status = 1;
786 cmd->stage = 0;
787 cmd->pdata[0] = 0xff;
788 cmd->retries = 20;
789 cmd->status = 1;
790
791 /* Check transfer type, sanitize some "info" fields
792 * based on transfer type and do more checking
793 */
794 cmd->info.caddr = cmd->info.devaddr;
795 cmd->read = cmd->info.devaddr & 0x01;
796 switch(cmd->info.type) {
797 case SMU_I2C_TRANSFER_SIMPLE:
798 memset(&cmd->info.sublen, 0, 4);
799 break;
800 case SMU_I2C_TRANSFER_COMBINED:
801 cmd->info.devaddr &= 0xfe;
802 case SMU_I2C_TRANSFER_STDSUB:
803 if (cmd->info.sublen > 3)
804 return -EINVAL;
805 break;
806 default:
807 return -EINVAL;
808 }
809
810 /* Finish setting up command based on transfer direction
811 */
812 if (cmd->read) {
813 if (cmd->info.datalen > SMU_I2C_READ_MAX)
814 return -EINVAL;
815 memset(cmd->info.data, 0xff, cmd->info.datalen);
816 cmd->scmd.data_len = 9;
817 } else {
818 if (cmd->info.datalen > SMU_I2C_WRITE_MAX)
819 return -EINVAL;
820 cmd->scmd.data_len = 9 + cmd->info.datalen;
821 }
822
823 DPRINTK("SMU: i2c enqueuing command\n");
824 DPRINTK("SMU: %s, len=%d bus=%x addr=%x sub0=%x type=%x\n",
825 cmd->read ? "read" : "write", cmd->info.datalen,
826 cmd->info.bus, cmd->info.caddr,
827 cmd->info.subaddr[0], cmd->info.type);
828
829
830 /* Enqueue command in i2c list, and if empty, enqueue also in
831 * main command list
832 */
833 spin_lock_irqsave(&smu->lock, flags);
834 if (smu->cmd_i2c_cur == NULL) {
835 smu->cmd_i2c_cur = cmd;
836 list_add_tail(&cmd->scmd.link, &smu->cmd_list);
837 if (smu->cmd_cur == NULL)
838 smu_start_cmd();
839 } else
840 list_add_tail(&cmd->link, &smu->cmd_i2c_list);
841 spin_unlock_irqrestore(&smu->lock, flags);
842
843 return 0;
844}
845
846
847
848/*
849 * Userland driver interface
850 */
851
852
853static LIST_HEAD(smu_clist);
854static DEFINE_SPINLOCK(smu_clist_lock);
855
856enum smu_file_mode {
857 smu_file_commands,
858 smu_file_events,
859 smu_file_closing
860};
861
862struct smu_private
863{
864 struct list_head list;
865 enum smu_file_mode mode;
866 int busy;
867 struct smu_cmd cmd;
868 spinlock_t lock;
869 wait_queue_head_t wait;
870 u8 buffer[SMU_MAX_DATA];
871};
872
873
874static int smu_open(struct inode *inode, struct file *file)
875{
876 struct smu_private *pp;
877 unsigned long flags;
878
879 pp = kmalloc(sizeof(struct smu_private), GFP_KERNEL);
880 if (pp == 0)
881 return -ENOMEM;
882 memset(pp, 0, sizeof(struct smu_private));
883 spin_lock_init(&pp->lock);
884 pp->mode = smu_file_commands;
885 init_waitqueue_head(&pp->wait);
886
887 spin_lock_irqsave(&smu_clist_lock, flags);
888 list_add(&pp->list, &smu_clist);
889 spin_unlock_irqrestore(&smu_clist_lock, flags);
890 file->private_data = pp;
891
892 return 0;
893}
894
895
896static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
897{
898 struct smu_private *pp = misc;
899
900 wake_up_all(&pp->wait);
901}
902
903
904static ssize_t smu_write(struct file *file, const char __user *buf,
905 size_t count, loff_t *ppos)
906{
907 struct smu_private *pp = file->private_data;
908 unsigned long flags;
909 struct smu_user_cmd_hdr hdr;
910 int rc = 0;
911
912 if (pp->busy)
913 return -EBUSY;
914 else if (copy_from_user(&hdr, buf, sizeof(hdr)))
915 return -EFAULT;
916 else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) {
917 pp->mode = smu_file_events;
918 return 0;
919 } else if (hdr.cmdtype != SMU_CMDTYPE_SMU)
920 return -EINVAL;
921 else if (pp->mode != smu_file_commands)
922 return -EBADFD;
923 else if (hdr.data_len > SMU_MAX_DATA)
924 return -EINVAL;
925
926 spin_lock_irqsave(&pp->lock, flags);
927 if (pp->busy) {
928 spin_unlock_irqrestore(&pp->lock, flags);
929 return -EBUSY;
930 }
931 pp->busy = 1;
932 pp->cmd.status = 1;
933 spin_unlock_irqrestore(&pp->lock, flags);
934
935 if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
936 pp->busy = 0;
937 return -EFAULT;
938 }
939
940 pp->cmd.cmd = hdr.cmd;
941 pp->cmd.data_len = hdr.data_len;
942 pp->cmd.reply_len = SMU_MAX_DATA;
943 pp->cmd.data_buf = pp->buffer;
944 pp->cmd.reply_buf = pp->buffer;
945 pp->cmd.done = smu_user_cmd_done;
946 pp->cmd.misc = pp;
947 rc = smu_queue_cmd(&pp->cmd);
948 if (rc < 0)
949 return rc;
950 return count;
951}
952
953
954static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
955 char __user *buf, size_t count)
956{
957 DECLARE_WAITQUEUE(wait, current);
958 struct smu_user_reply_hdr hdr;
959 unsigned long flags;
960 int size, rc = 0;
961
962 if (!pp->busy)
963 return 0;
964 if (count < sizeof(struct smu_user_reply_hdr))
965 return -EOVERFLOW;
966 spin_lock_irqsave(&pp->lock, flags);
967 if (pp->cmd.status == 1) {
968 if (file->f_flags & O_NONBLOCK)
969 return -EAGAIN;
970 add_wait_queue(&pp->wait, &wait);
971 for (;;) {
972 set_current_state(TASK_INTERRUPTIBLE);
973 rc = 0;
974 if (pp->cmd.status != 1)
975 break;
976 rc = -ERESTARTSYS;
977 if (signal_pending(current))
978 break;
979 spin_unlock_irqrestore(&pp->lock, flags);
980 schedule();
981 spin_lock_irqsave(&pp->lock, flags);
982 }
983 set_current_state(TASK_RUNNING);
984 remove_wait_queue(&pp->wait, &wait);
985 }
986 spin_unlock_irqrestore(&pp->lock, flags);
987 if (rc)
988 return rc;
989 if (pp->cmd.status != 0)
990 pp->cmd.reply_len = 0;
991 size = sizeof(hdr) + pp->cmd.reply_len;
992 if (count < size)
993 size = count;
994 rc = size;
995 hdr.status = pp->cmd.status;
996 hdr.reply_len = pp->cmd.reply_len;
997 if (copy_to_user(buf, &hdr, sizeof(hdr)))
998 return -EFAULT;
999 size -= sizeof(hdr);
1000 if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
1001 return -EFAULT;
1002 pp->busy = 0;
1003
1004 return rc;
1005}
1006
1007
1008static ssize_t smu_read_events(struct file *file, struct smu_private *pp,
1009 char __user *buf, size_t count)
1010{
1011 /* Not implemented */
1012 msleep_interruptible(1000);
1013 return 0;
1014}
1015
1016
1017static ssize_t smu_read(struct file *file, char __user *buf,
1018 size_t count, loff_t *ppos)
1019{
1020 struct smu_private *pp = file->private_data;
1021
1022 if (pp->mode == smu_file_commands)
1023 return smu_read_command(file, pp, buf, count);
1024 if (pp->mode == smu_file_events)
1025 return smu_read_events(file, pp, buf, count);
1026
1027 return -EBADFD;
1028}
1029
1030static unsigned int smu_fpoll(struct file *file, poll_table *wait)
1031{
1032 struct smu_private *pp = file->private_data;
1033 unsigned int mask = 0;
1034 unsigned long flags;
1035
1036 if (pp == 0)
1037 return 0;
1038
1039 if (pp->mode == smu_file_commands) {
1040 poll_wait(file, &pp->wait, wait);
1041
1042 spin_lock_irqsave(&pp->lock, flags);
1043 if (pp->busy && pp->cmd.status != 1)
1044 mask |= POLLIN;
1045 spin_unlock_irqrestore(&pp->lock, flags);
1046 } else if (pp->mode == smu_file_events) {
1047 /* Not yet implemented */
1048 }
1049 return mask;
1050}
1051
1052static int smu_release(struct inode *inode, struct file *file)
1053{
1054 struct smu_private *pp = file->private_data;
1055 unsigned long flags;
1056 unsigned int busy;
1057
1058 if (pp == 0)
1059 return 0;
1060
1061 file->private_data = NULL;
1062
1063 /* Mark file as closing to avoid races with new request */
1064 spin_lock_irqsave(&pp->lock, flags);
1065 pp->mode = smu_file_closing;
1066 busy = pp->busy;
1067
1068 /* Wait for any pending request to complete */
1069 if (busy && pp->cmd.status == 1) {
1070 DECLARE_WAITQUEUE(wait, current);
1071
1072 add_wait_queue(&pp->wait, &wait);
1073 for (;;) {
1074 set_current_state(TASK_UNINTERRUPTIBLE);
1075 if (pp->cmd.status != 1)
1076 break;
1077 spin_lock_irqsave(&pp->lock, flags);
1078 schedule();
1079 spin_unlock_irqrestore(&pp->lock, flags);
1080 }
1081 set_current_state(TASK_RUNNING);
1082 remove_wait_queue(&pp->wait, &wait);
1083 }
1084 spin_unlock_irqrestore(&pp->lock, flags);
1085
1086 spin_lock_irqsave(&smu_clist_lock, flags);
1087 list_del(&pp->list);
1088 spin_unlock_irqrestore(&smu_clist_lock, flags);
1089 kfree(pp);
1090
1091 return 0;
1092}
1093
1094
1095static struct file_operations smu_device_fops __pmacdata = {
1096 .llseek = no_llseek,
1097 .read = smu_read,
1098 .write = smu_write,
1099 .poll = smu_fpoll,
1100 .open = smu_open,
1101 .release = smu_release,
1102};
1103
1104static struct miscdevice pmu_device __pmacdata = {
1105 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
1106};
1107
1108static int smu_device_init(void)
1109{
1110 if (!smu)
1111 return -ENODEV;
1112 if (misc_register(&pmu_device) < 0)
1113 printk(KERN_ERR "via-pmu: cannot register misc device.\n");
1114 return 0;
1115}
1116device_initcall(smu_device_init);
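The rewritten smu.c replaces the old synchronous, interrupts-off command path with a queued, interrupt-driven one: a caller fills a struct smu_cmd (or lets smu_queue_simple() build one), the doorbell interrupt completes it, and the caller is notified through an optional done callback or by polling the status field. A minimal usage sketch in kernel context, assuming <asm/smu.h> and <linux/completion.h>; the function name and the choice of a completion rather than smu_spinwait_simple() are illustrative, while the command constants and the smu_done_complete() helper are the ones appearing in the hunks above:

	static int smu_read_rtc_seconds(u8 *sec)
	{
		struct smu_simple_cmd cmd;
		DECLARE_COMPLETION(comp);
		int rc;

		rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1,
				      smu_done_complete, &comp,
				      SMU_CMD_RTC_GET_DATETIME);
		if (rc)
			return rc;

		wait_for_completion(&comp);     /* woken by the doorbell interrupt */
		if (cmd.cmd.status)
			return cmd.cmd.status;

		*sec = cmd.buffer[0];           /* reply bytes are BCD, cf. bcd2hex() */
		return 0;
	}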
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c9ca1118e449..f38696622eb4 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -599,7 +599,7 @@ thermostat_init(void)
599 sensor_location[2] = "?"; 599 sensor_location[2] = "?";
600 } 600 }
601 601
602 of_dev = of_platform_device_create(np, "temperatures"); 602 of_dev = of_platform_device_create(np, "temperatures", NULL);
603 603
604 if (of_dev == NULL) { 604 if (of_dev == NULL) {
605 printk(KERN_ERR "Can't register temperatures device !\n"); 605 printk(KERN_ERR "Can't register temperatures device !\n");
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 703e31973314..cc507ceef153 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2051,7 +2051,7 @@ static int __init therm_pm72_init(void)
2051 return -ENODEV; 2051 return -ENODEV;
2052 } 2052 }
2053 } 2053 }
2054 of_dev = of_platform_device_create(np, "temperature"); 2054 of_dev = of_platform_device_create(np, "temperature", NULL);
2055 if (of_dev == NULL) { 2055 if (of_dev == NULL) {
2056 printk(KERN_ERR "Can't register FCU platform device !\n"); 2056 printk(KERN_ERR "Can't register FCU platform device !\n");
2057 return -ENODEV; 2057 return -ENODEV;
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index cbb72eb0426d..6aaa1df1a64e 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -504,7 +504,7 @@ g4fan_init( void )
504 } 504 }
505 if( !(np=of_find_node_by_name(NULL, "fan")) ) 505 if( !(np=of_find_node_by_name(NULL, "fan")) )
506 return -ENODEV; 506 return -ENODEV;
507 x.of_dev = of_platform_device_create( np, "temperature" ); 507 x.of_dev = of_platform_device_create(np, "temperature", NULL);
508 of_node_put( np ); 508 of_node_put( np );
509 509
510 if( !x.of_dev ) { 510 if( !x.of_dev ) {
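All three thermal drivers pick up the same one-line adjustment: of_platform_device_create() now takes a third argument naming the parent struct device. They pass NULL to keep the old unparented behaviour, whereas smu.c above passes &smu->of_dev->dev so its i2c children sit under the SMU device. The updated call shape, with np being the node each driver has already looked up:

	struct of_device *of_dev;

	of_dev = of_platform_device_create(np, "temperatures", NULL /* parent */);
	if (of_dev == NULL)
		return -ENODEV;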
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index a564321db2f0..c062a017491e 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -763,21 +763,21 @@ static void set_pll(struct bttv *btv)
763 /* no PLL needed */ 763 /* no PLL needed */
764 if (btv->pll.pll_current == 0) 764 if (btv->pll.pll_current == 0)
765 return; 765 return;
766 vprintk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n", 766 bttv_printk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n",
767 btv->c.nr,btv->pll.pll_ifreq); 767 btv->c.nr,btv->pll.pll_ifreq);
768 btwrite(0x00,BT848_TGCTRL); 768 btwrite(0x00,BT848_TGCTRL);
769 btwrite(0x00,BT848_PLL_XCI); 769 btwrite(0x00,BT848_PLL_XCI);
770 btv->pll.pll_current = 0; 770 btv->pll.pll_current = 0;
771 return; 771 return;
772 } 772 }
773 773
774 vprintk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr, 774 bttv_printk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr,
775 btv->pll.pll_ifreq, btv->pll.pll_ofreq); 775 btv->pll.pll_ifreq, btv->pll.pll_ofreq);
776 set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq); 776 set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq);
777 777
778 for (i=0; i<10; i++) { 778 for (i=0; i<10; i++) {
779 /* Let other people run while the PLL stabilizes */ 779 /* Let other people run while the PLL stabilizes */
780 vprintk("."); 780 bttv_printk(".");
781 msleep(10); 781 msleep(10);
782 782
783 if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) { 783 if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) {
@@ -785,12 +785,12 @@ static void set_pll(struct bttv *btv)
785 } else { 785 } else {
786 btwrite(0x08,BT848_TGCTRL); 786 btwrite(0x08,BT848_TGCTRL);
787 btv->pll.pll_current = btv->pll.pll_ofreq; 787 btv->pll.pll_current = btv->pll.pll_ofreq;
788 vprintk(" ok\n"); 788 bttv_printk(" ok\n");
789 return; 789 return;
790 } 790 }
791 } 791 }
792 btv->pll.pll_current = -1; 792 btv->pll.pll_current = -1;
793 vprintk("failed\n"); 793 bttv_printk("failed\n");
794 return; 794 return;
795} 795}
796 796
diff --git a/drivers/media/video/bttvp.h b/drivers/media/video/bttvp.h
index 9b0b7ca035f8..7a312f79340a 100644
--- a/drivers/media/video/bttvp.h
+++ b/drivers/media/video/bttvp.h
@@ -221,7 +221,7 @@ extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
221extern int init_bttv_i2c(struct bttv *btv); 221extern int init_bttv_i2c(struct bttv *btv);
222extern int fini_bttv_i2c(struct bttv *btv); 222extern int fini_bttv_i2c(struct bttv *btv);
223 223
224#define vprintk if (bttv_verbose) printk 224#define bttv_printk if (bttv_verbose) printk
225#define dprintk if (bttv_debug >= 1) printk 225#define dprintk if (bttv_debug >= 1) printk
226#define d2printk if (bttv_debug >= 2) printk 226#define d2printk if (bttv_debug >= 2) printk
227 227
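The bttv change is a straight rename: the driver-private vprintk macro becomes bttv_printk, presumably so it no longer shadows the kernel's own vprintk() (the va_list flavour of printk), and the call sites in bttv-driver.c are updated to match. The macro keeps its historical bare if (bttv_verbose) printk form. As an aside that is not part of this patch, the usual way to make such a macro safe in front of a following else is the do/while(0) wrapper:

	#define bttv_printk(fmt, ...)				\
		do {						\
			if (bttv_verbose)			\
				printk(fmt, ##__VA_ARGS__);	\
		} while (0)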
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index 33f209a39cb6..1883d22cffeb 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -35,6 +35,23 @@ config FUSION_FC
35 LSIFC929X 35 LSIFC929X
36 LSIFC929XL 36 LSIFC929XL
37 37
38config FUSION_SAS
39 tristate "Fusion MPT ScsiHost drivers for SAS"
40 depends on PCI && SCSI
41 select FUSION
42 select SCSI_SAS_ATTRS
43 ---help---
44 SCSI HOST support for SAS host adapters.
45
46 List of supported controllers:
47
48 LSISAS1064
49 LSISAS1066
50 LSISAS1068
51 LSISAS1064E
52 LSISAS1066E
53 LSISAS1068E
54
38config FUSION_MAX_SGE 55config FUSION_MAX_SGE
39 int "Maximum number of scatter gather entries (16 - 128)" 56 int "Maximum number of scatter gather entries (16 - 128)"
40 depends on FUSION 57 depends on FUSION
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index 1d2f9db813c1..8a2e2657f4c2 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -34,5 +34,6 @@
34 34
35obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o 35obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o
36obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o 36obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o
37obj-$(CONFIG_FUSION_SAS) += mptbase.o mptscsih.o mptsas.o
37obj-$(CONFIG_FUSION_CTL) += mptctl.o 38obj-$(CONFIG_FUSION_CTL) += mptctl.o
38obj-$(CONFIG_FUSION_LAN) += mptlan.o 39obj-$(CONFIG_FUSION_LAN) += mptlan.o
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index f517d0692d5f..790a2932ded9 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -135,13 +135,12 @@ static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
135 135
136static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc); 136static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
137static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag); 137static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
138//static u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
139static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason); 138static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
140static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag); 139static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
141static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag); 140static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
142static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag); 141static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
143static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag); 142static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
144static int mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag); 143static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
145static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag); 144static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
146static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag); 145static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
147static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag); 146static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
@@ -152,6 +151,7 @@ static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
152static int GetLanConfigPages(MPT_ADAPTER *ioc); 151static int GetLanConfigPages(MPT_ADAPTER *ioc);
153static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum); 152static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum);
154static int GetIoUnitPage2(MPT_ADAPTER *ioc); 153static int GetIoUnitPage2(MPT_ADAPTER *ioc);
154int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
155static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum); 155static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
156static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); 156static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
157static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); 157static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
@@ -159,6 +159,8 @@ static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
159static void mpt_timer_expired(unsigned long data); 159static void mpt_timer_expired(unsigned long data);
160static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); 160static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
161static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); 161static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
162static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
163static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
162 164
163#ifdef CONFIG_PROC_FS 165#ifdef CONFIG_PROC_FS
164static int procmpt_summary_read(char *buf, char **start, off_t offset, 166static int procmpt_summary_read(char *buf, char **start, off_t offset,
@@ -175,6 +177,7 @@ static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *
175static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); 177static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
176static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); 178static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
177static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info); 179static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info);
180static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info);
178 181
179/* module entry point */ 182/* module entry point */
180static int __init fusion_init (void); 183static int __init fusion_init (void);
@@ -206,6 +209,144 @@ pci_enable_io_access(struct pci_dev *pdev)
206 pci_write_config_word(pdev, PCI_COMMAND, command_reg); 209 pci_write_config_word(pdev, PCI_COMMAND, command_reg);
207} 210}
208 211
212/*
213 * Process turbo (context) reply...
214 */
215static void
216mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
217{
218 MPT_FRAME_HDR *mf = NULL;
219 MPT_FRAME_HDR *mr = NULL;
220 int req_idx = 0;
221 int cb_idx;
222
223 dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n",
224 ioc->name, pa));
225
226 switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
227 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
228 req_idx = pa & 0x0000FFFF;
229 cb_idx = (pa & 0x00FF0000) >> 16;
230 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
231 break;
232 case MPI_CONTEXT_REPLY_TYPE_LAN:
233 cb_idx = mpt_lan_index;
234 /*
235 * Blind set of mf to NULL here was fatal
236 * after lan_reply says "freeme"
237 * Fix sort of combined with an optimization here;
238 * added explicit check for case where lan_reply
239 * was just returning 1 and doing nothing else.
240 * For this case skip the callback, but set up
241 * proper mf value first here:-)
242 */
243 if ((pa & 0x58000000) == 0x58000000) {
244 req_idx = pa & 0x0000FFFF;
245 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
246 mpt_free_msg_frame(ioc, mf);
247 mb();
248 return;
249 break;
250 }
251 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
252 break;
253 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
254 cb_idx = mpt_stm_index;
255 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
256 break;
257 default:
258 cb_idx = 0;
259 BUG();
260 }
261
262 /* Check for (valid) IO callback! */
263 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
264 MptCallbacks[cb_idx] == NULL) {
265 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
266 __FUNCTION__, ioc->name, cb_idx);
267 goto out;
268 }
269
270 if (MptCallbacks[cb_idx](ioc, mf, mr))
271 mpt_free_msg_frame(ioc, mf);
272 out:
273 mb();
274}
275
276static void
277mpt_reply(MPT_ADAPTER *ioc, u32 pa)
278{
279 MPT_FRAME_HDR *mf;
280 MPT_FRAME_HDR *mr;
281 int req_idx;
282 int cb_idx;
283 int freeme;
284
285 u32 reply_dma_low;
286 u16 ioc_stat;
287
288 /* non-TURBO reply! Hmmm, something may be up...
289 * Newest turbo reply mechanism; get address
290 * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
291 */
292
293 /* Map DMA address of reply header to cpu address.
294 * pa is 32 bits - but the dma address may be 32 or 64 bits
295 * get offset based only on the low addresses
296 */
297
298 reply_dma_low = (pa <<= 1);
299 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
300 (reply_dma_low - ioc->reply_frames_low_dma));
301
302 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
303 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
304 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
305
306 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
307 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
308 DBG_DUMP_REPLY_FRAME(mr)
309
310 /* Check/log IOC log info
311 */
312 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
313 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
314 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
315 if (ioc->bus_type == FC)
316 mpt_fc_log_info(ioc, log_info);
317 else if (ioc->bus_type == SCSI)
318 mpt_sp_log_info(ioc, log_info);
319 else if (ioc->bus_type == SAS)
320 mpt_sas_log_info(ioc, log_info);
321 }
322 if (ioc_stat & MPI_IOCSTATUS_MASK) {
323 if (ioc->bus_type == SCSI &&
324 cb_idx != mpt_stm_index &&
325 cb_idx != mpt_lan_index)
326 mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
327 }
328
329
330 /* Check for (valid) IO callback! */
331 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
332 MptCallbacks[cb_idx] == NULL) {
333 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
334 __FUNCTION__, ioc->name, cb_idx);
335 freeme = 0;
336 goto out;
337 }
338
339 freeme = MptCallbacks[cb_idx](ioc, mf, mr);
340
341 out:
342 /* Flush (non-TURBO) reply with a WRITE! */
343 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
344
345 if (freeme)
346 mpt_free_msg_frame(ioc, mf);
347 mb();
348}
349
209/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 350/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
210/* 351/*
211 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler. 352 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
@@ -227,164 +368,21 @@ pci_enable_io_access(struct pci_dev *pdev)
227static irqreturn_t 368static irqreturn_t
228mpt_interrupt(int irq, void *bus_id, struct pt_regs *r) 369mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
229{ 370{
230 MPT_ADAPTER *ioc; 371 MPT_ADAPTER *ioc = bus_id;
231 MPT_FRAME_HDR *mf; 372 u32 pa;
232 MPT_FRAME_HDR *mr;
233 u32 pa;
234 int req_idx;
235 int cb_idx;
236 int type;
237 int freeme;
238
239 ioc = (MPT_ADAPTER *)bus_id;
240 373
241 /* 374 /*
242 * Drain the reply FIFO! 375 * Drain the reply FIFO!
243 *
244 * NOTES: I've seen up to 10 replies processed in this loop, so far...
245 * Update: I've seen up to 9182 replies processed in this loop! ??
246 * Update: Limit ourselves to processing max of N replies
247 * (bottom of loop).
248 */ 376 */
249 while (1) { 377 while (1) {
250 378 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
251 if ((pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo)) == 0xFFFFFFFF) 379 if (pa == 0xFFFFFFFF)
252 return IRQ_HANDLED; 380 return IRQ_HANDLED;
253 381 else if (pa & MPI_ADDRESS_REPLY_A_BIT)
254 cb_idx = 0; 382 mpt_reply(ioc, pa);
255 freeme = 0; 383 else
256 384 mpt_turbo_reply(ioc, pa);
257 /* 385 }
258 * Check for non-TURBO reply!
259 */
260 if (pa & MPI_ADDRESS_REPLY_A_BIT) {
261 u32 reply_dma_low;
262 u16 ioc_stat;
263
264 /* non-TURBO reply! Hmmm, something may be up...
265 * Newest turbo reply mechanism; get address
266 * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
267 */
268
269 /* Map DMA address of reply header to cpu address.
270 * pa is 32 bits - but the dma address may be 32 or 64 bits
271 * get offset based only only the low addresses
272 */
273 reply_dma_low = (pa = (pa << 1));
274 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
275 (reply_dma_low - ioc->reply_frames_low_dma));
276
277 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
278 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
279 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
280
281 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
282 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
283 DBG_DUMP_REPLY_FRAME(mr)
284
285 /* Check/log IOC log info
286 */
287 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
288 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
289 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
290 if (ioc->bus_type == FC)
291 mpt_fc_log_info(ioc, log_info);
292 else if (ioc->bus_type == SCSI)
293 mpt_sp_log_info(ioc, log_info);
294 }
295 if (ioc_stat & MPI_IOCSTATUS_MASK) {
296 if (ioc->bus_type == SCSI)
297 mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
298 }
299 } else {
300 /*
301 * Process turbo (context) reply...
302 */
303 dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n", ioc->name, pa));
304 type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT);
305 if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) {
306 cb_idx = mpt_stm_index;
307 mf = NULL;
308 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
309 } else if (type == MPI_CONTEXT_REPLY_TYPE_LAN) {
310 cb_idx = mpt_lan_index;
311 /* Blind set of mf to NULL here was fatal
312 * after lan_reply says "freeme"
313 * Fix sort of combined with an optimization here;
314 * added explicit check for case where lan_reply
315 * was just returning 1 and doing nothing else.
316 * For this case skip the callback, but set up
317 * proper mf value first here:-)
318 */
319 if ((pa & 0x58000000) == 0x58000000) {
320 req_idx = pa & 0x0000FFFF;
321 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
322 freeme = 1;
323 /*
324 * IMPORTANT! Invalidate the callback!
325 */
326 cb_idx = 0;
327 } else {
328 mf = NULL;
329 }
330 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
331 } else {
332 req_idx = pa & 0x0000FFFF;
333 cb_idx = (pa & 0x00FF0000) >> 16;
334 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
335 mr = NULL;
336 }
337 pa = 0; /* No reply flush! */
338 }
339
340#ifdef MPT_DEBUG_IRQ
341 if (ioc->bus_type == SCSI) {
342 /* Verify mf, mr are reasonable.
343 */
344 if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
345 || (mf < ioc->req_frames)) ) {
346 printk(MYIOC_s_WARN_FMT
347 "mpt_interrupt: Invalid mf (%p)!\n", ioc->name, (void *)mf);
348 cb_idx = 0;
349 pa = 0;
350 freeme = 0;
351 }
352 if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth))
353 || (mr < ioc->reply_frames)) ) {
354 printk(MYIOC_s_WARN_FMT
355 "mpt_interrupt: Invalid rf (%p)!\n", ioc->name, (void *)mr);
356 cb_idx = 0;
357 pa = 0;
358 freeme = 0;
359 }
360 if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) {
361 printk(MYIOC_s_WARN_FMT
362 "mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx);
363 cb_idx = 0;
364 pa = 0;
365 freeme = 0;
366 }
367 }
368#endif
369
370 /* Check for (valid) IO callback! */
371 if (cb_idx) {
372 /* Do the callback! */
373 freeme = (*(MptCallbacks[cb_idx]))(ioc, mf, mr);
374 }
375
376 if (pa) {
377 /* Flush (non-TURBO) reply with a WRITE! */
378 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
379 }
380
381 if (freeme) {
382 /* Put Request back on FreeQ! */
383 mpt_free_msg_frame(ioc, mf);
384 }
385
386 mb();
387 } /* drain reply FIFO */
388 386
389 return IRQ_HANDLED; 387 return IRQ_HANDLED;
390} 388}
@@ -509,6 +507,14 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
509 pCfg->wait_done = 1; 507 pCfg->wait_done = 1;
510 wake_up(&mpt_waitq); 508 wake_up(&mpt_waitq);
511 } 509 }
510 } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) {
511 /* we should be always getting a reply frame */
512 memcpy(ioc->persist_reply_frame, reply,
513 min(MPT_DEFAULT_FRAME_SIZE,
514 4*reply->u.reply.MsgLength));
515 del_timer(&ioc->persist_timer);
516 ioc->persist_wait_done = 1;
517 wake_up(&mpt_waitq);
512 } else { 518 } else {
513 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", 519 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
514 ioc->name, func); 520 ioc->name, func);
@@ -750,6 +756,7 @@ mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc)
750 mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR, 756 mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
751 u.frame.linkage.list); 757 u.frame.linkage.list);
752 list_del(&mf->u.frame.linkage.list); 758 list_del(&mf->u.frame.linkage.list);
759 mf->u.frame.linkage.arg1 = 0;
753 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */ 760 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */
754 req_offset = (u8 *)mf - (u8 *)ioc->req_frames; 761 req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
755 /* u16! */ 762 /* u16! */
@@ -845,6 +852,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
845 852
846 /* Put Request back on FreeQ! */ 853 /* Put Request back on FreeQ! */
847 spin_lock_irqsave(&ioc->FreeQlock, flags); 854 spin_lock_irqsave(&ioc->FreeQlock, flags);
855 mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */
848 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); 856 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
849#ifdef MFCNT 857#ifdef MFCNT
850 ioc->mfcnt--; 858 ioc->mfcnt--;
@@ -971,12 +979,123 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
971 979
972 /* Make sure there are no doorbells */ 980 /* Make sure there are no doorbells */
973 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 981 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
974 982
975 return r; 983 return r;
976} 984}
977 985
978/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 986/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
979/** 987/**
988 * mpt_host_page_access_control - provides mechanism for the host
989 * driver to control the IOC's Host Page Buffer access.
990 * @ioc: Pointer to MPT adapter structure
991 * @access_control_value: define bits below
992 *
993 * Access Control Value - bits[15:12]
994 * 0h Reserved
995 * 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
996 * 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
997 * 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
998 *
999 * Returns 0 for success, non-zero for failure.
1000 */
1001
1002static int
1003mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
1004{
1005 int r = 0;
1006
1007 /* return if in use */
1008 if (CHIPREG_READ32(&ioc->chip->Doorbell)
1009 & MPI_DOORBELL_ACTIVE)
1010 return -1;
1011
1012 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1013
1014 CHIPREG_WRITE32(&ioc->chip->Doorbell,
1015 ((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
1016 <<MPI_DOORBELL_FUNCTION_SHIFT) |
1017 (access_control_value<<12)));
1018
1019 /* Wait for IOC to clear Doorbell Status bit */
1020 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1021 return -2;
 1022 } else
1023 return 0;
1024}
1025
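As a usage illustration only, mirroring the call this patch adds to mpt_adapter_disable: before freeing the DMA memory, the driver asks the IOC to release the host page buffer with the Free Buffer access control value.

	if (mpt_host_page_access_control(ioc,
	    MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP) != 0)
		printk(KERN_ERR MYNAM
		    ": host page buffer free request failed!\n");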
1026/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1027/**
1028 * mpt_host_page_alloc - allocate system memory for the fw
 1029 * If memory was already allocated in the past, resend the same pointer.
 1030 * @ioc: Pointer to MPT adapter structure
 1031 * @ioc_init: Pointer to ioc init config page
1032 *
1033 * Returns 0 for success, non-zero for failure.
1034 */
1035static int
1036mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1037{
1038 char *psge;
1039 int flags_length;
1040 u32 host_page_buffer_sz=0;
1041
1042 if(!ioc->HostPageBuffer) {
1043
1044 host_page_buffer_sz =
1045 le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
1046
1047 if(!host_page_buffer_sz)
1048 return 0; /* fw doesn't need any host buffers */
1049
1050 /* spin till we get enough memory */
1051 while(host_page_buffer_sz > 0) {
1052
1053 if((ioc->HostPageBuffer = pci_alloc_consistent(
1054 ioc->pcidev,
1055 host_page_buffer_sz,
1056 &ioc->HostPageBuffer_dma)) != NULL) {
1057
1058 dinitprintk((MYIOC_s_INFO_FMT
1059 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
1060 ioc->name,
1061 ioc->HostPageBuffer,
1062 ioc->HostPageBuffer_dma,
1063 host_page_buffer_sz));
1064 ioc->alloc_total += host_page_buffer_sz;
1065 ioc->HostPageBuffer_sz = host_page_buffer_sz;
1066 break;
1067 }
1068
1069 host_page_buffer_sz -= (4*1024);
1070 }
1071 }
1072
1073 if(!ioc->HostPageBuffer) {
1074 printk(MYIOC_s_ERR_FMT
1075 "Failed to alloc memory for host_page_buffer!\n",
1076 ioc->name);
1077 return -999;
1078 }
1079
1080 psge = (char *)&ioc_init->HostPageBufferSGE;
1081 flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1082 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
1083 MPI_SGE_FLAGS_32_BIT_ADDRESSING |
1084 MPI_SGE_FLAGS_HOST_TO_IOC |
1085 MPI_SGE_FLAGS_END_OF_BUFFER;
1086 if (sizeof(dma_addr_t) == sizeof(u64)) {
1087 flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1088 }
1089 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
1090 flags_length |= ioc->HostPageBuffer_sz;
1091 mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
1092 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
1093
 1094 return 0;
1095}
1096
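To make the FlagsLength arithmetic concrete, a hypothetical worked example (the 16 KB size is illustrative, and the addressing flags used in the function above are omitted for brevity): the SGE flags are OR'd together, shifted into bits 31:24 by MPI_SGE_FLAGS_SHIFT, and the buffer length occupies the low 24 bits, matching the 0xFFFFFF mask applied when the size is read back from facts.HostPageBufferSGE.FlagsLength.

	u32 flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			   MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			   MPI_SGE_FLAGS_HOST_TO_IOC |
			   MPI_SGE_FLAGS_END_OF_BUFFER;
	flags_length = flags_length << MPI_SGE_FLAGS_SHIFT; /* flags -> bits 31:24 */
	flags_length |= 16 * 1024;                          /* length -> bits 23:0 */
	mpt_add_sge((char *)&ioc_init->HostPageBufferSGE, flags_length,
	    ioc->HostPageBuffer_dma);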
1097/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1098/**
980 * mpt_verify_adapter - Given a unique IOC identifier, set pointer to 1099 * mpt_verify_adapter - Given a unique IOC identifier, set pointer to
981 * the associated MPT adapter structure. 1100 * the associated MPT adapter structure.
982 * @iocid: IOC unique identifier (integer) 1101 * @iocid: IOC unique identifier (integer)
@@ -1084,7 +1203,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1084 1203
 1085 /* Initialize SCSI Config Data structure 1204
1086 */ 1205 */
1087 memset(&ioc->spi_data, 0, sizeof(ScsiCfgData)); 1206 memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
1088 1207
1089 /* Initialize the running configQ head. 1208 /* Initialize the running configQ head.
1090 */ 1209 */
@@ -1213,6 +1332,33 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1213 ioc->prod_name = "LSI53C1035"; 1332 ioc->prod_name = "LSI53C1035";
1214 ioc->bus_type = SCSI; 1333 ioc->bus_type = SCSI;
1215 } 1334 }
1335 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
1336 ioc->prod_name = "LSISAS1064";
1337 ioc->bus_type = SAS;
1338 ioc->errata_flag_1064 = 1;
1339 }
1340 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066) {
1341 ioc->prod_name = "LSISAS1066";
1342 ioc->bus_type = SAS;
1343 ioc->errata_flag_1064 = 1;
1344 }
1345 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
1346 ioc->prod_name = "LSISAS1068";
1347 ioc->bus_type = SAS;
1348 ioc->errata_flag_1064 = 1;
1349 }
1350 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064E) {
1351 ioc->prod_name = "LSISAS1064E";
1352 ioc->bus_type = SAS;
1353 }
1354 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066E) {
1355 ioc->prod_name = "LSISAS1066E";
1356 ioc->bus_type = SAS;
1357 }
1358 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
1359 ioc->prod_name = "LSISAS1068E";
1360 ioc->bus_type = SAS;
1361 }
1216 1362
1217 if (ioc->errata_flag_1064) 1363 if (ioc->errata_flag_1064)
1218 pci_disable_io_access(pdev); 1364 pci_disable_io_access(pdev);
@@ -1604,8 +1750,23 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1604 */ 1750 */
1605 if (ret == 0) { 1751 if (ret == 0) {
1606 rc = mpt_do_upload(ioc, sleepFlag); 1752 rc = mpt_do_upload(ioc, sleepFlag);
1607 if (rc != 0) 1753 if (rc == 0) {
1754 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
1755 /*
1756 * Maintain only one pointer to FW memory
1757 * so there will not be two attempt to
1758 * downloadboot onboard dual function
1759 * chips (mpt_adapter_disable,
1760 * mpt_diag_reset)
1761 */
1762 ioc->cached_fw = NULL;
1763 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
1764 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
1765 }
1766 } else {
1608 printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); 1767 printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
1768 ret = -5;
1769 }
1609 } 1770 }
1610 } 1771 }
1611 } 1772 }
@@ -1640,7 +1801,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1640 * and we try GetLanConfigPages again... 1801 * and we try GetLanConfigPages again...
1641 */ 1802 */
1642 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) { 1803 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
1643 if (ioc->bus_type == FC) { 1804 if (ioc->bus_type == SAS) {
1805
1806 /* clear persistency table */
1807 if(ioc->facts.IOCExceptions &
1808 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
1809 ret = mptbase_sas_persist_operation(ioc,
1810 MPI_SAS_OP_CLEAR_NOT_PRESENT);
1811 if(ret != 0)
1812 return -1;
1813 }
1814
1815 /* Find IM volumes
1816 */
1817 mpt_findImVolumes(ioc);
1818
1819 } else if (ioc->bus_type == FC) {
1644 /* 1820 /*
1645 * Pre-fetch FC port WWN and stuff... 1821 * Pre-fetch FC port WWN and stuff...
1646 * (FCPortPage0_t stuff) 1822 * (FCPortPage0_t stuff)
@@ -1783,7 +1959,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1783 1959
1784 if (ioc->cached_fw != NULL) { 1960 if (ioc->cached_fw != NULL) {
1785 ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n")); 1961 ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n"));
1786 if ((ret = mpt_downloadboot(ioc, NO_SLEEP)) < 0) { 1962 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)ioc->cached_fw, NO_SLEEP)) < 0) {
1787 printk(KERN_WARNING MYNAM 1963 printk(KERN_WARNING MYNAM
1788 ": firmware downloadboot failure (%d)!\n", ret); 1964 ": firmware downloadboot failure (%d)!\n", ret);
1789 } 1965 }
@@ -1831,9 +2007,9 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1831 } 2007 }
1832 2008
1833 kfree(ioc->spi_data.nvram); 2009 kfree(ioc->spi_data.nvram);
1834 kfree(ioc->spi_data.pIocPg3); 2010 kfree(ioc->raid_data.pIocPg3);
1835 ioc->spi_data.nvram = NULL; 2011 ioc->spi_data.nvram = NULL;
1836 ioc->spi_data.pIocPg3 = NULL; 2012 ioc->raid_data.pIocPg3 = NULL;
1837 2013
1838 if (ioc->spi_data.pIocPg4 != NULL) { 2014 if (ioc->spi_data.pIocPg4 != NULL) {
1839 sz = ioc->spi_data.IocPg4Sz; 2015 sz = ioc->spi_data.IocPg4Sz;
@@ -1852,6 +2028,23 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1852 2028
1853 kfree(ioc->ChainToChain); 2029 kfree(ioc->ChainToChain);
1854 ioc->ChainToChain = NULL; 2030 ioc->ChainToChain = NULL;
2031
2032 if (ioc->HostPageBuffer != NULL) {
2033 if((ret = mpt_host_page_access_control(ioc,
2034 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
2035 printk(KERN_ERR MYNAM
2036 ": %s: host page buffers free failed (%d)!\n",
2037 __FUNCTION__, ret);
2038 }
2039 dexitprintk((KERN_INFO MYNAM ": %s HostPageBuffer free @ %p, sz=%d bytes\n",
2040 ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz));
2041 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
2042 ioc->HostPageBuffer,
2043 ioc->HostPageBuffer_dma);
2044 ioc->HostPageBuffer = NULL;
 2045 ioc->alloc_total -= ioc->HostPageBuffer_sz;
 2046 ioc->HostPageBuffer_sz = 0;
2047 }
1855} 2048}
1856 2049
1857/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2050/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2034,7 +2227,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2034 * Loop here waiting for IOC to come READY. 2227 * Loop here waiting for IOC to come READY.
2035 */ 2228 */
2036 ii = 0; 2229 ii = 0;
2037 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */ 2230 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
2038 2231
2039 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { 2232 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
2040 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) { 2233 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
@@ -2212,6 +2405,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2212 le32_to_cpu(facts->CurrentSenseBufferHighAddr); 2405 le32_to_cpu(facts->CurrentSenseBufferHighAddr);
2213 facts->CurReplyFrameSize = 2406 facts->CurReplyFrameSize =
2214 le16_to_cpu(facts->CurReplyFrameSize); 2407 le16_to_cpu(facts->CurReplyFrameSize);
2408 facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
2215 2409
2216 /* 2410 /*
2217 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx 2411 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
@@ -2383,13 +2577,25 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2383 ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n", 2577 ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n",
2384 ioc->name, ioc->upload_fw, ioc->facts.Flags)); 2578 ioc->name, ioc->upload_fw, ioc->facts.Flags));
2385 2579
2386 if (ioc->bus_type == FC) 2580 if(ioc->bus_type == SAS)
2581 ioc_init.MaxDevices = ioc->facts.MaxDevices;
2582 else if(ioc->bus_type == FC)
2387 ioc_init.MaxDevices = MPT_MAX_FC_DEVICES; 2583 ioc_init.MaxDevices = MPT_MAX_FC_DEVICES;
2388 else 2584 else
2389 ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES; 2585 ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES;
2390
2391 ioc_init.MaxBuses = MPT_MAX_BUS; 2586 ioc_init.MaxBuses = MPT_MAX_BUS;
2392 2587 dinitprintk((MYIOC_s_INFO_FMT "facts.MsgVersion=%x\n",
2588 ioc->name, ioc->facts.MsgVersion));
2589 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
2590 // set MsgVersion and HeaderVersion host driver was built with
2591 ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
2592 ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
2593
2594 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
2595 ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
2596 } else if(mpt_host_page_alloc(ioc, &ioc_init))
2597 return -99;
2598 }
2393 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ 2599 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
2394 2600
2395 if (sizeof(dma_addr_t) == sizeof(u64)) { 2601 if (sizeof(dma_addr_t) == sizeof(u64)) {
@@ -2403,17 +2609,21 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2403 ioc_init.HostMfaHighAddr = cpu_to_le32(0); 2609 ioc_init.HostMfaHighAddr = cpu_to_le32(0);
2404 ioc_init.SenseBufferHighAddr = cpu_to_le32(0); 2610 ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
2405 } 2611 }
2406 2612
2407 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr; 2613 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
2408 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr; 2614 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
2615 ioc->facts.MaxDevices = ioc_init.MaxDevices;
2616 ioc->facts.MaxBuses = ioc_init.MaxBuses;
2409 2617
2410 dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n", 2618 dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n",
2411 ioc->name, &ioc_init)); 2619 ioc->name, &ioc_init));
2412 2620
2413 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init, 2621 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
2414 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag); 2622 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
2415 if (r != 0) 2623 if (r != 0) {
2624 printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
2416 return r; 2625 return r;
2626 }
2417 2627
2418 /* No need to byte swap the multibyte fields in the reply 2628 /* No need to byte swap the multibyte fields in the reply
2419 * since we don't even look at it's contents. 2629 * since we don't even look at it's contents.
@@ -2472,7 +2682,7 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
2472{ 2682{
2473 PortEnable_t port_enable; 2683 PortEnable_t port_enable;
2474 MPIDefaultReply_t reply_buf; 2684 MPIDefaultReply_t reply_buf;
2475 int ii; 2685 int rc;
2476 int req_sz; 2686 int req_sz;
2477 int reply_sz; 2687 int reply_sz;
2478 2688
@@ -2494,22 +2704,15 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
2494 2704
2495 /* RAID FW may take a long time to enable 2705 /* RAID FW may take a long time to enable
2496 */ 2706 */
2497 if (ioc->bus_type == FC) { 2707 if ( (ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
2498 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable, 2708 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI ) {
2499 reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag); 2709 rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2500 } else {
2501 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2502 reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag); 2710 reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag);
2711 } else {
2712 rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2713 reply_sz, (u16*)&reply_buf, 30 /*seconds*/, sleepFlag);
2503 } 2714 }
2504 2715 return rc;
2505 if (ii != 0)
2506 return ii;
2507
2508 /* We do not even look at the reply, so we need not
2509 * swap the multi-byte fields.
2510 */
2511
2512 return 0;
2513} 2716}
2514 2717
2515/* 2718/*
@@ -2666,9 +2869,8 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
2666 * <0 for fw upload failure. 2869 * <0 for fw upload failure.
2667 */ 2870 */
2668static int 2871static int
2669mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag) 2872mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
2670{ 2873{
2671 MpiFwHeader_t *pFwHeader;
2672 MpiExtImageHeader_t *pExtImage; 2874 MpiExtImageHeader_t *pExtImage;
2673 u32 fwSize; 2875 u32 fwSize;
2674 u32 diag0val; 2876 u32 diag0val;
@@ -2679,18 +2881,8 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2679 u32 load_addr; 2881 u32 load_addr;
2680 u32 ioc_state=0; 2882 u32 ioc_state=0;
2681 2883
2682 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x, ioc FW Ptr %p\n", 2884 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
2683 ioc->name, ioc->facts.FWImageSize, ioc->cached_fw)); 2885 ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
2684
2685 if ( ioc->facts.FWImageSize == 0 )
2686 return -1;
2687
2688 if (ioc->cached_fw == NULL)
2689 return -2;
2690
2691 /* prevent a second downloadboot and memory free with alt_ioc */
2692 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
2693 ioc->alt_ioc->cached_fw = NULL;
2694 2886
2695 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 2887 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2696 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); 2888 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
@@ -2718,16 +2910,17 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2718 ioc->name, count)); 2910 ioc->name, count));
2719 break; 2911 break;
2720 } 2912 }
2721 /* wait 1 sec */ 2913 /* wait .1 sec */
2722 if (sleepFlag == CAN_SLEEP) { 2914 if (sleepFlag == CAN_SLEEP) {
2723 msleep_interruptible (1000); 2915 msleep_interruptible (100);
2724 } else { 2916 } else {
2725 mdelay (1000); 2917 mdelay (100);
2726 } 2918 }
2727 } 2919 }
2728 2920
2729 if ( count == 30 ) { 2921 if ( count == 30 ) {
2730 ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! Unable to RESET_ADAPTER diag0val=%x\n", 2922 ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! "
2923 "Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
2731 ioc->name, diag0val)); 2924 ioc->name, diag0val));
2732 return -3; 2925 return -3;
2733 } 2926 }
@@ -2742,7 +2935,6 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2742 /* Set the DiagRwEn and Disable ARM bits */ 2935 /* Set the DiagRwEn and Disable ARM bits */
2743 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM)); 2936 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
2744 2937
2745 pFwHeader = (MpiFwHeader_t *) ioc->cached_fw;
2746 fwSize = (pFwHeader->ImageSize + 3)/4; 2938 fwSize = (pFwHeader->ImageSize + 3)/4;
2747 ptrFw = (u32 *) pFwHeader; 2939 ptrFw = (u32 *) pFwHeader;
2748 2940
@@ -2792,19 +2984,38 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2792 /* Clear the internal flash bad bit - autoincrementing register, 2984 /* Clear the internal flash bad bit - autoincrementing register,
2793 * so must do two writes. 2985 * so must do two writes.
2794 */ 2986 */
2795 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); 2987 if (ioc->bus_type == SCSI) {
2796 diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData); 2988 /*
2797 diagRwData |= 0x4000000; 2989 * 1030 and 1035 H/W errata, workaround to access
2798 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); 2990 * the ClearFlashBadSignatureBit
2799 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData); 2991 */
2992 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
2993 diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
2994 diagRwData |= 0x40000000;
2995 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
2996 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
2997
2998 } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
2999 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3000 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
3001 MPI_DIAG_CLEAR_FLASH_BAD_SIG);
3002
3003 /* wait 1 msec */
3004 if (sleepFlag == CAN_SLEEP) {
3005 msleep_interruptible (1);
3006 } else {
3007 mdelay (1);
3008 }
3009 }
2800 3010
2801 if (ioc->errata_flag_1064) 3011 if (ioc->errata_flag_1064)
2802 pci_disable_io_access(ioc->pcidev); 3012 pci_disable_io_access(ioc->pcidev);
2803 3013
2804 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3014 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
2805 ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, turning off PREVENT_IOC_BOOT, DISABLE_ARM\n", 3015 ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, "
3016 "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
2806 ioc->name, diag0val)); 3017 ioc->name, diag0val));
2807 diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM); 3018 diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
2808 ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n", 3019 ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n",
2809 ioc->name, diag0val)); 3020 ioc->name, diag0val));
2810 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); 3021 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
@@ -2812,10 +3023,23 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2812 /* Write 0xFF to reset the sequencer */ 3023 /* Write 0xFF to reset the sequencer */
2813 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 3024 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2814 3025
3026 if (ioc->bus_type == SAS) {
3027 ioc_state = mpt_GetIocState(ioc, 0);
3028 if ( (GetIocFacts(ioc, sleepFlag,
3029 MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
3030 ddlprintk((MYIOC_s_INFO_FMT "GetIocFacts failed: IocState=%x\n",
3031 ioc->name, ioc_state));
3032 return -EFAULT;
3033 }
3034 }
3035
2815 for (count=0; count<HZ*20; count++) { 3036 for (count=0; count<HZ*20; count++) {
2816 if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) { 3037 if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
2817 ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n", 3038 ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n",
2818 ioc->name, count, ioc_state)); 3039 ioc->name, count, ioc_state));
3040 if (ioc->bus_type == SAS) {
3041 return 0;
3042 }
2819 if ((SendIocInit(ioc, sleepFlag)) != 0) { 3043 if ((SendIocInit(ioc, sleepFlag)) != 0) {
2820 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n", 3044 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n",
2821 ioc->name)); 3045 ioc->name));
@@ -3049,12 +3273,13 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3049 3273
3050 /* wait 1 sec */ 3274 /* wait 1 sec */
3051 if (sleepFlag == CAN_SLEEP) { 3275 if (sleepFlag == CAN_SLEEP) {
3052 ssleep(1); 3276 msleep_interruptible (1000);
3053 } else { 3277 } else {
3054 mdelay (1000); 3278 mdelay (1000);
3055 } 3279 }
3056 } 3280 }
3057 if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) { 3281 if ((count = mpt_downloadboot(ioc,
3282 (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) {
3058 printk(KERN_WARNING MYNAM 3283 printk(KERN_WARNING MYNAM
3059 ": firmware downloadboot failure (%d)!\n", count); 3284 ": firmware downloadboot failure (%d)!\n", count);
3060 } 3285 }
@@ -3637,7 +3862,7 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3637 int count = 0; 3862 int count = 0;
3638 u32 intstat=0; 3863 u32 intstat=0;
3639 3864
3640 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong; 3865 cntdn = 1000 * howlong;
3641 3866
3642 if (sleepFlag == CAN_SLEEP) { 3867 if (sleepFlag == CAN_SLEEP) {
3643 while (--cntdn) { 3868 while (--cntdn) {
@@ -3687,7 +3912,7 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3687 int count = 0; 3912 int count = 0;
3688 u32 intstat=0; 3913 u32 intstat=0;
3689 3914
3690 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong; 3915 cntdn = 1000 * howlong;
3691 if (sleepFlag == CAN_SLEEP) { 3916 if (sleepFlag == CAN_SLEEP) {
3692 while (--cntdn) { 3917 while (--cntdn) {
3693 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3918 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
@@ -4001,6 +4226,85 @@ GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
4001 4226
4002/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4227/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4003/* 4228/*
4229 * mptbase_sas_persist_operation - Perform operation on SAS Persitent Table
4230 * @ioc: Pointer to MPT_ADAPTER structure
4231 * @sas_address: 64bit SAS Address for operation.
4232 * @target_id: specified target for operation
4233 * @bus: specified bus for operation
4234 * @persist_opcode: see below
4235 *
4236 * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
4237 * devices not currently present.
 4238 * MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear all persist TargetID mappings
4239 *
 4240 * NOTE: Don't use this function during interrupt time.
4241 *
4242 * Returns: 0 for success, non-zero error
4243 */
4244
4245/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4246int
4247mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4248{
4249 SasIoUnitControlRequest_t *sasIoUnitCntrReq;
4250 SasIoUnitControlReply_t *sasIoUnitCntrReply;
4251 MPT_FRAME_HDR *mf = NULL;
4252 MPIHeader_t *mpi_hdr;
4253
4254
4255 /* insure garbage is not sent to fw */
4256 switch(persist_opcode) {
4257
4258 case MPI_SAS_OP_CLEAR_NOT_PRESENT:
4259 case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
4260 break;
4261
4262 default:
4263 return -1;
4264 break;
4265 }
4266
4267 printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
4268
4269 /* Get a MF for this command.
4270 */
4271 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4272 printk("%s: no msg frames!\n",__FUNCTION__);
4273 return -1;
4274 }
4275
4276 mpi_hdr = (MPIHeader_t *) mf;
4277 sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
4278 memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
4279 sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
4280 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
4281 sasIoUnitCntrReq->Operation = persist_opcode;
4282
4283 init_timer(&ioc->persist_timer);
4284 ioc->persist_timer.data = (unsigned long) ioc;
4285 ioc->persist_timer.function = mpt_timer_expired;
4286 ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
4287 ioc->persist_wait_done=0;
4288 add_timer(&ioc->persist_timer);
4289 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4290 wait_event(mpt_waitq, ioc->persist_wait_done);
4291
4292 sasIoUnitCntrReply =
4293 (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
4294 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
4295 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
4296 __FUNCTION__,
4297 sasIoUnitCntrReply->IOCStatus,
4298 sasIoUnitCntrReply->IOCLogInfo);
4299 return -1;
4300 }
4301
4302 printk("%s: success\n",__FUNCTION__);
4303 return 0;
4304}
4305
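For illustration, the bring-up path added elsewhere in this patch invokes the routine as follows when IOCFacts reports a full persistent table; the same exported entry point backs the mptsas module's mpt_pt_clear option.

	if (ioc->facts.IOCExceptions &
	    MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
		if (mptbase_sas_persist_operation(ioc,
		    MPI_SAS_OP_CLEAR_NOT_PRESENT) != 0)
			return -1;	/* could not clear stale TargetID mappings */
	}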
4306/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4307/*
4004 * GetIoUnitPage2 - Retrieve BIOS version and boot order information. 4308 * GetIoUnitPage2 - Retrieve BIOS version and boot order information.
4005 * @ioc: Pointer to MPT_ADAPTER structure 4309 * @ioc: Pointer to MPT_ADAPTER structure
4006 * 4310 *
@@ -4340,10 +4644,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4340 if (mpt_config(ioc, &cfg) != 0) 4644 if (mpt_config(ioc, &cfg) != 0)
4341 goto done_and_free; 4645 goto done_and_free;
4342 4646
4343 if ( (mem = (u8 *)ioc->spi_data.pIocPg2) == NULL ) { 4647 if ( (mem = (u8 *)ioc->raid_data.pIocPg2) == NULL ) {
4344 mem = kmalloc(iocpage2sz, GFP_ATOMIC); 4648 mem = kmalloc(iocpage2sz, GFP_ATOMIC);
4345 if (mem) { 4649 if (mem) {
4346 ioc->spi_data.pIocPg2 = (IOCPage2_t *) mem; 4650 ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
4347 } else { 4651 } else {
4348 goto done_and_free; 4652 goto done_and_free;
4349 } 4653 }
@@ -4360,7 +4664,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4360 /* At least 1 RAID Volume 4664 /* At least 1 RAID Volume
4361 */ 4665 */
4362 pIocRv = pIoc2->RaidVolume; 4666 pIocRv = pIoc2->RaidVolume;
4363 ioc->spi_data.isRaid = 0; 4667 ioc->raid_data.isRaid = 0;
4364 for (jj = 0; jj < nVols; jj++, pIocRv++) { 4668 for (jj = 0; jj < nVols; jj++, pIocRv++) {
4365 vid = pIocRv->VolumeID; 4669 vid = pIocRv->VolumeID;
4366 vbus = pIocRv->VolumeBus; 4670 vbus = pIocRv->VolumeBus;
@@ -4369,7 +4673,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4369 /* find the match 4673 /* find the match
4370 */ 4674 */
4371 if (vbus == 0) { 4675 if (vbus == 0) {
4372 ioc->spi_data.isRaid |= (1 << vid); 4676 ioc->raid_data.isRaid |= (1 << vid);
4373 } else { 4677 } else {
4374 /* Error! Always bus 0 4678 /* Error! Always bus 0
4375 */ 4679 */
@@ -4404,8 +4708,8 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
4404 4708
4405 /* Free the old page 4709 /* Free the old page
4406 */ 4710 */
4407 kfree(ioc->spi_data.pIocPg3); 4711 kfree(ioc->raid_data.pIocPg3);
4408 ioc->spi_data.pIocPg3 = NULL; 4712 ioc->raid_data.pIocPg3 = NULL;
4409 4713
4410 /* There is at least one physical disk. 4714 /* There is at least one physical disk.
4411 * Read and save IOC Page 3 4715 * Read and save IOC Page 3
@@ -4442,7 +4746,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
4442 mem = kmalloc(iocpage3sz, GFP_ATOMIC); 4746 mem = kmalloc(iocpage3sz, GFP_ATOMIC);
4443 if (mem) { 4747 if (mem) {
4444 memcpy(mem, (u8 *)pIoc3, iocpage3sz); 4748 memcpy(mem, (u8 *)pIoc3, iocpage3sz);
4445 ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem; 4749 ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem;
4446 } 4750 }
4447 } 4751 }
4448 4752
@@ -5366,8 +5670,8 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
5366} 5670}
5367 5671
5368/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5672/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5369static char * 5673static void
5370EventDescriptionStr(u8 event, u32 evData0) 5674EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5371{ 5675{
5372 char *ds; 5676 char *ds;
5373 5677
@@ -5420,8 +5724,95 @@ EventDescriptionStr(u8 event, u32 evData0)
5420 ds = "Events(OFF) Change"; 5724 ds = "Events(OFF) Change";
5421 break; 5725 break;
5422 case MPI_EVENT_INTEGRATED_RAID: 5726 case MPI_EVENT_INTEGRATED_RAID:
5423 ds = "Integrated Raid"; 5727 {
5728 u8 ReasonCode = (u8)(evData0 >> 16);
5729 switch (ReasonCode) {
5730 case MPI_EVENT_RAID_RC_VOLUME_CREATED :
5731 ds = "Integrated Raid: Volume Created";
5732 break;
5733 case MPI_EVENT_RAID_RC_VOLUME_DELETED :
5734 ds = "Integrated Raid: Volume Deleted";
5735 break;
5736 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED :
5737 ds = "Integrated Raid: Volume Settings Changed";
5738 break;
5739 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED :
5740 ds = "Integrated Raid: Volume Status Changed";
5741 break;
5742 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED :
5743 ds = "Integrated Raid: Volume Physdisk Changed";
5744 break;
5745 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED :
5746 ds = "Integrated Raid: Physdisk Created";
5747 break;
5748 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED :
5749 ds = "Integrated Raid: Physdisk Deleted";
5750 break;
5751 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED :
5752 ds = "Integrated Raid: Physdisk Settings Changed";
5753 break;
5754 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED :
5755 ds = "Integrated Raid: Physdisk Status Changed";
5756 break;
5757 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED :
5758 ds = "Integrated Raid: Domain Validation Needed";
5759 break;
5760 case MPI_EVENT_RAID_RC_SMART_DATA :
 5761 ds = "Integrated Raid: Smart Data";
5762 break;
5763 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED :
5764 ds = "Integrated Raid: Replace Action Started";
5765 break;
5766 default:
5767 ds = "Integrated Raid";
5768 break;
5769 }
5770 break;
5771 }
5772 case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:
5773 ds = "SCSI Device Status Change";
5774 break;
5775 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
5776 {
5777 u8 ReasonCode = (u8)(evData0 >> 16);
5778 switch (ReasonCode) {
5779 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
5780 ds = "SAS Device Status Change: Added";
5781 break;
5782 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
5783 ds = "SAS Device Status Change: Deleted";
5784 break;
5785 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
5786 ds = "SAS Device Status Change: SMART Data";
5787 break;
5788 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
 5789 ds = "SAS Device Status Change: No Persistency Added";
5790 break;
5791 default:
5792 ds = "SAS Device Status Change: Unknown";
5793 break;
5794 }
5795 break;
5796 }
5797 case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
5798 ds = "Bus Timer Expired";
5799 break;
5800 case MPI_EVENT_QUEUE_FULL:
5801 ds = "Queue Full";
5802 break;
5803 case MPI_EVENT_SAS_SES:
5804 ds = "SAS SES Event";
5805 break;
5806 case MPI_EVENT_PERSISTENT_TABLE_FULL:
5807 ds = "Persistent Table Full";
5808 break;
5809 case MPI_EVENT_SAS_PHY_LINK_STATUS:
5810 ds = "SAS PHY Link Status";
5811 break;
5812 case MPI_EVENT_SAS_DISCOVERY_ERROR:
5813 ds = "SAS Discovery Error";
5424 break; 5814 break;
5815
5425 /* 5816 /*
5426 * MPT base "custom" events may be added here... 5817 * MPT base "custom" events may be added here...
5427 */ 5818 */
@@ -5429,7 +5820,7 @@ EventDescriptionStr(u8 event, u32 evData0)
5429 ds = "Unknown"; 5820 ds = "Unknown";
5430 break; 5821 break;
5431 } 5822 }
5432 return ds; 5823 strcpy(evStr,ds);
5433} 5824}
5434 5825
5435/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5826/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5451,7 +5842,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5451 int ii; 5842 int ii;
5452 int r = 0; 5843 int r = 0;
5453 int handlers = 0; 5844 int handlers = 0;
5454 char *evStr; 5845 char evStr[100];
5455 u8 event; 5846 u8 event;
5456 5847
5457 /* 5848 /*
@@ -5464,7 +5855,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5464 evData0 = le32_to_cpu(pEventReply->Data[0]); 5855 evData0 = le32_to_cpu(pEventReply->Data[0]);
5465 } 5856 }
5466 5857
5467 evStr = EventDescriptionStr(event, evData0); 5858 EventDescriptionStr(event, evData0, evStr);
5468 devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n", 5859 devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n",
5469 ioc->name, 5860 ioc->name,
5470 evStr, 5861 evStr,
@@ -5481,20 +5872,6 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5481 * Do general / base driver event processing 5872 * Do general / base driver event processing
5482 */ 5873 */
5483 switch(event) { 5874 switch(event) {
5484 case MPI_EVENT_NONE: /* 00 */
5485 case MPI_EVENT_LOG_DATA: /* 01 */
5486 case MPI_EVENT_STATE_CHANGE: /* 02 */
5487 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
5488 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
5489 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
5490 case MPI_EVENT_RESCAN: /* 06 */
5491 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
5492 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
5493 case MPI_EVENT_LOGOUT: /* 09 */
5494 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
5495 case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: /* 0C */
5496 default:
5497 break;
5498 case MPI_EVENT_EVENT_CHANGE: /* 0A */ 5875 case MPI_EVENT_EVENT_CHANGE: /* 0A */
5499 if (evDataLen) { 5876 if (evDataLen) {
5500 u8 evState = evData0 & 0xFF; 5877 u8 evState = evData0 & 0xFF;
@@ -5507,6 +5884,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5507 } 5884 }
5508 } 5885 }
5509 break; 5886 break;
5887 default:
5888 break;
5510 } 5889 }
5511 5890
5512 /* 5891 /*
@@ -5653,6 +6032,111 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
5653 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc); 6032 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
5654} 6033}
5655 6034
6035/* strings for sas loginfo */
6036 static char *originator_str[] = {
6037 "IOP", /* 00h */
6038 "PL", /* 01h */
6039 "IR" /* 02h */
6040 };
6041 static char *iop_code_str[] = {
6042 NULL, /* 00h */
6043 "Invalid SAS Address", /* 01h */
6044 NULL, /* 02h */
6045 "Invalid Page", /* 03h */
6046 NULL, /* 04h */
6047 "Task Terminated" /* 05h */
6048 };
6049 static char *pl_code_str[] = {
6050 NULL, /* 00h */
6051 "Open Failure", /* 01h */
6052 "Invalid Scatter Gather List", /* 02h */
6053 "Wrong Relative Offset or Frame Length", /* 03h */
6054 "Frame Transfer Error", /* 04h */
6055 "Transmit Frame Connected Low", /* 05h */
6056 "SATA Non-NCQ RW Error Bit Set", /* 06h */
6057 "SATA Read Log Receive Data Error", /* 07h */
6058 "SATA NCQ Fail All Commands After Error", /* 08h */
6059 "SATA Error in Receive Set Device Bit FIS", /* 09h */
6060 "Receive Frame Invalid Message", /* 0Ah */
6061 "Receive Context Message Valid Error", /* 0Bh */
6062 "Receive Frame Current Frame Error", /* 0Ch */
6063 "SATA Link Down", /* 0Dh */
6064 "Discovery SATA Init W IOS", /* 0Eh */
6065 "Config Invalid Page", /* 0Fh */
6066 "Discovery SATA Init Timeout", /* 10h */
6067 "Reset", /* 11h */
6068 "Abort", /* 12h */
6069 "IO Not Yet Executed", /* 13h */
6070 "IO Executed", /* 14h */
6071 NULL, /* 15h */
6072 NULL, /* 16h */
6073 NULL, /* 17h */
6074 NULL, /* 18h */
6075 NULL, /* 19h */
6076 NULL, /* 1Ah */
6077 NULL, /* 1Bh */
6078 NULL, /* 1Ch */
6079 NULL, /* 1Dh */
6080 NULL, /* 1Eh */
6081 NULL, /* 1Fh */
6082 "Enclosure Management" /* 20h */
6083 };
6084
6085/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6086/*
6087 * mpt_sas_log_info - Log information returned from SAS IOC.
6088 * @ioc: Pointer to MPT_ADAPTER structure
6089 * @log_info: U32 LogInfo reply word from the IOC
6090 *
6091 * Refer to lsi/mpi_log_sas.h.
6092 */
6093static void
6094mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info)
6095{
6096union loginfo_type {
6097 u32 loginfo;
6098 struct {
6099 u32 subcode:16;
6100 u32 code:8;
6101 u32 originator:4;
6102 u32 bus_type:4;
6103 }dw;
6104};
6105 union loginfo_type sas_loginfo;
6106 char *code_desc = NULL;
6107
6108 sas_loginfo.loginfo = log_info;
 6109 if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) ||
 6110 (sas_loginfo.dw.originator >= sizeof(originator_str)/sizeof(char*)))
6111 return;
6112 if ((sas_loginfo.dw.originator == 0 /*IOP*/) &&
6113 (sas_loginfo.dw.code < sizeof(iop_code_str)/sizeof(char*))) {
6114 code_desc = iop_code_str[sas_loginfo.dw.code];
6115 }else if ((sas_loginfo.dw.originator == 1 /*PL*/) &&
6116 (sas_loginfo.dw.code < sizeof(pl_code_str)/sizeof(char*) )) {
6117 code_desc = pl_code_str[sas_loginfo.dw.code];
6118 }
6119
6120 if (code_desc != NULL)
6121 printk(MYIOC_s_INFO_FMT
6122 "LogInfo(0x%08x): Originator={%s}, Code={%s},"
6123 " SubCode(0x%04x)\n",
6124 ioc->name,
6125 log_info,
6126 originator_str[sas_loginfo.dw.originator],
6127 code_desc,
6128 sas_loginfo.dw.subcode);
6129 else
6130 printk(MYIOC_s_INFO_FMT
6131 "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
6132 " SubCode(0x%04x)\n",
6133 ioc->name,
6134 log_info,
6135 originator_str[sas_loginfo.dw.originator],
6136 sas_loginfo.dw.code,
6137 sas_loginfo.dw.subcode);
6138}
6139
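A worked example with a hypothetical LogInfo word, assuming the little-endian bit-field layout above: 0x31120000 splits into bus_type 3 (SAS), originator 1 ("PL"), code 0x12 ("Abort" in pl_code_str) and subcode 0x0000, so the call below would log roughly the message shown in the comment.

	/* mpt_sas_log_info(ioc, 0x31120000) prints something like:
	 *   LogInfo(0x31120000): Originator={PL}, Code={Abort}, SubCode(0x0000)
	 */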
5656/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6140/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5657/* 6141/*
5658 * mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC. 6142 * mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
@@ -5814,6 +6298,7 @@ EXPORT_SYMBOL(mpt_findImVolumes);
5814EXPORT_SYMBOL(mpt_read_ioc_pg_3); 6298EXPORT_SYMBOL(mpt_read_ioc_pg_3);
5815EXPORT_SYMBOL(mpt_alloc_fw_memory); 6299EXPORT_SYMBOL(mpt_alloc_fw_memory);
5816EXPORT_SYMBOL(mpt_free_fw_memory); 6300EXPORT_SYMBOL(mpt_free_fw_memory);
6301EXPORT_SYMBOL(mptbase_sas_persist_operation);
5817 6302
5818 6303
5819/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6304/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f4827d923731..75105277e22f 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -65,6 +65,7 @@
65#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */ 65#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */
66#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */ 66#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */
67#include "lsi/mpi_tool.h" /* Tools support */ 67#include "lsi/mpi_tool.h" /* Tools support */
68#include "lsi/mpi_sas.h" /* SAS support */
68 69
69/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 70/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
70 71
@@ -76,8 +77,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 77#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
77#endif 78#endif
78 79
79#define MPT_LINUX_VERSION_COMMON "3.03.02" 80#define MPT_LINUX_VERSION_COMMON "3.03.03"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.02" 81#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.03"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 82#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 83
83#define show_mptmod_ver(s,ver) \ 84#define show_mptmod_ver(s,ver) \
@@ -423,7 +424,7 @@ typedef struct _MPT_IOCTL {
423/* 424/*
424 * Event Structure and define 425 * Event Structure and define
425 */ 426 */
426#define MPTCTL_EVENT_LOG_SIZE (0x0000000A) 427#define MPTCTL_EVENT_LOG_SIZE (0x000000032)
427typedef struct _mpt_ioctl_events { 428typedef struct _mpt_ioctl_events {
428 u32 event; /* Specified by define above */ 429 u32 event; /* Specified by define above */
429 u32 eventContext; /* Index or counter */ 430 u32 eventContext; /* Index or counter */
@@ -451,16 +452,13 @@ typedef struct _mpt_ioctl_events {
451#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */ 452#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */
452/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */ 453/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */
453 454
454typedef struct _ScsiCfgData { 455typedef struct _SpiCfgData {
455 u32 PortFlags; 456 u32 PortFlags;
456 int *nvram; /* table of device NVRAM values */ 457 int *nvram; /* table of device NVRAM values */
457 IOCPage2_t *pIocPg2; /* table of Raid Volumes */
458 IOCPage3_t *pIocPg3; /* table of physical disks */
459 IOCPage4_t *pIocPg4; /* SEP devices addressing */ 458 IOCPage4_t *pIocPg4; /* SEP devices addressing */
460 dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */ 459 dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */
461 int IocPg4Sz; /* IOCPage4 size */ 460 int IocPg4Sz; /* IOCPage4 size */
462 u8 dvStatus[MPT_MAX_SCSI_DEVICES]; 461 u8 dvStatus[MPT_MAX_SCSI_DEVICES];
463 int isRaid; /* bit field, 1 if RAID */
464 u8 minSyncFactor; /* 0xFF if async */ 462 u8 minSyncFactor; /* 0xFF if async */
465 u8 maxSyncOffset; /* 0 if async */ 463 u8 maxSyncOffset; /* 0 if async */
466 u8 maxBusWidth; /* 0 if narrow, 1 if wide */ 464 u8 maxBusWidth; /* 0 if narrow, 1 if wide */
@@ -472,10 +470,28 @@ typedef struct _ScsiCfgData {
472 u8 dvScheduled; /* 1 if scheduled */ 470 u8 dvScheduled; /* 1 if scheduled */
473 u8 forceDv; /* 1 to force DV scheduling */ 471 u8 forceDv; /* 1 to force DV scheduling */
474 u8 noQas; /* Disable QAS for this adapter */ 472 u8 noQas; /* Disable QAS for this adapter */
475 u8 Saf_Te; /* 1 to force all Processors as SAF-TE if Inquiry data length is too short to check for SAF-TE */ 473 u8 Saf_Te; /* 1 to force all Processors as
474 * SAF-TE if Inquiry data length
475 * is too short to check for SAF-TE
476 */
476 u8 mpt_dv; /* command line option: enhanced=1, basic=0 */ 477 u8 mpt_dv; /* command line option: enhanced=1, basic=0 */
478 u8 bus_reset; /* 1 to allow bus reset */
477 u8 rsvd[1]; 479 u8 rsvd[1];
478} ScsiCfgData; 480}SpiCfgData;
481
482typedef struct _SasCfgData {
483 u8 ptClear; /* 1 to automatically clear the
484 * persistent table.
485 * 0 to disable
486 * automatic clearing.
487 */
488}SasCfgData;
489
490typedef struct _RaidCfgData {
491 IOCPage2_t *pIocPg2; /* table of Raid Volumes */
492 IOCPage3_t *pIocPg3; /* table of physical disks */
493 int isRaid; /* bit field, 1 if RAID */
494}RaidCfgData;
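A minimal sketch of what the split means for callers, with target_id as a hypothetical SCSI target number: the RAID bookkeeping that used to live in spi_data moves to the new raid_data container, so the isRaid bit test keeps its meaning.

	/* formerly ioc->spi_data.isRaid */
	int target_is_volume =
	    (ioc->raid_data.isRaid & (1 << target_id)) != 0;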
479 495
480/* 496/*
481 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS 497 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -530,11 +546,16 @@ typedef struct _MPT_ADAPTER
530 u8 *sense_buf_pool; 546 u8 *sense_buf_pool;
531 dma_addr_t sense_buf_pool_dma; 547 dma_addr_t sense_buf_pool_dma;
532 u32 sense_buf_low_dma; 548 u32 sense_buf_low_dma;
549 u8 *HostPageBuffer; /* SAS - host page buffer support */
550 u32 HostPageBuffer_sz;
551 dma_addr_t HostPageBuffer_dma;
533 int mtrr_reg; 552 int mtrr_reg;
534 struct pci_dev *pcidev; /* struct pci_dev pointer */ 553 struct pci_dev *pcidev; /* struct pci_dev pointer */
535 u8 __iomem *memmap; /* mmap address */ 554 u8 __iomem *memmap; /* mmap address */
536 struct Scsi_Host *sh; /* Scsi Host pointer */ 555 struct Scsi_Host *sh; /* Scsi Host pointer */
537 ScsiCfgData spi_data; /* Scsi config. data */ 556 SpiCfgData spi_data; /* Scsi config. data */
557 RaidCfgData raid_data; /* Raid config. data */
558 SasCfgData sas_data; /* Sas config. data */
538 MPT_IOCTL *ioctl; /* ioctl data pointer */ 559 MPT_IOCTL *ioctl; /* ioctl data pointer */
539 struct proc_dir_entry *ioc_dentry; 560 struct proc_dir_entry *ioc_dentry;
540 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ 561 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
@@ -554,31 +575,35 @@ typedef struct _MPT_ADAPTER
554#else 575#else
555 u32 mfcnt; 576 u32 mfcnt;
556#endif 577#endif
557 u32 NB_for_64_byte_frame; 578 u32 NB_for_64_byte_frame;
558 u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)]; 579 u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)];
559 u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)]; 580 u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)];
560 IOCFactsReply_t facts; 581 IOCFactsReply_t facts;
561 PortFactsReply_t pfacts[2]; 582 PortFactsReply_t pfacts[2];
562 FCPortPage0_t fc_port_page0[2]; 583 FCPortPage0_t fc_port_page0[2];
584 struct timer_list persist_timer; /* persist table timer */
585 int persist_wait_done; /* persist completion flag */
586 u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
563 LANPage0_t lan_cnfg_page0; 587 LANPage0_t lan_cnfg_page0;
564 LANPage1_t lan_cnfg_page1; 588 LANPage1_t lan_cnfg_page1;
565 /* 589 /*
566 * Description: errata_flag_1064 590 * Description: errata_flag_1064
567 * If a PCIX read occurs within 1 or 2 cycles after the chip receives 591 * If a PCIX read occurs within 1 or 2 cycles after the chip receives
568 * a split completion for a read data, an internal address pointer incorrectly 592 * a split completion for a read data, an internal address pointer incorrectly
569 * increments by 32 bytes 593 * increments by 32 bytes
570 */ 594 */
571 int errata_flag_1064; 595 int errata_flag_1064;
572 u8 FirstWhoInit; 596 u8 FirstWhoInit;
573 u8 upload_fw; /* If set, do a fw upload */ 597 u8 upload_fw; /* If set, do a fw upload */
574 u8 reload_fw; /* Force a FW Reload on next reset */ 598 u8 reload_fw; /* Force a FW Reload on next reset */
575 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ 599 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
576 u8 pad1[4]; 600 u8 pad1[4];
577 int DoneCtx; 601 int DoneCtx;
578 int TaskCtx; 602 int TaskCtx;
579 int InternalCtx; 603 int InternalCtx;
580 struct list_head list; 604 struct list_head list;
581 struct net_device *netdev; 605 struct net_device *netdev;
606 struct list_head sas_topology;
582} MPT_ADAPTER; 607} MPT_ADAPTER;
583 608
584/* 609/*
@@ -964,6 +989,7 @@ extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
964extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); 989extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
965extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 990extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
966extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc); 991extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
992extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
967 993
968/* 994/*
969 * Public data decl's... 995 * Public data decl's...
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7577c2417e2e..cb2d59d5f5af 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1326,7 +1326,7 @@ mptctl_gettargetinfo (unsigned long arg)
1326 */ 1326 */
1327 if (hd && hd->Targets) { 1327 if (hd && hd->Targets) {
1328 mpt_findImVolumes(ioc); 1328 mpt_findImVolumes(ioc);
1329 pIoc2 = ioc->spi_data.pIocPg2; 1329 pIoc2 = ioc->raid_data.pIocPg2;
1330 for ( id = 0; id <= max_id; ) { 1330 for ( id = 0; id <= max_id; ) {
1331 if ( pIoc2 && pIoc2->NumActiveVolumes ) { 1331 if ( pIoc2 && pIoc2->NumActiveVolumes ) {
1332 if ( id == pIoc2->RaidVolume[0].VolumeID ) { 1332 if ( id == pIoc2->RaidVolume[0].VolumeID ) {
@@ -1348,7 +1348,7 @@ mptctl_gettargetinfo (unsigned long arg)
1348 --maxWordsLeft; 1348 --maxWordsLeft;
1349 goto next_id; 1349 goto next_id;
1350 } else { 1350 } else {
1351 pIoc3 = ioc->spi_data.pIocPg3; 1351 pIoc3 = ioc->raid_data.pIocPg3;
1352 for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) { 1352 for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) {
1353 if ( pIoc3->PhysDisk[jj].PhysDiskID == id ) 1353 if ( pIoc3->PhysDisk[jj].PhysDiskID == id )
1354 goto next_id; 1354 goto next_id;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 13771abea13f..a628be9bbbad 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -189,7 +189,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
189 printk(MYIOC_s_WARN_FMT 189 printk(MYIOC_s_WARN_FMT
190 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", 190 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
191 ioc->name, ioc); 191 ioc->name, ioc);
192 return -ENODEV; 192 return 0;
193 } 193 }
194 194
195 sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); 195 sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 52794be5a95c..ed3c891e388f 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -312,7 +312,12 @@ static int
312mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) 312mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
313{ 313{
314 struct net_device *dev = ioc->netdev; 314 struct net_device *dev = ioc->netdev;
315 struct mpt_lan_priv *priv = netdev_priv(dev); 315 struct mpt_lan_priv *priv;
316
317 if (dev == NULL)
318 return(1);
319 else
320 priv = netdev_priv(dev);
316 321
317 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n", 322 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
318 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( 323 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
new file mode 100644
index 000000000000..429820e48c69
--- /dev/null
+++ b/drivers/message/fusion/mptsas.c
@@ -0,0 +1,1235 @@
1/*
2 * linux/drivers/message/fusion/mptsas.c
3 * For use with LSI Logic PCI chip/adapter(s)
4 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
5 *
6 * Copyright (c) 1999-2005 LSI Logic Corporation
7 * (mailto:mpt_linux_developer@lsil.com)
8 * Copyright (c) 2005 Dell
9 */
10/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
11/*
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; version 2 of the License.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 NO WARRANTY
22 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
23 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
24 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
25 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
26 solely responsible for determining the appropriateness of using and
27 distributing the Program and assumes all risks associated with its
28 exercise of rights under this Agreement, including but not limited to
29 the risks and costs of program errors, damage to or loss of data,
30 programs or equipment, and unavailability or interruption of operations.
31
32 DISCLAIMER OF LIABILITY
33 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
34 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
36 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
39 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40
41 You should have received a copy of the GNU General Public License
42 along with this program; if not, write to the Free Software
43 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
44*/
45/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
46
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/errno.h>
51#include <linux/sched.h>
52#include <linux/workqueue.h>
53
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h>
57#include <scsi/scsi_transport_sas.h>
58
59#include "mptbase.h"
60#include "mptscsih.h"
61
62
63#define my_NAME "Fusion MPT SAS Host driver"
64#define my_VERSION MPT_LINUX_VERSION_COMMON
65#define MYNAM "mptsas"
66
67MODULE_AUTHOR(MODULEAUTHOR);
68MODULE_DESCRIPTION(my_NAME);
69MODULE_LICENSE("GPL");
70
71static int mpt_pq_filter;
72module_param(mpt_pq_filter, int, 0);
73MODULE_PARM_DESC(mpt_pq_filter,
74 "Enable peripheral qualifier filter: enable=1 "
75 "(default=0)");
76
77static int mpt_pt_clear;
78module_param(mpt_pt_clear, int, 0);
79MODULE_PARM_DESC(mpt_pt_clear,
80 "Clear persistency table: enable=1 "
81 "(default=MPTSCSIH_PT_CLEAR=0)");
82
83static int mptsasDoneCtx = -1;
84static int mptsasTaskCtx = -1;
85static int mptsasInternalCtx = -1; /* Used only for internal commands */
86
87
88/*
89 * SAS topology structures
90 *
91 * The MPT Fusion firmware interface spreads information about the
92 * SAS topology over many manufacture pages, thus we need some data
93 * structure to collect it and process it for the SAS transport class.
94 */
95
96struct mptsas_devinfo {
97 u16 handle; /* unique id to address this device */
98 u8 phy_id; /* phy number of parent device */
99 u8 port_id; /* sas physical port this device
100 is assoc'd with */
101 u8 target; /* logical target id of this device */
102 u8 bus; /* logical bus number of this device */
103 u64 sas_address; /* WWN of this device,
104 SATA is assigned by HBA,expander */
105 u32 device_info; /* bitfield detailed info about this device */
106};
107
108struct mptsas_phyinfo {
109 u8 phy_id; /* phy index */
110 u8 port_id; /* port number this phy is part of */
111 u8 negotiated_link_rate; /* nego'd link rate for this phy */
112 u8 hw_link_rate; /* hardware max/min phys link rate */
113 u8 programmed_link_rate; /* programmed max/min phy link rate */
114 struct mptsas_devinfo identify; /* point to phy device info */
115 struct mptsas_devinfo attached; /* point to attached device info */
116 struct sas_rphy *rphy;
117};
118
119struct mptsas_portinfo {
120 struct list_head list;
121 u16 handle; /* unique id to address this */
122 u8 num_phys; /* number of phys */
123 struct mptsas_phyinfo *phy_info;
124};
125
126/*
127 * This is pretty ugly. We will be able to seriously clean it up
128 * once the DV code in mptscsih goes away and we can properly
129 * implement ->target_alloc.
130 */
131static int
132mptsas_slave_alloc(struct scsi_device *device)
133{
134 struct Scsi_Host *host = device->host;
135 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
136 struct sas_rphy *rphy;
137 struct mptsas_portinfo *p;
138 VirtDevice *vdev;
139 uint target = device->id;
140 int i;
141
142 if ((vdev = hd->Targets[target]) != NULL)
143 goto out;
144
145 vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
146 if (!vdev) {
147 printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
148 hd->ioc->name, sizeof(VirtDevice));
149 return -ENOMEM;
150 }
151
152 memset(vdev, 0, sizeof(VirtDevice));
153 vdev->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
154 vdev->ioc_id = hd->ioc->id;
155
156 rphy = dev_to_rphy(device->sdev_target->dev.parent);
157 list_for_each_entry(p, &hd->ioc->sas_topology, list) {
158 for (i = 0; i < p->num_phys; i++) {
159 if (p->phy_info[i].attached.sas_address ==
160 rphy->identify.sas_address) {
161 vdev->target_id =
162 p->phy_info[i].attached.target;
163 vdev->bus_id = p->phy_info[i].attached.bus;
164 hd->Targets[device->id] = vdev;
165 goto out;
166 }
167 }
168 }
169
170 printk("No matching SAS device found!!\n");
171 kfree(vdev);
172 return -ENODEV;
173
174 out:
175 vdev->num_luns++;
176 device->hostdata = vdev;
177 return 0;
178}
179
180static struct scsi_host_template mptsas_driver_template = {
181 .proc_name = "mptsas",
182 .proc_info = mptscsih_proc_info,
183 .name = "MPT SAS Host",
184 .info = mptscsih_info,
185 .queuecommand = mptscsih_qcmd,
186 .slave_alloc = mptsas_slave_alloc,
187 .slave_configure = mptscsih_slave_configure,
188 .slave_destroy = mptscsih_slave_destroy,
189 .change_queue_depth = mptscsih_change_queue_depth,
190 .eh_abort_handler = mptscsih_abort,
191 .eh_device_reset_handler = mptscsih_dev_reset,
192 .eh_bus_reset_handler = mptscsih_bus_reset,
193 .eh_host_reset_handler = mptscsih_host_reset,
194 .bios_param = mptscsih_bios_param,
195 .can_queue = MPT_FC_CAN_QUEUE,
196 .this_id = -1,
197 .sg_tablesize = MPT_SCSI_SG_DEPTH,
198 .max_sectors = 8192,
199 .cmd_per_lun = 7,
200 .use_clustering = ENABLE_CLUSTERING,
201};
202
203static struct sas_function_template mptsas_transport_functions = {
204};
205
206static struct scsi_transport_template *mptsas_transport_template;
207
208#ifdef SASDEBUG
209static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
210{
211 printk("---- IO UNIT PAGE 0 ------------\n");
212 printk("Handle=0x%X\n",
213 le16_to_cpu(phy_data->AttachedDeviceHandle));
214 printk("Controller Handle=0x%X\n",
215 le16_to_cpu(phy_data->ControllerDevHandle));
216 printk("Port=0x%X\n", phy_data->Port);
217 printk("Port Flags=0x%X\n", phy_data->PortFlags);
218 printk("PHY Flags=0x%X\n", phy_data->PhyFlags);
219 printk("Negotiated Link Rate=0x%X\n", phy_data->NegotiatedLinkRate);
220 printk("Controller PHY Device Info=0x%X\n",
221 le32_to_cpu(phy_data->ControllerPhyDeviceInfo));
222 printk("DiscoveryStatus=0x%X\n",
223 le32_to_cpu(phy_data->DiscoveryStatus));
224 printk("\n");
225}
226
227static void mptsas_print_phy_pg0(SasPhyPage0_t *pg0)
228{
229 __le64 sas_address;
230
231 memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
232
233 printk("---- SAS PHY PAGE 0 ------------\n");
234 printk("Attached Device Handle=0x%X\n",
235 le16_to_cpu(pg0->AttachedDevHandle));
236 printk("SAS Address=0x%llX\n",
237 (unsigned long long)le64_to_cpu(sas_address));
238 printk("Attached PHY Identifier=0x%X\n", pg0->AttachedPhyIdentifier);
239 printk("Attached Device Info=0x%X\n",
240 le32_to_cpu(pg0->AttachedDeviceInfo));
241 printk("Programmed Link Rate=0x%X\n", pg0->ProgrammedLinkRate);
242 printk("Change Count=0x%X\n", pg0->ChangeCount);
243 printk("PHY Info=0x%X\n", le32_to_cpu(pg0->PhyInfo));
244 printk("\n");
245}
246
247static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
248{
249 __le64 sas_address;
250
251 memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
252
253 printk("---- SAS DEVICE PAGE 0 ---------\n");
254 printk("Handle=0x%X\n" ,le16_to_cpu(pg0->DevHandle));
255 printk("Enclosure Handle=0x%X\n", le16_to_cpu(pg0->EnclosureHandle));
256 printk("Slot=0x%X\n", le16_to_cpu(pg0->Slot));
257 printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
258 printk("Target ID=0x%X\n", pg0->TargetID);
259 printk("Bus=0x%X\n", pg0->Bus);
260 printk("PhyNum=0x%X\n", pg0->PhyNum);
261 printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
262 printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
263 printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
264 printk("Physical Port=0x%X\n", pg0->PhysicalPort);
265 printk("\n");
266}
267
268static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
269{
270 printk("---- SAS EXPANDER PAGE 1 ------------\n");
271
272 printk("Physical Port=0x%X\n", pg1->PhysicalPort);
273 printk("PHY Identifier=0x%X\n", pg1->Phy);
274 printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
275 printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
276 printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
277 printk("Owner Device Handle=0x%X\n",
278 le16_to_cpu(pg1->OwnerDevHandle));
279 printk("Attached Device Handle=0x%X\n",
280 le16_to_cpu(pg1->AttachedDevHandle));
281}
282#else
283#define mptsas_print_phy_data(phy_data) do { } while (0)
284#define mptsas_print_phy_pg0(pg0) do { } while (0)
285#define mptsas_print_device_pg0(pg0) do { } while (0)
286#define mptsas_print_expander_pg1(pg1) do { } while (0)
287#endif
288
289static int
290mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
291{
292 ConfigExtendedPageHeader_t hdr;
293 CONFIGPARMS cfg;
294 SasIOUnitPage0_t *buffer;
295 dma_addr_t dma_handle;
296 int error, i;
297
298 hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
299 hdr.ExtPageLength = 0;
300 hdr.PageNumber = 0;
301 hdr.Reserved1 = 0;
302 hdr.Reserved2 = 0;
303 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
304 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
305
306 cfg.cfghdr.ehdr = &hdr;
307 cfg.physAddr = -1;
308 cfg.pageAddr = 0;
309 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
310 cfg.dir = 0; /* read */
311 cfg.timeout = 10;
312
313 error = mpt_config(ioc, &cfg);
314 if (error)
315 goto out;
316 if (!hdr.ExtPageLength) {
317 error = -ENXIO;
318 goto out;
319 }
320
321 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
322 &dma_handle);
323 if (!buffer) {
324 error = -ENOMEM;
325 goto out;
326 }
327
328 cfg.physAddr = dma_handle;
329 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
330
331 error = mpt_config(ioc, &cfg);
332 if (error)
333 goto out_free_consistent;
334
335 port_info->num_phys = buffer->NumPhys;
336 port_info->phy_info = kcalloc(port_info->num_phys,
337 sizeof(struct mptsas_phyinfo),GFP_KERNEL);
338 if (!port_info->phy_info) {
339 error = -ENOMEM;
340 goto out_free_consistent;
341 }
342
343 for (i = 0; i < port_info->num_phys; i++) {
344 mptsas_print_phy_data(&buffer->PhyData[i]);
345 port_info->phy_info[i].phy_id = i;
346 port_info->phy_info[i].port_id =
347 buffer->PhyData[i].Port;
348 port_info->phy_info[i].negotiated_link_rate =
349 buffer->PhyData[i].NegotiatedLinkRate;
350 }
351
352 out_free_consistent:
353 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
354 buffer, dma_handle);
355 out:
356 return error;
357}
358
359static int
360mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
361 u32 form, u32 form_specific)
362{
363 ConfigExtendedPageHeader_t hdr;
364 CONFIGPARMS cfg;
365 SasPhyPage0_t *buffer;
366 dma_addr_t dma_handle;
367 int error;
368
369 hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
370 hdr.ExtPageLength = 0;
371 hdr.PageNumber = 0;
372 hdr.Reserved1 = 0;
373 hdr.Reserved2 = 0;
374 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
375 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
376
377 cfg.cfghdr.ehdr = &hdr;
378 cfg.dir = 0; /* read */
379 cfg.timeout = 10;
380
381 /* Get Phy Pg 0 for each Phy. */
382 cfg.physAddr = -1;
383 cfg.pageAddr = form + form_specific;
384 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
385
386 error = mpt_config(ioc, &cfg);
387 if (error)
388 goto out;
389
390 if (!hdr.ExtPageLength) {
391 error = -ENXIO;
392 goto out;
393 }
394
395 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
396 &dma_handle);
397 if (!buffer) {
398 error = -ENOMEM;
399 goto out;
400 }
401
402 cfg.physAddr = dma_handle;
403 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
404
405 error = mpt_config(ioc, &cfg);
406 if (error)
407 goto out_free_consistent;
408
409 mptsas_print_phy_pg0(buffer);
410
411 phy_info->hw_link_rate = buffer->HwLinkRate;
412 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
413 phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
414 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
415
416 out_free_consistent:
417 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
418 buffer, dma_handle);
419 out:
420 return error;
421}
422
423static int
424mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
425 u32 form, u32 form_specific)
426{
427 ConfigExtendedPageHeader_t hdr;
428 CONFIGPARMS cfg;
429 SasDevicePage0_t *buffer;
430 dma_addr_t dma_handle;
431 __le64 sas_address;
432 int error;
433
434 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
435 hdr.ExtPageLength = 0;
436 hdr.PageNumber = 0;
437 hdr.Reserved1 = 0;
438 hdr.Reserved2 = 0;
439 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
440 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
441
442 cfg.cfghdr.ehdr = &hdr;
443 cfg.pageAddr = form + form_specific;
444 cfg.physAddr = -1;
445 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
446 cfg.dir = 0; /* read */
447 cfg.timeout = 10;
448
449 error = mpt_config(ioc, &cfg);
450 if (error)
451 goto out;
452 if (!hdr.ExtPageLength) {
453 error = -ENXIO;
454 goto out;
455 }
456
457 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
458 &dma_handle);
459 if (!buffer) {
460 error = -ENOMEM;
461 goto out;
462 }
463
464 cfg.physAddr = dma_handle;
465 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
466
467 error = mpt_config(ioc, &cfg);
468 if (error)
469 goto out_free_consistent;
470
471 mptsas_print_device_pg0(buffer);
472
473 device_info->handle = le16_to_cpu(buffer->DevHandle);
474 device_info->phy_id = buffer->PhyNum;
475 device_info->port_id = buffer->PhysicalPort;
476 device_info->target = buffer->TargetID;
477 device_info->bus = buffer->Bus;
478 memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
479 device_info->sas_address = le64_to_cpu(sas_address);
480 device_info->device_info =
481 le32_to_cpu(buffer->DeviceInfo);
482
483 out_free_consistent:
484 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
485 buffer, dma_handle);
486 out:
487 return error;
488}
489
490static int
491mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
492 u32 form, u32 form_specific)
493{
494 ConfigExtendedPageHeader_t hdr;
495 CONFIGPARMS cfg;
496 SasExpanderPage0_t *buffer;
497 dma_addr_t dma_handle;
498 int error;
499
500 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
501 hdr.ExtPageLength = 0;
502 hdr.PageNumber = 0;
503 hdr.Reserved1 = 0;
504 hdr.Reserved2 = 0;
505 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
506 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
507
508 cfg.cfghdr.ehdr = &hdr;
509 cfg.physAddr = -1;
510 cfg.pageAddr = form + form_specific;
511 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
512 cfg.dir = 0; /* read */
513 cfg.timeout = 10;
514
515 error = mpt_config(ioc, &cfg);
516 if (error)
517 goto out;
518
519 if (!hdr.ExtPageLength) {
520 error = -ENXIO;
521 goto out;
522 }
523
524 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
525 &dma_handle);
526 if (!buffer) {
527 error = -ENOMEM;
528 goto out;
529 }
530
531 cfg.physAddr = dma_handle;
532 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
533
534 error = mpt_config(ioc, &cfg);
535 if (error)
536 goto out_free_consistent;
537
538 /* save config data */
539 port_info->num_phys = buffer->NumPhys;
540 port_info->handle = le16_to_cpu(buffer->DevHandle);
541 port_info->phy_info = kcalloc(port_info->num_phys,
542 sizeof(struct mptsas_phyinfo),GFP_KERNEL);
543 if (!port_info->phy_info) {
544 error = -ENOMEM;
545 goto out_free_consistent;
546 }
547
548 out_free_consistent:
549 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
550 buffer, dma_handle);
551 out:
552 return error;
553}
554
555static int
556mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
557 u32 form, u32 form_specific)
558{
559 ConfigExtendedPageHeader_t hdr;
560 CONFIGPARMS cfg;
561 SasExpanderPage1_t *buffer;
562 dma_addr_t dma_handle;
563 int error;
564
565 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
566 hdr.ExtPageLength = 0;
567 hdr.PageNumber = 1;
568 hdr.Reserved1 = 0;
569 hdr.Reserved2 = 0;
570 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
571 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
572
573 cfg.cfghdr.ehdr = &hdr;
574 cfg.physAddr = -1;
575 cfg.pageAddr = form + form_specific;
576 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
577 cfg.dir = 0; /* read */
578 cfg.timeout = 10;
579
580 error = mpt_config(ioc, &cfg);
581 if (error)
582 goto out;
583
584 if (!hdr.ExtPageLength) {
585 error = -ENXIO;
586 goto out;
587 }
588
589 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
590 &dma_handle);
591 if (!buffer) {
592 error = -ENOMEM;
593 goto out;
594 }
595
596 cfg.physAddr = dma_handle;
597 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
598
599 error = mpt_config(ioc, &cfg);
600 if (error)
601 goto out_free_consistent;
602
603
604 mptsas_print_expander_pg1(buffer);
605
606 /* save config data */
607 phy_info->phy_id = buffer->Phy;
608 phy_info->port_id = buffer->PhysicalPort;
609 phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
610 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
611 phy_info->hw_link_rate = buffer->HwLinkRate;
612 phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
613 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
614
615
616 out_free_consistent:
617 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
618 buffer, dma_handle);
619 out:
620 return error;
621}
622
623static void
624mptsas_parse_device_info(struct sas_identify *identify,
625 struct mptsas_devinfo *device_info)
626{
627 u16 protocols;
628
629 identify->sas_address = device_info->sas_address;
630 identify->phy_identifier = device_info->phy_id;
631
632 /*
633 * Fill in Phy Initiator Port Protocol.
634 * Bits 6:3, more than one bit can be set, fall through cases.
635 */
636 protocols = device_info->device_info & 0x78;
637 identify->initiator_port_protocols = 0;
638 if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
639 identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
640 if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
641 identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
642 if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
643 identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
644 if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
645 identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
646
647 /*
648 * Fill in Phy Target Port Protocol.
649 * Bits 10:7, more than one bit can be set, fall through cases.
650 */
651 protocols = device_info->device_info & 0x780;
652 identify->target_port_protocols = 0;
653 if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
654 identify->target_port_protocols |= SAS_PROTOCOL_SSP;
655 if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
656 identify->target_port_protocols |= SAS_PROTOCOL_STP;
657 if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
658 identify->target_port_protocols |= SAS_PROTOCOL_SMP;
659 if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
660 identify->target_port_protocols |= SAS_PROTOCOL_SATA;
661
662 /*
663 * Fill in Attached device type.
664 */
665 switch (device_info->device_info &
666 MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
667 case MPI_SAS_DEVICE_INFO_NO_DEVICE:
668 identify->device_type = SAS_PHY_UNUSED;
669 break;
670 case MPI_SAS_DEVICE_INFO_END_DEVICE:
671 identify->device_type = SAS_END_DEVICE;
672 break;
673 case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
674 identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
675 break;
676 case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
677 identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
678 break;
679 }
680}
681
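/*
 * Illustrative note (not part of the patch): worked example for the bit
 * fields decoded by mptsas_parse_device_info() above, with a hypothetical
 * value and assuming the usual MPI layout in which SSP occupies the top
 * bit of each protocol field:
 *
 *	device_info = 0x0401
 *	  bits 2:0  = 0x1            -> MPI_SAS_DEVICE_INFO_END_DEVICE
 *	                                -> device_type = SAS_END_DEVICE
 *	  bits 6:3  (mask 0x78)  = 0 -> no initiator protocols
 *	  bits 10:7 (mask 0x780)     -> bit 10 set -> SSP target
 *	                                -> target_port_protocols = SAS_PROTOCOL_SSP
 *
 * i.e. a plain SAS disk: an end device that is an SSP target and has no
 * initiator role.
 */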
682static int mptsas_probe_one_phy(struct device *dev,
683 struct mptsas_phyinfo *phy_info, int index)
684{
685 struct sas_phy *port;
686 int error;
687
688 port = sas_phy_alloc(dev, index);
689 if (!port)
690 return -ENOMEM;
691
692 port->port_identifier = phy_info->port_id;
693 mptsas_parse_device_info(&port->identify, &phy_info->identify);
694
695 /*
696 * Set Negotiated link rate.
697 */
698 switch (phy_info->negotiated_link_rate) {
699 case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
700 port->negotiated_linkrate = SAS_PHY_DISABLED;
701 break;
702 case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
703 port->negotiated_linkrate = SAS_LINK_RATE_FAILED;
704 break;
705 case MPI_SAS_IOUNIT0_RATE_1_5:
706 port->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
707 break;
708 case MPI_SAS_IOUNIT0_RATE_3_0:
709 port->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
710 break;
711 case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
712 case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
713 default:
714 port->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
715 break;
716 }
717
718 /*
719 * Set Max hardware link rate.
720 */
721 switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
722 case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
723 port->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
724 break;
725 case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
726 port->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
727 break;
728 default:
729 break;
730 }
731
732 /*
733 * Set Max programmed link rate.
734 */
735 switch (phy_info->programmed_link_rate &
736 MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
737 case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
738 port->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
739 break;
740 case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
741 port->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
742 break;
743 default:
744 break;
745 }
746
747 /*
748 * Set Min hardware link rate.
749 */
750 switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
751 case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
752 port->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
753 break;
754 case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
755 port->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
756 break;
757 default:
758 break;
759 }
760
761 /*
762 * Set Min programmed link rate.
763 */
764 switch (phy_info->programmed_link_rate &
765 MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
766 case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
767 port->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
768 break;
769 case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
770 port->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
771 break;
772 default:
773 break;
774 }
775
776 error = sas_phy_add(port);
777 if (error) {
778 sas_phy_free(port);
779 return error;
780 }
781
782 if (phy_info->attached.handle) {
783 struct sas_rphy *rphy;
784
785 rphy = sas_rphy_alloc(port);
786 if (!rphy)
787 return 0; /* non-fatal: an rphy can be added later */
788
789 mptsas_parse_device_info(&rphy->identify, &phy_info->attached);
790 error = sas_rphy_add(rphy);
791 if (error) {
792 sas_rphy_free(rphy);
793 return error;
794 }
795
796 phy_info->rphy = rphy;
797 }
798
799 return 0;
800}
801
802static int
803mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
804{
805 struct mptsas_portinfo *port_info;
806 u32 handle = 0xFFFF;
807 int error = -ENOMEM, i;
808
809 port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
810 if (!port_info)
811 goto out;
812 memset(port_info, 0, sizeof(*port_info));
813
814 error = mptsas_sas_io_unit_pg0(ioc, port_info);
815 if (error)
816 goto out_free_port_info;
817
818 list_add_tail(&port_info->list, &ioc->sas_topology);
819
820 for (i = 0; i < port_info->num_phys; i++) {
821 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
822 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
823 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
824
825 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
826 (MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
827 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
828 handle = port_info->phy_info[i].identify.handle;
829
830 if (port_info->phy_info[i].attached.handle) {
831 mptsas_sas_device_pg0(ioc,
832 &port_info->phy_info[i].attached,
833 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
834 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
835 port_info->phy_info[i].attached.handle);
836 }
837
838 mptsas_probe_one_phy(&ioc->sh->shost_gendev,
839 &port_info->phy_info[i], *index);
840 (*index)++;
841 }
842
843 return 0;
844
845 out_free_port_info:
846 kfree(port_info);
847 out:
848 return error;
849}
850
851static int
852mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
853{
854 struct mptsas_portinfo *port_info, *p;
855 int error = -ENOMEM, i, j;
856
857 port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
858 if (!port_info)
859 goto out;
860 memset(port_info, 0, sizeof(*port_info));
861
862 error = mptsas_sas_expander_pg0(ioc, port_info,
863 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
864 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
865 if (error)
866 goto out_free_port_info;
867
868 *handle = port_info->handle;
869
870 list_add_tail(&port_info->list, &ioc->sas_topology);
871 for (i = 0; i < port_info->num_phys; i++) {
872 struct device *parent;
873
874 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
875 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
876 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
877
878 if (port_info->phy_info[i].identify.handle) {
879 mptsas_sas_device_pg0(ioc,
880 &port_info->phy_info[i].identify,
881 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
882 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
883 port_info->phy_info[i].identify.handle);
884 }
885
886 if (port_info->phy_info[i].attached.handle) {
887 mptsas_sas_device_pg0(ioc,
888 &port_info->phy_info[i].attached,
889 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
890 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
891 port_info->phy_info[i].attached.handle);
892 }
893
894 /*
895 * If we find a parent port handle, this expander is
896 * attached to another expander; otherwise it hangs off
897 * the HBA phys.
898 */
899 parent = &ioc->sh->shost_gendev;
900 list_for_each_entry(p, &ioc->sas_topology, list) {
901 for (j = 0; j < p->num_phys; j++) {
902 if (port_info->phy_info[i].identify.handle ==
903 p->phy_info[j].attached.handle)
904 parent = &p->phy_info[j].rphy->dev;
905 }
906 }
907
908 mptsas_probe_one_phy(parent, &port_info->phy_info[i], *index);
909 (*index)++;
910 }
911
912 return 0;
913
914 out_free_port_info:
915 kfree(port_info);
916 out:
917 return error;
918}
919
920static void
921mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
922{
923 u32 handle = 0xFFFF;
924 int index = 0;
925
926 mptsas_probe_hba_phys(ioc, &index);
927 while (!mptsas_probe_expander_phys(ioc, &handle, &index))
928 ;
929}
930
931static int
932mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
933{
934 struct Scsi_Host *sh;
935 MPT_SCSI_HOST *hd;
936 MPT_ADAPTER *ioc;
937 unsigned long flags;
938 int sz, ii;
939 int numSGE = 0;
940 int scale;
941 int ioc_cap;
942 u8 *mem;
943 int error=0;
944 int r;
945
946 r = mpt_attach(pdev,id);
947 if (r)
948 return r;
949
950 ioc = pci_get_drvdata(pdev);
951 ioc->DoneCtx = mptsasDoneCtx;
952 ioc->TaskCtx = mptsasTaskCtx;
953 ioc->InternalCtx = mptsasInternalCtx;
954
955 /* Added sanity check on readiness of the MPT adapter.
956 */
957 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
958 printk(MYIOC_s_WARN_FMT
959 "Skipping because it's not operational!\n",
960 ioc->name);
961 return -ENODEV;
962 }
963
964 if (!ioc->active) {
965 printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
966 ioc->name);
967 return -ENODEV;
968 }
969
970 /* Sanity check - ensure at least 1 port is INITIATOR capable
971 */
972 ioc_cap = 0;
973 for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
974 if (ioc->pfacts[ii].ProtocolFlags &
975 MPI_PORTFACTS_PROTOCOL_INITIATOR)
976 ioc_cap++;
977 }
978
979 if (!ioc_cap) {
980 printk(MYIOC_s_WARN_FMT
981 "Skipping ioc=%p because SCSI Initiator mode "
982 "is NOT enabled!\n", ioc->name, ioc);
983 return 0;
984 }
985
986 sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
987 if (!sh) {
988 printk(MYIOC_s_WARN_FMT
989 "Unable to register controller with SCSI subsystem\n",
990 ioc->name);
991 return -1;
992 }
993
994 spin_lock_irqsave(&ioc->FreeQlock, flags);
995
996 /* Attach the SCSI Host to the IOC structure
997 */
998 ioc->sh = sh;
999
1000 sh->io_port = 0;
1001 sh->n_io_port = 0;
1002 sh->irq = 0;
1003
1004 /* set 16 byte cdb's */
1005 sh->max_cmd_len = 16;
1006
1007 sh->max_id = ioc->pfacts->MaxDevices + 1;
1008
1009 sh->transportt = mptsas_transport_template;
1010
1011 sh->max_lun = MPT_LAST_LUN + 1;
1012 sh->max_channel = 0;
1013 sh->this_id = ioc->pfacts[0].PortSCSIID;
1014
1015 /* Required entry.
1016 */
1017 sh->unique_id = ioc->id;
1018
1019 INIT_LIST_HEAD(&ioc->sas_topology);
1020
1021 /* Verify that we won't exceed the maximum
1022 * number of chain buffers
1023 * We can optimize: ZZ = req_sz/sizeof(SGE)
1024 * For 32bit SGE's:
1025 * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
1026 * + (req_sz - 64)/sizeof(SGE)
1027 * A slightly different algorithm is required for
1028 * 64bit SGEs.
1029 */
1030 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
1031 if (sizeof(dma_addr_t) == sizeof(u64)) {
1032 numSGE = (scale - 1) *
1033 (ioc->facts.MaxChainDepth-1) + scale +
1034 (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
1035 sizeof(u32));
1036 } else {
1037 numSGE = 1 + (scale - 1) *
1038 (ioc->facts.MaxChainDepth-1) + scale +
1039 (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
1040 sizeof(u32));
1041 }
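/*
 * Illustrative note (not part of the patch): worked example of the numSGE
 * calculation above for a 64-bit DMA build, using hypothetical values
 * req_sz = 128 and facts.MaxChainDepth = 8 (example numbers only):
 *
 *	scale  = 128 / (8 + 4)                      = 10
 *	numSGE = (10 - 1) * (8 - 1) + 10 + (128 - 60) / 12
 *	       = 63 + 10 + 5
 *	       = 78
 *
 * so if MPT_SCSI_SG_DEPTH is larger than 78, sg_tablesize is lowered to 78
 * by the check that follows.
 */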
1042
1043 if (numSGE < sh->sg_tablesize) {
1044 /* Reset this value */
1045 dprintk((MYIOC_s_INFO_FMT
1046 "Resetting sg_tablesize to %d from %d\n",
1047 ioc->name, numSGE, sh->sg_tablesize));
1048 sh->sg_tablesize = numSGE;
1049 }
1050
1051 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1052
1053 hd = (MPT_SCSI_HOST *) sh->hostdata;
1054 hd->ioc = ioc;
1055
1056 /* SCSI needs scsi_cmnd lookup table!
1057 * (with size equal to req_depth*PtrSz!)
1058 */
1059 sz = ioc->req_depth * sizeof(void *);
1060 mem = kmalloc(sz, GFP_ATOMIC);
1061 if (mem == NULL) {
1062 error = -ENOMEM;
1063 goto mptsas_probe_failed;
1064 }
1065
1066 memset(mem, 0, sz);
1067 hd->ScsiLookup = (struct scsi_cmnd **) mem;
1068
1069 dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n",
1070 ioc->name, hd->ScsiLookup, sz));
1071
1072 /* Allocate memory for the device structures.
1073 * A non-Null pointer at an offset
1074 * indicates a device exists.
1075 * max_id = 1 + maximum id (hosts.h)
1076 */
1077 sz = sh->max_id * sizeof(void *);
1078 mem = kmalloc(sz, GFP_ATOMIC);
1079 if (mem == NULL) {
1080 error = -ENOMEM;
1081 goto mptsas_probe_failed;
1082 }
1083
1084 memset(mem, 0, sz);
1085 hd->Targets = (VirtDevice **) mem;
1086
1087 dprintk((KERN_INFO
1088 " Targets @ %p, sz=%d\n", hd->Targets, sz));
1089
1090 /* Clear the TM flags
1091 */
1092 hd->tmPending = 0;
1093 hd->tmState = TM_STATE_NONE;
1094 hd->resetPending = 0;
1095 hd->abortSCpnt = NULL;
1096
1097 /* Clear the pointer used to store
1098 * single-threaded commands, i.e., those
1099 * issued during a bus scan, dv and
1100 * configuration pages.
1101 */
1102 hd->cmdPtr = NULL;
1103
1104 /* Initialize this SCSI Host's timer
1105 * To use, set the timer expires field
1106 * and add_timer
1107 */
1108 init_timer(&hd->timer);
1109 hd->timer.data = (unsigned long) hd;
1110 hd->timer.function = mptscsih_timer_expired;
1111
1112 hd->mpt_pq_filter = mpt_pq_filter;
1113 ioc->sas_data.ptClear = mpt_pt_clear;
1114
1115 if (ioc->sas_data.ptClear==1) {
1116 mptbase_sas_persist_operation(
1117 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
1118 }
1119
1120 ddvprintk((MYIOC_s_INFO_FMT
1121 "mpt_pq_filter %x mpt_pq_filter %x\n",
1122 ioc->name,
1123 mpt_pq_filter,
1124 mpt_pq_filter));
1125
1126 init_waitqueue_head(&hd->scandv_waitq);
1127 hd->scandv_wait_done = 0;
1128 hd->last_queue_full = 0;
1129
1130 error = scsi_add_host(sh, &ioc->pcidev->dev);
1131 if (error) {
1132 dprintk((KERN_ERR MYNAM
1133 "scsi_add_host failed\n"));
1134 goto mptsas_probe_failed;
1135 }
1136
1137 mptsas_scan_sas_topology(ioc);
1138
1139 return 0;
1140
1141mptsas_probe_failed:
1142
1143 mptscsih_remove(pdev);
1144 return error;
1145}
1146
1147static void __devexit mptsas_remove(struct pci_dev *pdev)
1148{
1149 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1150 struct mptsas_portinfo *p, *n;
1151
1152 sas_remove_host(ioc->sh);
1153
1154 list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
1155 list_del(&p->list);
1156 kfree(p);
1157 }
1158
1159 mptscsih_remove(pdev);
1160}
1161
1162static struct pci_device_id mptsas_pci_table[] = {
1163 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064,
1164 PCI_ANY_ID, PCI_ANY_ID },
1165 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066,
1166 PCI_ANY_ID, PCI_ANY_ID },
1167 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068,
1168 PCI_ANY_ID, PCI_ANY_ID },
1169 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064E,
1170 PCI_ANY_ID, PCI_ANY_ID },
1171 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066E,
1172 PCI_ANY_ID, PCI_ANY_ID },
1173 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068E,
1174 PCI_ANY_ID, PCI_ANY_ID },
1175 {0} /* Terminating entry */
1176};
1177MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
1178
1179
1180static struct pci_driver mptsas_driver = {
1181 .name = "mptsas",
1182 .id_table = mptsas_pci_table,
1183 .probe = mptsas_probe,
1184 .remove = __devexit_p(mptsas_remove),
1185 .shutdown = mptscsih_shutdown,
1186#ifdef CONFIG_PM
1187 .suspend = mptscsih_suspend,
1188 .resume = mptscsih_resume,
1189#endif
1190};
1191
1192static int __init
1193mptsas_init(void)
1194{
1195 show_mptmod_ver(my_NAME, my_VERSION);
1196
1197 mptsas_transport_template =
1198 sas_attach_transport(&mptsas_transport_functions);
1199 if (!mptsas_transport_template)
1200 return -ENODEV;
1201
1202 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
1203 mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
1204 mptsasInternalCtx =
1205 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
1206
1207 if (mpt_event_register(mptsasDoneCtx, mptscsih_event_process) == 0) {
1208 devtprintk((KERN_INFO MYNAM
1209 ": Registered for IOC event notifications\n"));
1210 }
1211
1212 if (mpt_reset_register(mptsasDoneCtx, mptscsih_ioc_reset) == 0) {
1213 dprintk((KERN_INFO MYNAM
1214 ": Registered for IOC reset notifications\n"));
1215 }
1216
1217 return pci_register_driver(&mptsas_driver);
1218}
1219
1220static void __exit
1221mptsas_exit(void)
1222{
1223 pci_unregister_driver(&mptsas_driver);
1224 sas_release_transport(mptsas_transport_template);
1225
1226 mpt_reset_deregister(mptsasDoneCtx);
1227 mpt_event_deregister(mptsasDoneCtx);
1228
1229 mpt_deregister(mptsasInternalCtx);
1230 mpt_deregister(mptsasTaskCtx);
1231 mpt_deregister(mptsasDoneCtx);
1232}
1233
1234module_init(mptsas_init);
1235module_exit(mptsas_exit);
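All five mptsas_sas_*_pg0/pg1 helpers in the new file above follow the same two-pass MPT config-page protocol: an MPI_CONFIG_ACTION_PAGE_HEADER request to learn ExtPageLength, then a DMA-coherent buffer and an MPI_CONFIG_ACTION_PAGE_READ_CURRENT request into it, after which the interesting fields are copied out and the buffer is freed. A condensed sketch of that shared pattern (illustrative only, not part of the patch; the real helpers also set the page-specific PageVersion and copy out the fields they need before freeing):

static int mptsas_read_ext_page(MPT_ADAPTER *ioc, u8 ext_page_type,
				u8 page_number, u32 page_address)
{
	ConfigExtendedPageHeader_t hdr;
	CONFIGPARMS cfg;
	dma_addr_t dma_handle;
	void *buffer;
	int error;

	memset(&hdr, 0, sizeof(hdr));
	hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	hdr.ExtPageType = ext_page_type;
	hdr.PageNumber = page_number;

	cfg.cfghdr.ehdr = &hdr;
	cfg.physAddr = -1;
	cfg.pageAddr = page_address;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;				/* read */
	cfg.timeout = 10;

	error = mpt_config(ioc, &cfg);		/* pass 1: learn the length */
	if (error)
		return error;
	if (!hdr.ExtPageLength)
		return -ENXIO;

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
				      &dma_handle);
	if (!buffer)
		return -ENOMEM;

	cfg.physAddr = dma_handle;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	error = mpt_config(ioc, &cfg);		/* pass 2: read the page */

	/* ... callers copy the fields they need out of 'buffer' here ... */

	pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
			    buffer, dma_handle);
	return error;
}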
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 4a003dc5fde8..5cb07eb224d7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -62,6 +62,7 @@
62#include <scsi/scsi_device.h> 62#include <scsi/scsi_device.h>
63#include <scsi/scsi_host.h> 63#include <scsi/scsi_host.h>
64#include <scsi/scsi_tcq.h> 64#include <scsi/scsi_tcq.h>
65#include <scsi/scsi_dbg.h>
65 66
66#include "mptbase.h" 67#include "mptbase.h"
67#include "mptscsih.h" 68#include "mptscsih.h"
@@ -93,8 +94,9 @@ typedef struct _BIG_SENSE_BUF {
93 94
94#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */ 95#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */
95#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */ 96#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */
96#define MPT_ICFLAG_PHYS_DISK 0x04 /* Any SCSI IO but do Phys Disk Format */ 97#define MPT_ICFLAG_EBOS 0x04 /* ReadBuffer Echo buffer has EBOS */
97#define MPT_ICFLAG_TAGGED_CMD 0x08 /* Do tagged IO */ 98#define MPT_ICFLAG_PHYS_DISK 0x08 /* Any SCSI IO but do Phys Disk Format */
99#define MPT_ICFLAG_TAGGED_CMD 0x10 /* Do tagged IO */
98#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */ 100#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */
99#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */ 101#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */
100 102
@@ -159,6 +161,8 @@ int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR
159static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); 161static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
160static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum); 162static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
161 163
164static struct work_struct mptscsih_persistTask;
165
162#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION 166#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
163static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io); 167static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
164static void mptscsih_domainValidation(void *hd); 168static void mptscsih_domainValidation(void *hd);
@@ -167,6 +171,7 @@ static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
167static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target); 171static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
168static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage); 172static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
169static void mptscsih_fillbuf(char *buffer, int size, int index, int width); 173static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
174static void mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id);
170#endif 175#endif
171 176
172void mptscsih_remove(struct pci_dev *); 177void mptscsih_remove(struct pci_dev *);
@@ -606,11 +611,24 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
606 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); 611 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
607 sc->resid = sc->request_bufflen - xfer_cnt; 612 sc->resid = sc->request_bufflen - xfer_cnt;
608 613
614 /*
615 * if we get a data underrun indication, yet no data was
616 * transferred and the SCSI status indicates that the
617 * command was never started, change the data underrun
618 * to success
619 */
620 if (status == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
621 (scsi_status == MPI_SCSI_STATUS_BUSY ||
622 scsi_status == MPI_SCSI_STATUS_RESERVATION_CONFLICT ||
623 scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)) {
624 status = MPI_IOCSTATUS_SUCCESS;
625 }
626
609 dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n" 627 dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
610 "IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n" 628 "IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
611 "resid=%d bufflen=%d xfer_cnt=%d\n", 629 "resid=%d bufflen=%d xfer_cnt=%d\n",
612 ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1], 630 ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
613 status, scsi_state, scsi_status, sc->resid, 631 status, scsi_state, scsi_status, sc->resid,
614 sc->request_bufflen, xfer_cnt)); 632 sc->request_bufflen, xfer_cnt));
615 633
616 if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) 634 if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
@@ -619,8 +637,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
619 /* 637 /*
620 * Look for + dump FCP ResponseInfo[]! 638 * Look for + dump FCP ResponseInfo[]!
621 */ 639 */
622 if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) { 640 if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
623 printk(KERN_NOTICE " FCP_ResponseInfo=%08xh\n", 641 pScsiReply->ResponseInfo) {
642 printk(KERN_NOTICE "ha=%d id=%d lun=%d: "
643 "FCP_ResponseInfo=%08xh\n",
644 ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
624 le32_to_cpu(pScsiReply->ResponseInfo)); 645 le32_to_cpu(pScsiReply->ResponseInfo));
625 } 646 }
626 647
@@ -661,23 +682,13 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
661 break; 682 break;
662 683
663 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ 684 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
664 if ( xfer_cnt >= sc->underflow ) { 685 sc->resid = sc->request_bufflen - xfer_cnt;
665 /* Sufficient data transfer occurred */ 686 if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
687 sc->result=DID_SOFT_ERROR << 16;
688 else /* Sufficient data transfer occurred */
666 sc->result = (DID_OK << 16) | scsi_status; 689 sc->result = (DID_OK << 16) | scsi_status;
667 } else if ( xfer_cnt == 0 ) { 690 dreplyprintk((KERN_NOTICE
668 /* A CRC Error causes this condition; retry */ 691 "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
669 sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
670 (CHECK_CONDITION << 1);
671 sc->sense_buffer[0] = 0x70;
672 sc->sense_buffer[2] = NO_SENSE;
673 sc->sense_buffer[12] = 0;
674 sc->sense_buffer[13] = 0;
675 } else {
676 sc->result = DID_SOFT_ERROR << 16;
677 }
678 dreplyprintk((KERN_NOTICE
679 "RESIDUAL_MISMATCH: result=%x on id=%d\n",
680 sc->result, sc->device->id));
681 break; 692 break;
682 693
683 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ 694 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
@@ -692,7 +703,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
692 ; 703 ;
693 } else { 704 } else {
694 if (xfer_cnt < sc->underflow) { 705 if (xfer_cnt < sc->underflow) {
695 sc->result = DID_SOFT_ERROR << 16; 706 if (scsi_status == SAM_STAT_BUSY)
707 sc->result = SAM_STAT_BUSY;
708 else
709 sc->result = DID_SOFT_ERROR << 16;
696 } 710 }
697 if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) { 711 if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
698 /* What to do? 712 /* What to do?
@@ -717,8 +731,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
717 731
718 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ 732 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
719 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ 733 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
720 scsi_status = pScsiReply->SCSIStatus; 734 if (scsi_status == MPI_SCSI_STATUS_BUSY)
721 sc->result = (DID_OK << 16) | scsi_status; 735 sc->result = (DID_BUS_BUSY << 16) | scsi_status;
736 else
737 sc->result = (DID_OK << 16) | scsi_status;
722 if (scsi_state == 0) { 738 if (scsi_state == 0) {
723 ; 739 ;
724 } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) { 740 } else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
@@ -890,12 +906,13 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
890 SCSIIORequest_t *mf = NULL; 906 SCSIIORequest_t *mf = NULL;
891 int ii; 907 int ii;
892 int max = hd->ioc->req_depth; 908 int max = hd->ioc->req_depth;
909 struct scsi_cmnd *sc;
893 910
894 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n", 911 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
895 target, lun, max)); 912 target, lun, max));
896 913
897 for (ii=0; ii < max; ii++) { 914 for (ii=0; ii < max; ii++) {
898 if (hd->ScsiLookup[ii] != NULL) { 915 if ((sc = hd->ScsiLookup[ii]) != NULL) {
899 916
900 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii); 917 mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
901 918
@@ -910,9 +927,22 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
910 hd->ScsiLookup[ii] = NULL; 927 hd->ScsiLookup[ii] = NULL;
911 mptscsih_freeChainBuffers(hd->ioc, ii); 928 mptscsih_freeChainBuffers(hd->ioc, ii);
912 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); 929 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
930 if (sc->use_sg) {
931 pci_unmap_sg(hd->ioc->pcidev,
932 (struct scatterlist *) sc->request_buffer,
933 sc->use_sg,
934 sc->sc_data_direction);
935 } else if (sc->request_bufflen) {
936 pci_unmap_single(hd->ioc->pcidev,
937 sc->SCp.dma_handle,
938 sc->request_bufflen,
939 sc->sc_data_direction);
940 }
941 sc->host_scribble = NULL;
942 sc->result = DID_NO_CONNECT << 16;
943 sc->scsi_done(sc);
913 } 944 }
914 } 945 }
915
916 return; 946 return;
917} 947}
918 948
@@ -967,8 +997,10 @@ mptscsih_remove(struct pci_dev *pdev)
967 unsigned long flags; 997 unsigned long flags;
968 int sz1; 998 int sz1;
969 999
970 if(!host) 1000 if(!host) {
1001 mpt_detach(pdev);
971 return; 1002 return;
1003 }
972 1004
973 scsi_remove_host(host); 1005 scsi_remove_host(host);
974 1006
@@ -1256,8 +1288,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1256 MPT_SCSI_HOST *hd; 1288 MPT_SCSI_HOST *hd;
1257 MPT_FRAME_HDR *mf; 1289 MPT_FRAME_HDR *mf;
1258 SCSIIORequest_t *pScsiReq; 1290 SCSIIORequest_t *pScsiReq;
1259 VirtDevice *pTarget; 1291 VirtDevice *pTarget = SCpnt->device->hostdata;
1260 int target;
1261 int lun; 1292 int lun;
1262 u32 datalen; 1293 u32 datalen;
1263 u32 scsictl; 1294 u32 scsictl;
@@ -1267,12 +1298,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1267 int ii; 1298 int ii;
1268 1299
1269 hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata; 1300 hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata;
1270 target = SCpnt->device->id;
1271 lun = SCpnt->device->lun; 1301 lun = SCpnt->device->lun;
1272 SCpnt->scsi_done = done; 1302 SCpnt->scsi_done = done;
1273 1303
1274 pTarget = hd->Targets[target];
1275
1276 dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n", 1304 dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n",
1277 (hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done)); 1305 (hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done));
1278 1306
@@ -1315,7 +1343,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1315 /* Default to untagged. Once a target structure has been allocated, 1343 /* Default to untagged. Once a target structure has been allocated,
1316 * use the Inquiry data to determine if device supports tagged. 1344 * use the Inquiry data to determine if device supports tagged.
1317 */ 1345 */
1318 if ( pTarget 1346 if (pTarget
1319 && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES) 1347 && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
1320 && (SCpnt->device->tagged_supported)) { 1348 && (SCpnt->device->tagged_supported)) {
1321 scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ; 1349 scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
@@ -1325,8 +1353,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1325 1353
1326 /* Use the above information to set up the message frame 1354 /* Use the above information to set up the message frame
1327 */ 1355 */
1328 pScsiReq->TargetID = (u8) target; 1356 pScsiReq->TargetID = (u8) pTarget->target_id;
1329 pScsiReq->Bus = (u8) SCpnt->device->channel; 1357 pScsiReq->Bus = pTarget->bus_id;
1330 pScsiReq->ChainOffset = 0; 1358 pScsiReq->ChainOffset = 0;
1331 pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 1359 pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1332 pScsiReq->CDBLength = SCpnt->cmd_len; 1360 pScsiReq->CDBLength = SCpnt->cmd_len;
@@ -1378,7 +1406,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1378 1406
1379#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION 1407#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
1380 if (hd->ioc->bus_type == SCSI) { 1408 if (hd->ioc->bus_type == SCSI) {
1381 int dvStatus = hd->ioc->spi_data.dvStatus[target]; 1409 int dvStatus = hd->ioc->spi_data.dvStatus[pTarget->target_id];
1382 int issueCmd = 1; 1410 int issueCmd = 1;
1383 1411
1384 if (dvStatus || hd->ioc->spi_data.forceDv) { 1412 if (dvStatus || hd->ioc->spi_data.forceDv) {
@@ -1426,6 +1454,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1426 return 0; 1454 return 0;
1427 1455
1428 fail: 1456 fail:
1457 hd->ScsiLookup[my_idx] = NULL;
1429 mptscsih_freeChainBuffers(hd->ioc, my_idx); 1458 mptscsih_freeChainBuffers(hd->ioc, my_idx);
1430 mpt_free_msg_frame(hd->ioc, mf); 1459 mpt_free_msg_frame(hd->ioc, mf);
1431 return SCSI_MLQUEUE_HOST_BUSY; 1460 return SCSI_MLQUEUE_HOST_BUSY;
@@ -1713,24 +1742,23 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1713 MPT_FRAME_HDR *mf; 1742 MPT_FRAME_HDR *mf;
1714 u32 ctx2abort; 1743 u32 ctx2abort;
1715 int scpnt_idx; 1744 int scpnt_idx;
1745 int retval;
1716 1746
1717 /* If we can't locate our host adapter structure, return FAILED status. 1747 /* If we can't locate our host adapter structure, return FAILED status.
1718 */ 1748 */
1719 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) { 1749 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
1720 SCpnt->result = DID_RESET << 16; 1750 SCpnt->result = DID_RESET << 16;
1721 SCpnt->scsi_done(SCpnt); 1751 SCpnt->scsi_done(SCpnt);
1722 dfailprintk((KERN_WARNING MYNAM ": mptscsih_abort: " 1752 dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: "
1723 "Can't locate host! (sc=%p)\n", 1753 "Can't locate host! (sc=%p)\n",
1724 SCpnt)); 1754 SCpnt));
1725 return FAILED; 1755 return FAILED;
1726 } 1756 }
1727 1757
1728 ioc = hd->ioc; 1758 ioc = hd->ioc;
1729 if (hd->resetPending) 1759 if (hd->resetPending) {
1730 return FAILED; 1760 return FAILED;
1731 1761 }
1732 printk(KERN_WARNING MYNAM ": %s: >> Attempting task abort! (sc=%p)\n",
1733 hd->ioc->name, SCpnt);
1734 1762
1735 if (hd->timeouts < -1) 1763 if (hd->timeouts < -1)
1736 hd->timeouts++; 1764 hd->timeouts++;
@@ -1738,16 +1766,20 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1738 /* Find this command 1766 /* Find this command
1739 */ 1767 */
1740 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { 1768 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
1741 /* Cmd not found in ScsiLookup. 1769 /* Cmd not found in ScsiLookup.
1742 * Do OS callback. 1770 * Do OS callback.
1743 */ 1771 */
1744 SCpnt->result = DID_RESET << 16; 1772 SCpnt->result = DID_RESET << 16;
1745 dtmprintk((KERN_WARNING MYNAM ": %s: mptscsih_abort: " 1773 dtmprintk((KERN_INFO MYNAM ": %s: mptscsih_abort: "
1746 "Command not in the active list! (sc=%p)\n", 1774 "Command not in the active list! (sc=%p)\n",
1747 hd->ioc->name, SCpnt)); 1775 hd->ioc->name, SCpnt));
1748 return SUCCESS; 1776 return SUCCESS;
1749 } 1777 }
1750 1778
1779 printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
1780 hd->ioc->name, SCpnt);
1781 scsi_print_command(SCpnt);
1782
1751 /* Most important! Set TaskMsgContext to SCpnt's MsgContext! 1783 /* Most important! Set TaskMsgContext to SCpnt's MsgContext!
1752 * (the IO to be ABORT'd) 1784 * (the IO to be ABORT'd)
1753 * 1785 *
@@ -1760,38 +1792,22 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1760 1792
1761 hd->abortSCpnt = SCpnt; 1793 hd->abortSCpnt = SCpnt;
1762 1794
1763 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 1795 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
1764 SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun, 1796 SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
1765 ctx2abort, 2 /* 2 second timeout */) 1797 ctx2abort, 2 /* 2 second timeout */);
1766 < 0) {
1767 1798
1768 /* The TM request failed and the subsequent FW-reload failed! 1799 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
1769 * Fatal error case. 1800 hd->ioc->name,
1770 */ 1801 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
1771 printk(MYIOC_s_WARN_FMT "Error issuing abort task! (sc=%p)\n",
1772 hd->ioc->name, SCpnt);
1773 1802
1774 /* We must clear our pending flag before clearing our state. 1803 if (retval == 0)
1775 */ 1804 return SUCCESS;
1805
1806 if(retval != FAILED ) {
1776 hd->tmPending = 0; 1807 hd->tmPending = 0;
1777 hd->tmState = TM_STATE_NONE; 1808 hd->tmState = TM_STATE_NONE;
1778
1779 /* Unmap the DMA buffers, if any. */
1780 if (SCpnt->use_sg) {
1781 pci_unmap_sg(ioc->pcidev, (struct scatterlist *) SCpnt->request_buffer,
1782 SCpnt->use_sg, SCpnt->sc_data_direction);
1783 } else if (SCpnt->request_bufflen) {
1784 pci_unmap_single(ioc->pcidev, SCpnt->SCp.dma_handle,
1785 SCpnt->request_bufflen, SCpnt->sc_data_direction);
1786 }
1787 hd->ScsiLookup[scpnt_idx] = NULL;
1788 SCpnt->result = DID_RESET << 16;
1789 SCpnt->scsi_done(SCpnt); /* Issue the command callback */
1790 mptscsih_freeChainBuffers(ioc, scpnt_idx);
1791 mpt_free_msg_frame(ioc, mf);
1792 return FAILED;
1793 } 1809 }
1794 return SUCCESS; 1810 return FAILED;
1795} 1811}
1796 1812
1797/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1813/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1807,11 +1823,12 @@ int
1807mptscsih_dev_reset(struct scsi_cmnd * SCpnt) 1823mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1808{ 1824{
1809 MPT_SCSI_HOST *hd; 1825 MPT_SCSI_HOST *hd;
1826 int retval;
1810 1827
1811 /* If we can't locate our host adapter structure, return FAILED status. 1828 /* If we can't locate our host adapter structure, return FAILED status.
1812 */ 1829 */
1813 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){ 1830 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
1814 dtmprintk((KERN_WARNING MYNAM ": mptscsih_dev_reset: " 1831 dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: "
1815 "Can't locate host! (sc=%p)\n", 1832 "Can't locate host! (sc=%p)\n",
1816 SCpnt)); 1833 SCpnt));
1817 return FAILED; 1834 return FAILED;
@@ -1820,24 +1837,26 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1820 if (hd->resetPending) 1837 if (hd->resetPending)
1821 return FAILED; 1838 return FAILED;
1822 1839
1823 printk(KERN_WARNING MYNAM ": %s: >> Attempting target reset! (sc=%p)\n", 1840 printk(KERN_WARNING MYNAM ": %s: attempting target reset! (sc=%p)\n",
1824 hd->ioc->name, SCpnt); 1841 hd->ioc->name, SCpnt);
1842 scsi_print_command(SCpnt);
1825 1843
1826 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 1844 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
1827 SCpnt->device->channel, SCpnt->device->id, 1845 SCpnt->device->channel, SCpnt->device->id,
1828 0, 0, 5 /* 5 second timeout */) 1846 0, 0, 5 /* 5 second timeout */);
1829 < 0){ 1847
1830 /* The TM request failed and the subsequent FW-reload failed! 1848 printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
1831 * Fatal error case. 1849 hd->ioc->name,
1832 */ 1850 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
1833 printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", 1851
1834 hd->ioc->name, SCpnt); 1852 if (retval == 0)
1853 return SUCCESS;
1854
1855 if(retval != FAILED ) {
1835 hd->tmPending = 0; 1856 hd->tmPending = 0;
1836 hd->tmState = TM_STATE_NONE; 1857 hd->tmState = TM_STATE_NONE;
1837 return FAILED;
1838 } 1858 }
1839 1859 return FAILED;
1840 return SUCCESS;
1841} 1860}
1842 1861
1843/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1862/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1853,41 +1872,39 @@ int
1853mptscsih_bus_reset(struct scsi_cmnd * SCpnt) 1872mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
1854{ 1873{
1855 MPT_SCSI_HOST *hd; 1874 MPT_SCSI_HOST *hd;
1856 spinlock_t *host_lock = SCpnt->device->host->host_lock; 1875 int retval;
1857 1876
1858 /* If we can't locate our host adapter structure, return FAILED status. 1877 /* If we can't locate our host adapter structure, return FAILED status.
1859 */ 1878 */
1860 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){ 1879 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
1861 dtmprintk((KERN_WARNING MYNAM ": mptscsih_bus_reset: " 1880 dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: "
1862 "Can't locate host! (sc=%p)\n", 1881 "Can't locate host! (sc=%p)\n",
1863 SCpnt ) ); 1882 SCpnt ) );
1864 return FAILED; 1883 return FAILED;
1865 } 1884 }
1866 1885
1867 printk(KERN_WARNING MYNAM ": %s: >> Attempting bus reset! (sc=%p)\n", 1886 printk(KERN_WARNING MYNAM ": %s: attempting bus reset! (sc=%p)\n",
1868 hd->ioc->name, SCpnt); 1887 hd->ioc->name, SCpnt);
1888 scsi_print_command(SCpnt);
1869 1889
1870 if (hd->timeouts < -1) 1890 if (hd->timeouts < -1)
1871 hd->timeouts++; 1891 hd->timeouts++;
1872 1892
1873 /* We are now ready to execute the task management request. */ 1893 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1874 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1894 SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */);
1875 SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */)
1876 < 0){
1877 1895
1878 /* The TM request failed and the subsequent FW-reload failed! 1896 printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
1879 * Fatal error case. 1897 hd->ioc->name,
1880 */ 1898 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
1881 printk(MYIOC_s_WARN_FMT 1899
1882 "Error processing TaskMgmt request (sc=%p)\n", 1900 if (retval == 0)
1883 hd->ioc->name, SCpnt); 1901 return SUCCESS;
1902
1903 if(retval != FAILED ) {
1884 hd->tmPending = 0; 1904 hd->tmPending = 0;
1885 hd->tmState = TM_STATE_NONE; 1905 hd->tmState = TM_STATE_NONE;
1886 spin_lock_irq(host_lock);
1887 return FAILED;
1888 } 1906 }
1889 1907 return FAILED;
1890 return SUCCESS;
1891} 1908}
1892 1909
1893/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1910/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2169,7 +2186,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
2169 vdev->raidVolume = 0; 2186 vdev->raidVolume = 0;
2170 hd->Targets[device->id] = vdev; 2187 hd->Targets[device->id] = vdev;
2171 if (hd->ioc->bus_type == SCSI) { 2188 if (hd->ioc->bus_type == SCSI) {
2172 if (hd->ioc->spi_data.isRaid & (1 << device->id)) { 2189 if (hd->ioc->raid_data.isRaid & (1 << device->id)) {
2173 vdev->raidVolume = 1; 2190 vdev->raidVolume = 1;
2174 ddvtprintk((KERN_INFO 2191 ddvtprintk((KERN_INFO
2175 "RAID Volume @ id %d\n", device->id)); 2192 "RAID Volume @ id %d\n", device->id));
@@ -2180,22 +2197,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
2180 2197
2181 out: 2198 out:
2182 vdev->num_luns++; 2199 vdev->num_luns++;
2183 return 0; 2200 device->hostdata = vdev;
2184}
2185
2186static int
2187mptscsih_is_raid_volume(MPT_SCSI_HOST *hd, uint id)
2188{
2189 int i;
2190
2191 if (!hd->ioc->spi_data.isRaid || !hd->ioc->spi_data.pIocPg3)
2192 return 0;
2193
2194 for (i = 0; i < hd->ioc->spi_data.pIocPg3->NumPhysDisks; i++) {
2195 if (id == hd->ioc->spi_data.pIocPg3->PhysDisk[i].PhysDiskID)
2196 return 1;
2197 }
2198
2199 return 0; 2201 return 0;
2200} 2202}
2201 2203
@@ -2226,7 +2228,7 @@ mptscsih_slave_destroy(struct scsi_device *device)
2226 hd->Targets[target] = NULL; 2228 hd->Targets[target] = NULL;
2227 2229
2228 if (hd->ioc->bus_type == SCSI) { 2230 if (hd->ioc->bus_type == SCSI) {
2229 if (mptscsih_is_raid_volume(hd, target)) { 2231 if (mptscsih_is_phys_disk(hd->ioc, target)) {
2230 hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3; 2232 hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
2231 } else { 2233 } else {
2232 hd->ioc->spi_data.dvStatus[target] = 2234 hd->ioc->spi_data.dvStatus[target] =
@@ -2439,6 +2441,7 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2439{ 2441{
2440 MPT_SCSI_HOST *hd; 2442 MPT_SCSI_HOST *hd;
2441 unsigned long flags; 2443 unsigned long flags;
2444 int ii;
2442 2445
2443 dtmprintk((KERN_WARNING MYNAM 2446 dtmprintk((KERN_WARNING MYNAM
2444 ": IOC %s_reset routed to SCSI host driver!\n", 2447 ": IOC %s_reset routed to SCSI host driver!\n",
@@ -2496,11 +2499,8 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2496 2499
2497 /* ScsiLookup initialization 2500 /* ScsiLookup initialization
2498 */ 2501 */
2499 { 2502 for (ii=0; ii < hd->ioc->req_depth; ii++)
2500 int ii; 2503 hd->ScsiLookup[ii] = NULL;
2501 for (ii=0; ii < hd->ioc->req_depth; ii++)
2502 hd->ScsiLookup[ii] = NULL;
2503 }
2504 2504
2505 /* 2. Chain Buffer initialization 2505 /* 2. Chain Buffer initialization
2506 */ 2506 */
@@ -2549,6 +2549,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2549} 2549}
2550 2550
2551/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2551/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2552/* work queue thread to clear the persistent table */
2553static void
2554mptscsih_sas_persist_clear_table(void * arg)
2555{
2556 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
2557
2558 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
2559}
2560
2561/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2552int 2562int
2553mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) 2563mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2554{ 2564{
@@ -2558,18 +2568,18 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2558 devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", 2568 devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
2559 ioc->name, event)); 2569 ioc->name, event));
2560 2570
2571 if (ioc->sh == NULL ||
2572 ((hd = (MPT_SCSI_HOST *)ioc->sh->hostdata) == NULL))
2573 return 1;
2574
2561 switch (event) { 2575 switch (event) {
2562 case MPI_EVENT_UNIT_ATTENTION: /* 03 */ 2576 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
2563 /* FIXME! */ 2577 /* FIXME! */
2564 break; 2578 break;
2565 case MPI_EVENT_IOC_BUS_RESET: /* 04 */ 2579 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
2566 case MPI_EVENT_EXT_BUS_RESET: /* 05 */ 2580 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
2567 hd = NULL; 2581 if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
2568 if (ioc->sh) { 2582 hd->soft_resets++;
2569 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2570 if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
2571 hd->soft_resets++;
2572 }
2573 break; 2583 break;
2574 case MPI_EVENT_LOGOUT: /* 09 */ 2584 case MPI_EVENT_LOGOUT: /* 09 */
2575 /* FIXME! */ 2585 /* FIXME! */
@@ -2588,69 +2598,24 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2588 break; 2598 break;
2589 2599
2590 case MPI_EVENT_INTEGRATED_RAID: /* 0B */ 2600 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
2601 {
2602 pMpiEventDataRaid_t pRaidEventData =
2603 (pMpiEventDataRaid_t) pEvReply->Data;
2591#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION 2604#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
2592 /* negoNvram set to 0 if DV enabled and to USE_NVRAM if 2605 /* Domain Validation Needed */
2593 * if DV disabled. Need to check for target mode. 2606 if (ioc->bus_type == SCSI &&
2594 */ 2607 pRaidEventData->ReasonCode ==
2595 hd = NULL; 2608 MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED)
2596 if (ioc->sh) 2609 mptscsih_set_dvflags_raid(hd, pRaidEventData->PhysDiskNum);
2597 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2598
2599 if (hd && (ioc->bus_type == SCSI) && (hd->negoNvram == 0)) {
2600 ScsiCfgData *pSpi;
2601 Ioc3PhysDisk_t *pPDisk;
2602 int numPDisk;
2603 u8 reason;
2604 u8 physDiskNum;
2605
2606 reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
2607 if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
2608 /* New or replaced disk.
2609 * Set DV flag and schedule DV.
2610 */
2611 pSpi = &ioc->spi_data;
2612 physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
2613 ddvtprintk(("DV requested for phys disk id %d\n", physDiskNum));
2614 if (pSpi->pIocPg3) {
2615 pPDisk = pSpi->pIocPg3->PhysDisk;
2616 numPDisk =pSpi->pIocPg3->NumPhysDisks;
2617
2618 while (numPDisk) {
2619 if (physDiskNum == pPDisk->PhysDiskNum) {
2620 pSpi->dvStatus[pPDisk->PhysDiskID] = (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
2621 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
2622 ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
2623 break;
2624 }
2625 pPDisk++;
2626 numPDisk--;
2627 }
2628
2629 if (numPDisk == 0) {
2630 /* The physical disk that needs DV was not found
2631 * in the stored IOC Page 3. The driver must reload
2632 * this page. DV routine will set the NEED_DV flag for
2633 * all phys disks that have DV_NOT_DONE set.
2634 */
2635 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
2636 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n", physDiskNum));
2637 }
2638 }
2639 }
2640 }
2641#endif 2610#endif
2611 break;
2612 }
2642 2613
2643#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) 2614 /* Persistent table is full. */
2644 printk("Raid Event RF: "); 2615 case MPI_EVENT_PERSISTENT_TABLE_FULL:
2645 { 2616 INIT_WORK(&mptscsih_persistTask,
2646 u32 *m = (u32 *)pEvReply; 2617 mptscsih_sas_persist_clear_table,(void *)ioc);
2647 int ii; 2618 schedule_work(&mptscsih_persistTask);
2648 int n = (int)pEvReply->MsgLength;
2649 for (ii=6; ii < n; ii++)
2650 printk(" %08x", le32_to_cpu(m[ii]));
2651 printk("\n");
2652 }
2653#endif
2654 break; 2619 break;
2655 2620
2656 case MPI_EVENT_NONE: /* 00 */ 2621 case MPI_EVENT_NONE: /* 00 */
@@ -2687,7 +2652,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2687{ 2652{
2688 int indexed_lun, lun_index; 2653 int indexed_lun, lun_index;
2689 VirtDevice *vdev; 2654 VirtDevice *vdev;
2690 ScsiCfgData *pSpi; 2655 SpiCfgData *pSpi;
2691 char data_56; 2656 char data_56;
2692 2657
2693 dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n", 2658 dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
@@ -2794,7 +2759,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2794static void 2759static void
2795mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56) 2760mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
2796{ 2761{
2797 ScsiCfgData *pspi_data = &hd->ioc->spi_data; 2762 SpiCfgData *pspi_data = &hd->ioc->spi_data;
2798 int id = (int) target->target_id; 2763 int id = (int) target->target_id;
2799 int nvram; 2764 int nvram;
2800 VirtDevice *vdev; 2765 VirtDevice *vdev;
@@ -2973,11 +2938,13 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
2973static void 2938static void
2974mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq) 2939mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
2975{ 2940{
2941 MPT_ADAPTER *ioc = hd->ioc;
2976 u8 cmd; 2942 u8 cmd;
2977 ScsiCfgData *pSpi; 2943 SpiCfgData *pSpi;
2978 2944
2979 ddvtprintk((" set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n", 2945 ddvtprintk((MYIOC_s_NOTE_FMT
2980 pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0])); 2946 " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
2947 hd->ioc->name, pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
2981 2948
2982 if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0)) 2949 if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
2983 return; 2950 return;
@@ -2985,12 +2952,12 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
2985 cmd = pReq->CDB[0]; 2952 cmd = pReq->CDB[0];
2986 2953
2987 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) { 2954 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
2988 pSpi = &hd->ioc->spi_data; 2955 pSpi = &ioc->spi_data;
2989 if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) { 2956 if ((ioc->raid_data.isRaid & (1 << pReq->TargetID)) && ioc->raid_data.pIocPg3) {
2990 /* Set NEED_DV for all hidden disks 2957 /* Set NEED_DV for all hidden disks
2991 */ 2958 */
2992 Ioc3PhysDisk_t *pPDisk = pSpi->pIocPg3->PhysDisk; 2959 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
2993 int numPDisk = pSpi->pIocPg3->NumPhysDisks; 2960 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
2994 2961
2995 while (numPDisk) { 2962 while (numPDisk) {
2996 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV; 2963 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
@@ -3004,6 +2971,50 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
3004 } 2971 }
3005} 2972}
3006 2973
2974/* mptscsih_set_dvflags_raid()
2975 *
2976 * New or replaced disk. Set DV flag and schedule DV.
2977 */
2978static void
2979mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
2980{
2981 MPT_ADAPTER *ioc = hd->ioc;
2982 SpiCfgData *pSpi = &ioc->spi_data;
2983 Ioc3PhysDisk_t *pPDisk;
2984 int numPDisk;
2985
2986 if (hd->negoNvram != 0)
2987 return;
2988
2989 ddvtprintk(("DV requested for phys disk id %d\n", id));
2990 if (ioc->raid_data.pIocPg3) {
2991 pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
2992 numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
2993 while (numPDisk) {
2994 if (id == pPDisk->PhysDiskNum) {
2995 pSpi->dvStatus[pPDisk->PhysDiskID] =
2996 (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
2997 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
2998 ddvtprintk(("NEED_DV set for phys disk id %d\n",
2999 pPDisk->PhysDiskID));
3000 break;
3001 }
3002 pPDisk++;
3003 numPDisk--;
3004 }
3005
3006 if (numPDisk == 0) {
3007 /* The physical disk that needs DV was not found
3008 * in the stored IOC Page 3. The driver must reload
3009 * this page. DV routine will set the NEED_DV flag for
3010 * all phys disks that have DV_NOT_DONE set.
3011 */
3012 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
3013 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
3014 }
3015 }
3016}
3017
3007/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3018/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3008/* 3019/*
3009 * If no Target, bus reset on 1st I/O. Set the flag to 3020 * If no Target, bus reset on 1st I/O. Set the flag to
@@ -3091,7 +3102,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3091 MPT_ADAPTER *ioc = hd->ioc; 3102 MPT_ADAPTER *ioc = hd->ioc;
3092 Config_t *pReq; 3103 Config_t *pReq;
3093 SCSIDevicePage1_t *pData; 3104 SCSIDevicePage1_t *pData;
3094 VirtDevice *pTarget; 3105 VirtDevice *pTarget=NULL;
3095 MPT_FRAME_HDR *mf; 3106 MPT_FRAME_HDR *mf;
3096 dma_addr_t dataDma; 3107 dma_addr_t dataDma;
3097 u16 req_idx; 3108 u16 req_idx;
@@ -3190,7 +3201,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3190#endif 3201#endif
3191 3202
3192 if (flags & MPT_SCSICFG_BLK_NEGO) 3203 if (flags & MPT_SCSICFG_BLK_NEGO)
3193 negoFlags = MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC; 3204 negoFlags |= MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
3194 3205
3195 mptscsih_setDevicePage1Flags(width, factor, offset, 3206 mptscsih_setDevicePage1Flags(width, factor, offset,
3196 &requested, &configuration, negoFlags); 3207 &requested, &configuration, negoFlags);
@@ -4011,7 +4022,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
4011 4022
4012 /* If target Ptr NULL or if this target is NOT a disk, skip. 4023 /* If target Ptr NULL or if this target is NOT a disk, skip.
4013 */ 4024 */
4014 if ((pTarget) && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)){ 4025 if ((pTarget) && (pTarget->inq_data[0] == TYPE_DISK)){
4015 for (lun=0; lun <= MPT_LAST_LUN; lun++) { 4026 for (lun=0; lun <= MPT_LAST_LUN; lun++) {
4016 /* If LUN present, issue the command 4027 /* If LUN present, issue the command
4017 */ 4028 */
@@ -4106,9 +4117,9 @@ mptscsih_domainValidation(void *arg)
4106 4117
4107 if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) { 4118 if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) {
4108 mpt_read_ioc_pg_3(ioc); 4119 mpt_read_ioc_pg_3(ioc);
4109 if (ioc->spi_data.pIocPg3) { 4120 if (ioc->raid_data.pIocPg3) {
4110 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; 4121 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
4111 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; 4122 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
4112 4123
4113 while (numPDisk) { 4124 while (numPDisk) {
4114 if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE) 4125 if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE)
@@ -4147,7 +4158,7 @@ mptscsih_domainValidation(void *arg)
4147 isPhysDisk = mptscsih_is_phys_disk(ioc, id); 4158 isPhysDisk = mptscsih_is_phys_disk(ioc, id);
4148 if (isPhysDisk) { 4159 if (isPhysDisk) {
4149 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 4160 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4150 if (hd->ioc->spi_data.isRaid & (1 << ii)) { 4161 if (hd->ioc->raid_data.isRaid & (1 << ii)) {
4151 hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING; 4162 hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING;
4152 } 4163 }
4153 } 4164 }
@@ -4166,7 +4177,7 @@ mptscsih_domainValidation(void *arg)
4166 4177
4167 if (isPhysDisk) { 4178 if (isPhysDisk) {
4168 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 4179 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4169 if (hd->ioc->spi_data.isRaid & (1 << ii)) { 4180 if (hd->ioc->raid_data.isRaid & (1 << ii)) {
4170 hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING; 4181 hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING;
4171 } 4182 }
4172 } 4183 }
@@ -4188,21 +4199,21 @@ mptscsih_domainValidation(void *arg)
4188 4199
4189/* Search IOC page 3 to determine if this is hidden physical disk 4200/* Search IOC page 3 to determine if this is hidden physical disk
4190 */ 4201 */
4191static int 4202/* Search IOC page 3 to determine if this is hidden physical disk
4203 */
4204static int
4192mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id) 4205mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
4193{ 4206{
4194 if (ioc->spi_data.pIocPg3) { 4207 int i;
4195 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
4196 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
4197 4208
4198 while (numPDisk) { 4209 if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
4199 if (pPDisk->PhysDiskID == id) { 4210 return 0;
4200 return 1; 4211
4201 } 4212 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
4202 pPDisk++; 4213 if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
4203 numPDisk--; 4214 return 1;
4204 }
4205 } 4215 }
4216
4206 return 0; 4217 return 0;
4207} 4218}
4208 4219
@@ -4408,7 +4419,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4408 /* Skip this ID? Set cfg.cfghdr.hdr to force config page write 4419 /* Skip this ID? Set cfg.cfghdr.hdr to force config page write
4409 */ 4420 */
4410 { 4421 {
4411 ScsiCfgData *pspi_data = &hd->ioc->spi_data; 4422 SpiCfgData *pspi_data = &hd->ioc->spi_data;
4412 if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) { 4423 if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
4413 /* Set the factor from nvram */ 4424 /* Set the factor from nvram */
4414 nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8; 4425 nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8;
@@ -4438,11 +4449,11 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4438 } 4449 }
4439 4450
4440 /* Finish iocmd inititialization - hidden or visible disk? */ 4451 /* Finish iocmd inititialization - hidden or visible disk? */
4441 if (ioc->spi_data.pIocPg3) { 4452 if (ioc->raid_data.pIocPg3) {
4442 /* Search IOC page 3 for matching id 4453 /* Search IOC page 3 for matching id
4443 */ 4454 */
4444 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; 4455 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
4445 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; 4456 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
4446 4457
4447 while (numPDisk) { 4458 while (numPDisk) {
4448 if (pPDisk->PhysDiskID == id) { 4459 if (pPDisk->PhysDiskID == id) {
@@ -4466,7 +4477,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4466 /* RAID Volume ID's may double for a physical device. If RAID but 4477 /* RAID Volume ID's may double for a physical device. If RAID but
4467 * not a physical ID as well, skip DV. 4478 * not a physical ID as well, skip DV.
4468 */ 4479 */
4469 if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK)) 4480 if ((hd->ioc->raid_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
4470 goto target_done; 4481 goto target_done;
4471 4482
4472 4483
@@ -4815,6 +4826,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4815 notDone = 0; 4826 notDone = 0;
4816 if (iocmd.flags & MPT_ICFLAG_ECHO) { 4827 if (iocmd.flags & MPT_ICFLAG_ECHO) {
4817 bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3]; 4828 bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3];
4829 if (pbuf1[0] & 0x01)
4830 iocmd.flags |= MPT_ICFLAG_EBOS;
4818 } else { 4831 } else {
4819 bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3]; 4832 bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3];
4820 } 4833 }
@@ -4911,6 +4924,9 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4911 } 4924 }
4912 iocmd.flags &= ~MPT_ICFLAG_DID_RESET; 4925 iocmd.flags &= ~MPT_ICFLAG_DID_RESET;
4913 4926
4927 if (iocmd.flags & MPT_ICFLAG_EBOS)
4928 goto skip_Reserve;
4929
4914 repeat = 5; 4930 repeat = 5;
4915 while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) { 4931 while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) {
4916 iocmd.cmd = RESERVE; 4932 iocmd.cmd = RESERVE;
@@ -4954,6 +4970,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4954 } 4970 }
4955 } 4971 }
4956 4972
4973skip_Reserve:
4957 mptscsih_fillbuf(pbuf1, sz, patt, 1); 4974 mptscsih_fillbuf(pbuf1, sz, patt, 1);
4958 iocmd.cmd = WRITE_BUFFER; 4975 iocmd.cmd = WRITE_BUFFER;
4959 iocmd.data_dma = buf1_dma; 4976 iocmd.data_dma = buf1_dma;
@@ -5198,11 +5215,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5198 * If not an LVD bus, the adapter minSyncFactor has been 5215 * If not an LVD bus, the adapter minSyncFactor has been
5199 * already throttled back. 5216 * already throttled back.
5200 */ 5217 */
5218 negoFlags = hd->ioc->spi_data.noQas;
5201 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) { 5219 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) {
5202 width = pTarget->maxWidth; 5220 width = pTarget->maxWidth;
5203 offset = pTarget->maxOffset; 5221 offset = pTarget->maxOffset;
5204 factor = pTarget->minSyncFactor; 5222 factor = pTarget->minSyncFactor;
5205 negoFlags = pTarget->negoFlags; 5223 negoFlags |= pTarget->negoFlags;
5206 } else { 5224 } else {
5207 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { 5225 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
5208 data = hd->ioc->spi_data.nvram[id]; 5226 data = hd->ioc->spi_data.nvram[id];
@@ -5223,7 +5241,6 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5223 } 5241 }
5224 5242
5225 /* Set the negotiation flags */ 5243 /* Set the negotiation flags */
5226 negoFlags = hd->ioc->spi_data.noQas;
5227 if (!width) 5244 if (!width)
5228 negoFlags |= MPT_TARGET_NO_NEGO_WIDE; 5245 negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
5229 5246
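The rewritten mptscsih_is_phys_disk() above reduces to a linear scan of the cached IOC Page 3 physical-disk table, guarded by the new raid_data fields. A minimal user-space sketch of that lookup, using simplified stand-in types rather than the driver's real Ioc3PhysDisk_t/raid_data layout:

#include <stdio.h>

/* Simplified stand-ins for the driver structures (layouts are hypothetical). */
struct phys_disk { int PhysDiskNum; int PhysDiskID; };
struct ioc_page3 { int NumPhysDisks; struct phys_disk PhysDisk[8]; };

/* Return 1 if the SCSI target id belongs to a hidden RAID member disk;
 * no RAID volumes or no cached page means it cannot be one. */
static int is_phys_disk(const struct ioc_page3 *pg3, unsigned int is_raid_mask, int id)
{
	int i;

	if (!is_raid_mask || !pg3)
		return 0;
	for (i = 0; i < pg3->NumPhysDisks; i++)
		if (id == pg3->PhysDisk[i].PhysDiskID)
			return 1;
	return 0;
}

int main(void)
{
	struct ioc_page3 pg3 = { 2, { { 0, 3 }, { 1, 5 } } };

	printf("%d %d\n", is_phys_disk(&pg3, 0x1, 5), is_phys_disk(&pg3, 0x1, 4));	/* 1 0 */
	return 0;
}

Keeping the scan in one helper lets both slave_destroy and the domain-validation path ask the same question instead of duplicating the loop, which is what the hunks above converge on.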
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 51c0255ac16e..971fda4b8b57 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/message/fusion/mptscsi.h 2 * linux/drivers/message/fusion/mptscsih.h
3 * High performance SCSI / Fibre Channel SCSI Host device driver. 3 * High performance SCSI / Fibre Channel SCSI Host device driver.
4 * For use with PCI chip/adapter(s): 4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel 5 * LSIFC9xx/LSI409xx Fibre Channel
@@ -53,8 +53,8 @@
53 * SCSI Public stuff... 53 * SCSI Public stuff...
54 */ 54 */
55 55
56#define MPT_SCSI_CMD_PER_DEV_HIGH 31 56#define MPT_SCSI_CMD_PER_DEV_HIGH 64
57#define MPT_SCSI_CMD_PER_DEV_LOW 7 57#define MPT_SCSI_CMD_PER_DEV_LOW 32
58 58
59#define MPT_SCSI_CMD_PER_LUN 7 59#define MPT_SCSI_CMD_PER_LUN 7
60 60
@@ -77,6 +77,7 @@
77#define MPTSCSIH_MAX_WIDTH 1 77#define MPTSCSIH_MAX_WIDTH 1
78#define MPTSCSIH_MIN_SYNC 0x08 78#define MPTSCSIH_MIN_SYNC 0x08
79#define MPTSCSIH_SAF_TE 0 79#define MPTSCSIH_SAF_TE 0
80#define MPTSCSIH_PT_CLEAR 0
80 81
81 82
82#endif 83#endif
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 587d1274fd74..5c0e307d1d5d 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -199,7 +199,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
199 printk(MYIOC_s_WARN_FMT 199 printk(MYIOC_s_WARN_FMT
200 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", 200 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
201 ioc->name, ioc); 201 ioc->name, ioc);
202 return -ENODEV; 202 return 0;
203 } 203 }
204 204
205 sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST)); 205 sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index af32ab4e90cd..10432f665201 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -56,8 +56,11 @@ static int __init i2o_config_init(void)
56 return -EBUSY; 56 return -EBUSY;
57 } 57 }
58#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL 58#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
59 if (i2o_config_old_init()) 59 if (i2o_config_old_init()) {
60 osm_err("old config handler initialization failed\n");
60 i2o_driver_unregister(&i2o_config_driver); 61 i2o_driver_unregister(&i2o_config_driver);
62 return -EBUSY;
63 }
61#endif 64#endif
62 65
63 return 0; 66 return 0;
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index a851d65c7cfe..a260f83bcb02 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -48,8 +48,8 @@ struct ucb1x00_ts {
48 u16 x_res; 48 u16 x_res;
49 u16 y_res; 49 u16 y_res;
50 50
51 int restart:1; 51 unsigned int restart:1;
52 int adcsync:1; 52 unsigned int adcsync:1;
53}; 53};
54 54
55static int adcsync; 55static int adcsync;
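The bitfield change above matters because a plain "int" bitfield may be signed, and a signed 1-bit field can only represent 0 and -1 on common ABIs, so storing 1 and later comparing against 1 can fail. A small stand-alone C illustration of the pitfall (the signed behaviour is implementation-defined, which is exactly why the driver switches to unsigned):

#include <stdio.h>

struct flags_signed   { int restart:1; };          /* may only hold 0 and -1 */
struct flags_unsigned { unsigned int restart:1; }; /* holds 0 and 1          */

int main(void)
{
	struct flags_signed   s = { 0 };
	struct flags_unsigned u = { 0 };

	s.restart = 1;	/* typically reads back as -1 */
	u.restart = 1;

	printf("signed: %d (== 1? %d)\n", (int)s.restart, s.restart == 1);
	printf("unsigned: %d (== 1? %d)\n", (int)u.restart, u.restart == 1);
	return 0;
}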
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 9a087c1fb0b7..24f670b5a4f3 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -40,7 +40,7 @@
40#include <linux/mtd/mtd.h> 40#include <linux/mtd/mtd.h>
41#include <linux/mtd/doc2000.h> 41#include <linux/mtd/doc2000.h>
42 42
43#define DEBUG 0 43#define DEBUG_ECC 0
44/* need to undef it (from asm/termbits.h) */ 44/* need to undef it (from asm/termbits.h) */
45#undef B0 45#undef B0
46 46
@@ -249,7 +249,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
249 lambda[j] ^= Alpha_to[modnn(u + tmp)]; 249 lambda[j] ^= Alpha_to[modnn(u + tmp)];
250 } 250 }
251 } 251 }
252#if DEBUG >= 1 252#if DEBUG_ECC >= 1
253 /* Test code that verifies the erasure locator polynomial just constructed 253 /* Test code that verifies the erasure locator polynomial just constructed
254 Needed only for decoder debugging. */ 254 Needed only for decoder debugging. */
255 255
@@ -276,7 +276,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
276 count = -1; 276 count = -1;
277 goto finish; 277 goto finish;
278 } 278 }
279#if DEBUG >= 2 279#if DEBUG_ECC >= 2
280 printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n"); 280 printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
281 for (i = 0; i < count; i++) 281 for (i = 0; i < count; i++)
282 printf("%d ", loc[i]); 282 printf("%d ", loc[i]);
@@ -409,7 +409,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
409 den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])]; 409 den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
410 } 410 }
411 if (den == 0) { 411 if (den == 0) {
412#if DEBUG >= 1 412#if DEBUG_ECC >= 1
413 printf("\n ERROR: denominator = 0\n"); 413 printf("\n ERROR: denominator = 0\n");
414#endif 414#endif
415 /* Convert to dual- basis */ 415 /* Convert to dual- basis */
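Renaming the local knob to DEBUG_ECC avoids redefining DEBUG, a name the kernel build and various headers already treat as the global "verbose debugging" switch (for example via -DDEBUG or #ifdef DEBUG). A small sketch of the collision being avoided; the dbg() macro here is a hypothetical stand-in for any framework macro keyed off DEBUG:

#include <stdio.h>

/* A framework header may key its debug output off DEBUG, defined elsewhere. */
#ifdef DEBUG
#define dbg(msg) fprintf(stderr, "%s", msg)
#else
#define dbg(msg) do { } while (0)
#endif

/* The driver-local verbosity level gets its own name, so it can no longer
 * clash with, or silently disable, the global convention. */
#define DEBUG_ECC 2

int main(void)
{
#if DEBUG_ECC >= 1
	printf("ECC tracing level %d\n", DEBUG_ECC);
#endif
	dbg("framework-level debug message\n");
	return 0;
}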
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 6d76f3a99b17..f87027420081 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -1094,7 +1094,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1094 1094
1095 outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); 1095 outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1096 1096
1097 if (inb_p(e8390_base) & E8390_TRANS) 1097 if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
1098 { 1098 {
1099 printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n", 1099 printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
1100 dev->name); 1100 dev->name);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 90449a0f2a6c..6d00c3de1a83 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1653,7 +1653,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1653 int old_features = bond_dev->features; 1653 int old_features = bond_dev->features;
1654 int res = 0; 1654 int res = 0;
1655 1655
1656 if (slave_dev->do_ioctl == NULL) { 1656 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
1657 slave_dev->do_ioctl == NULL) {
1657 printk(KERN_WARNING DRV_NAME 1658 printk(KERN_WARNING DRV_NAME
1658 ": Warning : no link monitoring support for %s\n", 1659 ": Warning : no link monitoring support for %s\n",
1659 slave_dev->name); 1660 slave_dev->name);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index f0471d102e3c..f9223c1c5aa4 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -100,11 +100,11 @@ VERSION 2.2LK <2005/01/25>
100 100
101#ifdef CONFIG_R8169_NAPI 101#ifdef CONFIG_R8169_NAPI
102#define rtl8169_rx_skb netif_receive_skb 102#define rtl8169_rx_skb netif_receive_skb
103#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx 103#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
104#define rtl8169_rx_quota(count, quota) min(count, quota) 104#define rtl8169_rx_quota(count, quota) min(count, quota)
105#else 105#else
106#define rtl8169_rx_skb netif_rx 106#define rtl8169_rx_skb netif_rx
107#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb 107#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
108#define rtl8169_rx_quota(count, quota) count 108#define rtl8169_rx_quota(count, quota) count
109#endif 109#endif
110 110
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 0208258e7826..fd398da4993b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "1.0" 45#define DRV_VERSION "1.1"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -105,41 +105,28 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
105static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 105static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
106static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 }; 106static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
107 107
108/* Don't need to look at whole 16K.
109 * last interesting register is descriptor poll timer.
110 */
111#define SKGE_REGS_LEN (29*128)
112
113static int skge_get_regs_len(struct net_device *dev) 108static int skge_get_regs_len(struct net_device *dev)
114{ 109{
115 return SKGE_REGS_LEN; 110 return 0x4000;
116} 111}
117 112
118/* 113/*
119 * Returns copy of control register region 114 * Returns copy of whole control register region
120 * I/O region is divided into banks and certain regions are unreadable 115 * Note: skip RAM address register because accessing it will
116 * cause bus hangs!
121 */ 117 */
122static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, 118static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
123 void *p) 119 void *p)
124{ 120{
125 const struct skge_port *skge = netdev_priv(dev); 121 const struct skge_port *skge = netdev_priv(dev);
126 unsigned long offs;
127 const void __iomem *io = skge->hw->regs; 122 const void __iomem *io = skge->hw->regs;
128 static const unsigned long bankmap
129 = (1<<0) | (1<<2) | (1<<8) | (1<<9)
130 | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
131 | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
132 | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
133 123
134 regs->version = 1; 124 regs->version = 1;
135 for (offs = 0; offs < regs->len; offs += 128) { 125 memset(p, 0, regs->len);
136 u32 len = min_t(u32, 128, regs->len - offs); 126 memcpy_fromio(p, io, B3_RAM_ADDR);
137 127
138 if (bankmap & (1<<(offs/128))) 128 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
139 memcpy_fromio(p + offs, io + offs, len); 129 regs->len - B3_RI_WTO_R1);
140 else
141 memset(p + offs, 0, len);
142 }
143} 130}
144 131
145/* Wake on Lan only supported on Yukon chps with rev 1 or above */ 132/* Wake on Lan only supported on Yukon chps with rev 1 or above */
@@ -775,17 +762,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
775 return 0; 762 return 0;
776} 763}
777 764
778static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
779{
780 struct sk_buff *skb = dev_alloc_skb(size);
781
782 if (likely(skb)) {
783 skb->dev = dev;
784 skb_reserve(skb, NET_IP_ALIGN);
785 }
786 return skb;
787}
788
789/* Allocate and setup a new buffer for receiving */ 765/* Allocate and setup a new buffer for receiving */
790static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, 766static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
791 struct sk_buff *skb, unsigned int bufsize) 767 struct sk_buff *skb, unsigned int bufsize)
@@ -858,16 +834,17 @@ static int skge_rx_fill(struct skge_port *skge)
858{ 834{
859 struct skge_ring *ring = &skge->rx_ring; 835 struct skge_ring *ring = &skge->rx_ring;
860 struct skge_element *e; 836 struct skge_element *e;
861 unsigned int bufsize = skge->rx_buf_size;
862 837
863 e = ring->start; 838 e = ring->start;
864 do { 839 do {
865 struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize); 840 struct sk_buff *skb;
866 841
842 skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
867 if (!skb) 843 if (!skb)
868 return -ENOMEM; 844 return -ENOMEM;
869 845
870 skge_rx_setup(skge, e, skb, bufsize); 846 skb_reserve(skb, NET_IP_ALIGN);
847 skge_rx_setup(skge, e, skb, skge->rx_buf_size);
871 } while ( (e = e->next) != ring->start); 848 } while ( (e = e->next) != ring->start);
872 849
873 ring->to_clean = ring->start; 850 ring->to_clean = ring->start;
@@ -1666,6 +1643,22 @@ static void yukon_reset(struct skge_hw *hw, int port)
1666 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 1643 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1667} 1644}
1668 1645
1646/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
1647static int is_yukon_lite_a0(struct skge_hw *hw)
1648{
1649 u32 reg;
1650 int ret;
1651
1652 if (hw->chip_id != CHIP_ID_YUKON)
1653 return 0;
1654
1655 reg = skge_read32(hw, B2_FAR);
1656 skge_write8(hw, B2_FAR + 3, 0xff);
1657 ret = (skge_read8(hw, B2_FAR + 3) != 0);
1658 skge_write32(hw, B2_FAR, reg);
1659 return ret;
1660}
1661
1669static void yukon_mac_init(struct skge_hw *hw, int port) 1662static void yukon_mac_init(struct skge_hw *hw, int port)
1670{ 1663{
1671 struct skge_port *skge = netdev_priv(hw->dev[port]); 1664 struct skge_port *skge = netdev_priv(hw->dev[port]);
@@ -1781,9 +1774,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1781 /* Configure Rx MAC FIFO */ 1774 /* Configure Rx MAC FIFO */
1782 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 1775 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1783 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 1776 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1784 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1777
1785 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1778 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
1779 if (is_yukon_lite_a0(hw))
1786 reg &= ~GMF_RX_F_FL_ON; 1780 reg &= ~GMF_RX_F_FL_ON;
1781
1787 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1782 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1788 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 1783 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1789 /* 1784 /*
@@ -2442,6 +2437,14 @@ static void yukon_set_multicast(struct net_device *dev)
2442 gma_write16(hw, port, GM_RX_CTRL, reg); 2437 gma_write16(hw, port, GM_RX_CTRL, reg);
2443} 2438}
2444 2439
2440static inline u16 phy_length(const struct skge_hw *hw, u32 status)
2441{
2442 if (hw->chip_id == CHIP_ID_GENESIS)
2443 return status >> XMR_FS_LEN_SHIFT;
2444 else
2445 return status >> GMR_FS_LEN_SHIFT;
2446}
2447
2445static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 2448static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2446{ 2449{
2447 if (hw->chip_id == CHIP_ID_GENESIS) 2450 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2451,80 +2454,99 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2451 (status & GMR_FS_RX_OK) == 0; 2454 (status & GMR_FS_RX_OK) == 0;
2452} 2455}
2453 2456
2454static void skge_rx_error(struct skge_port *skge, int slot,
2455 u32 control, u32 status)
2456{
2457 if (netif_msg_rx_err(skge))
2458 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
2459 skge->netdev->name, slot, control, status);
2460
2461 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2462 skge->net_stats.rx_length_errors++;
2463 else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2464 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2465 skge->net_stats.rx_length_errors++;
2466 if (status & XMR_FS_FRA_ERR)
2467 skge->net_stats.rx_frame_errors++;
2468 if (status & XMR_FS_FCS_ERR)
2469 skge->net_stats.rx_crc_errors++;
2470 } else {
2471 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2472 skge->net_stats.rx_length_errors++;
2473 if (status & GMR_FS_FRAGMENT)
2474 skge->net_stats.rx_frame_errors++;
2475 if (status & GMR_FS_CRC_ERR)
2476 skge->net_stats.rx_crc_errors++;
2477 }
2478}
2479 2457
2480/* Get receive buffer from descriptor. 2458/* Get receive buffer from descriptor.
2481 * Handles copy of small buffers and reallocation failures 2459 * Handles copy of small buffers and reallocation failures
2482 */ 2460 */
2483static inline struct sk_buff *skge_rx_get(struct skge_port *skge, 2461static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2484 struct skge_element *e, 2462 struct skge_element *e,
2485 unsigned int len) 2463 u32 control, u32 status, u16 csum)
2486{ 2464{
2487 struct sk_buff *nskb, *skb; 2465 struct sk_buff *skb;
2466 u16 len = control & BMU_BBC;
2467
2468 if (unlikely(netif_msg_rx_status(skge)))
2469 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2470 skge->netdev->name, e - skge->rx_ring.start,
2471 status, len);
2472
2473 if (len > skge->rx_buf_size)
2474 goto error;
2475
2476 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2477 goto error;
2478
2479 if (bad_phy_status(skge->hw, status))
2480 goto error;
2481
2482 if (phy_length(skge->hw, status) != len)
2483 goto error;
2488 2484
2489 if (len < RX_COPY_THRESHOLD) { 2485 if (len < RX_COPY_THRESHOLD) {
2490 nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN); 2486 skb = dev_alloc_skb(len + 2);
2491 if (unlikely(!nskb)) 2487 if (!skb)
2492 return NULL; 2488 goto resubmit;
2493 2489
2490 skb_reserve(skb, 2);
2494 pci_dma_sync_single_for_cpu(skge->hw->pdev, 2491 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2495 pci_unmap_addr(e, mapaddr), 2492 pci_unmap_addr(e, mapaddr),
2496 len, PCI_DMA_FROMDEVICE); 2493 len, PCI_DMA_FROMDEVICE);
2497 memcpy(nskb->data, e->skb->data, len); 2494 memcpy(skb->data, e->skb->data, len);
2498 pci_dma_sync_single_for_device(skge->hw->pdev, 2495 pci_dma_sync_single_for_device(skge->hw->pdev,
2499 pci_unmap_addr(e, mapaddr), 2496 pci_unmap_addr(e, mapaddr),
2500 len, PCI_DMA_FROMDEVICE); 2497 len, PCI_DMA_FROMDEVICE);
2501
2502 if (skge->rx_csum) {
2503 struct skge_rx_desc *rd = e->desc;
2504 nskb->csum = le16_to_cpu(rd->csum2);
2505 nskb->ip_summed = CHECKSUM_HW;
2506 }
2507 skge_rx_reuse(e, skge->rx_buf_size); 2498 skge_rx_reuse(e, skge->rx_buf_size);
2508 return nskb;
2509 } else { 2499 } else {
2510 nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size); 2500 struct sk_buff *nskb;
2511 if (unlikely(!nskb)) 2501 nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
2512 return NULL; 2502 if (!nskb)
2503 goto resubmit;
2513 2504
2514 pci_unmap_single(skge->hw->pdev, 2505 pci_unmap_single(skge->hw->pdev,
2515 pci_unmap_addr(e, mapaddr), 2506 pci_unmap_addr(e, mapaddr),
2516 pci_unmap_len(e, maplen), 2507 pci_unmap_len(e, maplen),
2517 PCI_DMA_FROMDEVICE); 2508 PCI_DMA_FROMDEVICE);
2518 skb = e->skb; 2509 skb = e->skb;
2519 if (skge->rx_csum) { 2510 prefetch(skb->data);
2520 struct skge_rx_desc *rd = e->desc;
2521 skb->csum = le16_to_cpu(rd->csum2);
2522 skb->ip_summed = CHECKSUM_HW;
2523 }
2524
2525 skge_rx_setup(skge, e, nskb, skge->rx_buf_size); 2511 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
2526 return skb;
2527 } 2512 }
2513
2514 skb_put(skb, len);
2515 skb->dev = skge->netdev;
2516 if (skge->rx_csum) {
2517 skb->csum = csum;
2518 skb->ip_summed = CHECKSUM_HW;
2519 }
2520
2521 skb->protocol = eth_type_trans(skb, skge->netdev);
2522
2523 return skb;
2524error:
2525
2526 if (netif_msg_rx_err(skge))
2527 printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
2528 skge->netdev->name, e - skge->rx_ring.start,
2529 control, status);
2530
2531 if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2532 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2533 skge->net_stats.rx_length_errors++;
2534 if (status & XMR_FS_FRA_ERR)
2535 skge->net_stats.rx_frame_errors++;
2536 if (status & XMR_FS_FCS_ERR)
2537 skge->net_stats.rx_crc_errors++;
2538 } else {
2539 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2540 skge->net_stats.rx_length_errors++;
2541 if (status & GMR_FS_FRAGMENT)
2542 skge->net_stats.rx_frame_errors++;
2543 if (status & GMR_FS_CRC_ERR)
2544 skge->net_stats.rx_crc_errors++;
2545 }
2546
2547resubmit:
2548 skge_rx_reuse(e, skge->rx_buf_size);
2549 return NULL;
2528} 2550}
2529 2551
2530 2552
@@ -2540,32 +2562,16 @@ static int skge_poll(struct net_device *dev, int *budget)
2540 for (e = ring->to_clean; work_done < to_do; e = e->next) { 2562 for (e = ring->to_clean; work_done < to_do; e = e->next) {
2541 struct skge_rx_desc *rd = e->desc; 2563 struct skge_rx_desc *rd = e->desc;
2542 struct sk_buff *skb; 2564 struct sk_buff *skb;
2543 u32 control, len, status; 2565 u32 control;
2544 2566
2545 rmb(); 2567 rmb();
2546 control = rd->control; 2568 control = rd->control;
2547 if (control & BMU_OWN) 2569 if (control & BMU_OWN)
2548 break; 2570 break;
2549 2571
2550 len = control & BMU_BBC; 2572 skb = skge_rx_get(skge, e, control, rd->status,
2551 status = rd->status; 2573 le16_to_cpu(rd->csum2));
2552
2553 if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2554 || bad_phy_status(hw, status))) {
2555 skge_rx_error(skge, e - ring->start, control, status);
2556 skge_rx_reuse(e, skge->rx_buf_size);
2557 continue;
2558 }
2559
2560 if (netif_msg_rx_status(skge))
2561 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2562 dev->name, e - ring->start, rd->status, len);
2563
2564 skb = skge_rx_get(skge, e, len);
2565 if (likely(skb)) { 2574 if (likely(skb)) {
2566 skb_put(skb, len);
2567 skb->protocol = eth_type_trans(skb, dev);
2568
2569 dev->last_rx = jiffies; 2575 dev->last_rx = jiffies;
2570 netif_receive_skb(skb); 2576 netif_receive_skb(skb);
2571 2577
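The reworked skge_rx_get() above keeps a copy-break policy: frames shorter than RX_COPY_THRESHOLD are copied into a freshly allocated small buffer and the ring buffer is reused, while larger frames are handed up intact and the ring slot gets a new full-size buffer. A user-space sketch of that policy, with malloc standing in for skb allocation and all DMA and checksum handling omitted (names here are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RX_COPY_THRESHOLD 128	/* same idea as the driver constant */
#define RX_BUF_SIZE	  2048

/* One receive-ring slot owning a full-sized buffer (stand-in for an skb). */
struct rx_slot { unsigned char *buf; };

/* Return the buffer handed to the stack, leaving the slot usable afterwards:
 * small frames are copied so the ring buffer is kept; large frames are handed
 * up and the slot is given a fresh full-size buffer. */
static unsigned char *rx_get(struct rx_slot *slot, size_t len)
{
	unsigned char *out;

	if (len < RX_COPY_THRESHOLD) {
		out = malloc(len);
		if (!out)
			return NULL;		/* drop: slot keeps its buffer */
		memcpy(out, slot->buf, len);
	} else {
		unsigned char *fresh = malloc(RX_BUF_SIZE);
		if (!fresh)
			return NULL;		/* drop rather than lose the slot */
		out = slot->buf;
		slot->buf = fresh;
	}
	return out;
}

int main(void)
{
	struct rx_slot slot;
	unsigned char *p;

	slot.buf = calloc(1, RX_BUF_SIZE);
	if (!slot.buf)
		return 1;
	p = rx_get(&slot, 64);			/* takes the copy-break path */
	printf("copied frame %p, ring buffer kept at %p\n", (void *)p, (void *)slot.buf);
	free(p);
	free(slot.buf);
	return 0;
}

The threshold trades a short memcpy on small frames for not burning a 2 KB buffer per tiny packet, while large frames still avoid any copy.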
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index efbf98c675d2..72c175b87a5a 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -953,6 +953,7 @@ enum {
953 */ 953 */
954enum { 954enum {
955 XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */ 955 XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
956 XMR_FS_LEN_SHIFT = 18,
956 XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/ 957 XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/
957 XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/ 958 XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/
958 XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */ 959 XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
@@ -1868,6 +1869,7 @@ enum {
1868/* Receive Frame Status Encoding */ 1869/* Receive Frame Status Encoding */
1869enum { 1870enum {
1870 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ 1871 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
1872 GMR_FS_LEN_SHIFT = 16,
1871 GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ 1873 GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
1872 GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ 1874 GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
1873 GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ 1875 GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
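The new *_LEN_SHIFT constants exist so the receive path can recover the frame length from the packed status word with a plain shift, picking the shift per chip family (this is what phy_length() in skge.c above does). A tiny stand-alone illustration using the same shift values:

#include <stdio.h>

#define XMR_FS_LEN_SHIFT 18	/* Genesis: length in bits 31..18 */
#define GMR_FS_LEN_SHIFT 16	/* Yukon:   length in bits 31..16 */

static unsigned int frame_len(unsigned int status, int is_genesis)
{
	return is_genesis ? status >> XMR_FS_LEN_SHIFT : status >> GMR_FS_LEN_SHIFT;
}

int main(void)
{
	unsigned int status = (1514u << GMR_FS_LEN_SHIFT) | 0x1;	/* length plus flag bits */

	printf("%u\n", frame_len(status, 0));	/* 1514 */
	return 0;
}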
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 48c03c11cd9a..a01efa6d5c62 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
72 } 72 }
73 skb_reserve(skb, 4); 73 skb_reserve(skb, 4);
74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0); 74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
75 data = (cisco_packet*)skb->data; 75 data = (cisco_packet*)(skb->data + 4);
76 76
77 data->type = htonl(type); 77 data->type = htonl(type);
78 data->par1 = htonl(par1); 78 data->par1 = htonl(par1);
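The one-line hdlc_cisco fix above accounts for the 4-byte header that cisco_hard_header() has just built at the front of the buffer: the keepalive fields have to be written after it, not over it. A user-space sketch of the layout, where HDR_LEN and struct keepalive are simplified stand-ins for the driver's hard header and cisco_packet:

#include <stdio.h>
#include <string.h>

#define HDR_LEN 4	/* link-level header built at the front of the frame */

struct keepalive { unsigned int type, par1, par2; };	/* simplified stand-in */

int main(void)
{
	unsigned char frame[HDR_LEN + sizeof(struct keepalive)];
	struct keepalive ka = { 4, 1, 2 };

	memset(frame, 0xAA, HDR_LEN);			/* pretend header, already filled in */
	memcpy(frame + HDR_LEN, &ka, sizeof(ka));	/* payload starts after the header */
	/* memcpy(frame, &ka, sizeof(ka)) would mirror the original bug: the
	 * keepalive fields would overwrite the 4 header bytes just built. */

	printf("payload offset = %d, frame size = %zu\n", HDR_LEN, sizeof(frame));
	return 0;
}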
diff --git a/drivers/pci/hotplug.c b/drivers/pci/hotplug.c
index 10444988a10b..e1743be31909 100644
--- a/drivers/pci/hotplug.c
+++ b/drivers/pci/hotplug.c
@@ -7,7 +7,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
7 char *buffer, int buffer_size) 7 char *buffer, int buffer_size)
8{ 8{
9 struct pci_dev *pdev; 9 struct pci_dev *pdev;
10 char *scratch;
11 int i = 0; 10 int i = 0;
12 int length = 0; 11 int length = 0;
13 12
@@ -18,9 +17,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
18 if (!pdev) 17 if (!pdev)
19 return -ENODEV; 18 return -ENODEV;
20 19
21 scratch = buffer;
22
23
24 if (add_hotplug_env_var(envp, num_envp, &i, 20 if (add_hotplug_env_var(envp, num_envp, &i,
25 buffer, buffer_size, &length, 21 buffer, buffer_size, &length,
26 "PCI_CLASS=%04X", pdev->class)) 22 "PCI_CLASS=%04X", pdev->class))
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index 752e6513c447..db69be85b458 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -62,7 +62,7 @@ static ssize_t add_slot_store(struct dlpar_io_attr *dlpar_attr,
62 char drc_name[MAX_DRC_NAME_LEN]; 62 char drc_name[MAX_DRC_NAME_LEN];
63 char *end; 63 char *end;
64 64
65 if (nbytes > MAX_DRC_NAME_LEN) 65 if (nbytes >= MAX_DRC_NAME_LEN)
66 return 0; 66 return 0;
67 67
68 memcpy(drc_name, buf, nbytes); 68 memcpy(drc_name, buf, nbytes);
@@ -83,7 +83,7 @@ static ssize_t remove_slot_store(struct dlpar_io_attr *dlpar_attr,
83 char drc_name[MAX_DRC_NAME_LEN]; 83 char drc_name[MAX_DRC_NAME_LEN];
84 char *end; 84 char *end;
85 85
86 if (nbytes > MAX_DRC_NAME_LEN) 86 if (nbytes >= MAX_DRC_NAME_LEN)
87 return 0; 87 return 0;
88 88
89 memcpy(drc_name, buf, nbytes); 89 memcpy(drc_name, buf, nbytes);
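The comparison change above (">" to ">=") leaves room for the NUL terminator: the store routine copies nbytes into a MAX_DRC_NAME_LEN buffer and then terminates it, so accepting exactly MAX_DRC_NAME_LEN bytes would write one past the end. A simplified stand-alone sketch of the bound (the real handler also strips a trailing newline, omitted here):

#include <stdio.h>
#include <string.h>

#define MAX_NAME_LEN 8	/* stand-in for MAX_DRC_NAME_LEN */

/* Accept at most MAX_NAME_LEN - 1 payload bytes so the '\0' always fits. */
static int store_name(char *dst, const char *buf, size_t nbytes)
{
	if (nbytes >= MAX_NAME_LEN)	/* ">" would let nbytes == MAX_NAME_LEN overflow dst */
		return -1;
	memcpy(dst, buf, nbytes);
	dst[nbytes] = '\0';
	return 0;
}

int main(void)
{
	char name[MAX_NAME_LEN];

	printf("%d\n", store_name(name, "PHB 1234", 8));	/* rejected: no room for '\0' */
	printf("%d %s\n", store_name(name, "PHB 12", 6), name);
	return 0;
}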
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index b1409441c1cd..a32ae82e5922 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -159,7 +159,7 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
159 159
160 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus); 160 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
161 161
162 slot = kcalloc(1, sizeof(*slot), GFP_KERNEL); 162 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
163 if (!slot) 163 if (!slot)
164 return -ENOMEM; 164 return -ENOMEM;
165 bss_hotplug_slot->private = slot; 165 bss_hotplug_slot->private = slot;
@@ -491,7 +491,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
491 if (sn_pci_slot_valid(pci_bus, device) != 1) 491 if (sn_pci_slot_valid(pci_bus, device) != 1)
492 continue; 492 continue;
493 493
494 bss_hotplug_slot = kcalloc(1, sizeof(*bss_hotplug_slot), 494 bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
495 GFP_KERNEL); 495 GFP_KERNEL);
496 if (!bss_hotplug_slot) { 496 if (!bss_hotplug_slot) {
497 rc = -ENOMEM; 497 rc = -ENOMEM;
@@ -499,7 +499,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
499 } 499 }
500 500
501 bss_hotplug_slot->info = 501 bss_hotplug_slot->info =
502 kcalloc(1, sizeof(struct hotplug_slot_info), 502 kzalloc(sizeof(struct hotplug_slot_info),
503 GFP_KERNEL); 503 GFP_KERNEL);
504 if (!bss_hotplug_slot->info) { 504 if (!bss_hotplug_slot->info) {
505 rc = -ENOMEM; 505 rc = -ENOMEM;
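The kcalloc(1, ...) to kzalloc(...) conversions here and in the pci-sysfs hunk that follows are behaviour-preserving: both return zeroed memory, but kzalloc states "one zeroed object" directly and skips the array-count multiplication kcalloc performs. A user-space sketch of the equivalence, with zalloc() as a hypothetical stand-in for kzalloc():

#include <stdlib.h>
#include <string.h>

/* User-space stand-in for kzalloc(): a single zeroed object of the given size. */
static void *zalloc(size_t size)
{
	void *p = malloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}

struct slot_info { int power_status; int attention_status; char name[32]; };

int main(void)
{
	/* Equivalent results; the second spells out the intent of "one object". */
	struct slot_info *a = calloc(1, sizeof(*a));
	struct slot_info *b = zalloc(sizeof(*b));

	free(a);
	free(b);
	return 0;
}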
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 56a3b397efee..2898830c496f 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -360,7 +360,7 @@ pci_create_resource_files(struct pci_dev *pdev)
360 continue; 360 continue;
361 361
362 /* allocate attribute structure, piggyback attribute name */ 362 /* allocate attribute structure, piggyback attribute name */
363 res_attr = kcalloc(1, sizeof(*res_attr) + 10, GFP_ATOMIC); 363 res_attr = kzalloc(sizeof(*res_attr) + 10, GFP_ATOMIC);
364 if (res_attr) { 364 if (res_attr) {
365 char *res_attr_name = (char *)(res_attr + 1); 365 char *res_attr_name = (char *)(res_attr + 1);
366 366
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 26a55d08b506..005786416bb5 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -165,7 +165,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
165 if (l == 0xffffffff) 165 if (l == 0xffffffff)
166 l = 0; 166 l = 0;
167 if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) { 167 if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
168 sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK); 168 sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
169 if (!sz) 169 if (!sz)
170 continue; 170 continue;
171 res->start = l & PCI_BASE_ADDRESS_MEM_MASK; 171 res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
@@ -215,7 +215,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
215 if (l == 0xffffffff) 215 if (l == 0xffffffff)
216 l = 0; 216 l = 0;
217 if (sz && sz != 0xffffffff) { 217 if (sz && sz != 0xffffffff) {
218 sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK); 218 sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
219 if (sz) { 219 if (sz) {
220 res->flags = (l & IORESOURCE_ROM_ENABLE) | 220 res->flags = (l & IORESOURCE_ROM_ENABLE) |
221 IORESOURCE_MEM | IORESOURCE_PREFETCH | 221 IORESOURCE_MEM | IORESOURCE_PREFETCH |
@@ -402,6 +402,12 @@ static void pci_enable_crs(struct pci_dev *dev)
402static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) 402static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
403{ 403{
404 struct pci_bus *parent = child->parent; 404 struct pci_bus *parent = child->parent;
405
406 /* Attempts to fix that up are really dangerous unless
407 we're going to re-assign all bus numbers. */
408 if (!pcibios_assign_all_busses())
409 return;
410
405 while (parent->parent && parent->subordinate < max) { 411 while (parent->parent && parent->subordinate < max) {
406 parent->subordinate = max; 412 parent->subordinate = max;
407 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max); 413 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
@@ -478,8 +484,18 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
478 * We need to assign a number to this bus which we always 484 * We need to assign a number to this bus which we always
479 * do in the second pass. 485 * do in the second pass.
480 */ 486 */
481 if (!pass) 487 if (!pass) {
488 if (pcibios_assign_all_busses())
489 /* Temporarily disable forwarding of the
490 configuration cycles on all bridges in
491 this bus segment to avoid possible
492 conflicts in the second pass between two
493 bridges programmed with overlapping
494 bus ranges. */
495 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
496 buses & ~0xffffff);
482 return max; 497 return max;
498 }
483 499
484 /* Clear errors */ 500 /* Clear errors */
485 pci_write_config_word(dev, PCI_STATUS, 0xffff); 501 pci_write_config_word(dev, PCI_STATUS, 0xffff);
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 91ea8e4777f3..dbb3eb0e330b 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -437,7 +437,7 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
437 if (cdev->dev.driver_data) { 437 if (cdev->dev.driver_data) {
438 gdev = (struct ccwgroup_device *)cdev->dev.driver_data; 438 gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
439 if (get_device(&gdev->dev)) { 439 if (get_device(&gdev->dev)) {
440 if (klist_node_attached(&gdev->dev.knode_bus)) 440 if (device_is_registered(&gdev->dev))
441 return gdev; 441 return gdev;
442 put_device(&gdev->dev); 442 put_device(&gdev->dev);
443 } 443 }
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index fc145307a7d4..d6a78f1a2f16 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ 5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
6 zfcp_fsf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \ 6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o 7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o
8 8
9obj-$(CONFIG_ZFCP) += zfcp.o 9obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bfe3ba73bc0f..0b5087f7cabc 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,95 +122,6 @@ _zfcp_hex_dump(char *addr, int count)
122 122
123#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER 123#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
124 124
125static inline int
126zfcp_fsf_req_is_scsi_cmnd(struct zfcp_fsf_req *fsf_req)
127{
128 return ((fsf_req->fsf_command == FSF_QTCB_FCP_CMND) &&
129 !(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT));
130}
131
132void
133zfcp_cmd_dbf_event_fsf(const char *text, struct zfcp_fsf_req *fsf_req,
134 void *add_data, int add_length)
135{
136 struct zfcp_adapter *adapter = fsf_req->adapter;
137 struct scsi_cmnd *scsi_cmnd;
138 int level = 3;
139 int i;
140 unsigned long flags;
141
142 spin_lock_irqsave(&adapter->dbf_lock, flags);
143 if (zfcp_fsf_req_is_scsi_cmnd(fsf_req)) {
144 scsi_cmnd = fsf_req->data.send_fcp_command_task.scsi_cmnd;
145 debug_text_event(adapter->cmd_dbf, level, "fsferror");
146 debug_text_event(adapter->cmd_dbf, level, text);
147 debug_event(adapter->cmd_dbf, level, &fsf_req,
148 sizeof (unsigned long));
149 debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
150 sizeof (u32));
151 debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
152 sizeof (unsigned long));
153 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
154 min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
155 for (i = 0; i < add_length; i += ZFCP_CMD_DBF_LENGTH)
156 debug_event(adapter->cmd_dbf,
157 level,
158 (char *) add_data + i,
159 min(ZFCP_CMD_DBF_LENGTH, add_length - i));
160 }
161 spin_unlock_irqrestore(&adapter->dbf_lock, flags);
162}
163
164/* XXX additionally log unit if available */
165/* ---> introduce new parameter for unit, see 2.4 code */
166void
167zfcp_cmd_dbf_event_scsi(const char *text, struct scsi_cmnd *scsi_cmnd)
168{
169 struct zfcp_adapter *adapter;
170 union zfcp_req_data *req_data;
171 struct zfcp_fsf_req *fsf_req;
172 int level = ((host_byte(scsi_cmnd->result) != 0) ? 1 : 5);
173 unsigned long flags;
174
175 adapter = (struct zfcp_adapter *) scsi_cmnd->device->host->hostdata[0];
176 req_data = (union zfcp_req_data *) scsi_cmnd->host_scribble;
177 fsf_req = (req_data ? req_data->send_fcp_command_task.fsf_req : NULL);
178 spin_lock_irqsave(&adapter->dbf_lock, flags);
179 debug_text_event(adapter->cmd_dbf, level, "hostbyte");
180 debug_text_event(adapter->cmd_dbf, level, text);
181 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->result, sizeof (u32));
182 debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
183 sizeof (unsigned long));
184 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
185 min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
186 if (likely(fsf_req)) {
187 debug_event(adapter->cmd_dbf, level, &fsf_req,
188 sizeof (unsigned long));
189 debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
190 sizeof (u32));
191 } else {
192 debug_text_event(adapter->cmd_dbf, level, "");
193 debug_text_event(adapter->cmd_dbf, level, "");
194 }
195 spin_unlock_irqrestore(&adapter->dbf_lock, flags);
196}
197
198void
199zfcp_in_els_dbf_event(struct zfcp_adapter *adapter, const char *text,
200 struct fsf_status_read_buffer *status_buffer, int length)
201{
202 int level = 1;
203 int i;
204
205 debug_text_event(adapter->in_els_dbf, level, text);
206 debug_event(adapter->in_els_dbf, level, &status_buffer->d_id, 8);
207 for (i = 0; i < length; i += ZFCP_IN_ELS_DBF_LENGTH)
208 debug_event(adapter->in_els_dbf,
209 level,
210 (char *) status_buffer->payload + i,
211 min(ZFCP_IN_ELS_DBF_LENGTH, length - i));
212}
213
214/** 125/**
215 * zfcp_device_setup - setup function 126 * zfcp_device_setup - setup function
216 * @str: pointer to parameter string 127 * @str: pointer to parameter string
@@ -1017,81 +928,6 @@ zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
1017 mempool_destroy(adapter->pool.data_gid_pn); 928 mempool_destroy(adapter->pool.data_gid_pn);
1018} 929}
1019 930
1020/**
1021 * zfcp_adapter_debug_register - registers debug feature for an adapter
1022 * @adapter: pointer to adapter for which debug features should be registered
1023 * return: -ENOMEM on error, 0 otherwise
1024 */
1025int
1026zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
1027{
1028 char dbf_name[20];
1029
1030 /* debug feature area which records SCSI command failures (hostbyte) */
1031 spin_lock_init(&adapter->dbf_lock);
1032
1033 sprintf(dbf_name, ZFCP_CMD_DBF_NAME "%s",
1034 zfcp_get_busid_by_adapter(adapter));
1035 adapter->cmd_dbf = debug_register(dbf_name, ZFCP_CMD_DBF_INDEX,
1036 ZFCP_CMD_DBF_AREAS,
1037 ZFCP_CMD_DBF_LENGTH);
1038 debug_register_view(adapter->cmd_dbf, &debug_hex_ascii_view);
1039 debug_set_level(adapter->cmd_dbf, ZFCP_CMD_DBF_LEVEL);
1040
1041 /* debug feature area which records SCSI command aborts */
1042 sprintf(dbf_name, ZFCP_ABORT_DBF_NAME "%s",
1043 zfcp_get_busid_by_adapter(adapter));
1044 adapter->abort_dbf = debug_register(dbf_name, ZFCP_ABORT_DBF_INDEX,
1045 ZFCP_ABORT_DBF_AREAS,
1046 ZFCP_ABORT_DBF_LENGTH);
1047 debug_register_view(adapter->abort_dbf, &debug_hex_ascii_view);
1048 debug_set_level(adapter->abort_dbf, ZFCP_ABORT_DBF_LEVEL);
1049
1050 /* debug feature area which records incoming ELS commands */
1051 sprintf(dbf_name, ZFCP_IN_ELS_DBF_NAME "%s",
1052 zfcp_get_busid_by_adapter(adapter));
1053 adapter->in_els_dbf = debug_register(dbf_name, ZFCP_IN_ELS_DBF_INDEX,
1054 ZFCP_IN_ELS_DBF_AREAS,
1055 ZFCP_IN_ELS_DBF_LENGTH);
1056 debug_register_view(adapter->in_els_dbf, &debug_hex_ascii_view);
1057 debug_set_level(adapter->in_els_dbf, ZFCP_IN_ELS_DBF_LEVEL);
1058
1059 /* debug feature area which records erp events */
1060 sprintf(dbf_name, ZFCP_ERP_DBF_NAME "%s",
1061 zfcp_get_busid_by_adapter(adapter));
1062 adapter->erp_dbf = debug_register(dbf_name, ZFCP_ERP_DBF_INDEX,
1063 ZFCP_ERP_DBF_AREAS,
1064 ZFCP_ERP_DBF_LENGTH);
1065 debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
1066 debug_set_level(adapter->erp_dbf, ZFCP_ERP_DBF_LEVEL);
1067
1068 if (!(adapter->cmd_dbf && adapter->abort_dbf &&
1069 adapter->in_els_dbf && adapter->erp_dbf)) {
1070 zfcp_adapter_debug_unregister(adapter);
1071 return -ENOMEM;
1072 }
1073
1074 return 0;
1075
1076}
1077
1078/**
1079 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
1080 * @adapter: pointer to adapter for which debug features should be unregistered
1081 */
1082void
1083zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
1084{
1085 debug_unregister(adapter->abort_dbf);
1086 debug_unregister(adapter->cmd_dbf);
1087 debug_unregister(adapter->erp_dbf);
1088 debug_unregister(adapter->in_els_dbf);
1089 adapter->abort_dbf = NULL;
1090 adapter->cmd_dbf = NULL;
1091 adapter->erp_dbf = NULL;
1092 adapter->in_els_dbf = NULL;
1093}
1094
1095void 931void
1096zfcp_dummy_release(struct device *dev) 932zfcp_dummy_release(struct device *dev)
1097{ 933{
@@ -1462,10 +1298,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1462 /* see FC-FS */ 1298 /* see FC-FS */
1463 no_entries = (fcp_rscn_head->payload_len / 4); 1299 no_entries = (fcp_rscn_head->payload_len / 4);
1464 1300
1465 zfcp_in_els_dbf_event(adapter, "##rscn", status_buffer,
1466 fcp_rscn_head->payload_len);
1467
1468 debug_text_event(adapter->erp_dbf, 1, "unsol_els_rscn:");
1469 for (i = 1; i < no_entries; i++) { 1301 for (i = 1; i < no_entries; i++) {
1470 /* skip head and start with 1st element */ 1302 /* skip head and start with 1st element */
1471 fcp_rscn_element++; 1303 fcp_rscn_element++;
@@ -1497,8 +1329,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1497 (ZFCP_STATUS_PORT_DID_DID, &port->status)) { 1329 (ZFCP_STATUS_PORT_DID_DID, &port->status)) {
1498 ZFCP_LOG_INFO("incoming RSCN, trying to open " 1330 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1499 "port 0x%016Lx\n", port->wwpn); 1331 "port 0x%016Lx\n", port->wwpn);
1500 debug_text_event(adapter->erp_dbf, 1,
1501 "unsol_els_rscnu:");
1502 zfcp_erp_port_reopen(port, 1332 zfcp_erp_port_reopen(port,
1503 ZFCP_STATUS_COMMON_ERP_FAILED); 1333 ZFCP_STATUS_COMMON_ERP_FAILED);
1504 continue; 1334 continue;
@@ -1524,8 +1354,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1524 */ 1354 */
1525 ZFCP_LOG_INFO("incoming RSCN, trying to open " 1355 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1526 "port 0x%016Lx\n", port->wwpn); 1356 "port 0x%016Lx\n", port->wwpn);
1527 debug_text_event(adapter->erp_dbf, 1,
1528 "unsol_els_rscnk:");
1529 zfcp_test_link(port); 1357 zfcp_test_link(port);
1530 } 1358 }
1531 } 1359 }
@@ -1541,8 +1369,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
1541 struct zfcp_port *port; 1369 struct zfcp_port *port;
1542 unsigned long flags; 1370 unsigned long flags;
1543 1371
1544 zfcp_in_els_dbf_event(adapter, "##plogi", status_buffer, 28);
1545
1546 read_lock_irqsave(&zfcp_data.config_lock, flags); 1372 read_lock_irqsave(&zfcp_data.config_lock, flags);
1547 list_for_each_entry(port, &adapter->port_list_head, list) { 1373 list_for_each_entry(port, &adapter->port_list_head, list) {
1548 if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn)) 1374 if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn))
@@ -1556,8 +1382,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
1556 status_buffer->d_id, 1382 status_buffer->d_id,
1557 zfcp_get_busid_by_adapter(adapter)); 1383 zfcp_get_busid_by_adapter(adapter));
1558 } else { 1384 } else {
1559 debug_text_event(adapter->erp_dbf, 1, "unsol_els_plogi:");
1560 debug_event(adapter->erp_dbf, 1, &els_logi->nport_wwn, 8);
1561 zfcp_erp_port_forced_reopen(port, 0); 1385 zfcp_erp_port_forced_reopen(port, 0);
1562 } 1386 }
1563} 1387}
@@ -1570,8 +1394,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
1570 struct zfcp_port *port; 1394 struct zfcp_port *port;
1571 unsigned long flags; 1395 unsigned long flags;
1572 1396
1573 zfcp_in_els_dbf_event(adapter, "##logo", status_buffer, 16);
1574
1575 read_lock_irqsave(&zfcp_data.config_lock, flags); 1397 read_lock_irqsave(&zfcp_data.config_lock, flags);
1576 list_for_each_entry(port, &adapter->port_list_head, list) { 1398 list_for_each_entry(port, &adapter->port_list_head, list) {
1577 if (port->wwpn == els_logo->nport_wwpn) 1399 if (port->wwpn == els_logo->nport_wwpn)
@@ -1585,8 +1407,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
1585 status_buffer->d_id, 1407 status_buffer->d_id,
1586 zfcp_get_busid_by_adapter(adapter)); 1408 zfcp_get_busid_by_adapter(adapter));
1587 } else { 1409 } else {
1588 debug_text_event(adapter->erp_dbf, 1, "unsol_els_logo:");
1589 debug_event(adapter->erp_dbf, 1, &els_logo->nport_wwpn, 8);
1590 zfcp_erp_port_forced_reopen(port, 0); 1410 zfcp_erp_port_forced_reopen(port, 0);
1591 } 1411 }
1592} 1412}
@@ -1595,7 +1415,6 @@ static void
1595zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter, 1415zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
1596 struct fsf_status_read_buffer *status_buffer) 1416 struct fsf_status_read_buffer *status_buffer)
1597{ 1417{
1598 zfcp_in_els_dbf_event(adapter, "##undef", status_buffer, 24);
1599 ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x " 1418 ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
1600 "for adapter %s\n", *(u32 *) (status_buffer->payload), 1419 "for adapter %s\n", *(u32 *) (status_buffer->payload),
1601 zfcp_get_busid_by_adapter(adapter)); 1420 zfcp_get_busid_by_adapter(adapter));
@@ -1609,10 +1428,11 @@ zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
1609 u32 els_type; 1428 u32 els_type;
1610 struct zfcp_adapter *adapter; 1429 struct zfcp_adapter *adapter;
1611 1430
1612 status_buffer = fsf_req->data.status_read.buffer; 1431 status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
1613 els_type = *(u32 *) (status_buffer->payload); 1432 els_type = *(u32 *) (status_buffer->payload);
1614 adapter = fsf_req->adapter; 1433 adapter = fsf_req->adapter;
1615 1434
1435 zfcp_san_dbf_event_incoming_els(fsf_req);
1616 if (els_type == LS_PLOGI) 1436 if (els_type == LS_PLOGI)
1617 zfcp_fsf_incoming_els_plogi(adapter, status_buffer); 1437 zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
1618 else if (els_type == LS_LOGO) 1438 else if (els_type == LS_LOGO)
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index b30abab77da3..0fc46381fc22 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -202,19 +202,9 @@ static int
202zfcp_ccw_set_offline(struct ccw_device *ccw_device) 202zfcp_ccw_set_offline(struct ccw_device *ccw_device)
203{ 203{
204 struct zfcp_adapter *adapter; 204 struct zfcp_adapter *adapter;
205 struct zfcp_port *port;
206 struct fc_rport *rport;
207 205
208 down(&zfcp_data.config_sema); 206 down(&zfcp_data.config_sema);
209 adapter = dev_get_drvdata(&ccw_device->dev); 207 adapter = dev_get_drvdata(&ccw_device->dev);
210 /* might be racy, but we cannot take config_lock due to the fact that
211 fc_remote_port_delete might sleep */
212 list_for_each_entry(port, &adapter->port_list_head, list)
213 if (port->rport) {
214 rport = port->rport;
215 port->rport = NULL;
216 fc_remote_port_delete(rport);
217 }
218 zfcp_erp_adapter_shutdown(adapter, 0); 208 zfcp_erp_adapter_shutdown(adapter, 0);
219 zfcp_erp_wait(adapter); 209 zfcp_erp_wait(adapter);
220 zfcp_adapter_scsi_unregister(adapter); 210 zfcp_adapter_scsi_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
new file mode 100644
index 000000000000..826fb3b00605
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -0,0 +1,995 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_dbf.c
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * Debugging facilities
8 *
9 * (C) Copyright IBM Corp. 2005
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#define ZFCP_DBF_REVISION "$Revision$"
27
28#include <asm/debug.h>
29#include <linux/ctype.h>
30#include "zfcp_ext.h"
31
32static u32 dbfsize = 4;
33
34module_param(dbfsize, uint, 0400);
35MODULE_PARM_DESC(dbfsize,
36 "number of pages for each debug feature area (default 4)");
37
38#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
39
40static inline int
41zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
42{
43 unsigned long long sec;
44 struct timespec xtime;
45 int len = 0;
46
47 stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
48 sec = stck >> 12;
49 do_div(sec, 1000000);
50 xtime.tv_sec = sec;
51 stck -= (sec * 1000000) << 12;
52 xtime.tv_nsec = ((stck * 1000) >> 12);
53 len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
54 label, xtime.tv_sec, xtime.tv_nsec);
55
56 return len;
57}
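
zfcp_dbf_stck() turns a raw z/Architecture store-clock (STCK) value into a wall-clock timestamp: bit 51 of the TOD clock ticks once per microsecond, so shifting right by 12 yields microseconds, and the subtracted constant effectively rebases the 1900-based TOD epoch onto the Unix epoch. A rough standalone sketch of the same conversion, handy for decoding fsf_issued values offline (the constant and shift mirror the function above; the sample value is made up):

#include <stdio.h>
#include <stdint.h>

/* TOD value of the Unix epoch, derived the same way as in zfcp_dbf_stck() */
#define TOD_UNIX_EPOCH (0x8126d60e46000000ULL - (0x3c26700ULL * 1000000 * 4096))

static void tod_to_timestamp(uint64_t stck, uint64_t *sec, uint64_t *usec)
{
	uint64_t us = (stck - TOD_UNIX_EPOCH) >> 12;	/* bit 51 == 1 microsecond */

	*sec = us / 1000000;
	*usec = us % 1000000;
}

int main(void)
{
	uint64_t sec, usec;

	tod_to_timestamp(0xbf0db25b64a46000ULL, &sec, &usec);	/* made-up sample */
	printf("%llu.%06llu\n", (unsigned long long)sec, (unsigned long long)usec);
	return 0;
}
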
58
59static int zfcp_dbf_tag(char *out_buf, const char *label, const char *tag)
60{
61 int len = 0, i;
62
63 len += sprintf(out_buf + len, "%-24s", label);
64 for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
65 len += sprintf(out_buf + len, "%c", tag[i]);
66 len += sprintf(out_buf + len, "\n");
67
68 return len;
69}
70
71static int
72zfcp_dbf_view(char *out_buf, const char *label, const char *format, ...)
73{
74 va_list arg;
75 int len = 0;
76
77 len += sprintf(out_buf + len, "%-24s", label);
78 va_start(arg, format);
79 len += vsprintf(out_buf + len, format, arg);
80 va_end(arg);
81 len += sprintf(out_buf + len, "\n");
82
83 return len;
84}
85
86static int
87zfcp_dbf_view_dump(char *out_buf, const char *label,
88 char *buffer, int buflen, int offset, int total_size)
89{
90 int len = 0;
91
92 if (offset == 0)
93 len += sprintf(out_buf + len, "%-24s ", label);
94
95 while (buflen--) {
96 if (offset > 0) {
97 if ((offset % 32) == 0)
98 len += sprintf(out_buf + len, "\n%-24c ", ' ');
99 else if ((offset % 4) == 0)
100 len += sprintf(out_buf + len, " ");
101 }
102 len += sprintf(out_buf + len, "%02x", *buffer++);
103 if (++offset == total_size) {
104 len += sprintf(out_buf + len, "\n");
105 break;
106 }
107 }
108
109 if (total_size == 0)
110 len += sprintf(out_buf + len, "\n");
111
112 return len;
113}
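
The dump helper prints each byte as two hex digits, four bytes to a group and 32 bytes to a line, with continuation lines indented under the 24-character label column. For a 40-byte payload the structured view would therefore emit something like this (illustrative values, not captured output):

payload                  00000012 deadbeef 00000000 4c554e30 00000000 00000000 00000000 00000000
                         0000cafe 00000001
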
114
115static inline int
116zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
117 debug_entry_t * entry, char *out_buf)
118{
119 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
120 int len = 0;
121
122 if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
123 len += zfcp_dbf_stck(out_buf + len, "timestamp",
124 entry->id.stck);
125 len += zfcp_dbf_view(out_buf + len, "cpu", "%02i",
126 entry->id.fields.cpuid);
127 } else {
128 len += zfcp_dbf_view_dump(out_buf + len, NULL,
129 dump->data,
130 dump->size,
131 dump->offset, dump->total_size);
132 if ((dump->offset + dump->size) == dump->total_size)
133 len += sprintf(out_buf + len, "\n");
134 }
135
136 return len;
137}
138
139inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
140{
141 struct zfcp_adapter *adapter = fsf_req->adapter;
142 struct fsf_qtcb *qtcb = fsf_req->qtcb;
143 union fsf_prot_status_qual *prot_status_qual =
144 &qtcb->prefix.prot_status_qual;
145 union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual;
146 struct scsi_cmnd *scsi_cmnd;
147 struct zfcp_port *port;
148 struct zfcp_unit *unit;
149 struct zfcp_send_els *send_els;
150 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
151 struct zfcp_hba_dbf_record_response *response = &rec->type.response;
152 int level;
153 unsigned long flags;
154
155 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
156 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
157 strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
158
159 if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
160 (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
161 strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE);
162 level = 1;
163 } else if (qtcb->header.fsf_status != FSF_GOOD) {
164 strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE);
165 level = 1;
166 } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
167 (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
168 strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
169 level = 4;
170 } else if ((prot_status_qual->doubleword[0] != 0) ||
171 (prot_status_qual->doubleword[1] != 0) ||
172 (fsf_status_qual->doubleword[0] != 0) ||
173 (fsf_status_qual->doubleword[1] != 0)) {
174 strncpy(rec->tag2, "qual", ZFCP_DBF_TAG_SIZE);
175 level = 3;
176 } else {
177 strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
178 level = 6;
179 }
180
181 response->fsf_command = fsf_req->fsf_command;
182 response->fsf_reqid = (unsigned long)fsf_req;
183 response->fsf_seqno = fsf_req->seq_no;
184 response->fsf_issued = fsf_req->issued;
185 response->fsf_prot_status = qtcb->prefix.prot_status;
186 response->fsf_status = qtcb->header.fsf_status;
187 memcpy(response->fsf_prot_status_qual,
188 prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
189 memcpy(response->fsf_status_qual,
190 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
191 response->fsf_req_status = fsf_req->status;
192 response->sbal_first = fsf_req->sbal_first;
193 response->sbal_curr = fsf_req->sbal_curr;
194 response->sbal_last = fsf_req->sbal_last;
195 response->pool = fsf_req->pool != NULL;
196 response->erp_action = (unsigned long)fsf_req->erp_action;
197
198 switch (fsf_req->fsf_command) {
199 case FSF_QTCB_FCP_CMND:
200 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
201 break;
202 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
203 if (scsi_cmnd != NULL) {
204 response->data.send_fcp.scsi_cmnd
205 = (unsigned long)scsi_cmnd;
206 response->data.send_fcp.scsi_serial
207 = scsi_cmnd->serial_number;
208 }
209 break;
210
211 case FSF_QTCB_OPEN_PORT_WITH_DID:
212 case FSF_QTCB_CLOSE_PORT:
213 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
214 port = (struct zfcp_port *)fsf_req->data;
215 response->data.port.wwpn = port->wwpn;
216 response->data.port.d_id = port->d_id;
217 response->data.port.port_handle = qtcb->header.port_handle;
218 break;
219
220 case FSF_QTCB_OPEN_LUN:
221 case FSF_QTCB_CLOSE_LUN:
222 unit = (struct zfcp_unit *)fsf_req->data;
223 port = unit->port;
224 response->data.unit.wwpn = port->wwpn;
225 response->data.unit.fcp_lun = unit->fcp_lun;
226 response->data.unit.port_handle = qtcb->header.port_handle;
227 response->data.unit.lun_handle = qtcb->header.lun_handle;
228 break;
229
230 case FSF_QTCB_SEND_ELS:
231 send_els = (struct zfcp_send_els *)fsf_req->data;
232 response->data.send_els.d_id = qtcb->bottom.support.d_id;
233 response->data.send_els.ls_code = send_els->ls_code >> 24;
234 break;
235
236 case FSF_QTCB_ABORT_FCP_CMND:
237 case FSF_QTCB_SEND_GENERIC:
238 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
239 case FSF_QTCB_EXCHANGE_PORT_DATA:
240 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
241 case FSF_QTCB_UPLOAD_CONTROL_FILE:
242 break;
243 }
244
245 debug_event(adapter->hba_dbf, level,
246 rec, sizeof(struct zfcp_hba_dbf_record));
247 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
248}
249
250inline void
251zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
252 struct fsf_status_read_buffer *status_buffer)
253{
254 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
255 unsigned long flags;
256
257 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
258 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
259 strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
260 strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
261
262 rec->type.status.failed = adapter->status_read_failed;
263 if (status_buffer != NULL) {
264 rec->type.status.status_type = status_buffer->status_type;
265 rec->type.status.status_subtype = status_buffer->status_subtype;
266 memcpy(&rec->type.status.queue_designator,
267 &status_buffer->queue_designator,
268 sizeof(struct fsf_queue_designator));
269
270 switch (status_buffer->status_type) {
271 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
272 rec->type.status.payload_size =
273 ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
274 break;
275
276 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
277 rec->type.status.payload_size =
278 ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
279 break;
280
281 case FSF_STATUS_READ_LINK_DOWN:
282 switch (status_buffer->status_subtype) {
283 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
284 case FSF_STATUS_READ_SUB_FDISC_FAILED:
285 rec->type.status.payload_size =
286 sizeof(struct fsf_link_down_info);
287 }
288 break;
289
290 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
291 rec->type.status.payload_size =
292 ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
293 break;
294 }
295 memcpy(&rec->type.status.payload,
296 &status_buffer->payload, rec->type.status.payload_size);
297 }
298
299 debug_event(adapter->hba_dbf, 2,
300 rec, sizeof(struct zfcp_hba_dbf_record));
301 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
302}
303
304inline void
305zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
306 unsigned int qdio_error, unsigned int siga_error,
307 int sbal_index, int sbal_count)
308{
309 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
310 unsigned long flags;
311
312 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
313 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
314 strncpy(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE);
315 rec->type.qdio.status = status;
316 rec->type.qdio.qdio_error = qdio_error;
317 rec->type.qdio.siga_error = siga_error;
318 rec->type.qdio.sbal_index = sbal_index;
319 rec->type.qdio.sbal_count = sbal_count;
320 debug_event(adapter->hba_dbf, 0,
321 rec, sizeof(struct zfcp_hba_dbf_record));
322 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
323}
324
325static inline int
326zfcp_hba_dbf_view_response(char *out_buf,
327 struct zfcp_hba_dbf_record_response *rec)
328{
329 int len = 0;
330
331 len += zfcp_dbf_view(out_buf + len, "fsf_command", "0x%08x",
332 rec->fsf_command);
333 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
334 rec->fsf_reqid);
335 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
336 rec->fsf_seqno);
337 len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
338 len += zfcp_dbf_view(out_buf + len, "fsf_prot_status", "0x%08x",
339 rec->fsf_prot_status);
340 len += zfcp_dbf_view(out_buf + len, "fsf_status", "0x%08x",
341 rec->fsf_status);
342 len += zfcp_dbf_view_dump(out_buf + len, "fsf_prot_status_qual",
343 rec->fsf_prot_status_qual,
344 FSF_PROT_STATUS_QUAL_SIZE,
345 0, FSF_PROT_STATUS_QUAL_SIZE);
346 len += zfcp_dbf_view_dump(out_buf + len, "fsf_status_qual",
347 rec->fsf_status_qual,
348 FSF_STATUS_QUALIFIER_SIZE,
349 0, FSF_STATUS_QUALIFIER_SIZE);
350 len += zfcp_dbf_view(out_buf + len, "fsf_req_status", "0x%08x",
351 rec->fsf_req_status);
352 len += zfcp_dbf_view(out_buf + len, "sbal_first", "0x%02x",
353 rec->sbal_first);
354 len += zfcp_dbf_view(out_buf + len, "sbal_curr", "0x%02x",
355 rec->sbal_curr);
356 len += zfcp_dbf_view(out_buf + len, "sbal_last", "0x%02x",
357 rec->sbal_last);
358 len += zfcp_dbf_view(out_buf + len, "pool", "0x%02x", rec->pool);
359
360 switch (rec->fsf_command) {
361 case FSF_QTCB_FCP_CMND:
362 if (rec->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
363 break;
364 len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
365 rec->data.send_fcp.scsi_cmnd);
366 len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
367 rec->data.send_fcp.scsi_serial);
368 break;
369
370 case FSF_QTCB_OPEN_PORT_WITH_DID:
371 case FSF_QTCB_CLOSE_PORT:
372 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
373 len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
374 rec->data.port.wwpn);
375 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
376 rec->data.port.d_id);
377 len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
378 rec->data.port.port_handle);
379 break;
380
381 case FSF_QTCB_OPEN_LUN:
382 case FSF_QTCB_CLOSE_LUN:
383 len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
384 rec->data.unit.wwpn);
385 len += zfcp_dbf_view(out_buf + len, "fcp_lun", "0x%016Lx",
386 rec->data.unit.fcp_lun);
387 len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
388 rec->data.unit.port_handle);
389 len += zfcp_dbf_view(out_buf + len, "lun_handle", "0x%08x",
390 rec->data.unit.lun_handle);
391 break;
392
393 case FSF_QTCB_SEND_ELS:
394 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
395 rec->data.send_els.d_id);
396 len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
397 rec->data.send_els.ls_code);
398 break;
399
400 case FSF_QTCB_ABORT_FCP_CMND:
401 case FSF_QTCB_SEND_GENERIC:
402 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
403 case FSF_QTCB_EXCHANGE_PORT_DATA:
404 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
405 case FSF_QTCB_UPLOAD_CONTROL_FILE:
406 break;
407 }
408
409 return len;
410}
411
412static inline int
413zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
414{
415 int len = 0;
416
417 len += zfcp_dbf_view(out_buf + len, "failed", "0x%02x", rec->failed);
418 len += zfcp_dbf_view(out_buf + len, "status_type", "0x%08x",
419 rec->status_type);
420 len += zfcp_dbf_view(out_buf + len, "status_subtype", "0x%08x",
421 rec->status_subtype);
422 len += zfcp_dbf_view_dump(out_buf + len, "queue_designator",
423 (char *)&rec->queue_designator,
424 sizeof(struct fsf_queue_designator),
425 0, sizeof(struct fsf_queue_designator));
426 len += zfcp_dbf_view_dump(out_buf + len, "payload",
427 (char *)&rec->payload,
428 rec->payload_size, 0, rec->payload_size);
429
430 return len;
431}
432
433static inline int
434zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
435{
436 int len = 0;
437
438 len += zfcp_dbf_view(out_buf + len, "status", "0x%08x", rec->status);
439 len += zfcp_dbf_view(out_buf + len, "qdio_error", "0x%08x",
440 rec->qdio_error);
441 len += zfcp_dbf_view(out_buf + len, "siga_error", "0x%08x",
442 rec->siga_error);
443 len += zfcp_dbf_view(out_buf + len, "sbal_index", "0x%02x",
444 rec->sbal_index);
445 len += zfcp_dbf_view(out_buf + len, "sbal_count", "0x%02x",
446 rec->sbal_count);
447
448 return len;
449}
450
451static int
452zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
453 char *out_buf, const char *in_buf)
454{
455 struct zfcp_hba_dbf_record *rec = (struct zfcp_hba_dbf_record *)in_buf;
456 int len = 0;
457
458 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
459 return 0;
460
461 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
462 if (isalpha(rec->tag2[0]))
463 len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
464 if (strncmp(rec->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
465 len += zfcp_hba_dbf_view_response(out_buf + len,
466 &rec->type.response);
467 else if (strncmp(rec->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
468 len += zfcp_hba_dbf_view_status(out_buf + len,
469 &rec->type.status);
470 else if (strncmp(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
471 len += zfcp_hba_dbf_view_qdio(out_buf + len, &rec->type.qdio);
472
473 len += sprintf(out_buf + len, "\n");
474
475 return len;
476}
477
478struct debug_view zfcp_hba_dbf_view = {
479 "structured",
480 NULL,
481 &zfcp_dbf_view_header,
482 &zfcp_hba_dbf_view_format,
483 NULL,
484 NULL
485};
486
487inline void
488_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
489 u32 s_id, u32 d_id, void *buffer, int buflen)
490{
491 struct zfcp_send_ct *send_ct = (struct zfcp_send_ct *)fsf_req->data;
492 struct zfcp_port *port = send_ct->port;
493 struct zfcp_adapter *adapter = port->adapter;
494 struct ct_hdr *header = (struct ct_hdr *)buffer;
495 struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
496 struct zfcp_san_dbf_record_ct *ct = &rec->type.ct;
497 unsigned long flags;
498
499 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
500 memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
501 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
502 rec->fsf_reqid = (unsigned long)fsf_req;
503 rec->fsf_seqno = fsf_req->seq_no;
504 rec->s_id = s_id;
505 rec->d_id = d_id;
506 if (strncmp(tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
507 ct->type.request.cmd_req_code = header->cmd_rsp_code;
508 ct->type.request.revision = header->revision;
509 ct->type.request.gs_type = header->gs_type;
510 ct->type.request.gs_subtype = header->gs_subtype;
511 ct->type.request.options = header->options;
512 ct->type.request.max_res_size = header->max_res_size;
513 } else if (strncmp(tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
514 ct->type.response.cmd_rsp_code = header->cmd_rsp_code;
515 ct->type.response.revision = header->revision;
516 ct->type.response.reason_code = header->reason_code;
517 ct->type.response.reason_code_expl = header->reason_code_expl;
518 ct->type.response.vendor_unique = header->vendor_unique;
519 }
520 ct->payload_size =
521 min(buflen - (int)sizeof(struct ct_hdr), ZFCP_DBF_CT_PAYLOAD);
522 memcpy(ct->payload, buffer + sizeof(struct ct_hdr), ct->payload_size);
523 debug_event(adapter->san_dbf, 3,
524 rec, sizeof(struct zfcp_san_dbf_record));
525 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
526}
527
528inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
529{
530 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
531 struct zfcp_port *port = ct->port;
532 struct zfcp_adapter *adapter = port->adapter;
533
534 _zfcp_san_dbf_event_common_ct("octc", fsf_req,
535 fc_host_port_id(adapter->scsi_host),
536 port->d_id, zfcp_sg_to_address(ct->req),
537 ct->req->length);
538}
539
540inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
541{
542 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
543 struct zfcp_port *port = ct->port;
544 struct zfcp_adapter *adapter = port->adapter;
545
546 _zfcp_san_dbf_event_common_ct("rctc", fsf_req, port->d_id,
547 fc_host_port_id(adapter->scsi_host),
548 zfcp_sg_to_address(ct->resp),
549 ct->resp->length);
550}
551
552static inline void
553_zfcp_san_dbf_event_common_els(const char *tag, int level,
554 struct zfcp_fsf_req *fsf_req, u32 s_id,
555 u32 d_id, u8 ls_code, void *buffer, int buflen)
556{
557 struct zfcp_adapter *adapter = fsf_req->adapter;
558 struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
559 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
560 unsigned long flags;
561 int offset = 0;
562
563 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
564 do {
565 memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
566 if (offset == 0) {
567 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
568 rec->fsf_reqid = (unsigned long)fsf_req;
569 rec->fsf_seqno = fsf_req->seq_no;
570 rec->s_id = s_id;
571 rec->d_id = d_id;
572 rec->type.els.ls_code = ls_code;
573 buflen = min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD);
574 rec->type.els.payload_size = buflen;
575 memcpy(rec->type.els.payload,
576 buffer, min(buflen, ZFCP_DBF_ELS_PAYLOAD));
577 offset += min(buflen, ZFCP_DBF_ELS_PAYLOAD);
578 } else {
579 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
580 dump->total_size = buflen;
581 dump->offset = offset;
582 dump->size = min(buflen - offset,
583 (int)sizeof(struct zfcp_san_dbf_record)
584 - (int)sizeof(struct zfcp_dbf_dump));
585 memcpy(dump->data, buffer + offset, dump->size);
586 offset += dump->size;
587 }
588 debug_event(adapter->san_dbf, level,
589 rec, sizeof(struct zfcp_san_dbf_record));
590 } while (offset < buflen);
591 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
592}
593
594inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
595{
596 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
597
598 _zfcp_san_dbf_event_common_els("oels", 2, fsf_req,
599 fc_host_port_id(els->adapter->scsi_host),
600 els->d_id,
601 *(u8 *) zfcp_sg_to_address(els->req),
602 zfcp_sg_to_address(els->req),
603 els->req->length);
604}
605
606inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
607{
608 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
609
610 _zfcp_san_dbf_event_common_els("rels", 2, fsf_req, els->d_id,
611 fc_host_port_id(els->adapter->scsi_host),
612 *(u8 *) zfcp_sg_to_address(els->req),
613 zfcp_sg_to_address(els->resp),
614 els->resp->length);
615}
616
617inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
618{
619 struct zfcp_adapter *adapter = fsf_req->adapter;
620 struct fsf_status_read_buffer *status_buffer =
621 (struct fsf_status_read_buffer *)fsf_req->data;
622 int length = (int)status_buffer->length -
623 (int)((void *)&status_buffer->payload - (void *)status_buffer);
624
625 _zfcp_san_dbf_event_common_els("iels", 1, fsf_req, status_buffer->d_id,
626 fc_host_port_id(adapter->scsi_host),
627 *(u8 *) status_buffer->payload,
628 (void *)status_buffer->payload, length);
629}
630
631static int
632zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
633 char *out_buf, const char *in_buf)
634{
635 struct zfcp_san_dbf_record *rec = (struct zfcp_san_dbf_record *)in_buf;
636 char *buffer = NULL;
637 int buflen = 0, total = 0;
638 int len = 0;
639
640 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
641 return 0;
642
643 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
644 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
645 rec->fsf_reqid);
646 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
647 rec->fsf_seqno);
648 len += zfcp_dbf_view(out_buf + len, "s_id", "0x%06x", rec->s_id);
649 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", rec->d_id);
650
651 if (strncmp(rec->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
652 len += zfcp_dbf_view(out_buf + len, "cmd_req_code", "0x%04x",
653 rec->type.ct.type.request.cmd_req_code);
654 len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
655 rec->type.ct.type.request.revision);
656 len += zfcp_dbf_view(out_buf + len, "gs_type", "0x%02x",
657 rec->type.ct.type.request.gs_type);
658 len += zfcp_dbf_view(out_buf + len, "gs_subtype", "0x%02x",
659 rec->type.ct.type.request.gs_subtype);
660 len += zfcp_dbf_view(out_buf + len, "options", "0x%02x",
661 rec->type.ct.type.request.options);
662 len += zfcp_dbf_view(out_buf + len, "max_res_size", "0x%04x",
663 rec->type.ct.type.request.max_res_size);
664 total = rec->type.ct.payload_size;
665 buffer = rec->type.ct.payload;
666 buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
667 } else if (strncmp(rec->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
668 len += zfcp_dbf_view(out_buf + len, "cmd_rsp_code", "0x%04x",
669 rec->type.ct.type.response.cmd_rsp_code);
670 len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
671 rec->type.ct.type.response.revision);
672 len += zfcp_dbf_view(out_buf + len, "reason_code", "0x%02x",
673 rec->type.ct.type.response.reason_code);
674 len +=
675 zfcp_dbf_view(out_buf + len, "reason_code_expl", "0x%02x",
676 rec->type.ct.type.response.reason_code_expl);
677 len +=
678 zfcp_dbf_view(out_buf + len, "vendor_unique", "0x%02x",
679 rec->type.ct.type.response.vendor_unique);
680 total = rec->type.ct.payload_size;
681 buffer = rec->type.ct.payload;
682 buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
683 } else if (strncmp(rec->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
684 strncmp(rec->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
685 strncmp(rec->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
686 len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
687 rec->type.els.ls_code);
688 total = rec->type.els.payload_size;
689 buffer = rec->type.els.payload;
690 buflen = min(total, ZFCP_DBF_ELS_PAYLOAD);
691 }
692
693 len += zfcp_dbf_view_dump(out_buf + len, "payload",
694 buffer, buflen, 0, total);
695
696 if (buflen == total)
697 len += sprintf(out_buf + len, "\n");
698
699 return len;
700}
701
702struct debug_view zfcp_san_dbf_view = {
703 "structured",
704 NULL,
705 &zfcp_dbf_view_header,
706 &zfcp_san_dbf_view_format,
707 NULL,
708 NULL
709};
710
711static inline void
712_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
713 struct zfcp_adapter *adapter,
714 struct scsi_cmnd *scsi_cmnd,
715 struct zfcp_fsf_req *new_fsf_req)
716{
717 struct zfcp_fsf_req *fsf_req =
718 (struct zfcp_fsf_req *)scsi_cmnd->host_scribble;
719 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
720 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
721 unsigned long flags;
722 struct fcp_rsp_iu *fcp_rsp;
723 char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
724 int offset = 0, buflen = 0;
725
726 spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
727 do {
728 memset(rec, 0, sizeof(struct zfcp_scsi_dbf_record));
729 if (offset == 0) {
730 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
731 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
732 if (scsi_cmnd->device) {
733 rec->scsi_id = scsi_cmnd->device->id;
734 rec->scsi_lun = scsi_cmnd->device->lun;
735 }
736 rec->scsi_result = scsi_cmnd->result;
737 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
738 rec->scsi_serial = scsi_cmnd->serial_number;
739 memcpy(rec->scsi_opcode,
740 &scsi_cmnd->cmnd,
741 min((int)scsi_cmnd->cmd_len,
742 ZFCP_DBF_SCSI_OPCODE));
743 rec->scsi_retries = scsi_cmnd->retries;
744 rec->scsi_allowed = scsi_cmnd->allowed;
745 if (fsf_req != NULL) {
746 fcp_rsp = (struct fcp_rsp_iu *)
747 &(fsf_req->qtcb->bottom.io.fcp_rsp);
748 fcp_rsp_info =
749 zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
750 fcp_sns_info =
751 zfcp_get_fcp_sns_info_ptr(fcp_rsp);
752
753 rec->type.fcp.rsp_validity =
754 fcp_rsp->validity.value;
755 rec->type.fcp.rsp_scsi_status =
756 fcp_rsp->scsi_status;
757 rec->type.fcp.rsp_resid = fcp_rsp->fcp_resid;
758 if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
759 rec->type.fcp.rsp_code =
760 *(fcp_rsp_info + 3);
761 if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
762 buflen = min((int)fcp_rsp->fcp_sns_len,
763 ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
764 rec->type.fcp.sns_info_len = buflen;
765 memcpy(rec->type.fcp.sns_info,
766 fcp_sns_info,
767 min(buflen,
768 ZFCP_DBF_SCSI_FCP_SNS_INFO));
769 offset += min(buflen,
770 ZFCP_DBF_SCSI_FCP_SNS_INFO);
771 }
772
773 rec->fsf_reqid = (unsigned long)fsf_req;
774 rec->fsf_seqno = fsf_req->seq_no;
775 rec->fsf_issued = fsf_req->issued;
776 }
777 if (new_fsf_req != NULL) {
778 rec->type.new_fsf_req.fsf_reqid =
779 (unsigned long)
780 new_fsf_req;
781 rec->type.new_fsf_req.fsf_seqno =
782 new_fsf_req->seq_no;
783 rec->type.new_fsf_req.fsf_issued =
784 new_fsf_req->issued;
785 }
786 } else {
787 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
788 dump->total_size = buflen;
789 dump->offset = offset;
790 dump->size = min(buflen - offset,
791 (int)sizeof(struct
792 zfcp_scsi_dbf_record) -
793 (int)sizeof(struct zfcp_dbf_dump));
794 memcpy(dump->data, fcp_sns_info + offset, dump->size);
795 offset += dump->size;
796 }
797 debug_event(adapter->scsi_dbf, level,
798 rec, sizeof(struct zfcp_scsi_dbf_record));
799 } while (offset < buflen);
800 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
801}
802
803inline void
804zfcp_scsi_dbf_event_result(const char *tag, int level,
805 struct zfcp_adapter *adapter,
806 struct scsi_cmnd *scsi_cmnd)
807{
808 _zfcp_scsi_dbf_event_common("rslt",
809 tag, level, adapter, scsi_cmnd, NULL);
810}
811
812inline void
813zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
814 struct scsi_cmnd *scsi_cmnd,
815 struct zfcp_fsf_req *new_fsf_req)
816{
817 _zfcp_scsi_dbf_event_common("abrt",
818 tag, 1, adapter, scsi_cmnd, new_fsf_req);
819}
820
821inline void
822zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
823 struct scsi_cmnd *scsi_cmnd)
824{
825 struct zfcp_adapter *adapter = unit->port->adapter;
826
827 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
828 tag, 1, adapter, scsi_cmnd, NULL);
829}
830
831static int
832zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
833 char *out_buf, const char *in_buf)
834{
835 struct zfcp_scsi_dbf_record *rec =
836 (struct zfcp_scsi_dbf_record *)in_buf;
837 int len = 0;
838
839 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
840 return 0;
841
842 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
843 len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
844 len += zfcp_dbf_view(out_buf + len, "scsi_id", "0x%08x", rec->scsi_id);
845 len += zfcp_dbf_view(out_buf + len, "scsi_lun", "0x%08x",
846 rec->scsi_lun);
847 len += zfcp_dbf_view(out_buf + len, "scsi_result", "0x%08x",
848 rec->scsi_result);
849 len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
850 rec->scsi_cmnd);
851 len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
852 rec->scsi_serial);
853 len += zfcp_dbf_view_dump(out_buf + len, "scsi_opcode",
854 rec->scsi_opcode,
855 ZFCP_DBF_SCSI_OPCODE,
856 0, ZFCP_DBF_SCSI_OPCODE);
857 len += zfcp_dbf_view(out_buf + len, "scsi_retries", "0x%02x",
858 rec->scsi_retries);
859 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
860 rec->scsi_allowed);
861 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
862 rec->fsf_reqid);
863 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
864 rec->fsf_seqno);
865 len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
866 if (strncmp(rec->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
867 len +=
868 zfcp_dbf_view(out_buf + len, "fcp_rsp_validity", "0x%02x",
869 rec->type.fcp.rsp_validity);
870 len +=
871 zfcp_dbf_view(out_buf + len, "fcp_rsp_scsi_status",
872 "0x%02x", rec->type.fcp.rsp_scsi_status);
873 len +=
874 zfcp_dbf_view(out_buf + len, "fcp_rsp_resid", "0x%08x",
875 rec->type.fcp.rsp_resid);
876 len +=
877 zfcp_dbf_view(out_buf + len, "fcp_rsp_code", "0x%08x",
878 rec->type.fcp.rsp_code);
879 len +=
880 zfcp_dbf_view(out_buf + len, "fcp_sns_info_len", "0x%08x",
881 rec->type.fcp.sns_info_len);
882 len +=
883 zfcp_dbf_view_dump(out_buf + len, "fcp_sns_info",
884 rec->type.fcp.sns_info,
885 min((int)rec->type.fcp.sns_info_len,
886 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
887 rec->type.fcp.sns_info_len);
888 } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
889 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx",
890 rec->type.new_fsf_req.fsf_reqid);
891 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x",
892 rec->type.new_fsf_req.fsf_seqno);
893 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
894 rec->type.new_fsf_req.fsf_issued);
895 } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) ||
896 (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) {
897 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx",
898 rec->type.new_fsf_req.fsf_reqid);
899 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x",
900 rec->type.new_fsf_req.fsf_seqno);
901 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
902 rec->type.new_fsf_req.fsf_issued);
903 }
904
905 len += sprintf(out_buf + len, "\n");
906
907 return len;
908}
909
910struct debug_view zfcp_scsi_dbf_view = {
911 "structured",
912 NULL,
913 &zfcp_dbf_view_header,
914 &zfcp_scsi_dbf_view_format,
915 NULL,
916 NULL
917};
918
919/**
920 * zfcp_adapter_debug_register - registers debug feature for an adapter
921 * @adapter: pointer to adapter for which debug features should be registered
922 * return: -ENOMEM on error, 0 otherwise
923 */
924int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
925{
926 char dbf_name[DEBUG_MAX_NAME_LEN];
927
928 /* debug feature area which records recovery activity */
929 spin_lock_init(&adapter->erp_dbf_lock);
930 sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter));
931 adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2,
932 sizeof(struct zfcp_erp_dbf_record));
933 if (!adapter->erp_dbf)
934 goto failed;
935 debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
936 debug_set_level(adapter->erp_dbf, 3);
937
938 /* debug feature area which records HBA (FSF and QDIO) conditions */
939 spin_lock_init(&adapter->hba_dbf_lock);
940 sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
941 adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
942 sizeof(struct zfcp_hba_dbf_record));
943 if (!adapter->hba_dbf)
944 goto failed;
945 debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view);
946 debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view);
947 debug_set_level(adapter->hba_dbf, 3);
948
949 /* debug feature area which records SAN command failures and recovery */
950 spin_lock_init(&adapter->san_dbf_lock);
951 sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter));
952 adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
953 sizeof(struct zfcp_san_dbf_record));
954 if (!adapter->san_dbf)
955 goto failed;
956 debug_register_view(adapter->san_dbf, &debug_hex_ascii_view);
957 debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view);
958 debug_set_level(adapter->san_dbf, 6);
959
960 /* debug feature area which records SCSI command failures and recovery */
961 spin_lock_init(&adapter->scsi_dbf_lock);
962 sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter));
963 adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
964 sizeof(struct zfcp_scsi_dbf_record));
965 if (!adapter->scsi_dbf)
966 goto failed;
967 debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view);
968 debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view);
969 debug_set_level(adapter->scsi_dbf, 3);
970
971 return 0;
972
973 failed:
974 zfcp_adapter_debug_unregister(adapter);
975
976 return -ENOMEM;
977}
978
979/**
980 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
981 * @adapter: pointer to adapter for which debug features should be unregistered
982 */
983void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
984{
985 debug_unregister(adapter->scsi_dbf);
986 debug_unregister(adapter->san_dbf);
987 debug_unregister(adapter->hba_dbf);
988 debug_unregister(adapter->erp_dbf);
989 adapter->scsi_dbf = NULL;
990 adapter->san_dbf = NULL;
991 adapter->hba_dbf = NULL;
992 adapter->erp_dbf = NULL;
993}
994
995#undef ZFCP_LOG_AREA
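
The registration pattern used by zfcp_adapter_debug_register() above is the generic s390 debug-feature interface: allocate an area with debug_register(), attach one or more views, pick a level, log with debug_event()/debug_text_event(), and tear everything down with debug_unregister(). A minimal module-style sketch of that pattern, using only the calls that appear in this file (the "demo" names are illustrative and not part of zfcp):

#include <linux/module.h>
#include <linux/init.h>
#include <asm/debug.h>

static debug_info_t *demo_dbf;

static int __init demo_dbf_init(void)
{
	/* 4 pages per area, 1 area, 16-byte records -- same shape as the zfcp areas */
	demo_dbf = debug_register("demo_dbf", 4, 1, 16);
	if (!demo_dbf)
		return -ENOMEM;
	debug_register_view(demo_dbf, &debug_hex_ascii_view);
	debug_set_level(demo_dbf, 3);
	debug_text_event(demo_dbf, 1, "init");
	return 0;
}

static void __exit demo_dbf_exit(void)
{
	debug_unregister(demo_dbf);
}

module_init(demo_dbf_init);
module_exit(demo_dbf_exit);
MODULE_LICENSE("GPL");
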
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 455e902533a9..d81b737d68cc 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -66,7 +66,7 @@
66/********************* GENERAL DEFINES *********************************/ 66/********************* GENERAL DEFINES *********************************/
67 67
68/* zfcp version number, it consists of major, minor, and patch-level number */ 68/* zfcp version number, it consists of major, minor, and patch-level number */
69#define ZFCP_VERSION "4.3.0" 69#define ZFCP_VERSION "4.5.0"
70 70
71/** 71/**
72 * zfcp_sg_to_address - determine kernel address from struct scatterlist 72 * zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -154,13 +154,17 @@ typedef u32 scsi_lun_t;
154#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 154#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
155#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 155#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
156 156
157/* Retry 5 times every 2 seconds, then every minute */
158#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
159#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
160#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
161
157/* timeout value for "default timer" for fsf requests */ 162/* timeout value for "default timer" for fsf requests */
158#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 163#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
159 164
160/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 165/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
161 166
162typedef unsigned long long wwn_t; 167typedef unsigned long long wwn_t;
163typedef unsigned int fc_id_t;
164typedef unsigned long long fcp_lun_t; 168typedef unsigned long long fcp_lun_t;
165/* data length field may be at variable position in FCP-2 FCP_CMND IU */ 169/* data length field may be at variable position in FCP-2 FCP_CMND IU */
166typedef unsigned int fcp_dl_t; 170typedef unsigned int fcp_dl_t;
@@ -281,6 +285,171 @@ struct fcp_logo {
281} __attribute__((packed)); 285} __attribute__((packed));
282 286
283/* 287/*
288 * DBF stuff
289 */
290#define ZFCP_DBF_TAG_SIZE 4
291
292struct zfcp_dbf_dump {
293 u8 tag[ZFCP_DBF_TAG_SIZE];
294 u32 total_size; /* size of total dump data */
295 u32 offset; /* how much data has already been dumped */
296 u32 size; /* how much data comes with this record */
297 u8 data[]; /* dump data */
298} __attribute__ ((packed));
299
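
Payloads larger than one trace record are split by the event functions in zfcp_dbf.c into follow-on records tagged "dump"; total_size, offset and size describe where each chunk belongs. A minimal sketch of how a trace consumer could stitch such chunks back together, with the structure copied locally so the snippet stands alone (the reassembly helper itself is illustrative and not part of the driver):

#include <stdint.h>
#include <string.h>

#define ZFCP_DBF_TAG_SIZE 4

struct zfcp_dbf_dump {
	uint8_t tag[ZFCP_DBF_TAG_SIZE];
	uint32_t total_size;	/* size of total dump data */
	uint32_t offset;	/* how much data has already been dumped */
	uint32_t size;		/* how much data comes with this record */
	uint8_t data[];		/* dump data */
} __attribute__ ((packed));

/*
 * Copy one "dump" chunk into a reassembly buffer. Chunks are emitted in
 * order, so the payload is complete once the returned byte count equals
 * chunk->total_size. Returns 0 if the chunk does not fit.
 */
uint32_t dump_reassemble(const struct zfcp_dbf_dump *chunk,
			 uint8_t *out, uint32_t outlen)
{
	if (chunk->offset + chunk->size > chunk->total_size ||
	    chunk->total_size > outlen)
		return 0;
	memcpy(out + chunk->offset, chunk->data, chunk->size);
	return chunk->offset + chunk->size;
}
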
300/* FIXME: to be inflated when reworking the erp dbf */
301struct zfcp_erp_dbf_record {
302 u8 dummy[16];
303} __attribute__ ((packed));
304
305struct zfcp_hba_dbf_record_response {
306 u32 fsf_command;
307 u64 fsf_reqid;
308 u32 fsf_seqno;
309 u64 fsf_issued;
310 u32 fsf_prot_status;
311 u32 fsf_status;
312 u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
313 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
314 u32 fsf_req_status;
315 u8 sbal_first;
316 u8 sbal_curr;
317 u8 sbal_last;
318 u8 pool;
319 u64 erp_action;
320 union {
321 struct {
322 u64 scsi_cmnd;
323 u64 scsi_serial;
324 } send_fcp;
325 struct {
326 u64 wwpn;
327 u32 d_id;
328 u32 port_handle;
329 } port;
330 struct {
331 u64 wwpn;
332 u64 fcp_lun;
333 u32 port_handle;
334 u32 lun_handle;
335 } unit;
336 struct {
337 u32 d_id;
338 u8 ls_code;
339 } send_els;
340 } data;
341} __attribute__ ((packed));
342
343struct zfcp_hba_dbf_record_status {
344 u8 failed;
345 u32 status_type;
346 u32 status_subtype;
347 struct fsf_queue_designator
348 queue_designator;
349 u32 payload_size;
350#define ZFCP_DBF_UNSOL_PAYLOAD 80
351#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32
352#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56
353#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32)
354 u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
355} __attribute__ ((packed));
356
357struct zfcp_hba_dbf_record_qdio {
358 u32 status;
359 u32 qdio_error;
360 u32 siga_error;
361 u8 sbal_index;
362 u8 sbal_count;
363} __attribute__ ((packed));
364
365struct zfcp_hba_dbf_record {
366 u8 tag[ZFCP_DBF_TAG_SIZE];
367 u8 tag2[ZFCP_DBF_TAG_SIZE];
368 union {
369 struct zfcp_hba_dbf_record_response response;
370 struct zfcp_hba_dbf_record_status status;
371 struct zfcp_hba_dbf_record_qdio qdio;
372 } type;
373} __attribute__ ((packed));
374
375struct zfcp_san_dbf_record_ct {
376 union {
377 struct {
378 u16 cmd_req_code;
379 u8 revision;
380 u8 gs_type;
381 u8 gs_subtype;
382 u8 options;
383 u16 max_res_size;
384 } request;
385 struct {
386 u16 cmd_rsp_code;
387 u8 revision;
388 u8 reason_code;
389 u8 reason_code_expl;
390 u8 vendor_unique;
391 } response;
392 } type;
393 u32 payload_size;
394#define ZFCP_DBF_CT_PAYLOAD 24
395 u8 payload[ZFCP_DBF_CT_PAYLOAD];
396} __attribute__ ((packed));
397
398struct zfcp_san_dbf_record_els {
399 u8 ls_code;
400 u32 payload_size;
401#define ZFCP_DBF_ELS_PAYLOAD 32
402#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
403 u8 payload[ZFCP_DBF_ELS_PAYLOAD];
404} __attribute__ ((packed));
405
406struct zfcp_san_dbf_record {
407 u8 tag[ZFCP_DBF_TAG_SIZE];
408 u64 fsf_reqid;
409 u32 fsf_seqno;
410 u32 s_id;
411 u32 d_id;
412 union {
413 struct zfcp_san_dbf_record_ct ct;
414 struct zfcp_san_dbf_record_els els;
415 } type;
416} __attribute__ ((packed));
417
418struct zfcp_scsi_dbf_record {
419 u8 tag[ZFCP_DBF_TAG_SIZE];
420 u8 tag2[ZFCP_DBF_TAG_SIZE];
421 u32 scsi_id;
422 u32 scsi_lun;
423 u32 scsi_result;
424 u64 scsi_cmnd;
425 u64 scsi_serial;
426#define ZFCP_DBF_SCSI_OPCODE 16
427 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
428 u8 scsi_retries;
429 u8 scsi_allowed;
430 u64 fsf_reqid;
431 u32 fsf_seqno;
432 u64 fsf_issued;
433 union {
434 struct {
435 u64 fsf_reqid;
436 u32 fsf_seqno;
437 u64 fsf_issued;
438 } new_fsf_req;
439 struct {
440 u8 rsp_validity;
441 u8 rsp_scsi_status;
442 u32 rsp_resid;
443 u8 rsp_code;
444#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16
445#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256
446 u32 sns_info_len;
447 u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
448 } fcp;
449 } type;
450} __attribute__ ((packed));
451
452/*
284 * FC-FS stuff 453 * FC-FS stuff
285 */ 454 */
286#define R_A_TOV 10 /* seconds */ 455#define R_A_TOV 10 /* seconds */
@@ -339,34 +508,6 @@ struct zfcp_rc_entry {
339 */ 508 */
340#define ZFCP_CT_TIMEOUT (3 * R_A_TOV) 509#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
341 510
342
343/***************** S390 DEBUG FEATURE SPECIFIC DEFINES ***********************/
344
345/* debug feature entries per adapter */
346#define ZFCP_ERP_DBF_INDEX 1
347#define ZFCP_ERP_DBF_AREAS 2
348#define ZFCP_ERP_DBF_LENGTH 16
349#define ZFCP_ERP_DBF_LEVEL 3
350#define ZFCP_ERP_DBF_NAME "zfcperp"
351
352#define ZFCP_CMD_DBF_INDEX 2
353#define ZFCP_CMD_DBF_AREAS 1
354#define ZFCP_CMD_DBF_LENGTH 8
355#define ZFCP_CMD_DBF_LEVEL 3
356#define ZFCP_CMD_DBF_NAME "zfcpcmd"
357
358#define ZFCP_ABORT_DBF_INDEX 2
359#define ZFCP_ABORT_DBF_AREAS 1
360#define ZFCP_ABORT_DBF_LENGTH 8
361#define ZFCP_ABORT_DBF_LEVEL 6
362#define ZFCP_ABORT_DBF_NAME "zfcpabt"
363
364#define ZFCP_IN_ELS_DBF_INDEX 2
365#define ZFCP_IN_ELS_DBF_AREAS 1
366#define ZFCP_IN_ELS_DBF_LENGTH 8
367#define ZFCP_IN_ELS_DBF_LEVEL 6
368#define ZFCP_IN_ELS_DBF_NAME "zfcpels"
369
370/******************** LOGGING MACROS AND DEFINES *****************************/ 511/******************** LOGGING MACROS AND DEFINES *****************************/
371 512
372/* 513/*
@@ -501,6 +642,7 @@ do { \
501#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 642#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
502#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 643#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
503#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 644#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
645#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
504 646
505#define ZFCP_STATUS_ADAPTER_SCSI_UP \ 647#define ZFCP_STATUS_ADAPTER_SCSI_UP \
506 (ZFCP_STATUS_COMMON_UNBLOCKED | \ 648 (ZFCP_STATUS_COMMON_UNBLOCKED | \
@@ -635,45 +777,6 @@ struct zfcp_adapter_mempool {
635 mempool_t *data_gid_pn; 777 mempool_t *data_gid_pn;
636}; 778};
637 779
638struct zfcp_exchange_config_data{
639};
640
641struct zfcp_open_port {
642 struct zfcp_port *port;
643};
644
645struct zfcp_close_port {
646 struct zfcp_port *port;
647};
648
649struct zfcp_open_unit {
650 struct zfcp_unit *unit;
651};
652
653struct zfcp_close_unit {
654 struct zfcp_unit *unit;
655};
656
657struct zfcp_close_physical_port {
658 struct zfcp_port *port;
659};
660
661struct zfcp_send_fcp_command_task {
662 struct zfcp_fsf_req *fsf_req;
663 struct zfcp_unit *unit;
664 struct scsi_cmnd *scsi_cmnd;
665 unsigned long start_jiffies;
666};
667
668struct zfcp_send_fcp_command_task_management {
669 struct zfcp_unit *unit;
670};
671
672struct zfcp_abort_fcp_command {
673 struct zfcp_fsf_req *fsf_req;
674 struct zfcp_unit *unit;
675};
676
677/* 780/*
678 * header for CT_IU 781 * header for CT_IU
679 */ 782 */
@@ -702,7 +805,7 @@ struct ct_iu_gid_pn_req {
702/* FS_ACC IU and data unit for GID_PN nameserver request */ 805/* FS_ACC IU and data unit for GID_PN nameserver request */
703struct ct_iu_gid_pn_resp { 806struct ct_iu_gid_pn_resp {
704 struct ct_hdr header; 807 struct ct_hdr header;
705 fc_id_t d_id; 808 u32 d_id;
706} __attribute__ ((packed)); 809} __attribute__ ((packed));
707 810
708typedef void (*zfcp_send_ct_handler_t)(unsigned long); 811typedef void (*zfcp_send_ct_handler_t)(unsigned long);
@@ -768,7 +871,7 @@ typedef void (*zfcp_send_els_handler_t)(unsigned long);
768struct zfcp_send_els { 871struct zfcp_send_els {
769 struct zfcp_adapter *adapter; 872 struct zfcp_adapter *adapter;
770 struct zfcp_port *port; 873 struct zfcp_port *port;
771 fc_id_t d_id; 874 u32 d_id;
772 struct scatterlist *req; 875 struct scatterlist *req;
773 struct scatterlist *resp; 876 struct scatterlist *resp;
774 unsigned int req_count; 877 unsigned int req_count;
@@ -781,33 +884,6 @@ struct zfcp_send_els {
781 int status; 884 int status;
782}; 885};
783 886
784struct zfcp_status_read {
785 struct fsf_status_read_buffer *buffer;
786};
787
788struct zfcp_fsf_done {
789 struct completion *complete;
790 int status;
791};
792
793/* request specific data */
794union zfcp_req_data {
795 struct zfcp_exchange_config_data exchange_config_data;
796 struct zfcp_open_port open_port;
797 struct zfcp_close_port close_port;
798 struct zfcp_open_unit open_unit;
799 struct zfcp_close_unit close_unit;
800 struct zfcp_close_physical_port close_physical_port;
801 struct zfcp_send_fcp_command_task send_fcp_command_task;
802 struct zfcp_send_fcp_command_task_management
803 send_fcp_command_task_management;
804 struct zfcp_abort_fcp_command abort_fcp_command;
805 struct zfcp_send_ct *send_ct;
806 struct zfcp_send_els *send_els;
807 struct zfcp_status_read status_read;
808 struct fsf_qtcb_bottom_port *port_data;
809};
810
811struct zfcp_qdio_queue { 887struct zfcp_qdio_queue {
812 struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ 888 struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
813 u8 free_index; /* index of next free bfr 889 u8 free_index; /* index of next free bfr
@@ -838,21 +914,19 @@ struct zfcp_adapter {
838 atomic_t refcount; /* reference count */ 914 atomic_t refcount; /* reference count */
839 wait_queue_head_t remove_wq; /* can be used to wait for 915 wait_queue_head_t remove_wq; /* can be used to wait for
840 refcount drop to zero */ 916 refcount drop to zero */
841 wwn_t wwnn; /* WWNN */
842 wwn_t wwpn; /* WWPN */
843 fc_id_t s_id; /* N_Port ID */
844 wwn_t peer_wwnn; /* P2P peer WWNN */ 917 wwn_t peer_wwnn; /* P2P peer WWNN */
845 wwn_t peer_wwpn; /* P2P peer WWPN */ 918 wwn_t peer_wwpn; /* P2P peer WWPN */
846 fc_id_t peer_d_id; /* P2P peer D_ID */ 919 u32 peer_d_id; /* P2P peer D_ID */
920 wwn_t physical_wwpn; /* WWPN of physical port */
921 u32 physical_s_id; /* local FC port ID */
847 struct ccw_device *ccw_device; /* S/390 ccw device */ 922 struct ccw_device *ccw_device; /* S/390 ccw device */
848 u8 fc_service_class; 923 u8 fc_service_class;
849 u32 fc_topology; /* FC topology */ 924 u32 fc_topology; /* FC topology */
850 u32 fc_link_speed; /* FC interface speed */
851 u32 hydra_version; /* Hydra version */ 925 u32 hydra_version; /* Hydra version */
852 u32 fsf_lic_version; 926 u32 fsf_lic_version;
853 u32 supported_features;/* of FCP channel */ 927 u32 adapter_features; /* FCP channel features */
928 u32 connection_features; /* host connection features */
854 u32 hardware_version; /* of FCP channel */ 929 u32 hardware_version; /* of FCP channel */
855 u8 serial_number[32]; /* of hardware */
856 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 930 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
857 unsigned short scsi_host_no; /* Assigned host number */ 931 unsigned short scsi_host_no; /* Assigned host number */
858 unsigned char name[9]; 932 unsigned char name[9];
@@ -889,11 +963,18 @@ struct zfcp_adapter {
889 u32 erp_low_mem_count; /* nr of erp actions waiting 963 u32 erp_low_mem_count; /* nr of erp actions waiting
890 for memory */ 964 for memory */
891 struct zfcp_port *nameserver_port; /* adapter's nameserver */ 965 struct zfcp_port *nameserver_port; /* adapter's nameserver */
892 debug_info_t *erp_dbf; /* S/390 debug features */ 966 debug_info_t *erp_dbf;
893 debug_info_t *abort_dbf; 967 debug_info_t *hba_dbf;
894 debug_info_t *in_els_dbf; 968 debug_info_t *san_dbf; /* debug feature areas */
895 debug_info_t *cmd_dbf; 969 debug_info_t *scsi_dbf;
896 spinlock_t dbf_lock; 970 spinlock_t erp_dbf_lock;
971 spinlock_t hba_dbf_lock;
972 spinlock_t san_dbf_lock;
973 spinlock_t scsi_dbf_lock;
974 struct zfcp_erp_dbf_record erp_dbf_buf;
975 struct zfcp_hba_dbf_record hba_dbf_buf;
976 struct zfcp_san_dbf_record san_dbf_buf;
977 struct zfcp_scsi_dbf_record scsi_dbf_buf;
897 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 978 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
898 struct qdio_initialize qdio_init_data; /* for qdio_establish */ 979 struct qdio_initialize qdio_init_data; /* for qdio_establish */
899 struct device generic_services; /* directory for WKA ports */ 980 struct device generic_services; /* directory for WKA ports */
@@ -919,7 +1000,7 @@ struct zfcp_port {
919 atomic_t status; /* status of this remote port */ 1000 atomic_t status; /* status of this remote port */
920 wwn_t wwnn; /* WWNN if known */ 1001 wwn_t wwnn; /* WWNN if known */
921 wwn_t wwpn; /* WWPN */ 1002 wwn_t wwpn; /* WWPN */
922 fc_id_t d_id; /* D_ID */ 1003 u32 d_id; /* D_ID */
923 u32 handle; /* handle assigned by FSF */ 1004 u32 handle; /* handle assigned by FSF */
924 struct zfcp_erp_action erp_action; /* pending error recovery */ 1005 struct zfcp_erp_action erp_action; /* pending error recovery */
925 atomic_t erp_counter; 1006 atomic_t erp_counter;
@@ -963,11 +1044,13 @@ struct zfcp_fsf_req {
963 u32 fsf_command; /* FSF Command copy */ 1044 u32 fsf_command; /* FSF Command copy */
964 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 1045 struct fsf_qtcb *qtcb; /* address of associated QTCB */
965 u32 seq_no; /* Sequence number of request */ 1046 u32 seq_no; /* Sequence number of request */
966 union zfcp_req_data data; /* Info fields of request */ 1047 unsigned long data; /* private data of request */
967 struct zfcp_erp_action *erp_action; /* used if this request is 1048 struct zfcp_erp_action *erp_action; /* used if this request is
968 issued on behalf of erp */ 1049 issued on behalf of erp */
969 mempool_t *pool; /* used if request was allocated 1051 mempool_t *pool; /* used if request was allocated
970 from emergency pool */ 1051 from emergency pool */
1052 unsigned long long issued; /* request sent time (STCK) */
1053 struct zfcp_unit *unit;
971}; 1054};
972 1055
973typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*); 1056typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
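Editor's note on the zfcp_def.h hunk above: the per-command union zfcp_req_data is dropped and struct zfcp_fsf_req now carries a single "unsigned long data" word; the issuing paths later in zfcp_fsf.c store their own pointer there (for example the status-read buffer) and the handlers cast it back. The following is a minimal, self-contained sketch of that pattern only -- struct status_buffer, struct request and the function names are simplified stand-ins, not zfcp code, and like the driver it assumes unsigned long is wide enough to hold a pointer.

#include <stdio.h>

/* simplified stand-ins; not the zfcp structures */
struct status_buffer {
	int status_type;
};

struct request {
	unsigned long data;	/* private data word (replaces the old union) */
};

/* issuer: park the buffer pointer in the request's data word */
static void issue_status_read(struct request *req, struct status_buffer *buf)
{
	/* assumes unsigned long is at least pointer-sized, as the driver does */
	req->data = (unsigned long) buf;
}

/* completion handler: cast the data word back to the expected type */
static void status_read_handler(struct request *req)
{
	struct status_buffer *buf = (struct status_buffer *) req->data;

	printf("status_type=%d\n", buf->status_type);
}

int main(void)
{
	struct status_buffer buf = { .status_type = 1 };
	struct request req = { 0 };

	issue_status_read(&req, &buf);
	status_read_handler(&req);
	return 0;
}

The trade-off is one small field instead of a union that must know every user; type safety moves into the casts at each issue/handler pair.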
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index cb4f612550ba..023f4e558ae4 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -82,6 +82,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
82static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 82static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
83static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 83static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
84static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *); 84static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
85static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *);
85static int zfcp_erp_adapter_strategy_open_fsf_statusread( 86static int zfcp_erp_adapter_strategy_open_fsf_statusread(
86 struct zfcp_erp_action *); 87 struct zfcp_erp_action *);
87 88
@@ -345,13 +346,13 @@ zfcp_erp_adisc(struct zfcp_port *port)
345 346
346 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports 347 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
347 without FC-AL-2 capability, so we don't set it */ 348 without FC-AL-2 capability, so we don't set it */
348 adisc->wwpn = adapter->wwpn; 349 adisc->wwpn = fc_host_port_name(adapter->scsi_host);
349 adisc->wwnn = adapter->wwnn; 350 adisc->wwnn = fc_host_node_name(adapter->scsi_host);
350 adisc->nport_id = adapter->s_id; 351 adisc->nport_id = fc_host_port_id(adapter->scsi_host);
351 ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x " 352 ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x "
352 "(wwpn=0x%016Lx, wwnn=0x%016Lx, " 353 "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
353 "hard_nport_id=0x%08x, nport_id=0x%08x)\n", 354 "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
354 adapter->s_id, send_els->d_id, (wwn_t) adisc->wwpn, 355 adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
355 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 356 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
356 adisc->nport_id); 357 adisc->nport_id);
357 358
@@ -404,7 +405,7 @@ zfcp_erp_adisc_handler(unsigned long data)
404 struct zfcp_send_els *send_els; 405 struct zfcp_send_els *send_els;
405 struct zfcp_port *port; 406 struct zfcp_port *port;
406 struct zfcp_adapter *adapter; 407 struct zfcp_adapter *adapter;
407 fc_id_t d_id; 408 u32 d_id;
408 struct zfcp_ls_adisc_acc *adisc; 409 struct zfcp_ls_adisc_acc *adisc;
409 410
410 send_els = (struct zfcp_send_els *) data; 411 send_els = (struct zfcp_send_els *) data;
@@ -435,9 +436,9 @@ zfcp_erp_adisc_handler(unsigned long data)
435 ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id " 436 ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id "
436 "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, " 437 "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
437 "hard_nport_id=0x%08x, nport_id=0x%08x)\n", 438 "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
438 d_id, adapter->s_id, (wwn_t) adisc->wwpn, 439 d_id, fc_host_port_id(adapter->scsi_host),
439 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 440 (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
440 adisc->nport_id); 441 adisc->hard_nport_id, adisc->nport_id);
441 442
442 /* set wwnn for port */ 443 /* set wwnn for port */
443 if (port->wwnn == 0) 444 if (port->wwnn == 0)
@@ -886,7 +887,7 @@ static int
886zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) 887zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
887{ 888{
888 int retval = 0; 889 int retval = 0;
889 struct zfcp_fsf_req *fsf_req; 890 struct zfcp_fsf_req *fsf_req = NULL;
890 struct zfcp_adapter *adapter = erp_action->adapter; 891 struct zfcp_adapter *adapter = erp_action->adapter;
891 892
892 if (erp_action->fsf_req) { 893 if (erp_action->fsf_req) {
@@ -896,7 +897,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
896 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) 897 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
897 if (fsf_req == erp_action->fsf_req) 898 if (fsf_req == erp_action->fsf_req)
898 break; 899 break;
899 if (fsf_req == erp_action->fsf_req) { 900 if (fsf_req && (fsf_req->erp_action == erp_action)) {
900 /* fsf_req still exists */ 901 /* fsf_req still exists */
901 debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); 902 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
902 debug_event(adapter->erp_dbf, 3, &fsf_req, 903 debug_event(adapter->erp_dbf, 3, &fsf_req,
@@ -2258,16 +2259,21 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2258static int 2259static int
2259zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) 2260zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2260{ 2261{
2261 int retval; 2262 int xconfig, xport;
2263
2264 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2265 &erp_action->adapter->status)) {
2266 zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2267 atomic_set(&erp_action->adapter->erp_counter, 0);
2268 return ZFCP_ERP_FAILED;
2269 }
2262 2270
2263 /* do 'exchange configuration data' */ 2271 xconfig = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
2264 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2272 xport = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2265 if (retval == ZFCP_ERP_FAILED) 2273 if ((xconfig == ZFCP_ERP_FAILED) || (xport == ZFCP_ERP_FAILED))
2266 return retval; 2274 return ZFCP_ERP_FAILED;
2267 2275
2268 /* start the desired number of Status Reads */ 2276 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2269 retval = zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2270 return retval;
2271} 2277}
2272 2278
2273/* 2279/*
@@ -2291,7 +2297,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2291 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 2297 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2292 &adapter->status); 2298 &adapter->status);
2293 ZFCP_LOG_DEBUG("Doing exchange config data\n"); 2299 ZFCP_LOG_DEBUG("Doing exchange config data\n");
2300 write_lock(&adapter->erp_lock);
2294 zfcp_erp_action_to_running(erp_action); 2301 zfcp_erp_action_to_running(erp_action);
2302 write_unlock(&adapter->erp_lock);
2295 zfcp_erp_timeout_init(erp_action); 2303 zfcp_erp_timeout_init(erp_action);
2296 if (zfcp_fsf_exchange_config_data(erp_action)) { 2304 if (zfcp_fsf_exchange_config_data(erp_action)) {
2297 retval = ZFCP_ERP_FAILED; 2305 retval = ZFCP_ERP_FAILED;
@@ -2348,6 +2356,76 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2348 return retval; 2356 return retval;
2349} 2357}
2350 2358
2359static int
2360zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2361{
2362 int retval = ZFCP_ERP_SUCCEEDED;
2363 int retries;
2364 int sleep;
2365 struct zfcp_adapter *adapter = erp_action->adapter;
2366
2367 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2368
2369 for (retries = 0; ; retries++) {
2370 ZFCP_LOG_DEBUG("Doing exchange port data\n");
2371 zfcp_erp_action_to_running(erp_action);
2372 zfcp_erp_timeout_init(erp_action);
2373 if (zfcp_fsf_exchange_port_data(erp_action, adapter, NULL)) {
2374 retval = ZFCP_ERP_FAILED;
2375 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
2376 ZFCP_LOG_INFO("error: initiation of exchange of "
2377 "port data failed for adapter %s\n",
2378 zfcp_get_busid_by_adapter(adapter));
2379 break;
2380 }
2381 debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
2382 ZFCP_LOG_DEBUG("Xchange underway\n");
2383
2384 /*
2385 * Why this works:
2386 * Both the normal completion handler as well as the timeout
2387 * handler will do an 'up' when the 'exchange port data'
2388 * request completes or times out. Thus, the signal to go on
2389 * won't be lost utilizing this semaphore.
2390 * Furthermore, this 'adapter_reopen' action is
2391 * guaranteed to be the only action being there (highest action
2392 * which prevents other actions from being created).
2393 * Resulting from that, the wake signal recognized here
2394 * _must_ be the one belonging to the 'exchange port
2395 * data' request.
2396 */
2397 down(&adapter->erp_ready_sem);
2398 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2399 ZFCP_LOG_INFO("error: exchange of port data "
2400 "for adapter %s timed out\n",
2401 zfcp_get_busid_by_adapter(adapter));
2402 break;
2403 }
2404
2405 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2406 &adapter->status))
2407 break;
2408
2409 ZFCP_LOG_DEBUG("host connection still initialising... "
2410 "waiting and retrying...\n");
2411 /* sleep a little bit before retry */
2412 sleep = retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES ?
2413 ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP :
2414 ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP;
2415 msleep(jiffies_to_msecs(sleep));
2416 }
2417
2418 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2419 &adapter->status)) {
2420 ZFCP_LOG_INFO("error: exchange of port data for "
2421 "adapter %s failed\n",
2422 zfcp_get_busid_by_adapter(adapter));
2423 retval = ZFCP_ERP_FAILED;
2424 }
2425
2426 return retval;
2427}
2428
2351/* 2429/*
2352 * function: 2430 * function:
2353 * 2431 *
@@ -3194,11 +3272,19 @@ zfcp_erp_action_enqueue(int action,
3194 /* fall through !!! */ 3272 /* fall through !!! */
3195 3273
3196 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 3274 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3197 if (atomic_test_mask 3275 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
3198 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status) 3276 &port->status)) {
3199 && port->erp_action.action == 3277 if (port->erp_action.action !=
3200 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) { 3278 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
3201 debug_text_event(adapter->erp_dbf, 4, "pf_actenq_drp"); 3279 ZFCP_LOG_INFO("dropped erp action %i (port "
3280 "0x%016Lx, action in use: %i)\n",
3281 action, port->wwpn,
3282 port->erp_action.action);
3283 debug_text_event(adapter->erp_dbf, 4,
3284 "pf_actenq_drp");
3285 } else
3286 debug_text_event(adapter->erp_dbf, 4,
3287 "pf_actenq_drpcp");
3202 debug_event(adapter->erp_dbf, 4, &port->wwpn, 3288 debug_event(adapter->erp_dbf, 4, &port->wwpn,
3203 sizeof (wwn_t)); 3289 sizeof (wwn_t));
3204 goto out; 3290 goto out;
@@ -3589,6 +3675,9 @@ zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter)
3589 struct zfcp_port *port; 3675 struct zfcp_port *port;
3590 unsigned long flags; 3676 unsigned long flags;
3591 3677
3678 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
3679 return;
3680
3592 debug_text_event(adapter->erp_dbf, 3, "a_access_recover"); 3681 debug_text_event(adapter->erp_dbf, 3, "a_access_recover");
3593 debug_event(adapter->erp_dbf, 3, &adapter->name, 8); 3682 debug_event(adapter->erp_dbf, 3, &adapter->name, 8);
3594 3683
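Editor's note on the zfcp_erp.c hunks above: the new zfcp_erp_adapter_strategy_open_fsf_xport() keeps reissuing the exchange-port-data request while the link stays unplugged, sleeping a short interval for the first ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES attempts and a longer one afterwards. The sketch below shows only that retry/sleep policy; the semaphore wait and timeout handling are omitted, and the stubbed helpers and constants are placeholders rather than the zfcp definitions.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* placeholder values; the real constants are ZFCP_EXCHANGE_PORT_DATA_* */
#define SHORT_RETRIES	10
#define SHORT_SLEEP_S	1
#define LONG_SLEEP_S	5

/* stub: pretend the link comes back on the third attempt */
static bool link_still_unplugged(int attempt)
{
	return attempt < 3;
}

/* stub for issuing the exchange-port-data request; 0 means issued */
static int exchange_port_data(void)
{
	return 0;
}

int main(void)
{
	int retries;

	for (retries = 0; ; retries++) {
		if (exchange_port_data() != 0) {
			fprintf(stderr, "could not issue exchange port data\n");
			return 1;
		}
		if (!link_still_unplugged(retries))
			break;	/* link is up, stop retrying */

		/* short naps first, then back off to the long interval */
		sleep(retries < SHORT_RETRIES ? SHORT_SLEEP_S : LONG_SLEEP_S);
	}
	printf("port data exchanged after %d retries\n", retries);
	return 0;
}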
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index cd98a2de9f8f..c3782261cb5c 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -96,7 +96,8 @@ extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
96extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); 96extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
97 97
98extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); 98extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
99extern int zfcp_fsf_exchange_port_data(struct zfcp_adapter *, 99extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *,
100 struct zfcp_adapter *,
100 struct fsf_qtcb_bottom_port *); 101 struct fsf_qtcb_bottom_port *);
101extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **, 102extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
102 u32, u32, struct zfcp_sg_list *); 103 u32, u32, struct zfcp_sg_list *);
@@ -109,7 +110,6 @@ extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
109extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, 110extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
110 struct zfcp_erp_action *); 111 struct zfcp_erp_action *);
111extern int zfcp_fsf_send_els(struct zfcp_send_els *); 112extern int zfcp_fsf_send_els(struct zfcp_send_els *);
112extern int zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *, int, u32 *);
113extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, 113extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
114 struct zfcp_unit *, 114 struct zfcp_unit *,
115 struct scsi_cmnd *, 115 struct scsi_cmnd *,
@@ -182,9 +182,25 @@ extern void zfcp_erp_port_access_changed(struct zfcp_port *);
182extern void zfcp_erp_unit_access_changed(struct zfcp_unit *); 182extern void zfcp_erp_unit_access_changed(struct zfcp_unit *);
183 183
184/******************************** AUX ****************************************/ 184/******************************** AUX ****************************************/
185extern void zfcp_cmd_dbf_event_fsf(const char *, struct zfcp_fsf_req *, 185extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
186 void *, int); 186extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
187extern void zfcp_cmd_dbf_event_scsi(const char *, struct scsi_cmnd *); 187 struct fsf_status_read_buffer *);
188extern void zfcp_in_els_dbf_event(struct zfcp_adapter *, const char *, 188extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
189 struct fsf_status_read_buffer *, int); 189 unsigned int, unsigned int, unsigned int,
190 int, int);
191
192extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
193extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
194extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
195extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
196extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
197
198extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
199 struct scsi_cmnd *);
200extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
201 struct scsi_cmnd *,
202 struct zfcp_fsf_req *);
203extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
204 struct scsi_cmnd *);
205
190#endif /* ZFCP_EXT_H */ 206#endif /* ZFCP_EXT_H */
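Editor's note on the zfcp_ext.h hunk above: together with the new hba/san/scsi dbf fields added to struct zfcp_adapter in zfcp_def.h, tracing is split into per-area helpers, and each area is paired with its own lock and a staging record buffer. A rough userspace sketch of that shape follows; a pthread mutex and printf stand in for the s390 debug feature, and the record layout here is invented for illustration only.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* invented record layout, not struct zfcp_hba_dbf_record */
struct hba_dbf_record {
	char tag[8];
	unsigned long long fsf_reqid;
};

struct adapter {
	pthread_mutex_t hba_dbf_lock;	/* one lock per trace area */
	struct hba_dbf_record hba_dbf_buf;	/* staging record for that area */
};

static void hba_dbf_event(struct adapter *a, const char *tag,
			  unsigned long long reqid)
{
	pthread_mutex_lock(&a->hba_dbf_lock);
	memset(&a->hba_dbf_buf, 0, sizeof(a->hba_dbf_buf));
	strncpy(a->hba_dbf_buf.tag, tag, sizeof(a->hba_dbf_buf.tag) - 1);
	a->hba_dbf_buf.fsf_reqid = reqid;
	/* in the driver the filled record would go to the debug feature */
	printf("hba trace: %s reqid=%llx\n", a->hba_dbf_buf.tag,
	       a->hba_dbf_buf.fsf_reqid);
	pthread_mutex_unlock(&a->hba_dbf_lock);
}

int main(void)
{
	struct adapter a = { .hba_dbf_lock = PTHREAD_MUTEX_INITIALIZER };

	hba_dbf_event(&a, "response", 0x1234);
	return 0;
}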
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c007b6424e74..3b0fc1163f5f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -59,6 +59,8 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *);
59static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); 59static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
60static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); 60static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
61static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); 61static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
62static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
63 struct fsf_link_down_info *);
62static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); 64static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
63static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); 65static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
64 66
@@ -285,51 +287,51 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
285{ 287{
286 int retval = 0; 288 int retval = 0;
287 struct zfcp_adapter *adapter = fsf_req->adapter; 289 struct zfcp_adapter *adapter = fsf_req->adapter;
290 struct fsf_qtcb *qtcb = fsf_req->qtcb;
291 union fsf_prot_status_qual *prot_status_qual =
292 &qtcb->prefix.prot_status_qual;
288 293
289 ZFCP_LOG_DEBUG("QTCB is at %p\n", fsf_req->qtcb); 294 zfcp_hba_dbf_event_fsf_response(fsf_req);
290 295
291 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 296 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
292 ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n", 297 ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
293 (unsigned long) fsf_req); 298 (unsigned long) fsf_req);
294 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR | 299 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
295 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */ 300 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
296 zfcp_cmd_dbf_event_fsf("dismiss", fsf_req, NULL, 0);
297 goto skip_protstatus; 301 goto skip_protstatus;
298 } 302 }
299 303
300 /* log additional information provided by FSF (if any) */ 304 /* log additional information provided by FSF (if any) */
301 if (unlikely(fsf_req->qtcb->header.log_length)) { 305 if (unlikely(qtcb->header.log_length)) {
302 /* do not trust them ;-) */ 306 /* do not trust them ;-) */
303 if (fsf_req->qtcb->header.log_start > sizeof(struct fsf_qtcb)) { 307 if (qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
304 ZFCP_LOG_NORMAL 308 ZFCP_LOG_NORMAL
305 ("bug: ULP (FSF logging) log data starts " 309 ("bug: ULP (FSF logging) log data starts "
306 "beyond end of packet header. Ignored. " 310 "beyond end of packet header. Ignored. "
307 "(start=%i, size=%li)\n", 311 "(start=%i, size=%li)\n",
308 fsf_req->qtcb->header.log_start, 312 qtcb->header.log_start,
309 sizeof(struct fsf_qtcb)); 313 sizeof(struct fsf_qtcb));
310 goto forget_log; 314 goto forget_log;
311 } 315 }
312 if ((size_t) (fsf_req->qtcb->header.log_start + 316 if ((size_t) (qtcb->header.log_start + qtcb->header.log_length)
313 fsf_req->qtcb->header.log_length)
314 > sizeof(struct fsf_qtcb)) { 317 > sizeof(struct fsf_qtcb)) {
315 ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends " 318 ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
316 "beyond end of packet header. Ignored. " 319 "beyond end of packet header. Ignored. "
317 "(start=%i, length=%i, size=%li)\n", 320 "(start=%i, length=%i, size=%li)\n",
318 fsf_req->qtcb->header.log_start, 321 qtcb->header.log_start,
319 fsf_req->qtcb->header.log_length, 322 qtcb->header.log_length,
320 sizeof(struct fsf_qtcb)); 323 sizeof(struct fsf_qtcb));
321 goto forget_log; 324 goto forget_log;
322 } 325 }
323 ZFCP_LOG_TRACE("ULP log data: \n"); 326 ZFCP_LOG_TRACE("ULP log data: \n");
324 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, 327 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
325 (char *) fsf_req->qtcb + 328 (char *) qtcb + qtcb->header.log_start,
326 fsf_req->qtcb->header.log_start, 329 qtcb->header.log_length);
327 fsf_req->qtcb->header.log_length);
328 } 330 }
329 forget_log: 331 forget_log:
330 332
331 /* evaluate FSF Protocol Status */ 333 /* evaluate FSF Protocol Status */
332 switch (fsf_req->qtcb->prefix.prot_status) { 334 switch (qtcb->prefix.prot_status) {
333 335
334 case FSF_PROT_GOOD: 336 case FSF_PROT_GOOD:
335 case FSF_PROT_FSF_STATUS_PRESENTED: 337 case FSF_PROT_FSF_STATUS_PRESENTED:
@@ -340,14 +342,9 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
340 "microcode of version 0x%x, the device driver " 342 "microcode of version 0x%x, the device driver "
341 "only supports 0x%x. Aborting.\n", 343 "only supports 0x%x. Aborting.\n",
342 zfcp_get_busid_by_adapter(adapter), 344 zfcp_get_busid_by_adapter(adapter),
343 fsf_req->qtcb->prefix.prot_status_qual. 345 prot_status_qual->version_error.fsf_version,
344 version_error.fsf_version, ZFCP_QTCB_VERSION); 346 ZFCP_QTCB_VERSION);
345 /* stop operation for this adapter */
346 debug_text_exception(adapter->erp_dbf, 0, "prot_ver_err");
347 zfcp_erp_adapter_shutdown(adapter, 0); 347 zfcp_erp_adapter_shutdown(adapter, 0);
348 zfcp_cmd_dbf_event_fsf("qverserr", fsf_req,
349 &fsf_req->qtcb->prefix.prot_status_qual,
350 sizeof (union fsf_prot_status_qual));
351 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 348 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
352 break; 349 break;
353 350
@@ -355,16 +352,10 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
355 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between " 352 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between "
356 "driver (0x%x) and adapter %s (0x%x). " 353 "driver (0x%x) and adapter %s (0x%x). "
357 "Restarting all operations on this adapter.\n", 354 "Restarting all operations on this adapter.\n",
358 fsf_req->qtcb->prefix.req_seq_no, 355 qtcb->prefix.req_seq_no,
359 zfcp_get_busid_by_adapter(adapter), 356 zfcp_get_busid_by_adapter(adapter),
360 fsf_req->qtcb->prefix.prot_status_qual. 357 prot_status_qual->sequence_error.exp_req_seq_no);
361 sequence_error.exp_req_seq_no);
362 debug_text_exception(adapter->erp_dbf, 0, "prot_seq_err");
363 /* restart operation on this adapter */
364 zfcp_erp_adapter_reopen(adapter, 0); 358 zfcp_erp_adapter_reopen(adapter, 0);
365 zfcp_cmd_dbf_event_fsf("seqnoerr", fsf_req,
366 &fsf_req->qtcb->prefix.prot_status_qual,
367 sizeof (union fsf_prot_status_qual));
368 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; 359 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
369 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 360 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
370 break; 361 break;
@@ -375,116 +366,35 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
375 "that used on adapter %s. " 366 "that used on adapter %s. "
376 "Stopping all operations on this adapter.\n", 367 "Stopping all operations on this adapter.\n",
377 zfcp_get_busid_by_adapter(adapter)); 368 zfcp_get_busid_by_adapter(adapter));
378 debug_text_exception(adapter->erp_dbf, 0, "prot_unsup_qtcb");
379 zfcp_erp_adapter_shutdown(adapter, 0); 369 zfcp_erp_adapter_shutdown(adapter, 0);
380 zfcp_cmd_dbf_event_fsf("unsqtcbt", fsf_req,
381 &fsf_req->qtcb->prefix.prot_status_qual,
382 sizeof (union fsf_prot_status_qual));
383 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 370 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
384 break; 371 break;
385 372
386 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 373 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
387 zfcp_cmd_dbf_event_fsf("hconinit", fsf_req,
388 &fsf_req->qtcb->prefix.prot_status_qual,
389 sizeof (union fsf_prot_status_qual));
390 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 374 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
391 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 375 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
392 &(adapter->status)); 376 &(adapter->status));
393 debug_text_event(adapter->erp_dbf, 3, "prot_con_init");
394 break; 377 break;
395 378
396 case FSF_PROT_DUPLICATE_REQUEST_ID: 379 case FSF_PROT_DUPLICATE_REQUEST_ID:
397 if (fsf_req->qtcb) {
398 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx " 380 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx "
399 "to the adapter %s is ambiguous. " 381 "to the adapter %s is ambiguous. "
400 "Stopping all operations on this " 382 "Stopping all operations on this adapter.\n",
401 "adapter.\n", 383 *(unsigned long long*)
402 *(unsigned long long *) 384 (&qtcb->bottom.support.req_handle),
403 (&fsf_req->qtcb->bottom.support.
404 req_handle),
405 zfcp_get_busid_by_adapter(adapter));
406 } else {
407 ZFCP_LOG_NORMAL("bug: The request identifier %p "
408 "to the adapter %s is ambiguous. "
409 "Stopping all operations on this "
410 "adapter. "
411 "(bug: got this for an unsolicited "
412 "status read request)\n",
413 fsf_req,
414 zfcp_get_busid_by_adapter(adapter)); 385 zfcp_get_busid_by_adapter(adapter));
415 }
416 debug_text_exception(adapter->erp_dbf, 0, "prot_dup_id");
417 zfcp_erp_adapter_shutdown(adapter, 0); 386 zfcp_erp_adapter_shutdown(adapter, 0);
418 zfcp_cmd_dbf_event_fsf("dupreqid", fsf_req,
419 &fsf_req->qtcb->prefix.prot_status_qual,
420 sizeof (union fsf_prot_status_qual));
421 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 387 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
422 break; 388 break;
423 389
424 case FSF_PROT_LINK_DOWN: 390 case FSF_PROT_LINK_DOWN:
425 /* 391 zfcp_fsf_link_down_info_eval(adapter,
426 * 'test and set' is not atomic here - 392 &prot_status_qual->link_down_info);
427 * it's ok as long as calls to our response queue handler
428 * (and thus execution of this code here) are serialized
429 * by the qdio module
430 */
431 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
432 &adapter->status)) {
433 switch (fsf_req->qtcb->prefix.prot_status_qual.
434 locallink_error.code) {
435 case FSF_PSQ_LINK_NOLIGHT:
436 ZFCP_LOG_INFO("The local link to adapter %s "
437 "is down (no light detected).\n",
438 zfcp_get_busid_by_adapter(
439 adapter));
440 break;
441 case FSF_PSQ_LINK_WRAPPLUG:
442 ZFCP_LOG_INFO("The local link to adapter %s "
443 "is down (wrap plug detected).\n",
444 zfcp_get_busid_by_adapter(
445 adapter));
446 break;
447 case FSF_PSQ_LINK_NOFCP:
448 ZFCP_LOG_INFO("The local link to adapter %s "
449 "is down (adjacent node on "
450 "link does not support FCP).\n",
451 zfcp_get_busid_by_adapter(
452 adapter));
453 break;
454 default:
455 ZFCP_LOG_INFO("The local link to adapter %s "
456 "is down "
457 "(warning: unknown reason "
458 "code).\n",
459 zfcp_get_busid_by_adapter(
460 adapter));
461 break;
462
463 }
464 /*
465 * Due to the 'erp failed' flag the adapter won't
466 * be recovered but will be just set to 'blocked'
467 * state. All subordinary devices will have state
468 * 'blocked' and 'erp failed', too.
469 * Thus the adapter is still able to provide
470 * 'link up' status without being flooded with
471 * requests.
472 * (note: even 'close port' is not permitted)
473 */
474 ZFCP_LOG_INFO("Stopping all operations for adapter "
475 "%s.\n",
476 zfcp_get_busid_by_adapter(adapter));
477 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
478 ZFCP_STATUS_COMMON_ERP_FAILED,
479 &adapter->status);
480 zfcp_erp_adapter_reopen(adapter, 0);
481 }
482 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 393 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
483 break; 394 break;
484 395
485 case FSF_PROT_REEST_QUEUE: 396 case FSF_PROT_REEST_QUEUE:
486 debug_text_event(adapter->erp_dbf, 1, "prot_reest_queue"); 397 ZFCP_LOG_NORMAL("The local link to adapter with "
487 ZFCP_LOG_INFO("The local link to adapter with "
488 "%s was re-plugged. " 398 "%s was re-plugged. "
489 "Re-starting operations on this adapter.\n", 399 "Re-starting operations on this adapter.\n",
490 zfcp_get_busid_by_adapter(adapter)); 400 zfcp_get_busid_by_adapter(adapter));
@@ -495,9 +405,6 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
495 zfcp_erp_adapter_reopen(adapter, 405 zfcp_erp_adapter_reopen(adapter,
496 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 406 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
497 | ZFCP_STATUS_COMMON_ERP_FAILED); 407 | ZFCP_STATUS_COMMON_ERP_FAILED);
498 zfcp_cmd_dbf_event_fsf("reestque", fsf_req,
499 &fsf_req->qtcb->prefix.prot_status_qual,
500 sizeof (union fsf_prot_status_qual));
501 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 408 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
502 break; 409 break;
503 410
@@ -507,12 +414,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
507 "Restarting all operations on this " 414 "Restarting all operations on this "
508 "adapter.\n", 415 "adapter.\n",
509 zfcp_get_busid_by_adapter(adapter)); 416 zfcp_get_busid_by_adapter(adapter));
510 debug_text_event(adapter->erp_dbf, 0, "prot_err_sta");
511 /* restart operation on this adapter */
512 zfcp_erp_adapter_reopen(adapter, 0); 417 zfcp_erp_adapter_reopen(adapter, 0);
513 zfcp_cmd_dbf_event_fsf("proterrs", fsf_req,
514 &fsf_req->qtcb->prefix.prot_status_qual,
515 sizeof (union fsf_prot_status_qual));
516 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; 418 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
517 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 419 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
518 break; 420 break;
@@ -524,11 +426,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
524 "Stopping all operations on this adapter. " 426 "Stopping all operations on this adapter. "
525 "(debug info 0x%x).\n", 427 "(debug info 0x%x).\n",
526 zfcp_get_busid_by_adapter(adapter), 428 zfcp_get_busid_by_adapter(adapter),
527 fsf_req->qtcb->prefix.prot_status); 429 qtcb->prefix.prot_status);
528 debug_text_event(adapter->erp_dbf, 0, "prot_inval:");
529 debug_exception(adapter->erp_dbf, 0,
530 &fsf_req->qtcb->prefix.prot_status,
531 sizeof (u32));
532 zfcp_erp_adapter_shutdown(adapter, 0); 430 zfcp_erp_adapter_shutdown(adapter, 0);
533 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 431 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
534 } 432 }
@@ -568,28 +466,18 @@ zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
568 "(debug info 0x%x).\n", 466 "(debug info 0x%x).\n",
569 zfcp_get_busid_by_adapter(fsf_req->adapter), 467 zfcp_get_busid_by_adapter(fsf_req->adapter),
570 fsf_req->qtcb->header.fsf_command); 468 fsf_req->qtcb->header.fsf_command);
571 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
572 "fsf_s_unknown");
573 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0); 469 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
574 zfcp_cmd_dbf_event_fsf("unknownc", fsf_req,
575 &fsf_req->qtcb->header.fsf_status_qual,
576 sizeof (union fsf_status_qual));
577 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 470 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
578 break; 471 break;
579 472
580 case FSF_FCP_RSP_AVAILABLE: 473 case FSF_FCP_RSP_AVAILABLE:
581 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the " 474 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
582 "SCSI stack.\n"); 475 "SCSI stack.\n");
583 debug_text_event(fsf_req->adapter->erp_dbf, 3, "fsf_s_rsp");
584 break; 476 break;
585 477
586 case FSF_ADAPTER_STATUS_AVAILABLE: 478 case FSF_ADAPTER_STATUS_AVAILABLE:
587 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_astatus");
588 zfcp_fsf_fsfstatus_qual_eval(fsf_req); 479 zfcp_fsf_fsfstatus_qual_eval(fsf_req);
589 break; 480 break;
590
591 default:
592 break;
593 } 481 }
594 482
595 skip_fsfstatus: 483 skip_fsfstatus:
@@ -617,44 +505,28 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
617 505
618 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) { 506 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
619 case FSF_SQ_FCP_RSP_AVAILABLE: 507 case FSF_SQ_FCP_RSP_AVAILABLE:
620 debug_text_event(fsf_req->adapter->erp_dbf, 4, "fsf_sq_rsp");
621 break; 508 break;
622 case FSF_SQ_RETRY_IF_POSSIBLE: 509 case FSF_SQ_RETRY_IF_POSSIBLE:
623 /* The SCSI-stack may now issue retries or escalate */ 510 /* The SCSI-stack may now issue retries or escalate */
624 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_retry");
625 zfcp_cmd_dbf_event_fsf("sqretry", fsf_req,
626 &fsf_req->qtcb->header.fsf_status_qual,
627 sizeof (union fsf_status_qual));
628 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 511 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
629 break; 512 break;
630 case FSF_SQ_COMMAND_ABORTED: 513 case FSF_SQ_COMMAND_ABORTED:
631 /* Carry the aborted state on to upper layer */ 514 /* Carry the aborted state on to upper layer */
632 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_abort");
633 zfcp_cmd_dbf_event_fsf("sqabort", fsf_req,
634 &fsf_req->qtcb->header.fsf_status_qual,
635 sizeof (union fsf_status_qual));
636 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED; 515 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
637 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 516 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
638 break; 517 break;
639 case FSF_SQ_NO_RECOM: 518 case FSF_SQ_NO_RECOM:
640 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
641 "fsf_sq_no_rec");
642 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a " 519 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a "
643 "problem on the adapter %s " 520 "problem on the adapter %s "
644 "Stopping all operations on this adapter. ", 521 "Stopping all operations on this adapter. ",
645 zfcp_get_busid_by_adapter(fsf_req->adapter)); 522 zfcp_get_busid_by_adapter(fsf_req->adapter));
646 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0); 523 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
647 zfcp_cmd_dbf_event_fsf("sqnrecom", fsf_req,
648 &fsf_req->qtcb->header.fsf_status_qual,
649 sizeof (union fsf_status_qual));
650 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 524 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
651 break; 525 break;
652 case FSF_SQ_ULP_PROGRAMMING_ERROR: 526 case FSF_SQ_ULP_PROGRAMMING_ERROR:
653 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer " 527 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
654 "(adapter %s)\n", 528 "(adapter %s)\n",
655 zfcp_get_busid_by_adapter(fsf_req->adapter)); 529 zfcp_get_busid_by_adapter(fsf_req->adapter));
656 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
657 "fsf_sq_ulp_err");
658 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 530 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
659 break; 531 break;
660 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 532 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
@@ -668,13 +540,6 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
668 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, 540 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
669 (char *) &fsf_req->qtcb->header.fsf_status_qual, 541 (char *) &fsf_req->qtcb->header.fsf_status_qual,
670 sizeof (union fsf_status_qual)); 542 sizeof (union fsf_status_qual));
671 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval:");
672 debug_exception(fsf_req->adapter->erp_dbf, 0,
673 &fsf_req->qtcb->header.fsf_status_qual.word[0],
674 sizeof (u32));
675 zfcp_cmd_dbf_event_fsf("squndef", fsf_req,
676 &fsf_req->qtcb->header.fsf_status_qual,
677 sizeof (union fsf_status_qual));
678 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 543 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
679 break; 544 break;
680 } 545 }
@@ -682,6 +547,110 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
682 return retval; 547 return retval;
683} 548}
684 549
550/**
551 * zfcp_fsf_link_down_info_eval - evaluate link down information block
552 */
553static void
554zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
555 struct fsf_link_down_info *link_down)
556{
557 switch (link_down->error_code) {
558 case FSF_PSQ_LINK_NO_LIGHT:
559 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
560 "(no light detected)\n",
561 zfcp_get_busid_by_adapter(adapter));
562 break;
563 case FSF_PSQ_LINK_WRAP_PLUG:
564 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
565 "(wrap plug detected)\n",
566 zfcp_get_busid_by_adapter(adapter));
567 break;
568 case FSF_PSQ_LINK_NO_FCP:
569 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
570 "(adjacent node on link does not support FCP)\n",
571 zfcp_get_busid_by_adapter(adapter));
572 break;
573 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
574 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
575 "(firmware update in progress)\n",
576 zfcp_get_busid_by_adapter(adapter));
577 break;
578 case FSF_PSQ_LINK_INVALID_WWPN:
579 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
580 "(duplicate or invalid WWPN detected)\n",
581 zfcp_get_busid_by_adapter(adapter));
582 break;
583 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
584 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
585 "(no support for NPIV by Fabric)\n",
586 zfcp_get_busid_by_adapter(adapter));
587 break;
588 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
589 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
590 "(out of resource in FCP daughtercard)\n",
591 zfcp_get_busid_by_adapter(adapter));
592 break;
593 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
594 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
595 "(out of resource in Fabric)\n",
596 zfcp_get_busid_by_adapter(adapter));
597 break;
598 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
599 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
600 "(unable to Fabric login)\n",
601 zfcp_get_busid_by_adapter(adapter));
602 break;
603 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
604 ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n",
605 zfcp_get_busid_by_adapter(adapter));
606 break;
607 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
608 ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n",
609 zfcp_get_busid_by_adapter(adapter));
610 break;
611 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
612 ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n",
613 zfcp_get_busid_by_adapter(adapter));
614 break;
615 default:
616 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
617 "(warning: unknown reason code %d)\n",
618 zfcp_get_busid_by_adapter(adapter),
619 link_down->error_code);
620 }
621
622 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
623 ZFCP_LOG_DEBUG("Debug information to link down: "
624 "primary_status=0x%02x "
625 "ioerr_code=0x%02x "
626 "action_code=0x%02x "
627 "reason_code=0x%02x "
628 "explanation_code=0x%02x "
629 "vendor_specific_code=0x%02x\n",
630 link_down->primary_status,
631 link_down->ioerr_code,
632 link_down->action_code,
633 link_down->reason_code,
634 link_down->explanation_code,
635 link_down->vendor_specific_code);
636
637 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
638 &adapter->status)) {
639 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
640 &adapter->status);
641 switch (link_down->error_code) {
642 case FSF_PSQ_LINK_NO_LIGHT:
643 case FSF_PSQ_LINK_WRAP_PLUG:
644 case FSF_PSQ_LINK_NO_FCP:
645 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
646 zfcp_erp_adapter_reopen(adapter, 0);
647 break;
648 default:
649 zfcp_erp_adapter_failed(adapter);
650 }
651 }
652}
653
685/* 654/*
686 * function: zfcp_fsf_req_dispatch 655 * function: zfcp_fsf_req_dispatch
687 * 656 *
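Editor's note on the hunk above: the new zfcp_fsf_link_down_info_eval() acts only on the first link-down transition -- it latches ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED and then either reopens the adapter (no light, wrap plug, no FCP, firmware update) or marks it failed for the remaining causes. A condensed sketch of that decision follows; the error codes and the plain bitmask are made-up stand-ins for the FSF definitions and the atomic status word.

#include <stdio.h>

/* invented error codes standing in for FSF_PSQ_LINK_* */
enum link_error {
	LINK_NO_LIGHT,
	LINK_WRAP_PLUG,
	LINK_NO_FCP,
	LINK_FIRMWARE_UPDATE,
	LINK_NO_NPIV_SUPPORT,	/* treated as persistent below */
};

#define STATUS_LINK_UNPLUGGED 0x1

static void handle_link_down(unsigned int *status, enum link_error err)
{
	if (*status & STATUS_LINK_UNPLUGGED)
		return;				/* only the first event acts */
	*status |= STATUS_LINK_UNPLUGGED;

	switch (err) {
	case LINK_NO_LIGHT:
	case LINK_WRAP_PLUG:
	case LINK_NO_FCP:
	case LINK_FIRMWARE_UPDATE:
		printf("transient cause: trigger adapter reopen\n");
		break;
	default:
		printf("persistent cause: mark adapter failed\n");
	}
}

int main(void)
{
	unsigned int status = 0;

	handle_link_down(&status, LINK_NO_LIGHT);	/* latches and reopens */
	handle_link_down(&status, LINK_NO_LIGHT);	/* duplicate is ignored */
	return 0;
}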
@@ -696,11 +665,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
696 struct zfcp_adapter *adapter = fsf_req->adapter; 665 struct zfcp_adapter *adapter = fsf_req->adapter;
697 int retval = 0; 666 int retval = 0;
698 667
699 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
700 ZFCP_LOG_TRACE("fsf_req=%p, QTCB=%p\n", fsf_req, fsf_req->qtcb);
701 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
702 (char *) fsf_req->qtcb, sizeof(struct fsf_qtcb));
703 }
704 668
705 switch (fsf_req->fsf_command) { 669 switch (fsf_req->fsf_command) {
706 670
@@ -760,13 +724,13 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
760 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 724 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
761 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is " 725 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
762 "not supported by the adapter %s\n", 726 "not supported by the adapter %s\n",
763 zfcp_get_busid_by_adapter(fsf_req->adapter)); 727 zfcp_get_busid_by_adapter(adapter));
764 if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command) 728 if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
765 ZFCP_LOG_NORMAL 729 ZFCP_LOG_NORMAL
766 ("bug: Command issued by the device driver differs " 730 ("bug: Command issued by the device driver differs "
767 "from the command returned by the adapter %s " 731 "from the command returned by the adapter %s "
768 "(debug info 0x%x, 0x%x).\n", 732 "(debug info 0x%x, 0x%x).\n",
769 zfcp_get_busid_by_adapter(fsf_req->adapter), 733 zfcp_get_busid_by_adapter(adapter),
770 fsf_req->fsf_command, 734 fsf_req->fsf_command,
771 fsf_req->qtcb->header.fsf_command); 735 fsf_req->qtcb->header.fsf_command);
772 } 736 }
@@ -774,8 +738,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
774 if (!erp_action) 738 if (!erp_action)
775 return retval; 739 return retval;
776 740
777 debug_text_event(adapter->erp_dbf, 3, "a_frh");
778 debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
779 zfcp_erp_async_handler(erp_action, 0); 741 zfcp_erp_async_handler(erp_action, 0);
780 742
781 return retval; 743 return retval;
@@ -821,7 +783,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
821 goto failed_buf; 783 goto failed_buf;
822 } 784 }
823 memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer)); 785 memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
824 fsf_req->data.status_read.buffer = status_buffer; 786 fsf_req->data = (unsigned long) status_buffer;
825 787
826 /* insert pointer to respective buffer */ 788 /* insert pointer to respective buffer */
827 sbale = zfcp_qdio_sbale_curr(fsf_req); 789 sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -846,6 +808,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
846 failed_buf: 808 failed_buf:
847 zfcp_fsf_req_free(fsf_req); 809 zfcp_fsf_req_free(fsf_req);
848 failed_req_create: 810 failed_req_create:
811 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
849 out: 812 out:
850 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 813 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
851 return retval; 814 return retval;
@@ -859,7 +822,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
859 struct zfcp_port *port; 822 struct zfcp_port *port;
860 unsigned long flags; 823 unsigned long flags;
861 824
862 status_buffer = fsf_req->data.status_read.buffer; 825 status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
863 adapter = fsf_req->adapter; 826 adapter = fsf_req->adapter;
864 827
865 read_lock_irqsave(&zfcp_data.config_lock, flags); 828 read_lock_irqsave(&zfcp_data.config_lock, flags);
@@ -918,38 +881,33 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
918 int retval = 0; 881 int retval = 0;
919 struct zfcp_adapter *adapter = fsf_req->adapter; 882 struct zfcp_adapter *adapter = fsf_req->adapter;
920 struct fsf_status_read_buffer *status_buffer = 883 struct fsf_status_read_buffer *status_buffer =
921 fsf_req->data.status_read.buffer; 884 (struct fsf_status_read_buffer *) fsf_req->data;
922 885
923 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 886 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
887 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer);
924 mempool_free(status_buffer, adapter->pool.data_status_read); 888 mempool_free(status_buffer, adapter->pool.data_status_read);
925 zfcp_fsf_req_free(fsf_req); 889 zfcp_fsf_req_free(fsf_req);
926 goto out; 890 goto out;
927 } 891 }
928 892
893 zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer);
894
929 switch (status_buffer->status_type) { 895 switch (status_buffer->status_type) {
930 896
931 case FSF_STATUS_READ_PORT_CLOSED: 897 case FSF_STATUS_READ_PORT_CLOSED:
932 debug_text_event(adapter->erp_dbf, 3, "unsol_pclosed:");
933 debug_event(adapter->erp_dbf, 3,
934 &status_buffer->d_id, sizeof (u32));
935 zfcp_fsf_status_read_port_closed(fsf_req); 898 zfcp_fsf_status_read_port_closed(fsf_req);
936 break; 899 break;
937 900
938 case FSF_STATUS_READ_INCOMING_ELS: 901 case FSF_STATUS_READ_INCOMING_ELS:
939 debug_text_event(adapter->erp_dbf, 3, "unsol_els:");
940 zfcp_fsf_incoming_els(fsf_req); 902 zfcp_fsf_incoming_els(fsf_req);
941 break; 903 break;
942 904
943 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 905 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
944 debug_text_event(adapter->erp_dbf, 3, "unsol_sense:");
945 ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n", 906 ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
946 zfcp_get_busid_by_adapter(adapter)); 907 zfcp_get_busid_by_adapter(adapter));
947 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, (char *) status_buffer,
948 sizeof(struct fsf_status_read_buffer));
949 break; 908 break;
950 909
951 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 910 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
952 debug_text_event(adapter->erp_dbf, 3, "unsol_bit_err:");
953 ZFCP_LOG_NORMAL("Bit error threshold data received:\n"); 911 ZFCP_LOG_NORMAL("Bit error threshold data received:\n");
954 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, 912 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
955 (char *) status_buffer, 913 (char *) status_buffer,
@@ -957,17 +915,32 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
957 break; 915 break;
958 916
959 case FSF_STATUS_READ_LINK_DOWN: 917 case FSF_STATUS_READ_LINK_DOWN:
960 debug_text_event(adapter->erp_dbf, 0, "unsol_link_down:"); 918 switch (status_buffer->status_subtype) {
961 ZFCP_LOG_INFO("Local link to adapter %s is down\n", 919 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
920 ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
921 zfcp_get_busid_by_adapter(adapter));
922 break;
923 case FSF_STATUS_READ_SUB_FDISC_FAILED:
924 ZFCP_LOG_INFO("Local link to adapter %s is down "
925 "due to failed FDISC login\n",
962 zfcp_get_busid_by_adapter(adapter)); 926 zfcp_get_busid_by_adapter(adapter));
963 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 927 break;
964 &adapter->status); 928 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
965 zfcp_erp_adapter_failed(adapter); 929 ZFCP_LOG_INFO("Local link to adapter %s is down "
930 "due to firmware update on adapter\n",
931 zfcp_get_busid_by_adapter(adapter));
932 break;
933 default:
934 ZFCP_LOG_INFO("Local link to adapter %s is down "
935 "due to unknown reason\n",
936 zfcp_get_busid_by_adapter(adapter));
937 };
938 zfcp_fsf_link_down_info_eval(adapter,
939 (struct fsf_link_down_info *) &status_buffer->payload);
966 break; 940 break;
967 941
968 case FSF_STATUS_READ_LINK_UP: 942 case FSF_STATUS_READ_LINK_UP:
969 debug_text_event(adapter->erp_dbf, 2, "unsol_link_up:"); 943 ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. "
970 ZFCP_LOG_INFO("Local link to adapter %s was replugged. "
971 "Restarting operations on this adapter\n", 944 "Restarting operations on this adapter\n",
972 zfcp_get_busid_by_adapter(adapter)); 945 zfcp_get_busid_by_adapter(adapter));
973 /* All ports should be marked as ready to run again */ 946 /* All ports should be marked as ready to run again */
@@ -980,35 +953,40 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
980 break; 953 break;
981 954
982 case FSF_STATUS_READ_CFDC_UPDATED: 955 case FSF_STATUS_READ_CFDC_UPDATED:
983 debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_update:"); 956 ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n",
984 ZFCP_LOG_INFO("CFDC has been updated on the adapter %s\n",
985 zfcp_get_busid_by_adapter(adapter)); 957 zfcp_get_busid_by_adapter(adapter));
986 zfcp_erp_adapter_access_changed(adapter); 958 zfcp_erp_adapter_access_changed(adapter);
987 break; 959 break;
988 960
989 case FSF_STATUS_READ_CFDC_HARDENED: 961 case FSF_STATUS_READ_CFDC_HARDENED:
990 debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_harden:");
991 switch (status_buffer->status_subtype) { 962 switch (status_buffer->status_subtype) {
992 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE: 963 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
993 ZFCP_LOG_INFO("CFDC of adapter %s saved on SE\n", 964 ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n",
994 zfcp_get_busid_by_adapter(adapter)); 965 zfcp_get_busid_by_adapter(adapter));
995 break; 966 break;
996 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2: 967 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
997 ZFCP_LOG_INFO("CFDC of adapter %s has been copied " 968 ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied "
998 "to the secondary SE\n", 969 "to the secondary SE\n",
999 zfcp_get_busid_by_adapter(adapter)); 970 zfcp_get_busid_by_adapter(adapter));
1000 break; 971 break;
1001 default: 972 default:
1002 ZFCP_LOG_INFO("CFDC of adapter %s has been hardened\n", 973 ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n",
1003 zfcp_get_busid_by_adapter(adapter)); 974 zfcp_get_busid_by_adapter(adapter));
1004 } 975 }
1005 break; 976 break;
1006 977
978 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
979 debug_text_event(adapter->erp_dbf, 2, "unsol_features:");
980 ZFCP_LOG_INFO("List of supported features on adapter %s has "
981 "been changed from 0x%08X to 0x%08X\n",
982 zfcp_get_busid_by_adapter(adapter),
983 *(u32*) (status_buffer->payload + 4),
984 *(u32*) (status_buffer->payload));
985 adapter->adapter_features = *(u32*) status_buffer->payload;
986 break;
987
1007 default: 988 default:
1008 debug_text_event(adapter->erp_dbf, 0, "unsol_unknown:"); 989 ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown "
1009 debug_exception(adapter->erp_dbf, 0,
1010 &status_buffer->status_type, sizeof (u32));
1011 ZFCP_LOG_NORMAL("bug: An unsolicited status packet of unknown "
1012 "type was received (debug info 0x%x)\n", 990 "type was received (debug info 0x%x)\n",
1013 status_buffer->status_type); 991 status_buffer->status_type);
1014 ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n", 992 ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
@@ -1093,7 +1071,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1093 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1071 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1094 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1072 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1095 1073
1096 fsf_req->data.abort_fcp_command.unit = unit; 1074 fsf_req->data = (unsigned long) unit;
1097 1075
1098 /* set handles of unit and its parent port in QTCB */ 1076 /* set handles of unit and its parent port in QTCB */
1099 fsf_req->qtcb->header.lun_handle = unit->handle; 1077 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -1139,7 +1117,7 @@ static int
1139zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) 1117zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1140{ 1118{
1141 int retval = -EINVAL; 1119 int retval = -EINVAL;
1142 struct zfcp_unit *unit = new_fsf_req->data.abort_fcp_command.unit; 1120 struct zfcp_unit *unit;
1143 unsigned char status_qual = 1121 unsigned char status_qual =
1144 new_fsf_req->qtcb->header.fsf_status_qual.word[0]; 1122 new_fsf_req->qtcb->header.fsf_status_qual.word[0];
1145 1123
@@ -1150,6 +1128,8 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1150 goto skip_fsfstatus; 1128 goto skip_fsfstatus;
1151 } 1129 }
1152 1130
1131 unit = (struct zfcp_unit *) new_fsf_req->data;
1132
1153 /* evaluate FSF status in QTCB */ 1133 /* evaluate FSF status in QTCB */
1154 switch (new_fsf_req->qtcb->header.fsf_status) { 1134 switch (new_fsf_req->qtcb->header.fsf_status) {
1155 1135
@@ -1364,7 +1344,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1364 sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]); 1344 sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
1365 sbale[3].length = ct->resp[0].length; 1345 sbale[3].length = ct->resp[0].length;
1366 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; 1346 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1367 } else if (adapter->supported_features & 1347 } else if (adapter->adapter_features &
1368 FSF_FEATURE_ELS_CT_CHAINED_SBALS) { 1348 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1369 /* try to use chained SBALs */ 1349 /* try to use chained SBALs */
1370 bytes = zfcp_qdio_sbals_from_sg(fsf_req, 1350 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1414,7 +1394,9 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1414 fsf_req->qtcb->header.port_handle = port->handle; 1394 fsf_req->qtcb->header.port_handle = port->handle;
1415 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class; 1395 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
1416 fsf_req->qtcb->bottom.support.timeout = ct->timeout; 1396 fsf_req->qtcb->bottom.support.timeout = ct->timeout;
1417 fsf_req->data.send_ct = ct; 1397 fsf_req->data = (unsigned long) ct;
1398
1399 zfcp_san_dbf_event_ct_request(fsf_req);
1418 1400
1419 /* start QDIO request for this FSF request */ 1401 /* start QDIO request for this FSF request */
1420 ret = zfcp_fsf_req_send(fsf_req, ct->timer); 1402 ret = zfcp_fsf_req_send(fsf_req, ct->timer);
@@ -1445,10 +1427,10 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1445 * zfcp_fsf_send_ct_handler - handler for Generic Service requests 1427 * zfcp_fsf_send_ct_handler - handler for Generic Service requests
1446 * @fsf_req: pointer to struct zfcp_fsf_req 1428 * @fsf_req: pointer to struct zfcp_fsf_req
1447 * 1429 *
1448 * Data specific for the Generic Service request is passed by 1430 * Data specific for the Generic Service request is passed using
1449 * fsf_req->data.send_ct 1431 * fsf_req->data. There we find the pointer to struct zfcp_send_ct.
1450 * Usually a specific handler for the request is called via 1432 * Usually a specific handler for the CT request is called which is
1451 * fsf_req->data.send_ct->handler at end of this function. 1433 * found in this structure.
1452 */ 1434 */
1453static int 1435static int
1454zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) 1436zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
@@ -1462,7 +1444,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1462 u16 subtable, rule, counter; 1444 u16 subtable, rule, counter;
1463 1445
1464 adapter = fsf_req->adapter; 1446 adapter = fsf_req->adapter;
1465 send_ct = fsf_req->data.send_ct; 1447 send_ct = (struct zfcp_send_ct *) fsf_req->data;
1466 port = send_ct->port; 1448 port = send_ct->port;
1467 header = &fsf_req->qtcb->header; 1449 header = &fsf_req->qtcb->header;
1468 bottom = &fsf_req->qtcb->bottom.support; 1450 bottom = &fsf_req->qtcb->bottom.support;
@@ -1474,6 +1456,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1474 switch (header->fsf_status) { 1456 switch (header->fsf_status) {
1475 1457
1476 case FSF_GOOD: 1458 case FSF_GOOD:
1459 zfcp_san_dbf_event_ct_response(fsf_req);
1477 retval = 0; 1460 retval = 0;
1478 break; 1461 break;
1479 1462
@@ -1634,7 +1617,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1634{ 1617{
1635 volatile struct qdio_buffer_element *sbale; 1618 volatile struct qdio_buffer_element *sbale;
1636 struct zfcp_fsf_req *fsf_req; 1619 struct zfcp_fsf_req *fsf_req;
1637 fc_id_t d_id; 1620 u32 d_id;
1638 struct zfcp_adapter *adapter; 1621 struct zfcp_adapter *adapter;
1639 unsigned long lock_flags; 1622 unsigned long lock_flags;
1640 int bytes; 1623 int bytes;
@@ -1664,7 +1647,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1664 sbale[3].addr = zfcp_sg_to_address(&els->resp[0]); 1647 sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
1665 sbale[3].length = els->resp[0].length; 1648 sbale[3].length = els->resp[0].length;
1666 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; 1649 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1667 } else if (adapter->supported_features & 1650 } else if (adapter->adapter_features &
1668 FSF_FEATURE_ELS_CT_CHAINED_SBALS) { 1651 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1669 /* try to use chained SBALs */ 1652 /* try to use chained SBALs */
1670 bytes = zfcp_qdio_sbals_from_sg(fsf_req, 1653 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1714,10 +1697,12 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1714 fsf_req->qtcb->bottom.support.d_id = d_id; 1697 fsf_req->qtcb->bottom.support.d_id = d_id;
1715 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class; 1698 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
1716 fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT; 1699 fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
1717 fsf_req->data.send_els = els; 1700 fsf_req->data = (unsigned long) els;
1718 1701
1719 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1702 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1720 1703
1704 zfcp_san_dbf_event_els_request(fsf_req);
1705
1721 /* start QDIO request for this FSF request */ 1706 /* start QDIO request for this FSF request */
1722 ret = zfcp_fsf_req_send(fsf_req, els->timer); 1707 ret = zfcp_fsf_req_send(fsf_req, els->timer);
1723 if (ret) { 1708 if (ret) {
@@ -1746,23 +1731,23 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1746 * zfcp_fsf_send_els_handler - handler for ELS commands 1731 * zfcp_fsf_send_els_handler - handler for ELS commands
1747 * @fsf_req: pointer to struct zfcp_fsf_req 1732 * @fsf_req: pointer to struct zfcp_fsf_req
1748 * 1733 *
1749 * Data specific for the ELS command is passed by 1734 * Data specific for the ELS command is passed using
1750 * fsf_req->data.send_els 1735 * fsf_req->data. There we find the pointer to struct zfcp_send_els.
1751 * Usually a specific handler for the command is called via 1736 * Usually a specific handler for the ELS command is called which is
1752 * fsf_req->data.send_els->handler at end of this function. 1737 * found in this structure.
1753 */ 1738 */
1754static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req) 1739static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1755{ 1740{
1756 struct zfcp_adapter *adapter; 1741 struct zfcp_adapter *adapter;
1757 struct zfcp_port *port; 1742 struct zfcp_port *port;
1758 fc_id_t d_id; 1743 u32 d_id;
1759 struct fsf_qtcb_header *header; 1744 struct fsf_qtcb_header *header;
1760 struct fsf_qtcb_bottom_support *bottom; 1745 struct fsf_qtcb_bottom_support *bottom;
1761 struct zfcp_send_els *send_els; 1746 struct zfcp_send_els *send_els;
1762 int retval = -EINVAL; 1747 int retval = -EINVAL;
1763 u16 subtable, rule, counter; 1748 u16 subtable, rule, counter;
1764 1749
1765 send_els = fsf_req->data.send_els; 1750 send_els = (struct zfcp_send_els *) fsf_req->data;
1766 adapter = send_els->adapter; 1751 adapter = send_els->adapter;
1767 port = send_els->port; 1752 port = send_els->port;
1768 d_id = send_els->d_id; 1753 d_id = send_els->d_id;
@@ -1775,6 +1760,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1775 switch (header->fsf_status) { 1760 switch (header->fsf_status) {
1776 1761
1777 case FSF_GOOD: 1762 case FSF_GOOD:
1763 zfcp_san_dbf_event_els_response(fsf_req);
1778 retval = 0; 1764 retval = 0;
1779 break; 1765 break;
1780 1766
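The CT and ELS hunks above add paired SAN trace hooks: zfcp_san_dbf_event_ct_request()/_els_request() are called just before the request is handed to zfcp_fsf_req_send(), and the matching _response() calls sit in the handlers' FSF_GOOD branches, so a request record exists even when no response ever arrives. What follows is a minimal, self-contained sketch of that request/response trace pairing; it is not taken from the patch, and the sketch_* names are illustrative stand-ins for the zfcp debug-feature helpers.

#include <stdio.h>

struct sketch_req {
        unsigned long id;                       /* stands in for the fsf_req pointer */
        int status;                             /* stands in for the FSF status */
};

/* stand-ins for zfcp_san_dbf_event_*_request() / _response() */
static void sketch_trace_request(struct sketch_req *req)
{
        printf("SAN trace: request %#lx sent\n", req->id);
}

static void sketch_trace_response(struct sketch_req *req)
{
        printf("SAN trace: request %#lx completed, status %d\n",
               req->id, req->status);
}

/* submit path: trace before the request goes out to the hardware queue */
static void sketch_send(struct sketch_req *req)
{
        sketch_trace_request(req);
        /* ... hand the request to the queue here ... */
}

/* completion path: trace again only for a good response, cf. FSF_GOOD above */
static void sketch_handler(struct sketch_req *req)
{
        if (req->status == 0)
                sketch_trace_response(req);
}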
@@ -1954,7 +1940,9 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1954 1940
1955 erp_action->fsf_req->erp_action = erp_action; 1941 erp_action->fsf_req->erp_action = erp_action;
1956 erp_action->fsf_req->qtcb->bottom.config.feature_selection = 1942 erp_action->fsf_req->qtcb->bottom.config.feature_selection =
1957 (FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING); 1943 FSF_FEATURE_CFDC |
1944 FSF_FEATURE_LUN_SHARING |
1945 FSF_FEATURE_UPDATE_ALERT;
1958 1946
1959 /* start QDIO request for this FSF request */ 1947 /* start QDIO request for this FSF request */
1960 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 1948 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
@@ -1990,29 +1978,36 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
1990{ 1978{
1991 struct fsf_qtcb_bottom_config *bottom; 1979 struct fsf_qtcb_bottom_config *bottom;
1992 struct zfcp_adapter *adapter = fsf_req->adapter; 1980 struct zfcp_adapter *adapter = fsf_req->adapter;
1981 struct Scsi_Host *shost = adapter->scsi_host;
1993 1982
1994 bottom = &fsf_req->qtcb->bottom.config; 1983 bottom = &fsf_req->qtcb->bottom.config;
1995 ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n", 1984 ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
1996 bottom->low_qtcb_version, bottom->high_qtcb_version); 1985 bottom->low_qtcb_version, bottom->high_qtcb_version);
1997 adapter->fsf_lic_version = bottom->lic_version; 1986 adapter->fsf_lic_version = bottom->lic_version;
1998 adapter->supported_features = bottom->supported_features; 1987 adapter->adapter_features = bottom->adapter_features;
1988 adapter->connection_features = bottom->connection_features;
1999 adapter->peer_wwpn = 0; 1989 adapter->peer_wwpn = 0;
2000 adapter->peer_wwnn = 0; 1990 adapter->peer_wwnn = 0;
2001 adapter->peer_d_id = 0; 1991 adapter->peer_d_id = 0;
2002 1992
2003 if (xchg_ok) { 1993 if (xchg_ok) {
2004 adapter->wwnn = bottom->nport_serv_param.wwnn; 1994 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
2005 adapter->wwpn = bottom->nport_serv_param.wwpn; 1995 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
2006 adapter->s_id = bottom->s_id & ZFCP_DID_MASK; 1996 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
1997 fc_host_speed(shost) = bottom->fc_link_speed;
1998 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
2007 adapter->fc_topology = bottom->fc_topology; 1999 adapter->fc_topology = bottom->fc_topology;
2008 adapter->fc_link_speed = bottom->fc_link_speed;
2009 adapter->hydra_version = bottom->adapter_type; 2000 adapter->hydra_version = bottom->adapter_type;
2001 if (adapter->physical_wwpn == 0)
2002 adapter->physical_wwpn = fc_host_port_name(shost);
2003 if (adapter->physical_s_id == 0)
2004 adapter->physical_s_id = fc_host_port_id(shost);
2010 } else { 2005 } else {
2011 adapter->wwnn = 0; 2006 fc_host_node_name(shost) = 0;
2012 adapter->wwpn = 0; 2007 fc_host_port_name(shost) = 0;
2013 adapter->s_id = 0; 2008 fc_host_port_id(shost) = 0;
2009 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
2014 adapter->fc_topology = 0; 2010 adapter->fc_topology = 0;
2015 adapter->fc_link_speed = 0;
2016 adapter->hydra_version = 0; 2011 adapter->hydra_version = 0;
2017 } 2012 }
2018 2013
@@ -2022,26 +2017,28 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2022 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 2017 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
2023 } 2018 }
2024 2019
2025 if(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT){ 2020 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
2026 adapter->hardware_version = bottom->hardware_version; 2021 adapter->hardware_version = bottom->hardware_version;
2027 memcpy(adapter->serial_number, bottom->serial_number, 17); 2022 memcpy(fc_host_serial_number(shost), bottom->serial_number,
2028 EBCASC(adapter->serial_number, sizeof(adapter->serial_number)); 2023 min(FC_SERIAL_NUMBER_SIZE, 17));
2024 EBCASC(fc_host_serial_number(shost),
2025 min(FC_SERIAL_NUMBER_SIZE, 17));
2029 } 2026 }
2030 2027
2031 ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n" 2028 ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n"
2032 "WWNN 0x%016Lx, " 2029 "WWNN 0x%016Lx, "
2033 "WWPN 0x%016Lx, " 2030 "WWPN 0x%016Lx, "
2034 "S_ID 0x%08x,\n" 2031 "S_ID 0x%08x,\n"
2035 "adapter version 0x%x, " 2032 "adapter version 0x%x, "
2036 "LIC version 0x%x, " 2033 "LIC version 0x%x, "
2037 "FC link speed %d Gb/s\n", 2034 "FC link speed %d Gb/s\n",
2038 zfcp_get_busid_by_adapter(adapter), 2035 zfcp_get_busid_by_adapter(adapter),
2039 adapter->wwnn, 2036 (wwn_t) fc_host_node_name(shost),
2040 adapter->wwpn, 2037 (wwn_t) fc_host_port_name(shost),
2041 (unsigned int) adapter->s_id, 2038 fc_host_port_id(shost),
2042 adapter->hydra_version, 2039 adapter->hydra_version,
2043 adapter->fsf_lic_version, 2040 adapter->fsf_lic_version,
2044 adapter->fc_link_speed); 2041 fc_host_speed(shost));
2045 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) { 2042 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
2046 ZFCP_LOG_NORMAL("error: the adapter %s " 2043 ZFCP_LOG_NORMAL("error: the adapter %s "
2047 "only supports newer control block " 2044 "only supports newer control block "
@@ -2062,7 +2059,6 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2062 zfcp_erp_adapter_shutdown(adapter, 0); 2059 zfcp_erp_adapter_shutdown(adapter, 0);
2063 return -EIO; 2060 return -EIO;
2064 } 2061 }
2065 zfcp_set_fc_host_attrs(adapter);
2066 return 0; 2062 return 0;
2067} 2063}
2068 2064
@@ -2078,11 +2074,12 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2078{ 2074{
2079 struct fsf_qtcb_bottom_config *bottom; 2075 struct fsf_qtcb_bottom_config *bottom;
2080 struct zfcp_adapter *adapter = fsf_req->adapter; 2076 struct zfcp_adapter *adapter = fsf_req->adapter;
2077 struct fsf_qtcb *qtcb = fsf_req->qtcb;
2081 2078
2082 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2079 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2083 return -EIO; 2080 return -EIO;
2084 2081
2085 switch (fsf_req->qtcb->header.fsf_status) { 2082 switch (qtcb->header.fsf_status) {
2086 2083
2087 case FSF_GOOD: 2084 case FSF_GOOD:
2088 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1)) 2085 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
@@ -2112,7 +2109,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2112 zfcp_erp_adapter_shutdown(adapter, 0); 2109 zfcp_erp_adapter_shutdown(adapter, 0);
2113 return -EIO; 2110 return -EIO;
2114 case FSF_TOPO_FABRIC: 2111 case FSF_TOPO_FABRIC:
2115 ZFCP_LOG_INFO("Switched fabric fibrechannel " 2112 ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
2116 "network detected at adapter %s.\n", 2113 "network detected at adapter %s.\n",
2117 zfcp_get_busid_by_adapter(adapter)); 2114 zfcp_get_busid_by_adapter(adapter));
2118 break; 2115 break;
@@ -2130,7 +2127,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2130 zfcp_erp_adapter_shutdown(adapter, 0); 2127 zfcp_erp_adapter_shutdown(adapter, 0);
2131 return -EIO; 2128 return -EIO;
2132 } 2129 }
2133 bottom = &fsf_req->qtcb->bottom.config; 2130 bottom = &qtcb->bottom.config;
2134 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 2131 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
2135 ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) " 2132 ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
2136 "allowed by the adapter %s " 2133 "allowed by the adapter %s "
@@ -2155,12 +2152,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2155 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0)) 2152 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
2156 return -EIO; 2153 return -EIO;
2157 2154
2158 ZFCP_LOG_INFO("Local link to adapter %s is down\n", 2155 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
2159 zfcp_get_busid_by_adapter(adapter)); 2156
2160 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 2157 zfcp_fsf_link_down_info_eval(adapter,
2161 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2158 &qtcb->header.fsf_status_qual.link_down_info);
2162 &adapter->status);
2163 zfcp_erp_adapter_failed(adapter);
2164 break; 2159 break;
2165 default: 2160 default:
2166 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng"); 2161 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng");
@@ -2174,11 +2169,13 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2174 2169
2175/** 2170/**
2176 * zfcp_fsf_exchange_port_data - request information about local port 2171 * zfcp_fsf_exchange_port_data - request information about local port
2172 * @erp_action: ERP action for the adapter for which port data is requested
2177 * @adapter: for which port data is requested 2173 * @adapter: for which port data is requested
2178 * @data: response to exchange port data request 2174 * @data: response to exchange port data request
2179 */ 2175 */
2180int 2176int
2181zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter, 2177zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2178 struct zfcp_adapter *adapter,
2182 struct fsf_qtcb_bottom_port *data) 2179 struct fsf_qtcb_bottom_port *data)
2183{ 2180{
2184 volatile struct qdio_buffer_element *sbale; 2181 volatile struct qdio_buffer_element *sbale;
@@ -2187,7 +2184,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2187 struct zfcp_fsf_req *fsf_req; 2184 struct zfcp_fsf_req *fsf_req;
2188 struct timer_list *timer; 2185 struct timer_list *timer;
2189 2186
2190 if(!(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT)){ 2187 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2191 ZFCP_LOG_INFO("error: exchange port data " 2188 ZFCP_LOG_INFO("error: exchange port data "
2192 "command not supported by adapter %s\n", 2189 "command not supported by adapter %s\n",
2193 zfcp_get_busid_by_adapter(adapter)); 2190 zfcp_get_busid_by_adapter(adapter));
@@ -2211,12 +2208,18 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2211 goto out; 2208 goto out;
2212 } 2209 }
2213 2210
2211 if (erp_action) {
2212 erp_action->fsf_req = fsf_req;
2213 fsf_req->erp_action = erp_action;
2214 }
2215
2216 if (data)
2217 fsf_req->data = (unsigned long) data;
2218
2214 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 2219 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2215 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2220 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2216 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2221 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2217 2222
2218 fsf_req->data.port_data = data;
2219
2220 init_timer(timer); 2223 init_timer(timer);
2221 timer->function = zfcp_fsf_request_timeout_handler; 2224 timer->function = zfcp_fsf_request_timeout_handler;
2222 timer->data = (unsigned long) adapter; 2225 timer->data = (unsigned long) adapter;
@@ -2228,6 +2231,8 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2228 "command on the adapter %s\n", 2231 "command on the adapter %s\n",
2229 zfcp_get_busid_by_adapter(adapter)); 2232 zfcp_get_busid_by_adapter(adapter));
2230 zfcp_fsf_req_free(fsf_req); 2233 zfcp_fsf_req_free(fsf_req);
2234 if (erp_action)
2235 erp_action->fsf_req = NULL;
2231 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2236 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2232 lock_flags); 2237 lock_flags);
2233 goto out; 2238 goto out;
@@ -2256,21 +2261,42 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2256static void 2261static void
2257zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2262zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
2258{ 2263{
2259 struct fsf_qtcb_bottom_port *bottom; 2264 struct zfcp_adapter *adapter = fsf_req->adapter;
2260 struct fsf_qtcb_bottom_port *data = fsf_req->data.port_data; 2265 struct Scsi_Host *shost = adapter->scsi_host;
2266 struct fsf_qtcb *qtcb = fsf_req->qtcb;
2267 struct fsf_qtcb_bottom_port *bottom, *data;
2261 2268
2262 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2269 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2263 return; 2270 return;
2264 2271
2265 switch (fsf_req->qtcb->header.fsf_status) { 2272 switch (qtcb->header.fsf_status) {
2266 case FSF_GOOD: 2273 case FSF_GOOD:
2267 bottom = &fsf_req->qtcb->bottom.port; 2274 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2268 memcpy(data, bottom, sizeof(*data)); 2275
2276 bottom = &qtcb->bottom.port;
2277 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2278 if (data)
2279 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2280 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
2281 adapter->physical_wwpn = bottom->wwpn;
2282 adapter->physical_s_id = bottom->fc_port_id;
2283 } else {
2284 adapter->physical_wwpn = fc_host_port_name(shost);
2285 adapter->physical_s_id = fc_host_port_id(shost);
2286 }
2287 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2288 break;
2289
2290 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2291 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2292
2293 zfcp_fsf_link_down_info_eval(adapter,
2294 &qtcb->header.fsf_status_qual.link_down_info);
2269 break; 2295 break;
2270 2296
2271 default: 2297 default:
2272 debug_text_event(fsf_req->adapter->erp_dbf, 0, "xchg-port-ng"); 2298 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
2273 debug_event(fsf_req->adapter->erp_dbf, 0, 2299 debug_event(adapter->erp_dbf, 0,
2274 &fsf_req->qtcb->header.fsf_status, sizeof(u32)); 2300 &fsf_req->qtcb->header.fsf_status, sizeof(u32));
2275 } 2301 }
2276} 2302}
@@ -2312,7 +2338,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2312 2338
2313 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id; 2339 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2314 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status); 2340 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2315 erp_action->fsf_req->data.open_port.port = erp_action->port; 2341 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2316 erp_action->fsf_req->erp_action = erp_action; 2342 erp_action->fsf_req->erp_action = erp_action;
2317 2343
2318 /* start QDIO request for this FSF request */ 2344 /* start QDIO request for this FSF request */
@@ -2353,7 +2379,7 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
2353 struct fsf_qtcb_header *header; 2379 struct fsf_qtcb_header *header;
2354 u16 subtable, rule, counter; 2380 u16 subtable, rule, counter;
2355 2381
2356 port = fsf_req->data.open_port.port; 2382 port = (struct zfcp_port *) fsf_req->data;
2357 header = &fsf_req->qtcb->header; 2383 header = &fsf_req->qtcb->header;
2358 2384
2359 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2385 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2566,7 +2592,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2566 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2592 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2567 2593
2568 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); 2594 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
2569 erp_action->fsf_req->data.close_port.port = erp_action->port; 2595 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2570 erp_action->fsf_req->erp_action = erp_action; 2596 erp_action->fsf_req->erp_action = erp_action;
2571 erp_action->fsf_req->qtcb->header.port_handle = 2597 erp_action->fsf_req->qtcb->header.port_handle =
2572 erp_action->port->handle; 2598 erp_action->port->handle;
@@ -2606,7 +2632,7 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
2606 int retval = -EINVAL; 2632 int retval = -EINVAL;
2607 struct zfcp_port *port; 2633 struct zfcp_port *port;
2608 2634
2609 port = fsf_req->data.close_port.port; 2635 port = (struct zfcp_port *) fsf_req->data;
2610 2636
2611 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2637 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2612 /* don't change port status in our bookkeeping */ 2638 /* don't change port status in our bookkeeping */
@@ -2703,8 +2729,8 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2703 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, 2729 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
2704 &erp_action->port->status); 2730 &erp_action->port->status);
2705 /* save a pointer to this port */ 2731 /* save a pointer to this port */
2706 erp_action->fsf_req->data.close_physical_port.port = erp_action->port; 2732 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2707 /* port to be closeed */ 2733 /* port to be closed */
2708 erp_action->fsf_req->qtcb->header.port_handle = 2734 erp_action->fsf_req->qtcb->header.port_handle =
2709 erp_action->port->handle; 2735 erp_action->port->handle;
2710 erp_action->fsf_req->erp_action = erp_action; 2736 erp_action->fsf_req->erp_action = erp_action;
@@ -2747,7 +2773,7 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
2747 struct fsf_qtcb_header *header; 2773 struct fsf_qtcb_header *header;
2748 u16 subtable, rule, counter; 2774 u16 subtable, rule, counter;
2749 2775
2750 port = fsf_req->data.close_physical_port.port; 2776 port = (struct zfcp_port *) fsf_req->data;
2751 header = &fsf_req->qtcb->header; 2777 header = &fsf_req->qtcb->header;
2752 2778
2753 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2779 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2908,10 +2934,11 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2908 erp_action->port->handle; 2934 erp_action->port->handle;
2909 erp_action->fsf_req->qtcb->bottom.support.fcp_lun = 2935 erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
2910 erp_action->unit->fcp_lun; 2936 erp_action->unit->fcp_lun;
2937 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2911 erp_action->fsf_req->qtcb->bottom.support.option = 2938 erp_action->fsf_req->qtcb->bottom.support.option =
2912 FSF_OPEN_LUN_SUPPRESS_BOXING; 2939 FSF_OPEN_LUN_SUPPRESS_BOXING;
2913 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); 2940 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
2914 erp_action->fsf_req->data.open_unit.unit = erp_action->unit; 2941 erp_action->fsf_req->data = (unsigned long) erp_action->unit;
2915 erp_action->fsf_req->erp_action = erp_action; 2942 erp_action->fsf_req->erp_action = erp_action;
2916 2943
2917 /* start QDIO request for this FSF request */ 2944 /* start QDIO request for this FSF request */
@@ -2955,9 +2982,9 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
2955 struct fsf_qtcb_bottom_support *bottom; 2982 struct fsf_qtcb_bottom_support *bottom;
2956 struct fsf_queue_designator *queue_designator; 2983 struct fsf_queue_designator *queue_designator;
2957 u16 subtable, rule, counter; 2984 u16 subtable, rule, counter;
2958 u32 allowed, exclusive, readwrite; 2985 int exclusive, readwrite;
2959 2986
2960 unit = fsf_req->data.open_unit.unit; 2987 unit = (struct zfcp_unit *) fsf_req->data;
2961 2988
2962 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2989 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2963 /* don't change unit status in our bookkeeping */ 2990 /* don't change unit status in our bookkeeping */
@@ -2969,10 +2996,6 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
2969 bottom = &fsf_req->qtcb->bottom.support; 2996 bottom = &fsf_req->qtcb->bottom.support;
2970 queue_designator = &header->fsf_status_qual.fsf_queue_designator; 2997 queue_designator = &header->fsf_status_qual.fsf_queue_designator;
2971 2998
2972 allowed = bottom->lun_access_info & FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED;
2973 exclusive = bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE;
2974 readwrite = bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER;
2975
2976 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 2999 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
2977 ZFCP_STATUS_UNIT_SHARED | 3000 ZFCP_STATUS_UNIT_SHARED |
2978 ZFCP_STATUS_UNIT_READONLY, 3001 ZFCP_STATUS_UNIT_READONLY,
@@ -3146,10 +3169,15 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
3146 unit->handle); 3169 unit->handle);
3147 /* mark unit as open */ 3170 /* mark unit as open */
3148 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 3171 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
3149 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 3172
3150 ZFCP_STATUS_COMMON_ACCESS_BOXED, 3173 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
3151 &unit->status); 3174 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
3152 if (adapter->supported_features & FSF_FEATURE_LUN_SHARING){ 3175 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
3176 exclusive = (bottom->lun_access_info &
3177 FSF_UNIT_ACCESS_EXCLUSIVE);
3178 readwrite = (bottom->lun_access_info &
3179 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
3180
3153 if (!exclusive) 3181 if (!exclusive)
3154 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED, 3182 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
3155 &unit->status); 3183 &unit->status);
@@ -3242,7 +3270,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3242 erp_action->port->handle; 3270 erp_action->port->handle;
3243 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle; 3271 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3244 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); 3272 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
3245 erp_action->fsf_req->data.close_unit.unit = erp_action->unit; 3273 erp_action->fsf_req->data = (unsigned long) erp_action->unit;
3246 erp_action->fsf_req->erp_action = erp_action; 3274 erp_action->fsf_req->erp_action = erp_action;
3247 3275
3248 /* start QDIO request for this FSF request */ 3276 /* start QDIO request for this FSF request */
@@ -3281,7 +3309,7 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3281 int retval = -EINVAL; 3309 int retval = -EINVAL;
3282 struct zfcp_unit *unit; 3310 struct zfcp_unit *unit;
3283 3311
3284 unit = fsf_req->data.close_unit.unit; /* restore unit */ 3312 unit = (struct zfcp_unit *) fsf_req->data;
3285 3313
3286 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 3314 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
3287 /* don't change unit status in our bookkeeping */ 3315 /* don't change unit status in our bookkeeping */
@@ -3305,9 +3333,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3305 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3333 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3306 "fsf_s_phand_nv"); 3334 "fsf_s_phand_nv");
3307 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 3335 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3308 zfcp_cmd_dbf_event_fsf("porthinv", fsf_req,
3309 &fsf_req->qtcb->header.fsf_status_qual,
3310 sizeof (union fsf_status_qual));
3311 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3336 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3312 break; 3337 break;
3313 3338
@@ -3326,9 +3351,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3326 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3351 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3327 "fsf_s_lhand_nv"); 3352 "fsf_s_lhand_nv");
3328 zfcp_erp_port_reopen(unit->port, 0); 3353 zfcp_erp_port_reopen(unit->port, 0);
3329 zfcp_cmd_dbf_event_fsf("lunhinv", fsf_req,
3330 &fsf_req->qtcb->header.fsf_status_qual,
3331 sizeof (union fsf_status_qual));
3332 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3354 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3333 break; 3355 break;
3334 3356
@@ -3436,21 +3458,14 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3436 goto failed_req_create; 3458 goto failed_req_create;
3437 } 3459 }
3438 3460
3439 /* 3461 zfcp_unit_get(unit);
3440 * associate FSF request with SCSI request 3462 fsf_req->unit = unit;
3441 * (need this for look up on abort)
3442 */
3443 fsf_req->data.send_fcp_command_task.fsf_req = fsf_req;
3444 scsi_cmnd->host_scribble = (char *) &(fsf_req->data);
3445 3463
3446 /* 3464 /* associate FSF request with SCSI request (for look up on abort) */
3447 * associate SCSI command with FSF request 3465 scsi_cmnd->host_scribble = (char *) fsf_req;
3448 * (need this for look up on normal command completion) 3466
3449 */ 3467 /* associate SCSI command with FSF request */
3450 fsf_req->data.send_fcp_command_task.scsi_cmnd = scsi_cmnd; 3468 fsf_req->data = (unsigned long) scsi_cmnd;
3451 fsf_req->data.send_fcp_command_task.start_jiffies = jiffies;
3452 fsf_req->data.send_fcp_command_task.unit = unit;
3453 ZFCP_LOG_DEBUG("unit=%p, fcp_lun=0x%016Lx\n", unit, unit->fcp_lun);
3454 3469
3455 /* set handles of unit and its parent port in QTCB */ 3470 /* set handles of unit and its parent port in QTCB */
3456 fsf_req->qtcb->header.lun_handle = unit->handle; 3471 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3584,6 +3599,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3584 send_failed: 3599 send_failed:
3585 no_fit: 3600 no_fit:
3586 failed_scsi_cmnd: 3601 failed_scsi_cmnd:
3602 zfcp_unit_put(unit);
3587 zfcp_fsf_req_free(fsf_req); 3603 zfcp_fsf_req_free(fsf_req);
3588 fsf_req = NULL; 3604 fsf_req = NULL;
3589 scsi_cmnd->host_scribble = NULL; 3605 scsi_cmnd->host_scribble = NULL;
@@ -3640,7 +3656,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
3640 * hold a pointer to the unit being target of this 3656 * hold a pointer to the unit being target of this
3641 * task management request 3657 * task management request
3642 */ 3658 */
3643 fsf_req->data.send_fcp_command_task_management.unit = unit; 3659 fsf_req->data = (unsigned long) unit;
3644 3660
3645 /* set FSF related fields in QTCB */ 3661 /* set FSF related fields in QTCB */
3646 fsf_req->qtcb->header.lun_handle = unit->handle; 3662 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3706,9 +3722,9 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3706 header = &fsf_req->qtcb->header; 3722 header = &fsf_req->qtcb->header;
3707 3723
3708 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) 3724 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
3709 unit = fsf_req->data.send_fcp_command_task_management.unit; 3725 unit = (struct zfcp_unit *) fsf_req->data;
3710 else 3726 else
3711 unit = fsf_req->data.send_fcp_command_task.unit; 3727 unit = fsf_req->unit;
3712 3728
3713 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 3729 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
3714 /* go directly to calls of special handlers */ 3730 /* go directly to calls of special handlers */
@@ -3765,10 +3781,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3765 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3781 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3766 "fsf_s_hand_mis"); 3782 "fsf_s_hand_mis");
3767 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 3783 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3768 zfcp_cmd_dbf_event_fsf("handmism",
3769 fsf_req,
3770 &header->fsf_status_qual,
3771 sizeof (union fsf_status_qual));
3772 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3784 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3773 break; 3785 break;
3774 3786
@@ -3789,10 +3801,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3789 debug_text_exception(fsf_req->adapter->erp_dbf, 0, 3801 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
3790 "fsf_s_class_nsup"); 3802 "fsf_s_class_nsup");
3791 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3803 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3792 zfcp_cmd_dbf_event_fsf("unsclass",
3793 fsf_req,
3794 &header->fsf_status_qual,
3795 sizeof (union fsf_status_qual));
3796 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3804 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3797 break; 3805 break;
3798 3806
@@ -3811,10 +3819,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3811 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3819 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3812 "fsf_s_fcp_lun_nv"); 3820 "fsf_s_fcp_lun_nv");
3813 zfcp_erp_port_reopen(unit->port, 0); 3821 zfcp_erp_port_reopen(unit->port, 0);
3814 zfcp_cmd_dbf_event_fsf("fluninv",
3815 fsf_req,
3816 &header->fsf_status_qual,
3817 sizeof (union fsf_status_qual));
3818 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3822 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3819 break; 3823 break;
3820 3824
@@ -3853,10 +3857,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3853 debug_text_event(fsf_req->adapter->erp_dbf, 0, 3857 debug_text_event(fsf_req->adapter->erp_dbf, 0,
3854 "fsf_s_dir_ind_nv"); 3858 "fsf_s_dir_ind_nv");
3855 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3859 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3856 zfcp_cmd_dbf_event_fsf("dirinv",
3857 fsf_req,
3858 &header->fsf_status_qual,
3859 sizeof (union fsf_status_qual));
3860 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3860 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3861 break; 3861 break;
3862 3862
@@ -3872,10 +3872,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3872 debug_text_event(fsf_req->adapter->erp_dbf, 0, 3872 debug_text_event(fsf_req->adapter->erp_dbf, 0,
3873 "fsf_s_cmd_len_nv"); 3873 "fsf_s_cmd_len_nv");
3874 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3874 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3875 zfcp_cmd_dbf_event_fsf("cleninv",
3876 fsf_req,
3877 &header->fsf_status_qual,
3878 sizeof (union fsf_status_qual));
3879 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3875 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3880 break; 3876 break;
3881 3877
@@ -3947,6 +3943,8 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3947 zfcp_fsf_send_fcp_command_task_management_handler(fsf_req); 3943 zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
3948 } else { 3944 } else {
3949 retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req); 3945 retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
3946 fsf_req->unit = NULL;
3947 zfcp_unit_put(unit);
3950 } 3948 }
3951 return retval; 3949 return retval;
3952} 3950}
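The FCP command hunks above switch to a reference-count discipline on the unit: zfcp_unit_get() is taken before the command is queued, and zfcp_unit_put() is called both on the failure path (after failed_scsi_cmnd) and in the completion handler once the task handler has finished. Below is a minimal, self-contained sketch of that get/put pairing for an asynchronous request; the sketch_* names and the plain counter are stand-ins, not the zfcp implementation.

struct sketch_unit {
        int refcount;                           /* plain counter for illustration only */
};

static void sketch_unit_get(struct sketch_unit *u) { u->refcount++; }
static void sketch_unit_put(struct sketch_unit *u) { u->refcount--; }

/* submit path: take a reference for the lifetime of the queued request */
static int sketch_submit(struct sketch_unit *u, int queue_full)
{
        sketch_unit_get(u);

        if (queue_full) {                       /* cf. the failed_scsi_cmnd: label above */
                sketch_unit_put(u);             /* nothing was queued, drop it again */
                return -1;
        }
        return 0;                               /* reference now owned by the request */
}

/* completion path: drop the reference once the handler is done with the unit */
static void sketch_complete(struct sketch_unit *u)
{
        sketch_unit_put(u);                     /* cf. zfcp_unit_put() after the task handler */
}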
@@ -3970,10 +3968,10 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
3970 u32 sns_len; 3968 u32 sns_len;
3971 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 3969 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
3972 unsigned long flags; 3970 unsigned long flags;
3973 struct zfcp_unit *unit = fsf_req->data.send_fcp_command_task.unit; 3971 struct zfcp_unit *unit = fsf_req->unit;
3974 3972
3975 read_lock_irqsave(&fsf_req->adapter->abort_lock, flags); 3973 read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
3976 scpnt = fsf_req->data.send_fcp_command_task.scsi_cmnd; 3974 scpnt = (struct scsi_cmnd *) fsf_req->data;
3977 if (unlikely(!scpnt)) { 3975 if (unlikely(!scpnt)) {
3978 ZFCP_LOG_DEBUG 3976 ZFCP_LOG_DEBUG
3979 ("Command with fsf_req %p is not associated to " 3977 ("Command with fsf_req %p is not associated to "
@@ -4043,7 +4041,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4043 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4041 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4044 (char *) &fsf_req->qtcb-> 4042 (char *) &fsf_req->qtcb->
4045 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4043 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4046 zfcp_cmd_dbf_event_fsf("clenmis", fsf_req, NULL, 0);
4047 set_host_byte(&scpnt->result, DID_ERROR); 4044 set_host_byte(&scpnt->result, DID_ERROR);
4048 goto skip_fsfstatus; 4045 goto skip_fsfstatus;
4049 case RSP_CODE_FIELD_INVALID: 4046 case RSP_CODE_FIELD_INVALID:
@@ -4062,7 +4059,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4062 (char *) &fsf_req->qtcb-> 4059 (char *) &fsf_req->qtcb->
4063 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4060 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4064 set_host_byte(&scpnt->result, DID_ERROR); 4061 set_host_byte(&scpnt->result, DID_ERROR);
4065 zfcp_cmd_dbf_event_fsf("codeinv", fsf_req, NULL, 0);
4066 goto skip_fsfstatus; 4062 goto skip_fsfstatus;
4067 case RSP_CODE_RO_MISMATCH: 4063 case RSP_CODE_RO_MISMATCH:
4068 /* hardware bug */ 4064 /* hardware bug */
@@ -4079,7 +4075,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4079 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4075 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4080 (char *) &fsf_req->qtcb-> 4076 (char *) &fsf_req->qtcb->
4081 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4077 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4082 zfcp_cmd_dbf_event_fsf("codemism", fsf_req, NULL, 0);
4083 set_host_byte(&scpnt->result, DID_ERROR); 4078 set_host_byte(&scpnt->result, DID_ERROR);
4084 goto skip_fsfstatus; 4079 goto skip_fsfstatus;
4085 default: 4080 default:
@@ -4096,7 +4091,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4096 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4091 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4097 (char *) &fsf_req->qtcb-> 4092 (char *) &fsf_req->qtcb->
4098 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4093 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4099 zfcp_cmd_dbf_event_fsf("undeffcp", fsf_req, NULL, 0);
4100 set_host_byte(&scpnt->result, DID_ERROR); 4094 set_host_byte(&scpnt->result, DID_ERROR);
4101 goto skip_fsfstatus; 4095 goto skip_fsfstatus;
4102 } 4096 }
@@ -4158,19 +4152,17 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4158 skip_fsfstatus: 4152 skip_fsfstatus:
4159 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4153 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
4160 4154
4161 zfcp_cmd_dbf_event_scsi("response", scpnt); 4155 if (scpnt->result != 0)
4156 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt);
4157 else if (scpnt->retries > 0)
4158 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt);
4159 else
4160 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt);
4162 4161
4163 /* cleanup pointer (need this especially for abort) */ 4162 /* cleanup pointer (need this especially for abort) */
4164 scpnt->host_scribble = NULL; 4163 scpnt->host_scribble = NULL;
4165 4164
4166 /*
4167 * NOTE:
4168 * according to the outcome of a discussion on linux-scsi we
4169 * don't need to grab the io_request_lock here since we use
4170 * the new eh
4171 */
4172 /* always call back */ 4165 /* always call back */
4173
4174 (scpnt->scsi_done) (scpnt); 4166 (scpnt->scsi_done) (scpnt);
4175 4167
4176 /* 4168 /*
@@ -4198,8 +4190,7 @@ zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
4198 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 4190 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
4199 &(fsf_req->qtcb->bottom.io.fcp_rsp); 4191 &(fsf_req->qtcb->bottom.io.fcp_rsp);
4200 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 4192 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
4201 struct zfcp_unit *unit = 4193 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
4202 fsf_req->data.send_fcp_command_task_management.unit;
4203 4194
4204 del_timer(&fsf_req->adapter->scsi_er_timer); 4195 del_timer(&fsf_req->adapter->scsi_er_timer);
4205 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 4196 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -4276,7 +4267,7 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4276 int direction; 4267 int direction;
4277 int retval = 0; 4268 int retval = 0;
4278 4269
4279 if (!(adapter->supported_features & FSF_FEATURE_CFDC)) { 4270 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) {
4280 ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n", 4271 ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
4281 zfcp_get_busid_by_adapter(adapter)); 4272 zfcp_get_busid_by_adapter(adapter));
4282 retval = -EOPNOTSUPP; 4273 retval = -EOPNOTSUPP;
@@ -4549,52 +4540,6 @@ skip_fsfstatus:
4549 return retval; 4540 return retval;
4550} 4541}
4551 4542
4552
4553/*
4554 * function: zfcp_fsf_req_wait_and_cleanup
4555 *
4556 * purpose:
4557 *
4558 * FIXME(design): signal seems to be <0 !!!
4559 * returns: 0 - request completed (*status is valid), cleanup succ.
4560 * <0 - request completed (*status is valid), cleanup failed
4561 * >0 - signal which interrupted waiting (*status invalid),
4562 * request not completed, no cleanup
4563 *
4564 * *status is a copy of status of completed fsf_req
4565 */
4566int
4567zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req,
4568 int interruptible, u32 * status)
4569{
4570 int retval = 0;
4571 int signal = 0;
4572
4573 if (interruptible) {
4574 __wait_event_interruptible(fsf_req->completion_wq,
4575 fsf_req->status &
4576 ZFCP_STATUS_FSFREQ_COMPLETED,
4577 signal);
4578 if (signal) {
4579 ZFCP_LOG_DEBUG("Caught signal %i while waiting for the "
4580 "completion of the request at %p\n",
4581 signal, fsf_req);
4582 retval = signal;
4583 goto out;
4584 }
4585 } else {
4586 __wait_event(fsf_req->completion_wq,
4587 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4588 }
4589
4590 *status = fsf_req->status;
4591
4592 /* cleanup request */
4593 zfcp_fsf_req_free(fsf_req);
4594 out:
4595 return retval;
4596}
4597
4598static inline int 4543static inline int
4599zfcp_fsf_req_sbal_check(unsigned long *flags, 4544zfcp_fsf_req_sbal_check(unsigned long *flags,
4600 struct zfcp_qdio_queue *queue, int needed) 4545 struct zfcp_qdio_queue *queue, int needed)
@@ -4610,15 +4555,16 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
4610 * set qtcb pointer in fsf_req and initialize QTCB 4555 * set qtcb pointer in fsf_req and initialize QTCB
4611 */ 4556 */
4612static inline void 4557static inline void
4613zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req, u32 fsf_cmd) 4558zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4614{ 4559{
4615 if (likely(fsf_req->qtcb != NULL)) { 4560 if (likely(fsf_req->qtcb != NULL)) {
4561 fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no;
4616 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; 4562 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
4617 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; 4563 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4618 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd]; 4564 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command];
4619 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; 4565 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4620 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; 4566 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
4621 fsf_req->qtcb->header.fsf_command = fsf_cmd; 4567 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4622 } 4568 }
4623} 4569}
4624 4570
@@ -4686,7 +4632,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4686 goto failed_fsf_req; 4632 goto failed_fsf_req;
4687 } 4633 }
4688 4634
4689 zfcp_fsf_req_qtcb_init(fsf_req, fsf_cmd); 4635 fsf_req->adapter = adapter;
4636 fsf_req->fsf_command = fsf_cmd;
4637
4638 zfcp_fsf_req_qtcb_init(fsf_req);
4690 4639
4691 /* initialize waitqueue which may be used to wait on 4640 /* initialize waitqueue which may be used to wait on
4692 this request completion */ 4641 this request completion */
@@ -4708,8 +4657,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4708 goto failed_sbals; 4657 goto failed_sbals;
4709 } 4658 }
4710 4659
4711 fsf_req->adapter = adapter; /* pointer to "parent" adapter */ 4660 if (fsf_req->qtcb) {
4712 fsf_req->fsf_command = fsf_cmd; 4661 fsf_req->seq_no = adapter->fsf_req_seq_no;
4662 fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
4663 }
4713 fsf_req->sbal_number = 1; 4664 fsf_req->sbal_number = 1;
4714 fsf_req->sbal_first = req_queue->free_index; 4665 fsf_req->sbal_first = req_queue->free_index;
4715 fsf_req->sbal_curr = req_queue->free_index; 4666 fsf_req->sbal_curr = req_queue->free_index;
@@ -4760,9 +4711,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4760 struct zfcp_adapter *adapter; 4711 struct zfcp_adapter *adapter;
4761 struct zfcp_qdio_queue *req_queue; 4712 struct zfcp_qdio_queue *req_queue;
4762 volatile struct qdio_buffer_element *sbale; 4713 volatile struct qdio_buffer_element *sbale;
4714 int inc_seq_no;
4763 int new_distance_from_int; 4715 int new_distance_from_int;
4764 unsigned long flags; 4716 unsigned long flags;
4765 int inc_seq_no = 1;
4766 int retval = 0; 4717 int retval = 0;
4767 4718
4768 adapter = fsf_req->adapter; 4719 adapter = fsf_req->adapter;
@@ -4776,23 +4727,13 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4776 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, 4727 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
4777 sbale[1].length); 4728 sbale[1].length);
4778 4729
4779 /* set sequence counter in QTCB */
4780 if (likely(fsf_req->qtcb)) {
4781 fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
4782 fsf_req->seq_no = adapter->fsf_req_seq_no;
4783 ZFCP_LOG_TRACE("FSF request %p of adapter %s gets "
4784 "FSF sequence counter value of %i\n",
4785 fsf_req,
4786 zfcp_get_busid_by_adapter(adapter),
4787 fsf_req->qtcb->prefix.req_seq_no);
4788 } else
4789 inc_seq_no = 0;
4790
4791 /* put allocated FSF request at list tail */ 4730 /* put allocated FSF request at list tail */
4792 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4731 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
4793 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); 4732 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
4794 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4733 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
4795 4734
4735 inc_seq_no = (fsf_req->qtcb != NULL);
4736
4796 /* figure out expiration time of timeout and start timeout */ 4737 /* figure out expiration time of timeout and start timeout */
4797 if (unlikely(timer)) { 4738 if (unlikely(timer)) {
4798 timer->expires += jiffies; 4739 timer->expires += jiffies;
@@ -4822,6 +4763,8 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4822 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */ 4763 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */
4823 new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req); 4764 new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
4824 4765
4766 fsf_req->issued = get_clock();
4767
4825 retval = do_QDIO(adapter->ccw_device, 4768 retval = do_QDIO(adapter->ccw_device,
4826 QDIO_FLAG_SYNC_OUTPUT, 4769 QDIO_FLAG_SYNC_OUTPUT,
4827 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); 4770 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
@@ -4860,15 +4803,11 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4860 * routines resulting in missing sequence counter values 4803 * routines resulting in missing sequence counter values
4861 * otherwise, 4804 * otherwise,
4862 */ 4805 */
4806
4863 /* Don't increase for unsolicited status */ 4807 /* Don't increase for unsolicited status */
4864 if (likely(inc_seq_no)) { 4808 if (inc_seq_no)
4865 adapter->fsf_req_seq_no++; 4809 adapter->fsf_req_seq_no++;
4866 ZFCP_LOG_TRACE 4810
4867 ("FSF sequence counter value of adapter %s "
4868 "increased to %i\n",
4869 zfcp_get_busid_by_adapter(adapter),
4870 adapter->fsf_req_seq_no);
4871 }
4872 /* count FSF requests pending */ 4811 /* count FSF requests pending */
4873 atomic_inc(&adapter->fsf_reqs_active); 4812 atomic_inc(&adapter->fsf_reqs_active);
4874 } 4813 }
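A change that recurs throughout the zfcp_fsf.c hunks above: the per-request union behind fsf_req->data (data.send_ct, data.send_els, data.open_port.port, data.close_unit.unit, data.send_fcp_command_task.*, ...) is replaced by a single unsigned long. Setup code stores the payload pointer with a cast, each handler casts it back to the type it expects, the FCP path keeps its unit in the separate fsf_req->unit member, and scsi_cmnd->host_scribble now holds the fsf_req pointer itself. The following minimal, self-contained sketch shows only the store-and-cast pattern; it is not taken from the patch, and the sketch_* names are illustrative stand-ins.

struct sketch_payload {                         /* stands in for zfcp_send_ct, zfcp_send_els, ... */
        void (*handler)(struct sketch_payload *);
};

struct sketch_fsf_req {
        unsigned long data;                     /* generic payload slot, formerly a union */
};

/* setup path: store the payload pointer, cf. fsf_req->data = (unsigned long) ct; */
static void sketch_setup(struct sketch_fsf_req *req, struct sketch_payload *p)
{
        req->data = (unsigned long) p;
}

/* completion path: cast it back, cf. send_ct = (struct zfcp_send_ct *) fsf_req->data; */
static void sketch_complete(struct sketch_fsf_req *req)
{
        struct sketch_payload *p = (struct sketch_payload *) req->data;

        if (p && p->handler)
                p->handler(p);                  /* cf. the request-specific handler named in the kerneldoc above */
}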
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 07140dfda2a7..48719f055952 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -116,6 +116,7 @@
116#define FSF_INVALID_COMMAND_OPTION 0x000000E5 116#define FSF_INVALID_COMMAND_OPTION 0x000000E5
117/* #define FSF_ERROR 0x000000FF */ 117/* #define FSF_ERROR 0x000000FF */
118 118
119#define FSF_PROT_STATUS_QUAL_SIZE 16
119#define FSF_STATUS_QUALIFIER_SIZE 16 120#define FSF_STATUS_QUALIFIER_SIZE 16
120 121
121/* FSF status qualifier, recommendations */ 122/* FSF status qualifier, recommendations */
@@ -139,9 +140,18 @@
139#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004 140#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
140 141
141/* FSF status qualifier (most significant 4 bytes), local link down */ 142/* FSF status qualifier (most significant 4 bytes), local link down */
142#define FSF_PSQ_LINK_NOLIGHT 0x00000004 143#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
143#define FSF_PSQ_LINK_WRAPPLUG 0x00000008 144#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
144#define FSF_PSQ_LINK_NOFCP 0x00000010 145#define FSF_PSQ_LINK_NO_FCP 0x00000010
146#define FSF_PSQ_LINK_FIRMWARE_UPDATE 0x00000020
147#define FSF_PSQ_LINK_INVALID_WWPN 0x00000100
148#define FSF_PSQ_LINK_NO_NPIV_SUPPORT 0x00000200
149#define FSF_PSQ_LINK_NO_FCP_RESOURCES 0x00000400
150#define FSF_PSQ_LINK_NO_FABRIC_RESOURCES 0x00000800
151#define FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE 0x00001000
152#define FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED 0x00002000
153#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED 0x00004000
154#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT 0x00008000
145 155
146/* payload size in status read buffer */ 156/* payload size in status read buffer */
147#define FSF_STATUS_READ_PAYLOAD_SIZE 4032 157#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
@@ -154,15 +164,21 @@
154#define FSF_STATUS_READ_INCOMING_ELS 0x00000002 164#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
155#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003 165#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003
156#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004 166#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
157#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */ 167#define FSF_STATUS_READ_LINK_DOWN 0x00000005
158#define FSF_STATUS_READ_LINK_UP 0x00000006 168#define FSF_STATUS_READ_LINK_UP 0x00000006
159#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A 169#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
160#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B 170#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
171#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
161 172
162/* status subtypes in status read buffer */ 173/* status subtypes in status read buffer */
163#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001 174#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
164#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002 175#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
165 176
177/* status subtypes for link down */
178#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
179#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001
180#define FSF_STATUS_READ_SUB_FIRMWARE_UPDATE 0x00000002
181
166/* status subtypes for CFDC */ 182/* status subtypes for CFDC */
167#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002 183#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
168#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F 184#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
@@ -193,11 +209,15 @@
193#define FSF_QTCB_LOG_SIZE 1024 209#define FSF_QTCB_LOG_SIZE 1024
194 210
195/* channel features */ 211/* channel features */
196#define FSF_FEATURE_QTCB_SUPPRESSION 0x00000001
197#define FSF_FEATURE_CFDC 0x00000002 212#define FSF_FEATURE_CFDC 0x00000002
198#define FSF_FEATURE_LUN_SHARING 0x00000004 213#define FSF_FEATURE_LUN_SHARING 0x00000004
199#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 214#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
200#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 215#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
216#define FSF_FEATURE_UPDATE_ALERT 0x00000100
217
218/* host connection features */
219#define FSF_FEATURE_NPIV_MODE 0x00000001
220#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
201 221
202/* option */ 222/* option */
203#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001 223#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
@@ -305,15 +325,23 @@ struct fsf_qual_sequence_error {
305 u32 res1[3]; 325 u32 res1[3];
306} __attribute__ ((packed)); 326} __attribute__ ((packed));
307 327
308struct fsf_qual_locallink_error { 328struct fsf_link_down_info {
309 u32 code; 329 u32 error_code;
310 u32 res1[3]; 330 u32 res1;
331 u8 res2[2];
332 u8 primary_status;
333 u8 ioerr_code;
334 u8 action_code;
335 u8 reason_code;
336 u8 explanation_code;
337 u8 vendor_specific_code;
311} __attribute__ ((packed)); 338} __attribute__ ((packed));
312 339
313union fsf_prot_status_qual { 340union fsf_prot_status_qual {
341 u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
314 struct fsf_qual_version_error version_error; 342 struct fsf_qual_version_error version_error;
315 struct fsf_qual_sequence_error sequence_error; 343 struct fsf_qual_sequence_error sequence_error;
316 struct fsf_qual_locallink_error locallink_error; 344 struct fsf_link_down_info link_down_info;
317} __attribute__ ((packed)); 345} __attribute__ ((packed));
318 346
319struct fsf_qtcb_prefix { 347struct fsf_qtcb_prefix {
@@ -331,7 +359,9 @@ union fsf_status_qual {
331 u8 byte[FSF_STATUS_QUALIFIER_SIZE]; 359 u8 byte[FSF_STATUS_QUALIFIER_SIZE];
332 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)]; 360 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
333 u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)]; 361 u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
362 u64 doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(u64)];
334 struct fsf_queue_designator fsf_queue_designator; 363 struct fsf_queue_designator fsf_queue_designator;
364 struct fsf_link_down_info link_down_info;
335} __attribute__ ((packed)); 365} __attribute__ ((packed));
336 366
337struct fsf_qtcb_header { 367struct fsf_qtcb_header {
@@ -406,8 +436,8 @@ struct fsf_qtcb_bottom_config {
406 u32 low_qtcb_version; 436 u32 low_qtcb_version;
407 u32 max_qtcb_size; 437 u32 max_qtcb_size;
408 u32 max_data_transfer_size; 438 u32 max_data_transfer_size;
409 u32 supported_features; 439 u32 adapter_features;
410 u8 res1[4]; 440 u32 connection_features;
411 u32 fc_topology; 441 u32 fc_topology;
412 u32 fc_link_speed; 442 u32 fc_link_speed;
413 u32 adapter_type; 443 u32 adapter_type;
@@ -425,7 +455,7 @@ struct fsf_qtcb_bottom_config {
425} __attribute__ ((packed)); 455} __attribute__ ((packed));
426 456
427struct fsf_qtcb_bottom_port { 457struct fsf_qtcb_bottom_port {
428 u8 res1[8]; 458 u64 wwpn;
429 u32 fc_port_id; 459 u32 fc_port_id;
430 u32 port_type; 460 u32 port_type;
431 u32 port_state; 461 u32 port_state;
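The zfcp_fsf.h hunks above add struct fsf_link_down_info to both fsf_prot_status_qual and fsf_status_qual, together with the FSF_PSQ_LINK_* error codes, and the zfcp_fsf.c hunks consume it via zfcp_fsf_link_down_info_eval(adapter, &qtcb->header.fsf_status_qual.link_down_info). A minimal sketch of reading that structure follows; it is not the driver's implementation, the reporting function is invented for illustration, and the kernel's u32/u8 field types are approximated with plain C types.

#include <stdio.h>

#define FSF_PSQ_LINK_NO_LIGHT   0x00000004      /* values copied from the hunk above */
#define FSF_PSQ_LINK_WRAP_PLUG  0x00000008
#define FSF_PSQ_LINK_NO_FCP     0x00000010

struct fsf_link_down_info {                     /* layout as added above; u32 -> unsigned int, u8 -> unsigned char */
        unsigned int error_code;
        unsigned int res1;
        unsigned char res2[2];
        unsigned char primary_status;
        unsigned char ioerr_code;
        unsigned char action_code;
        unsigned char reason_code;
        unsigned char explanation_code;
        unsigned char vendor_specific_code;
};

/* illustrative only: report why the local link went down */
static void sketch_link_down_report(const struct fsf_link_down_info *info)
{
        switch (info->error_code) {
        case FSF_PSQ_LINK_NO_LIGHT:
                printf("no light detected\n");
                break;
        case FSF_PSQ_LINK_WRAP_PLUG:
                printf("wrap plug inserted\n");
                break;
        case FSF_PSQ_LINK_NO_FCP:
                printf("peer does not support FCP\n");
                break;
        default:
                printf("link down, error code 0x%08x\n", info->error_code);
        }
}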
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 24e16ec331d9..d719f66a29a4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -54,8 +54,7 @@ static inline int zfcp_qdio_sbals_from_buffer
54static qdio_handler_t zfcp_qdio_request_handler; 54static qdio_handler_t zfcp_qdio_request_handler;
55static qdio_handler_t zfcp_qdio_response_handler; 55static qdio_handler_t zfcp_qdio_response_handler;
56static int zfcp_qdio_handler_error_check(struct zfcp_adapter *, 56static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
57 unsigned int, 57 unsigned int, unsigned int, unsigned int, int, int);
58 unsigned int, unsigned int);
59 58
60#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO 59#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
61 60
@@ -214,22 +213,12 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
214 * 213 *
215 */ 214 */
216static inline int 215static inline int
217zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, 216zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
218 unsigned int status, 217 unsigned int qdio_error, unsigned int siga_error,
219 unsigned int qdio_error, unsigned int siga_error) 218 int first_element, int elements_processed)
220{ 219{
221 int retval = 0; 220 int retval = 0;
222 221
223 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) {
224 if (status & QDIO_STATUS_INBOUND_INT) {
225 ZFCP_LOG_TRACE("status is"
226 " QDIO_STATUS_INBOUND_INT \n");
227 }
228 if (status & QDIO_STATUS_OUTBOUND_INT) {
229 ZFCP_LOG_TRACE("status is"
230 " QDIO_STATUS_OUTBOUND_INT \n");
231 }
232 }
233 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { 222 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
234 retval = -EIO; 223 retval = -EIO;
235 224
@@ -237,9 +226,10 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
237 "qdio_error=0x%x, siga_error=0x%x)\n", 226 "qdio_error=0x%x, siga_error=0x%x)\n",
238 status, qdio_error, siga_error); 227 status, qdio_error, siga_error);
239 228
240 /* Restarting IO on the failed adapter from scratch */ 229 zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
241 debug_text_event(adapter->erp_dbf, 1, "qdio_err"); 230 first_element, elements_processed);
242 /* 231 /*
232 * Restarting IO on the failed adapter from scratch.
243 * Since we have been using this adapter, it is save to assume 233 * Since we have been using this adapter, it is save to assume
244 * that it is not failed but recoverable. The card seems to 234 * that it is not failed but recoverable. The card seems to
245 * report link-up events by self-initiated queue shutdown. 235 * report link-up events by self-initiated queue shutdown.
@@ -282,7 +272,8 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
282 first_element, elements_processed); 272 first_element, elements_processed);
283 273
284 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, 274 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
285 siga_error))) 275 siga_error, first_element,
276 elements_processed)))
286 goto out; 277 goto out;
287 /* 278 /*
288 * we stored address of struct zfcp_adapter data structure 279 * we stored address of struct zfcp_adapter data structure
@@ -334,7 +325,8 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
334 queue = &adapter->response_queue; 325 queue = &adapter->response_queue;
335 326
336 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, 327 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
337 siga_error))) 328 siga_error, first_element,
329 elements_processed)))
338 goto out; 330 goto out;
339 331
340 /* 332 /*
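
Note on the hunk above: the error-check helper now receives first_element and elements_processed purely so the failing SBAL window can be recorded in the new HBA debug trace before recovery starts. A condensed sketch of that flow, reusing only the names visible in this hunk (the remainder of the original function is not shown here):

/* Sketch only: error path of the QDIO handler check, as in the hunk above. */
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
	ZFCP_LOG_INFO("QDIO problem occurred (status=0x%x, "
		      "qdio_error=0x%x, siga_error=0x%x)\n",
		      status, qdio_error, siga_error);
	/* trace the failing window before the queues are torn down */
	zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
				first_element, elements_processed);
	retval = -EIO;
	/* adapter recovery then proceeds as in the original function */
}
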
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 31a76065cf28..3dcd1bfba3b4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -44,7 +44,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
44static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); 44static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
45static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); 45static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
46static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); 46static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
47static int zfcp_task_management_function(struct zfcp_unit *, u8); 47static int zfcp_task_management_function(struct zfcp_unit *, u8,
48 struct scsi_cmnd *);
48 49
49static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t, 50static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t,
50 scsi_lun_t); 51 scsi_lun_t);
@@ -242,7 +243,10 @@ static void
242zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) 243zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
243{ 244{
244 set_host_byte(&scpnt->result, result); 245 set_host_byte(&scpnt->result, result);
245 zfcp_cmd_dbf_event_scsi("failing", scpnt); 246 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
247 zfcp_scsi_dbf_event_result("fail", 4,
248 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
249 scpnt);
246 /* return directly */ 250 /* return directly */
247 scpnt->scsi_done(scpnt); 251 scpnt->scsi_done(scpnt);
248} 252}
@@ -414,67 +418,38 @@ zfcp_port_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id)
414 return (struct zfcp_port *) NULL; 418 return (struct zfcp_port *) NULL;
415} 419}
416 420
417/* 421/**
418 * function: zfcp_scsi_eh_abort_handler 422 * zfcp_scsi_eh_abort_handler - abort the specified SCSI command
419 * 423 * @scpnt: pointer to scsi_cmnd to be aborted
420 * purpose: tries to abort the specified (timed out) SCSI command 424 * Return: SUCCESS - command has been aborted and cleaned up in internal
421 * 425 * bookkeeping, SCSI stack won't be called for aborted command
422 * note: We do not need to care for a SCSI command which completes 426 * FAILED - otherwise
423 * normally but late during this abort routine runs.
424 * We are allowed to return late commands to the SCSI stack.
425 * It tracks the state of commands and will handle late commands.
426 * (Usually, the normal completion of late commands is ignored with
427 * respect to the running abort operation. Grep for 'done_late'
428 * in the SCSI stacks sources.)
429 * 427 *
430 * returns: SUCCESS - command has been aborted and cleaned up in internal 428 * We do not need to care for a SCSI command which completes normally
431 * bookkeeping, 429 * but late during this abort routine runs. We are allowed to return
432 * SCSI stack won't be called for aborted command 430 * late commands to the SCSI stack. It tracks the state of commands and
433 * FAILED - otherwise 431 * will handle late commands. (Usually, the normal completion of late
432 * commands is ignored with respect to the running abort operation.)
434 */ 433 */
435int 434int
436__zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 435zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
437{ 436{
437 struct Scsi_Host *scsi_host;
438 struct zfcp_adapter *adapter;
439 struct zfcp_unit *unit;
438 int retval = SUCCESS; 440 int retval = SUCCESS;
439 struct zfcp_fsf_req *new_fsf_req, *old_fsf_req; 441 struct zfcp_fsf_req *new_fsf_req = NULL;
440 struct zfcp_adapter *adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 442 struct zfcp_fsf_req *old_fsf_req;
441 struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
442 struct zfcp_port *port = unit->port;
443 struct Scsi_Host *scsi_host = scpnt->device->host;
444 union zfcp_req_data *req_data = NULL;
445 unsigned long flags; 443 unsigned long flags;
446 u32 status = 0; 444
447 445 scsi_host = scpnt->device->host;
448 /* the components of a abort_dbf record (fixed size record) */ 446 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
449 u64 dbf_scsi_cmnd = (unsigned long) scpnt; 447 unit = (struct zfcp_unit *) scpnt->device->hostdata;
450 char dbf_opcode[ZFCP_ABORT_DBF_LENGTH];
451 wwn_t dbf_wwn = port->wwpn;
452 fcp_lun_t dbf_fcp_lun = unit->fcp_lun;
453 u64 dbf_retries = scpnt->retries;
454 u64 dbf_allowed = scpnt->allowed;
455 u64 dbf_timeout = 0;
456 u64 dbf_fsf_req = 0;
457 u64 dbf_fsf_status = 0;
458 u64 dbf_fsf_qual[2] = { 0, 0 };
459 char dbf_result[ZFCP_ABORT_DBF_LENGTH] = "##undef";
460
461 memset(dbf_opcode, 0, ZFCP_ABORT_DBF_LENGTH);
462 memcpy(dbf_opcode,
463 scpnt->cmnd,
464 min(scpnt->cmd_len, (unsigned char) ZFCP_ABORT_DBF_LENGTH));
465 448
466 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n", 449 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
467 scpnt, zfcp_get_busid_by_adapter(adapter)); 450 scpnt, zfcp_get_busid_by_adapter(adapter));
468 451
469 spin_unlock_irq(scsi_host->host_lock); 452 /* avoid race condition between late normal completion and abort */
470
471 /*
472 * Race condition between normal (late) completion and abort has
473 * to be avoided.
474 * The entirity of all accesses to scsi_req have to be atomic.
475 * scsi_req is usually part of the fsf_req and thus we block the
476 * release of fsf_req as long as we need to access scsi_req.
477 */
478 write_lock_irqsave(&adapter->abort_lock, flags); 453 write_lock_irqsave(&adapter->abort_lock, flags);
479 454
480 /* 455 /*
@@ -484,144 +459,47 @@ __zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
484 * this routine returns. (scpnt is parameter passed to this routine 459 * this routine returns. (scpnt is parameter passed to this routine
485 * and must not disappear during abort even on late completion.) 460 * and must not disappear during abort even on late completion.)
486 */ 461 */
487 req_data = (union zfcp_req_data *) scpnt->host_scribble; 462 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
488 /* DEBUG */
489 ZFCP_LOG_DEBUG("req_data=%p\n", req_data);
490 if (!req_data) {
491 ZFCP_LOG_DEBUG("late command completion overtook abort\n");
492 /*
493 * That's it.
494 * Do not initiate abort but return SUCCESS.
495 */
496 write_unlock_irqrestore(&adapter->abort_lock, flags);
497 retval = SUCCESS;
498 strncpy(dbf_result, "##late1", ZFCP_ABORT_DBF_LENGTH);
499 goto out;
500 }
501
502 /* Figure out which fsf_req needs to be aborted. */
503 old_fsf_req = req_data->send_fcp_command_task.fsf_req;
504
505 dbf_fsf_req = (unsigned long) old_fsf_req;
506 dbf_timeout =
507 (jiffies - req_data->send_fcp_command_task.start_jiffies) / HZ;
508
509 ZFCP_LOG_DEBUG("old_fsf_req=%p\n", old_fsf_req);
510 if (!old_fsf_req) { 463 if (!old_fsf_req) {
511 write_unlock_irqrestore(&adapter->abort_lock, flags); 464 write_unlock_irqrestore(&adapter->abort_lock, flags);
512 ZFCP_LOG_NORMAL("bug: no old fsf request found\n"); 465 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req);
513 ZFCP_LOG_NORMAL("req_data:\n"); 466 retval = SUCCESS;
514 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
515 (char *) req_data, sizeof (union zfcp_req_data));
516 ZFCP_LOG_NORMAL("scsi_cmnd:\n");
517 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
518 (char *) scpnt, sizeof (struct scsi_cmnd));
519 retval = FAILED;
520 strncpy(dbf_result, "##bug:r", ZFCP_ABORT_DBF_LENGTH);
521 goto out; 467 goto out;
522 } 468 }
523 old_fsf_req->data.send_fcp_command_task.scsi_cmnd = NULL; 469 old_fsf_req->data = 0;
524 /* mark old request as being aborted */
525 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; 470 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
526 /*
527 * We have to collect all information (e.g. unit) needed by
528 * zfcp_fsf_abort_fcp_command before calling that routine
529 * since that routine is not allowed to access
530 * fsf_req which it is going to abort.
531 * This is because of we need to release fsf_req_list_lock
532 * before calling zfcp_fsf_abort_fcp_command.
533 * Since this lock will not be held, fsf_req may complete
534 * late and may be released meanwhile.
535 */
536 ZFCP_LOG_DEBUG("unit 0x%016Lx (%p)\n", unit->fcp_lun, unit);
537 471
538 /* 472 /* don't access old_fsf_req after releasing the abort_lock */
539 * We block (call schedule)
540 * That's why we must release the lock and enable the
541 * interrupts before.
542 * On the other hand we do not need the lock anymore since
543 * all critical accesses to scsi_req are done.
544 */
545 write_unlock_irqrestore(&adapter->abort_lock, flags); 473 write_unlock_irqrestore(&adapter->abort_lock, flags);
546 /* call FSF routine which does the abort */ 474 /* call FSF routine which does the abort */
547 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req, 475 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req,
548 adapter, unit, 0); 476 adapter, unit, 0);
549 ZFCP_LOG_DEBUG("new_fsf_req=%p\n", new_fsf_req);
550 if (!new_fsf_req) { 477 if (!new_fsf_req) {
478 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
551 retval = FAILED; 479 retval = FAILED;
552 ZFCP_LOG_NORMAL("error: initiation of Abort FCP Cmnd "
553 "failed\n");
554 strncpy(dbf_result, "##nores", ZFCP_ABORT_DBF_LENGTH);
555 goto out; 480 goto out;
556 } 481 }
557 482
558 /* wait for completion of abort */ 483 /* wait for completion of abort */
559 ZFCP_LOG_DEBUG("waiting for cleanup...\n");
560#if 1
561 /*
562 * FIXME:
563 * copying zfcp_fsf_req_wait_and_cleanup code is not really nice
564 */
565 __wait_event(new_fsf_req->completion_wq, 484 __wait_event(new_fsf_req->completion_wq,
566 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 485 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
567 status = new_fsf_req->status; 486
568 dbf_fsf_status = new_fsf_req->qtcb->header.fsf_status;
569 /*
570 * Ralphs special debug load provides timestamps in the FSF
571 * status qualifier. This might be specified later if being
572 * useful for debugging aborts.
573 */
574 dbf_fsf_qual[0] =
575 *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0];
576 dbf_fsf_qual[1] =
577 *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2];
578 zfcp_fsf_req_free(new_fsf_req);
579#else
580 retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req,
581 ZFCP_UNINTERRUPTIBLE, &status);
582#endif
583 ZFCP_LOG_DEBUG("Waiting for cleanup complete, status=0x%x\n", status);
584 /* status should be valid since signals were not permitted */ 487 /* status should be valid since signals were not permitted */
585 if (status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 488 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
489 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req);
586 retval = SUCCESS; 490 retval = SUCCESS;
587 strncpy(dbf_result, "##succ", ZFCP_ABORT_DBF_LENGTH); 491 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
588 } else if (status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 492 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req);
589 retval = SUCCESS; 493 retval = SUCCESS;
590 strncpy(dbf_result, "##late2", ZFCP_ABORT_DBF_LENGTH);
591 } else { 494 } else {
495 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req);
592 retval = FAILED; 496 retval = FAILED;
593 strncpy(dbf_result, "##fail", ZFCP_ABORT_DBF_LENGTH);
594 } 497 }
595 498 zfcp_fsf_req_free(new_fsf_req);
596 out: 499 out:
597 debug_event(adapter->abort_dbf, 1, &dbf_scsi_cmnd, sizeof (u64));
598 debug_event(adapter->abort_dbf, 1, &dbf_opcode, ZFCP_ABORT_DBF_LENGTH);
599 debug_event(adapter->abort_dbf, 1, &dbf_wwn, sizeof (wwn_t));
600 debug_event(adapter->abort_dbf, 1, &dbf_fcp_lun, sizeof (fcp_lun_t));
601 debug_event(adapter->abort_dbf, 1, &dbf_retries, sizeof (u64));
602 debug_event(adapter->abort_dbf, 1, &dbf_allowed, sizeof (u64));
603 debug_event(adapter->abort_dbf, 1, &dbf_timeout, sizeof (u64));
604 debug_event(adapter->abort_dbf, 1, &dbf_fsf_req, sizeof (u64));
605 debug_event(adapter->abort_dbf, 1, &dbf_fsf_status, sizeof (u64));
606 debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[0], sizeof (u64));
607 debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[1], sizeof (u64));
608 debug_text_event(adapter->abort_dbf, 1, dbf_result);
609
610 spin_lock_irq(scsi_host->host_lock);
611 return retval; 500 return retval;
612} 501}
613 502
614int
615zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
616{
617 int rc;
618 struct Scsi_Host *scsi_host = scpnt->device->host;
619 spin_lock_irq(scsi_host->host_lock);
620 rc = __zfcp_scsi_eh_abort_handler(scpnt);
621 spin_unlock_irq(scsi_host->host_lock);
622 return rc;
623}
624
625/* 503/*
626 * function: zfcp_scsi_eh_device_reset_handler 504 * function: zfcp_scsi_eh_device_reset_handler
627 * 505 *
@@ -651,8 +529,9 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
651 */ 529 */
652 if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET, 530 if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
653 &unit->status)) { 531 &unit->status)) {
654 retval = 532 retval = zfcp_task_management_function(unit,
655 zfcp_task_management_function(unit, FCP_LOGICAL_UNIT_RESET); 533 FCP_LOGICAL_UNIT_RESET,
534 scpnt);
656 if (retval) { 535 if (retval) {
657 ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit); 536 ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit);
658 if (retval == -ENOTSUPP) 537 if (retval == -ENOTSUPP)
@@ -668,7 +547,7 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
668 goto out; 547 goto out;
669 } 548 }
670 } 549 }
671 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET); 550 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
672 if (retval) { 551 if (retval) {
673 ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit); 552 ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit);
674 retval = FAILED; 553 retval = FAILED;
@@ -681,12 +560,12 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
681} 560}
682 561
683static int 562static int
684zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags) 563zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
564 struct scsi_cmnd *scpnt)
685{ 565{
686 struct zfcp_adapter *adapter = unit->port->adapter; 566 struct zfcp_adapter *adapter = unit->port->adapter;
687 int retval;
688 int status;
689 struct zfcp_fsf_req *fsf_req; 567 struct zfcp_fsf_req *fsf_req;
568 int retval = 0;
690 569
691 /* issue task management function */ 570 /* issue task management function */
692 fsf_req = zfcp_fsf_send_fcp_command_task_management 571 fsf_req = zfcp_fsf_send_fcp_command_task_management
@@ -696,70 +575,63 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
696 "failed for unit 0x%016Lx on port 0x%016Lx on " 575 "failed for unit 0x%016Lx on port 0x%016Lx on "
697 "adapter %s\n", unit->fcp_lun, unit->port->wwpn, 576 "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
698 zfcp_get_busid_by_adapter(adapter)); 577 zfcp_get_busid_by_adapter(adapter));
578 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
699 retval = -ENOMEM; 579 retval = -ENOMEM;
700 goto out; 580 goto out;
701 } 581 }
702 582
703 retval = zfcp_fsf_req_wait_and_cleanup(fsf_req, 583 __wait_event(fsf_req->completion_wq,
704 ZFCP_UNINTERRUPTIBLE, &status); 584 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
585
705 /* 586 /*
706 * check completion status of task management function 587 * check completion status of task management function
707 * (status should always be valid since no signals permitted)
708 */ 588 */
709 if (status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) 589 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
590 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
710 retval = -EIO; 591 retval = -EIO;
711 else if (status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) 592 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
593 zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
712 retval = -ENOTSUPP; 594 retval = -ENOTSUPP;
713 else 595 } else
714 retval = 0; 596 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
597
598 zfcp_fsf_req_free(fsf_req);
715 out: 599 out:
716 return retval; 600 return retval;
717} 601}
718 602
719/* 603/**
720 * function: zfcp_scsi_eh_bus_reset_handler 604 * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter)
721 *
722 * purpose:
723 *
724 * returns:
725 */ 605 */
726int 606int
727zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) 607zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
728{ 608{
729 int retval = 0; 609 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
730 struct zfcp_unit *unit; 610 struct zfcp_adapter *adapter = unit->port->adapter;
731 611
732 unit = (struct zfcp_unit *) scpnt->device->hostdata;
733 ZFCP_LOG_NORMAL("bus reset because of problems with " 612 ZFCP_LOG_NORMAL("bus reset because of problems with "
734 "unit 0x%016Lx\n", unit->fcp_lun); 613 "unit 0x%016Lx\n", unit->fcp_lun);
735 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 614 zfcp_erp_adapter_reopen(adapter, 0);
736 zfcp_erp_wait(unit->port->adapter); 615 zfcp_erp_wait(adapter);
737 retval = SUCCESS;
738 616
739 return retval; 617 return SUCCESS;
740} 618}
741 619
742/* 620/**
743 * function: zfcp_scsi_eh_host_reset_handler 621 * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter)
744 *
745 * purpose:
746 *
747 * returns:
748 */ 622 */
749int 623int
750zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 624zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
751{ 625{
752 int retval = 0; 626 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
753 struct zfcp_unit *unit; 627 struct zfcp_adapter *adapter = unit->port->adapter;
754 628
755 unit = (struct zfcp_unit *) scpnt->device->hostdata;
756 ZFCP_LOG_NORMAL("host reset because of problems with " 629 ZFCP_LOG_NORMAL("host reset because of problems with "
757 "unit 0x%016Lx\n", unit->fcp_lun); 630 "unit 0x%016Lx\n", unit->fcp_lun);
758 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 631 zfcp_erp_adapter_reopen(adapter, 0);
759 zfcp_erp_wait(unit->port->adapter); 632 zfcp_erp_wait(adapter);
760 retval = SUCCESS;
761 633
762 return retval; 634 return SUCCESS;
763} 635}
764 636
765/* 637/*
@@ -826,10 +698,16 @@ void
826zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) 698zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
827{ 699{
828 struct Scsi_Host *shost; 700 struct Scsi_Host *shost;
701 struct zfcp_port *port;
829 702
830 shost = adapter->scsi_host; 703 shost = adapter->scsi_host;
831 if (!shost) 704 if (!shost)
832 return; 705 return;
706 read_lock_irq(&zfcp_data.config_lock);
707 list_for_each_entry(port, &adapter->port_list_head, list)
708 if (port->rport)
709 port->rport = NULL;
710 read_unlock_irq(&zfcp_data.config_lock);
833 fc_remove_host(shost); 711 fc_remove_host(shost);
834 scsi_remove_host(shost); 712 scsi_remove_host(shost);
835 scsi_host_put(shost); 713 scsi_host_put(shost);
@@ -904,18 +782,6 @@ zfcp_get_node_name(struct scsi_target *starget)
904 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 782 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
905} 783}
906 784
907void
908zfcp_set_fc_host_attrs(struct zfcp_adapter *adapter)
909{
910 struct Scsi_Host *shost = adapter->scsi_host;
911
912 fc_host_node_name(shost) = adapter->wwnn;
913 fc_host_port_name(shost) = adapter->wwpn;
914 strncpy(fc_host_serial_number(shost), adapter->serial_number,
915 min(FC_SERIAL_NUMBER_SIZE, 32));
916 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
917}
918
919struct fc_function_template zfcp_transport_functions = { 785struct fc_function_template zfcp_transport_functions = {
920 .get_starget_port_id = zfcp_get_port_id, 786 .get_starget_port_id = zfcp_get_port_id,
921 .get_starget_port_name = zfcp_get_port_name, 787 .get_starget_port_name = zfcp_get_port_name,
@@ -927,7 +793,10 @@ struct fc_function_template zfcp_transport_functions = {
927 .show_host_node_name = 1, 793 .show_host_node_name = 1,
928 .show_host_port_name = 1, 794 .show_host_port_name = 1,
929 .show_host_supported_classes = 1, 795 .show_host_supported_classes = 1,
796 .show_host_maxframe_size = 1,
930 .show_host_serial_number = 1, 797 .show_host_serial_number = 1,
798 .show_host_speed = 1,
799 .show_host_port_id = 1,
931}; 800};
932 801
933/** 802/**
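
Note on the abort handler above: it keeps the rule the old comments spelled out, namely that the scsi_cmnd-to-fsf_req association stored in host_scribble may only be read, and the request only marked as aborting, while adapter->abort_lock is held; once the lock is dropped the old request may complete and be freed at any time. A condensed sketch of just that locking pattern, using the field and flag names shown in the hunk:

/* Sketch only: the lock discipline used by zfcp_scsi_eh_abort_handler(). */
write_lock_irqsave(&adapter->abort_lock, flags);
old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
if (!old_fsf_req) {
	/* late completion already detached the request: nothing to abort */
	write_unlock_irqrestore(&adapter->abort_lock, flags);
	return SUCCESS;
}
old_fsf_req->data = 0;				/* detach the scsi_cmnd */
old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
write_unlock_irqrestore(&adapter->abort_lock, flags);
/* old_fsf_req must not be dereferenced after the lock is dropped */
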
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index e7345a74800a..0cd435280e7d 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -62,21 +62,18 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct devi
62static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL); 62static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
63 63
64ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)); 64ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
65ZFCP_DEFINE_ADAPTER_ATTR(wwnn, "0x%016llx\n", adapter->wwnn);
66ZFCP_DEFINE_ADAPTER_ATTR(wwpn, "0x%016llx\n", adapter->wwpn);
67ZFCP_DEFINE_ADAPTER_ATTR(s_id, "0x%06x\n", adapter->s_id);
68ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); 65ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
69ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); 66ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
70ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); 67ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
68ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
69ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
71ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); 70ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
72ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); 71ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
73ZFCP_DEFINE_ADAPTER_ATTR(fc_link_speed, "%d Gb/s\n", adapter->fc_link_speed);
74ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class); 72ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class);
75ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n", 73ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n",
76 fc_topologies[adapter->fc_topology]); 74 fc_topologies[adapter->fc_topology]);
77ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", 75ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
78 adapter->hardware_version); 76 adapter->hardware_version);
79ZFCP_DEFINE_ADAPTER_ATTR(serial_number, "%17s\n", adapter->serial_number);
80ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no); 77ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no);
81ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask 78ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
82 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)); 79 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
@@ -255,21 +252,18 @@ static struct attribute *zfcp_adapter_attrs[] = {
255 &dev_attr_in_recovery.attr, 252 &dev_attr_in_recovery.attr,
256 &dev_attr_port_remove.attr, 253 &dev_attr_port_remove.attr,
257 &dev_attr_port_add.attr, 254 &dev_attr_port_add.attr,
258 &dev_attr_wwnn.attr,
259 &dev_attr_wwpn.attr,
260 &dev_attr_s_id.attr,
261 &dev_attr_peer_wwnn.attr, 255 &dev_attr_peer_wwnn.attr,
262 &dev_attr_peer_wwpn.attr, 256 &dev_attr_peer_wwpn.attr,
263 &dev_attr_peer_d_id.attr, 257 &dev_attr_peer_d_id.attr,
258 &dev_attr_physical_wwpn.attr,
259 &dev_attr_physical_s_id.attr,
264 &dev_attr_card_version.attr, 260 &dev_attr_card_version.attr,
265 &dev_attr_lic_version.attr, 261 &dev_attr_lic_version.attr,
266 &dev_attr_fc_link_speed.attr,
267 &dev_attr_fc_service_class.attr, 262 &dev_attr_fc_service_class.attr,
268 &dev_attr_fc_topology.attr, 263 &dev_attr_fc_topology.attr,
269 &dev_attr_scsi_host_no.attr, 264 &dev_attr_scsi_host_no.attr,
270 &dev_attr_status.attr, 265 &dev_attr_status.attr,
271 &dev_attr_hardware_version.attr, 266 &dev_attr_hardware_version.attr,
272 &dev_attr_serial_number.attr,
273 NULL 267 NULL
274}; 268};
275 269
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c932b3b94490..876d1de8480d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1109,15 +1109,6 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
1109 return (0); 1109 return (0);
1110} 1110}
1111 1111
1112uint64_t
1113ahc_linux_get_memsize(void)
1114{
1115 struct sysinfo si;
1116
1117 si_meminfo(&si);
1118 return ((uint64_t)si.totalram << PAGE_SHIFT);
1119}
1120
1121/* 1112/*
1122 * Place the SCSI bus into a known state by either resetting it, 1113 * Place the SCSI bus into a known state by either resetting it,
1123 * or forcing transfer negotiations on the next command to any 1114 * or forcing transfer negotiations on the next command to any
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index c52996269240..be9edbe26dbe 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -494,8 +494,6 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
494int ahc_linux_register_host(struct ahc_softc *, 494int ahc_linux_register_host(struct ahc_softc *,
495 struct scsi_host_template *); 495 struct scsi_host_template *);
496 496
497uint64_t ahc_linux_get_memsize(void);
498
499/*************************** Pretty Printing **********************************/ 497/*************************** Pretty Printing **********************************/
500struct info_str { 498struct info_str {
501 char *buffer; 499 char *buffer;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0d44a6907dd2..3ce77ddc889e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -180,6 +180,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
180 struct ahc_pci_identity *entry; 180 struct ahc_pci_identity *entry;
181 char *name; 181 char *name;
182 int error; 182 int error;
183 struct device *dev = &pdev->dev;
183 184
184 pci = pdev; 185 pci = pdev;
185 entry = ahc_find_pci_device(pci); 186 entry = ahc_find_pci_device(pci);
@@ -209,11 +210,12 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
209 pci_set_master(pdev); 210 pci_set_master(pdev);
210 211
211 if (sizeof(dma_addr_t) > 4 212 if (sizeof(dma_addr_t) > 4
212 && ahc_linux_get_memsize() > 0x80000000 213 && ahc->features & AHC_LARGE_SCBS
213 && pci_set_dma_mask(pdev, mask_39bit) == 0) { 214 && dma_set_mask(dev, mask_39bit) == 0
215 && dma_get_required_mask(dev) > DMA_32BIT_MASK) {
214 ahc->flags |= AHC_39BIT_ADDRESSING; 216 ahc->flags |= AHC_39BIT_ADDRESSING;
215 } else { 217 } else {
216 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 218 if (dma_set_mask(dev, DMA_32BIT_MASK)) {
217 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); 219 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
218 return (-ENODEV); 220 return (-ENODEV);
219 } 221 }
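
Note on the probe change above: it stops guessing from total RAM (ahc_linux_get_memsize(), now deleted) and instead asks the DMA API whether anything beyond a 32-bit mask is actually required, and enables 39-bit addressing only for controllers with large SCBs. A hedged sketch of the decision; the 0x7FFFFFFFFFULL value for mask_39bit is an assumption inferred from the name, it is not shown in this hunk:

/* Sketch only: choosing between 39-bit and 32-bit DMA masks. */
struct device *dev = &pdev->dev;
const u64 mask_39bit = 0x7FFFFFFFFFULL;	/* assumed definition */

if (sizeof(dma_addr_t) > 4
    && (ahc->features & AHC_LARGE_SCBS)
    && dma_set_mask(dev, mask_39bit) == 0
    && dma_get_required_mask(dev) > DMA_32BIT_MASK) {
	ahc->flags |= AHC_39BIT_ADDRESSING;
} else if (dma_set_mask(dev, DMA_32BIT_MASK)) {
	printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
	return -ENODEV;
}
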
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 87e0c36f1554..d71cef767cec 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -442,7 +442,6 @@ static void piix_sata_phy_reset(struct ata_port *ap)
442 * piix_set_piomode - Initialize host controller PATA PIO timings 442 * piix_set_piomode - Initialize host controller PATA PIO timings
443 * @ap: Port whose timings we are configuring 443 * @ap: Port whose timings we are configuring
444 * @adev: um 444 * @adev: um
445 * @pio: PIO mode, 0 - 4
446 * 445 *
447 * Set PIO mode for device, in host controller PCI config space. 446 * Set PIO mode for device, in host controller PCI config space.
448 * 447 *
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e6153fe5842a..a8cfbef304b5 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -996,6 +996,7 @@ oktosend:
996#ifdef ED_DBGP 996#ifdef ED_DBGP
997 printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id); 997 printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id);
998#endif 998#endif
999 dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus;
999 outl(dev->id[c][target_id].prdaddr, tmpcip); 1000 outl(dev->id[c][target_id].prdaddr, tmpcip);
1000 tmpcip = tmpcip - 2; 1001 tmpcip = tmpcip - 2;
1001 outb(0x06, tmpcip); 1002 outb(0x06, tmpcip);
@@ -2572,7 +2573,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
2572 for (k = 0; k < 16; k++) { 2573 for (k = 0; k < 16; k++) {
2573 if (!atp_dev->id[j][k].prd_table) 2574 if (!atp_dev->id[j][k].prd_table)
2574 continue; 2575 continue;
2575 pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prdaddr); 2576 pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
2576 atp_dev->id[j][k].prd_table = NULL; 2577 atp_dev->id[j][k].prd_table = NULL;
2577 } 2578 }
2578 } 2579 }
@@ -2584,12 +2585,13 @@ static int atp870u_init_tables(struct Scsi_Host *host)
2584 int c,k; 2585 int c,k;
2585 for(c=0;c < 2;c++) { 2586 for(c=0;c < 2;c++) {
2586 for(k=0;k<16;k++) { 2587 for(k=0;k<16;k++) {
2587 atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prdaddr)); 2588 atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
2588 if (!atp_dev->id[c][k].prd_table) { 2589 if (!atp_dev->id[c][k].prd_table) {
2589 printk("atp870u_init_tables fail\n"); 2590 printk("atp870u_init_tables fail\n");
2590 atp870u_free_tables(host); 2591 atp870u_free_tables(host);
2591 return -ENOMEM; 2592 return -ENOMEM;
2592 } 2593 }
2594 atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
2593 atp_dev->id[c][k].devsp=0x20; 2595 atp_dev->id[c][k].devsp=0x20;
2594 atp_dev->id[c][k].devtype = 0x7f; 2596 atp_dev->id[c][k].devtype = 0x7f;
2595 atp_dev->id[c][k].curr_req = NULL; 2597 atp_dev->id[c][k].curr_req = NULL;
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 89f43af39cf2..62bae64a01c1 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -54,8 +54,9 @@ struct atp_unit
54 unsigned long tran_len; 54 unsigned long tran_len;
55 unsigned long last_len; 55 unsigned long last_len;
56 unsigned char *prd_pos; 56 unsigned char *prd_pos;
57 unsigned char *prd_table; 57 unsigned char *prd_table; /* Kernel address of PRD table */
58 dma_addr_t prdaddr; 58 dma_addr_t prd_bus; /* Bus address of PRD */
59 dma_addr_t prdaddr; /* Dynamically updated in driver */
59 struct scsi_cmnd *curr_req; 60 struct scsi_cmnd *curr_req;
60 } id[2][16]; 61 } id[2][16];
61 struct Scsi_Host *host; 62 struct Scsi_Host *host;
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index fa652f8aa643..d59d449a9e4d 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -1360,3 +1360,5 @@ static Scsi_Host_Template driver_template = {
1360 .use_clustering = DISABLE_CLUSTERING, 1360 .use_clustering = DISABLE_CLUSTERING,
1361}; 1361};
1362#include "scsi_module.c" 1362#include "scsi_module.c"
1363
1364MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 85503fad789a..f2a72d33132c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -98,6 +98,7 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
98 switch (oldstate) { 98 switch (oldstate) {
99 case SHOST_CREATED: 99 case SHOST_CREATED:
100 case SHOST_RUNNING: 100 case SHOST_RUNNING:
101 case SHOST_CANCEL_RECOVERY:
101 break; 102 break;
102 default: 103 default:
103 goto illegal; 104 goto illegal;
@@ -107,12 +108,31 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
107 case SHOST_DEL: 108 case SHOST_DEL:
108 switch (oldstate) { 109 switch (oldstate) {
109 case SHOST_CANCEL: 110 case SHOST_CANCEL:
111 case SHOST_DEL_RECOVERY:
110 break; 112 break;
111 default: 113 default:
112 goto illegal; 114 goto illegal;
113 } 115 }
114 break; 116 break;
115 117
118 case SHOST_CANCEL_RECOVERY:
119 switch (oldstate) {
120 case SHOST_CANCEL:
121 case SHOST_RECOVERY:
122 break;
123 default:
124 goto illegal;
125 }
126 break;
127
128 case SHOST_DEL_RECOVERY:
129 switch (oldstate) {
130 case SHOST_CANCEL_RECOVERY:
131 break;
132 default:
133 goto illegal;
134 }
135 break;
116 } 136 }
117 shost->shost_state = state; 137 shost->shost_state = state;
118 return 0; 138 return 0;
@@ -134,13 +154,24 @@ EXPORT_SYMBOL(scsi_host_set_state);
134 **/ 154 **/
135void scsi_remove_host(struct Scsi_Host *shost) 155void scsi_remove_host(struct Scsi_Host *shost)
136{ 156{
157 unsigned long flags;
137 down(&shost->scan_mutex); 158 down(&shost->scan_mutex);
138 scsi_host_set_state(shost, SHOST_CANCEL); 159 spin_lock_irqsave(shost->host_lock, flags);
160 if (scsi_host_set_state(shost, SHOST_CANCEL))
161 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
162 spin_unlock_irqrestore(shost->host_lock, flags);
163 up(&shost->scan_mutex);
164 return;
165 }
166 spin_unlock_irqrestore(shost->host_lock, flags);
139 up(&shost->scan_mutex); 167 up(&shost->scan_mutex);
140 scsi_forget_host(shost); 168 scsi_forget_host(shost);
141 scsi_proc_host_rm(shost); 169 scsi_proc_host_rm(shost);
142 170
143 scsi_host_set_state(shost, SHOST_DEL); 171 spin_lock_irqsave(shost->host_lock, flags);
172 if (scsi_host_set_state(shost, SHOST_DEL))
173 BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
174 spin_unlock_irqrestore(shost->host_lock, flags);
144 175
145 transport_unregister_device(&shost->shost_gendev); 176 transport_unregister_device(&shost->shost_gendev);
146 class_device_unregister(&shost->shost_classdev); 177 class_device_unregister(&shost->shost_classdev);
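
Note on the hosts.c hunks above: scsi_remove_host now has to cope with a host that is torn down while error recovery is still running. If the transition to SHOST_CANCEL is rejected by the state machine it retries with the new SHOST_CANCEL_RECOVERY state, and SHOST_DEL later falls back to SHOST_DEL_RECOVERY the same way, all under host_lock since scsi_host_set_state() only validates the transition, it does not take any lock itself. The pattern, condensed from the hunk:

/* Sketch only: state transitions with a recovery fallback, under host_lock. */
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_CANCEL))
	if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		up(&shost->scan_mutex);
		return;			/* neither transition was legal */
	}
spin_unlock_irqrestore(shost->host_lock, flags);
/* ... scsi_forget_host(), scsi_proc_host_rm() ... */
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
	BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
spin_unlock_irqrestore(shost->host_lock, flags);
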
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 6e54c7d9b33c..19392f651272 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -460,6 +460,8 @@ MODULE_PARM(adisplay, "1i");
460MODULE_PARM(normal, "1i"); 460MODULE_PARM(normal, "1i");
461MODULE_PARM(ansi, "1i"); 461MODULE_PARM(ansi, "1i");
462#endif 462#endif
463
464MODULE_LICENSE("GPL");
463#endif 465#endif
464/*counter of concurrent disk read/writes, to turn on/off disk led */ 466/*counter of concurrent disk read/writes, to turn on/off disk led */
465static int disk_rw_in_progress = 0; 467static int disk_rw_in_progress = 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5b14934ba861..ff25210b00ba 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -727,6 +727,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
727 if (hostdata->madapter_info.port_max_txu[0]) 727 if (hostdata->madapter_info.port_max_txu[0])
728 hostdata->host->max_sectors = 728 hostdata->host->max_sectors =
729 hostdata->madapter_info.port_max_txu[0] >> 9; 729 hostdata->madapter_info.port_max_txu[0] >> 9;
730
731 if (hostdata->madapter_info.os_type == 3 &&
732 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
733 printk("ibmvscsi: host (Ver. %s) doesn't support large"
734 "transfers\n",
735 hostdata->madapter_info.srp_version);
736 printk("ibmvscsi: limiting scatterlists to %d\n",
737 MAX_INDIRECT_BUFS);
738 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
739 }
730 } 740 }
731} 741}
732 742
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index d92273cbe0de..e5b01997117a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -4132,6 +4132,53 @@ err_out:
4132} 4132}
4133 4133
4134/** 4134/**
4135 * ata_host_set_remove - PCI layer callback for device removal
4136 * @host_set: ATA host set that was removed
4137 *
4138 * Unregister all objects associated with this host set. Free those
4139 * objects.
4140 *
4141 * LOCKING:
4142 * Inherited from calling layer (may sleep).
4143 */
4144
4145
4146void ata_host_set_remove(struct ata_host_set *host_set)
4147{
4148 struct ata_port *ap;
4149 unsigned int i;
4150
4151 for (i = 0; i < host_set->n_ports; i++) {
4152 ap = host_set->ports[i];
4153 scsi_remove_host(ap->host);
4154 }
4155
4156 free_irq(host_set->irq, host_set);
4157
4158 for (i = 0; i < host_set->n_ports; i++) {
4159 ap = host_set->ports[i];
4160
4161 ata_scsi_release(ap->host);
4162
4163 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4164 struct ata_ioports *ioaddr = &ap->ioaddr;
4165
4166 if (ioaddr->cmd_addr == 0x1f0)
4167 release_region(0x1f0, 8);
4168 else if (ioaddr->cmd_addr == 0x170)
4169 release_region(0x170, 8);
4170 }
4171
4172 scsi_host_put(ap->host);
4173 }
4174
4175 if (host_set->ops->host_stop)
4176 host_set->ops->host_stop(host_set);
4177
4178 kfree(host_set);
4179}
4180
4181/**
4135 * ata_scsi_release - SCSI layer callback hook for host unload 4182 * ata_scsi_release - SCSI layer callback hook for host unload
4136 * @host: libata host to be unloaded 4183 * @host: libata host to be unloaded
4137 * 4184 *
@@ -4471,39 +4518,8 @@ void ata_pci_remove_one (struct pci_dev *pdev)
4471{ 4518{
4472 struct device *dev = pci_dev_to_dev(pdev); 4519 struct device *dev = pci_dev_to_dev(pdev);
4473 struct ata_host_set *host_set = dev_get_drvdata(dev); 4520 struct ata_host_set *host_set = dev_get_drvdata(dev);
4474 struct ata_port *ap;
4475 unsigned int i;
4476
4477 for (i = 0; i < host_set->n_ports; i++) {
4478 ap = host_set->ports[i];
4479
4480 scsi_remove_host(ap->host);
4481 }
4482
4483 free_irq(host_set->irq, host_set);
4484
4485 for (i = 0; i < host_set->n_ports; i++) {
4486 ap = host_set->ports[i];
4487
4488 ata_scsi_release(ap->host);
4489
4490 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4491 struct ata_ioports *ioaddr = &ap->ioaddr;
4492
4493 if (ioaddr->cmd_addr == 0x1f0)
4494 release_region(0x1f0, 8);
4495 else if (ioaddr->cmd_addr == 0x170)
4496 release_region(0x170, 8);
4497 }
4498
4499 scsi_host_put(ap->host);
4500 }
4501
4502 if (host_set->ops->host_stop)
4503 host_set->ops->host_stop(host_set);
4504
4505 kfree(host_set);
4506 4521
4522 ata_host_set_remove(host_set);
4507 pci_release_regions(pdev); 4523 pci_release_regions(pdev);
4508 pci_disable_device(pdev); 4524 pci_disable_device(pdev);
4509 dev_set_drvdata(dev, NULL); 4525 dev_set_drvdata(dev, NULL);
@@ -4573,6 +4589,7 @@ module_exit(ata_exit);
4573EXPORT_SYMBOL_GPL(ata_std_bios_param); 4589EXPORT_SYMBOL_GPL(ata_std_bios_param);
4574EXPORT_SYMBOL_GPL(ata_std_ports); 4590EXPORT_SYMBOL_GPL(ata_std_ports);
4575EXPORT_SYMBOL_GPL(ata_device_add); 4591EXPORT_SYMBOL_GPL(ata_device_add);
4592EXPORT_SYMBOL_GPL(ata_host_set_remove);
4576EXPORT_SYMBOL_GPL(ata_sg_init); 4593EXPORT_SYMBOL_GPL(ata_sg_init);
4577EXPORT_SYMBOL_GPL(ata_sg_init_one); 4594EXPORT_SYMBOL_GPL(ata_sg_init_one);
4578EXPORT_SYMBOL_GPL(ata_qc_complete); 4595EXPORT_SYMBOL_GPL(ata_qc_complete);
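
Note on the libata change above: ata_host_set_remove() is split out of ata_pci_remove_one() and exported, so a host driver that is not behind ata_pci_remove_one() can reuse the same teardown (remove SCSI hosts, free the IRQ, release legacy port regions, drop host references, call ->host_stop, kfree the set). A hedged usage sketch; the hypothetical my_remove() and its drvdata layout are assumptions for illustration, only ata_host_set_remove() itself comes from the patch:

/* Sketch only: calling the newly exported helper from a remove hook. */
static void my_remove(struct device *dev)
{
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	dev_set_drvdata(dev, NULL);
}
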
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index a4857db4f9b8..b235556b7b65 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1959,22 +1959,35 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1959 /* Set it up */ 1959 /* Set it up */
1960 mesh_init(ms); 1960 mesh_init(ms);
1961 1961
1962 /* XXX FIXME: error should be fatal */ 1962 /* Request interrupt */
1963 if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) 1963 if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
1964 printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr); 1964 printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
1965 goto out_shutdown;
1966 }
1965 1967
1966 /* XXX FIXME: handle failure */ 1968 /* Add scsi host & scan */
1967 scsi_add_host(mesh_host, &mdev->ofdev.dev); 1969 if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
1970 goto out_release_irq;
1968 scsi_scan_host(mesh_host); 1971 scsi_scan_host(mesh_host);
1969 1972
1970 return 0; 1973 return 0;
1971 1974
1972out_unmap: 1975 out_release_irq:
1976 free_irq(ms->meshintr, ms);
1977 out_shutdown:
1978 /* shutdown & reset bus in case of error or macos can be confused
1979 * at reboot if the bus was set to synchronous mode already
1980 */
1981 mesh_shutdown(mdev);
1982 set_mesh_power(ms, 0);
1983 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
1984 ms->dma_cmd_space, ms->dma_cmd_bus);
1985 out_unmap:
1973 iounmap(ms->dma); 1986 iounmap(ms->dma);
1974 iounmap(ms->mesh); 1987 iounmap(ms->mesh);
1975out_free: 1988 out_free:
1976 scsi_host_put(mesh_host); 1989 scsi_host_put(mesh_host);
1977out_release: 1990 out_release:
1978 macio_release_resources(mdev); 1991 macio_release_resources(mdev);
1979 1992
1980 return -ENODEV; 1993 return -ENODEV;
@@ -2001,7 +2014,7 @@ static int mesh_remove(struct macio_dev *mdev)
2001 2014
2002 /* Free DMA commands memory */ 2015 /* Free DMA commands memory */
2003 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, 2016 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
2004 ms->dma_cmd_space, ms->dma_cmd_bus); 2017 ms->dma_cmd_space, ms->dma_cmd_bus);
2005 2018
2006 /* Release memory resources */ 2019 /* Release memory resources */
2007 macio_release_resources(mdev); 2020 macio_release_resources(mdev);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index a1d62dee3be6..c05653c7779d 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -158,6 +158,8 @@ static struct pci_device_id nv_pci_tbl[] = {
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, 158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, 159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, 160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
161 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
161 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 163 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
162 PCI_ANY_ID, PCI_ANY_ID, 164 PCI_ANY_ID, PCI_ANY_ID,
163 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 165 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a780546eda9c..1f0ebabf6d47 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1265,9 +1265,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
1265 list_for_each_safe(lh, lh_sf, &active_list) { 1265 list_for_each_safe(lh, lh_sf, &active_list) {
1266 scmd = list_entry(lh, struct scsi_cmnd, eh_entry); 1266 scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
1267 list_del_init(lh); 1267 list_del_init(lh);
1268 if (recovery) { 1268 if (recovery &&
1269 scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD); 1269 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
1270 } else {
1271 scmd->result = (DID_ABORT << 16); 1270 scmd->result = (DID_ABORT << 16);
1272 scsi_finish_command(scmd); 1271 scsi_finish_command(scmd);
1273 } 1272 }
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 07b554affcf2..64fc9e21f35b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -110,6 +110,7 @@ static struct {
110 {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ 110 {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
111 {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ 111 {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
112 {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN}, 112 {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
113 {"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
113 {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ 114 {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
114 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ 115 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
115 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ 116 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 895c9452be4c..ad5342165079 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,7 +50,7 @@
50void scsi_eh_wakeup(struct Scsi_Host *shost) 50void scsi_eh_wakeup(struct Scsi_Host *shost)
51{ 51{
52 if (shost->host_busy == shost->host_failed) { 52 if (shost->host_busy == shost->host_failed) {
53 up(shost->eh_wait); 53 wake_up_process(shost->ehandler);
54 SCSI_LOG_ERROR_RECOVERY(5, 54 SCSI_LOG_ERROR_RECOVERY(5,
55 printk("Waking error handler thread\n")); 55 printk("Waking error handler thread\n"));
56 } 56 }
@@ -68,19 +68,24 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
68{ 68{
69 struct Scsi_Host *shost = scmd->device->host; 69 struct Scsi_Host *shost = scmd->device->host;
70 unsigned long flags; 70 unsigned long flags;
71 int ret = 0;
71 72
72 if (shost->eh_wait == NULL) 73 if (!shost->ehandler)
73 return 0; 74 return 0;
74 75
75 spin_lock_irqsave(shost->host_lock, flags); 76 spin_lock_irqsave(shost->host_lock, flags);
77 if (scsi_host_set_state(shost, SHOST_RECOVERY))
78 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
79 goto out_unlock;
76 80
81 ret = 1;
77 scmd->eh_eflags |= eh_flag; 82 scmd->eh_eflags |= eh_flag;
78 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); 83 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
79 scsi_host_set_state(shost, SHOST_RECOVERY);
80 shost->host_failed++; 84 shost->host_failed++;
81 scsi_eh_wakeup(shost); 85 scsi_eh_wakeup(shost);
86 out_unlock:
82 spin_unlock_irqrestore(shost->host_lock, flags); 87 spin_unlock_irqrestore(shost->host_lock, flags);
83 return 1; 88 return ret;
84} 89}
85 90
86/** 91/**
@@ -176,8 +181,8 @@ void scsi_times_out(struct scsi_cmnd *scmd)
176 } 181 }
177 182
178 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 183 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
179 panic("Error handler thread not present at %p %p %s %d", 184 scmd->result |= DID_TIME_OUT << 16;
180 scmd, scmd->device->host, __FILE__, __LINE__); 185 __scsi_done(scmd);
181 } 186 }
182} 187}
183 188
@@ -196,8 +201,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
196{ 201{
197 int online; 202 int online;
198 203
199 wait_event(sdev->host->host_wait, (sdev->host->shost_state != 204 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
200 SHOST_RECOVERY));
201 205
202 online = scsi_device_online(sdev); 206 online = scsi_device_online(sdev);
203 207
@@ -1441,6 +1445,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
1441static void scsi_restart_operations(struct Scsi_Host *shost) 1445static void scsi_restart_operations(struct Scsi_Host *shost)
1442{ 1446{
1443 struct scsi_device *sdev; 1447 struct scsi_device *sdev;
1448 unsigned long flags;
1444 1449
1445 /* 1450 /*
1446 * If the door was locked, we need to insert a door lock request 1451 * If the door was locked, we need to insert a door lock request
@@ -1460,7 +1465,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1460 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", 1465 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1461 __FUNCTION__)); 1466 __FUNCTION__));
1462 1467
1463 scsi_host_set_state(shost, SHOST_RUNNING); 1468 spin_lock_irqsave(shost->host_lock, flags);
1469 if (scsi_host_set_state(shost, SHOST_RUNNING))
1470 if (scsi_host_set_state(shost, SHOST_CANCEL))
1471 BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
1472 spin_unlock_irqrestore(shost->host_lock, flags);
1464 1473
1465 wake_up(&shost->host_wait); 1474 wake_up(&shost->host_wait);
1466 1475
@@ -1582,40 +1591,31 @@ int scsi_error_handler(void *data)
1582{ 1591{
1583 struct Scsi_Host *shost = (struct Scsi_Host *) data; 1592 struct Scsi_Host *shost = (struct Scsi_Host *) data;
1584 int rtn; 1593 int rtn;
1585 DECLARE_MUTEX_LOCKED(sem);
1586 1594
1587 current->flags |= PF_NOFREEZE; 1595 current->flags |= PF_NOFREEZE;
1588 shost->eh_wait = &sem;
1589 1596
1597
1590 /* 1598 /*
1591 * Wake up the thread that created us. 1599 * Note - we always use TASK_INTERRUPTIBLE even if the module
1600 * was loaded as part of the kernel. The reason is that
1601 * UNINTERRUPTIBLE would cause this thread to be counted in
1602 * the load average as a running process, and an interruptible
1603 * wait doesn't.
1592 */ 1604 */
1593 SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of" 1605 set_current_state(TASK_INTERRUPTIBLE);
1594 " scsi_eh_%d\n",shost->host_no)); 1606 while (!kthread_should_stop()) {
1595 1607 if (shost->host_failed == 0 ||
1596 while (1) { 1608 shost->host_failed != shost->host_busy) {
1597 /* 1609 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
1598 * If we get a signal, it means we are supposed to go 1610 " scsi_eh_%d"
1599 * away and die. This typically happens if the user is 1611 " sleeping\n",
1600 * trying to unload a module. 1612 shost->host_no));
1601 */ 1613 schedule();
1602 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler" 1614 set_current_state(TASK_INTERRUPTIBLE);
1603 " scsi_eh_%d" 1615 continue;
1604 " sleeping\n",shost->host_no)); 1616 }
1605
1606 /*
1607 * Note - we always use down_interruptible with the semaphore
1608 * even if the module was loaded as part of the kernel. The
1609 * reason is that down() will cause this thread to be counted
1610 * in the load average as a running process, and down
1611 * interruptible doesn't. Given that we need to allow this
1612 * thread to die if the driver was loaded as a module, using
1613 * semaphores isn't unreasonable.
1614 */
1615 down_interruptible(&sem);
1616 if (kthread_should_stop())
1617 break;
1618 1617
1618 __set_current_state(TASK_RUNNING);
1619 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler" 1619 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
1620 " scsi_eh_%d waking" 1620 " scsi_eh_%d waking"
1621 " up\n",shost->host_no)); 1621 " up\n",shost->host_no));
@@ -1642,7 +1642,7 @@ int scsi_error_handler(void *data)
1642 * which are still online. 1642 * which are still online.
1643 */ 1643 */
1644 scsi_restart_operations(shost); 1644 scsi_restart_operations(shost);
1645 1645 set_current_state(TASK_INTERRUPTIBLE);
1646 } 1646 }
1647 1647
1648 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d" 1648 SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
@@ -1651,7 +1651,7 @@ int scsi_error_handler(void *data)
1651 /* 1651 /*
1652 * Make sure that nobody tries to wake us up again. 1652 * Make sure that nobody tries to wake us up again.
1653 */ 1653 */
1654 shost->eh_wait = NULL; 1654 shost->ehandler = NULL;
1655 return 0; 1655 return 0;
1656} 1656}
1657 1657
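
Note on the scsi_error.c hunks above: the error handler drops its private semaphore (eh_wait) in favour of the shost->ehandler task pointer; scsi_eh_wakeup() now calls wake_up_process(), and the thread sleeps in TASK_INTERRUPTIBLE so it is not counted as a running task in the load average. The core loop, condensed from the hunk:

/* Sketch only: the kthread-style error handler loop from the hunk above. */
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
	if (shost->host_failed == 0 ||
	    shost->host_failed != shost->host_busy) {
		schedule();			/* woken by scsi_eh_wakeup() */
		set_current_state(TASK_INTERRUPTIBLE);
		continue;
	}
	__set_current_state(TASK_RUNNING);
	/* ... handle the failed commands, then scsi_restart_operations() ... */
	set_current_state(TASK_INTERRUPTIBLE);
}
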
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b7fddac81347..de7f98cc38fe 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -458,7 +458,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
458 * error processing, as long as the device was opened 458 * error processing, as long as the device was opened
459 * non-blocking */ 459 * non-blocking */
460 if (filp && filp->f_flags & O_NONBLOCK) { 460 if (filp && filp->f_flags & O_NONBLOCK) {
461 if (sdev->host->shost_state == SHOST_RECOVERY) 461 if (scsi_host_in_recovery(sdev->host))
462 return -ENODEV; 462 return -ENODEV;
463 } else if (!scsi_block_when_processing_errors(sdev)) 463 } else if (!scsi_block_when_processing_errors(sdev))
464 return -ENODEV; 464 return -ENODEV;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 863bb6495daa..dc9c772bc874 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -118,7 +118,6 @@ static void scsi_unprep_request(struct request *req)
118 req->flags &= ~REQ_DONTPREP; 118 req->flags &= ~REQ_DONTPREP;
119 req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL; 119 req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
120 120
121 scsi_release_buffers(cmd);
122 scsi_put_command(cmd); 121 scsi_put_command(cmd);
123} 122}
124 123
@@ -140,14 +139,12 @@ static void scsi_unprep_request(struct request *req)
140 * commands. 139 * commands.
141 * Notes: This could be called either from an interrupt context or a 140 * Notes: This could be called either from an interrupt context or a
142 * normal process context. 141 * normal process context.
143 * Notes: Upon return, cmd is a stale pointer.
144 */ 142 */
145int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 143int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
146{ 144{
147 struct Scsi_Host *host = cmd->device->host; 145 struct Scsi_Host *host = cmd->device->host;
148 struct scsi_device *device = cmd->device; 146 struct scsi_device *device = cmd->device;
149 struct request_queue *q = device->request_queue; 147 struct request_queue *q = device->request_queue;
150 struct request *req = cmd->request;
151 unsigned long flags; 148 unsigned long flags;
152 149
153 SCSI_LOG_MLQUEUE(1, 150 SCSI_LOG_MLQUEUE(1,
@@ -188,9 +185,8 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
188 * function. The SCSI request function detects the blocked condition 185 * function. The SCSI request function detects the blocked condition
189 * and plugs the queue appropriately. 186 * and plugs the queue appropriately.
190 */ 187 */
191 scsi_unprep_request(req);
192 spin_lock_irqsave(q->queue_lock, flags); 188 spin_lock_irqsave(q->queue_lock, flags);
193 blk_requeue_request(q, req); 189 blk_requeue_request(q, cmd->request);
194 spin_unlock_irqrestore(q->queue_lock, flags); 190 spin_unlock_irqrestore(q->queue_lock, flags);
195 191
196 scsi_run_queue(q); 192 scsi_run_queue(q);
@@ -451,7 +447,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
451 447
452 spin_lock_irqsave(shost->host_lock, flags); 448 spin_lock_irqsave(shost->host_lock, flags);
453 shost->host_busy--; 449 shost->host_busy--;
454 if (unlikely((shost->shost_state == SHOST_RECOVERY) && 450 if (unlikely(scsi_host_in_recovery(shost) &&
455 shost->host_failed)) 451 shost->host_failed))
456 scsi_eh_wakeup(shost); 452 scsi_eh_wakeup(shost);
457 spin_unlock(shost->host_lock); 453 spin_unlock(shost->host_lock);
@@ -1268,6 +1264,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1268 } 1264 }
1269 } else { 1265 } else {
1270 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); 1266 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1267 cmd->cmd_len = req->cmd_len;
1271 if (rq_data_dir(req) == WRITE) 1268 if (rq_data_dir(req) == WRITE)
1272 cmd->sc_data_direction = DMA_TO_DEVICE; 1269 cmd->sc_data_direction = DMA_TO_DEVICE;
1273 else if (req->data_len) 1270 else if (req->data_len)
@@ -1342,7 +1339,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1342 struct Scsi_Host *shost, 1339 struct Scsi_Host *shost,
1343 struct scsi_device *sdev) 1340 struct scsi_device *sdev)
1344{ 1341{
1345 if (shost->shost_state == SHOST_RECOVERY) 1342 if (scsi_host_in_recovery(shost))
1346 return 0; 1343 return 0;
1347 if (shost->host_busy == 0 && shost->host_blocked) { 1344 if (shost->host_busy == 0 && shost->host_blocked) {
1348 /* 1345 /*
@@ -1514,7 +1511,6 @@ static void scsi_request_fn(struct request_queue *q)
1514 * cases (host limits or settings) should run the queue at some 1511 * cases (host limits or settings) should run the queue at some
1515 * later time. 1512 * later time.
1516 */ 1513 */
1517 scsi_unprep_request(req);
1518 spin_lock_irq(q->queue_lock); 1514 spin_lock_irq(q->queue_lock);
1519 blk_requeue_request(q, req); 1515 blk_requeue_request(q, req);
1520 sdev->device_busy--; 1516 sdev->device_busy--;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b86f170fa8ed..fcf9f6cbb142 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1466,23 +1466,17 @@ EXPORT_SYMBOL(scsi_scan_single_target);
1466 1466
1467void scsi_forget_host(struct Scsi_Host *shost) 1467void scsi_forget_host(struct Scsi_Host *shost)
1468{ 1468{
1469 struct scsi_target *starget, *tmp; 1469 struct scsi_device *sdev;
1470 unsigned long flags; 1470 unsigned long flags;
1471 1471
1472 /* 1472 restart:
1473 * Ok, this look a bit strange. We always look for the first device
1474 * on the list as scsi_remove_device removes them from it - thus we
1475 * also have to release the lock.
1476 * We don't need to get another reference to the device before
1477 * releasing the lock as we already own the reference from
1478 * scsi_register_device that's release in scsi_remove_device. And
1479 * after that we don't look at sdev anymore.
1480 */
1481 spin_lock_irqsave(shost->host_lock, flags); 1473 spin_lock_irqsave(shost->host_lock, flags);
1482 list_for_each_entry_safe(starget, tmp, &shost->__targets, siblings) { 1474 list_for_each_entry(sdev, &shost->__devices, siblings) {
1475 if (sdev->sdev_state == SDEV_DEL)
1476 continue;
1483 spin_unlock_irqrestore(shost->host_lock, flags); 1477 spin_unlock_irqrestore(shost->host_lock, flags);
1484 scsi_remove_target(&starget->dev); 1478 __scsi_remove_device(sdev);
1485 spin_lock_irqsave(shost->host_lock, flags); 1479 goto restart;
1486 } 1480 }
1487 spin_unlock_irqrestore(shost->host_lock, flags); 1481 spin_unlock_irqrestore(shost->host_lock, flags);
1488} 1482}
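
The rewritten scsi_forget_host() walks the host's device list directly, skips devices already marked SDEV_DEL, and restarts the walk from the top after every removal: the host lock has to be dropped around __scsi_remove_device(), and once it is dropped the list may have changed, so an old cursor cannot be trusted. The same drop-lock/remove/restart shape appears again in __scsi_remove_target() further down. A minimal userspace sketch of the pattern, with illustrative names and a plain mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int id;
        int dying;                      /* analogue of SDEV_DEL */
    };

    static struct node *head;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void remove_node(struct node *n)     /* may sleep, runs unlocked */
    {
        struct node **pp;

        pthread_mutex_lock(&list_lock);
        for (pp = &head; *pp; pp = &(*pp)->next)
            if (*pp == n) {
                *pp = n->next;
                break;
            }
        pthread_mutex_unlock(&list_lock);
        printf("removed %d\n", n->id);
        free(n);
    }

    static void forget_all(void)
    {
        struct node *n;
    restart:
        pthread_mutex_lock(&list_lock);
        for (n = head; n; n = n->next) {
            if (n->dying)
                continue;
            n->dying = 1;
            pthread_mutex_unlock(&list_lock);
            remove_node(n);             /* list may change while unlocked */
            goto restart;               /* so never resume from a stale cursor */
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = calloc(1, sizeof(*n));
            n->id = i;
            n->next = head;
            head = n;
        }
        forget_all();
        return 0;
    }
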
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b8052d5206cc..72a6550a056c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -57,6 +57,8 @@ static struct {
57 { SHOST_CANCEL, "cancel" }, 57 { SHOST_CANCEL, "cancel" },
58 { SHOST_DEL, "deleted" }, 58 { SHOST_DEL, "deleted" },
59 { SHOST_RECOVERY, "recovery" }, 59 { SHOST_RECOVERY, "recovery" },
60 { SHOST_CANCEL_RECOVERY, "cancel/recovery" },
61 { SHOST_DEL_RECOVERY, "deleted/recovery", },
60}; 62};
61const char *scsi_host_state_name(enum scsi_host_state state) 63const char *scsi_host_state_name(enum scsi_host_state state)
62{ 64{
@@ -707,9 +709,11 @@ void __scsi_remove_device(struct scsi_device *sdev)
707 **/ 709 **/
708void scsi_remove_device(struct scsi_device *sdev) 710void scsi_remove_device(struct scsi_device *sdev)
709{ 711{
710 down(&sdev->host->scan_mutex); 712 struct Scsi_Host *shost = sdev->host;
713
714 down(&shost->scan_mutex);
711 __scsi_remove_device(sdev); 715 __scsi_remove_device(sdev);
712 up(&sdev->host->scan_mutex); 716 up(&shost->scan_mutex);
713} 717}
714EXPORT_SYMBOL(scsi_remove_device); 718EXPORT_SYMBOL(scsi_remove_device);
715 719
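
scsi_remove_device() now latches sdev->host into a local before doing the removal. The likely reason, reading from the diff alone, is that __scsi_remove_device() may drop the last reference to the device, after which sdev must not be touched, so the host pointer needed for releasing scan_mutex has to be captured first. A tiny standalone sketch of that ordering (all names illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct host {
        pthread_mutex_t scan_mutex;
    };

    struct device {
        struct host *host;
    };

    static void __remove_device(struct device *dev)
    {
        free(dev);                      /* may be the final reference */
    }

    static void remove_device(struct device *dev)
    {
        struct host *host = dev->host;  /* captured before dev can go away */

        pthread_mutex_lock(&host->scan_mutex);
        __remove_device(dev);
        pthread_mutex_unlock(&host->scan_mutex);    /* dev is stale here */
    }

    int main(void)
    {
        struct host h;
        struct device *d = malloc(sizeof(*d));

        pthread_mutex_init(&h.scan_mutex, NULL);
        d->host = &h;
        remove_device(d);
        return 0;
    }
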
@@ -717,17 +721,20 @@ void __scsi_remove_target(struct scsi_target *starget)
717{ 721{
718 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 722 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
719 unsigned long flags; 723 unsigned long flags;
720 struct scsi_device *sdev, *tmp; 724 struct scsi_device *sdev;
721 725
722 spin_lock_irqsave(shost->host_lock, flags); 726 spin_lock_irqsave(shost->host_lock, flags);
723 starget->reap_ref++; 727 starget->reap_ref++;
724 list_for_each_entry_safe(sdev, tmp, &shost->__devices, siblings) { 728 restart:
729 list_for_each_entry(sdev, &shost->__devices, siblings) {
725 if (sdev->channel != starget->channel || 730 if (sdev->channel != starget->channel ||
726 sdev->id != starget->id) 731 sdev->id != starget->id ||
732 sdev->sdev_state == SDEV_DEL)
727 continue; 733 continue;
728 spin_unlock_irqrestore(shost->host_lock, flags); 734 spin_unlock_irqrestore(shost->host_lock, flags);
729 scsi_remove_device(sdev); 735 scsi_remove_device(sdev);
730 spin_lock_irqsave(shost->host_lock, flags); 736 spin_lock_irqsave(shost->host_lock, flags);
737 goto restart;
731 } 738 }
732 spin_unlock_irqrestore(shost->host_lock, flags); 739 spin_unlock_irqrestore(shost->host_lock, flags);
733 scsi_target_reap(starget); 740 scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de564b386052..9a1dc0cea03c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -235,6 +235,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
235 return 0; 235 return 0;
236 236
237 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd)); 237 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
238 SCpnt->cmd_len = rq->cmd_len;
238 if (rq_data_dir(rq) == WRITE) 239 if (rq_data_dir(rq) == WRITE)
239 SCpnt->sc_data_direction = DMA_TO_DEVICE; 240 SCpnt->sc_data_direction = DMA_TO_DEVICE;
240 else if (rq->data_len) 241 else if (rq->data_len)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9ea4765d1d12..4d09a6e4dd2e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1027,7 +1027,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1027 if (sdp->detached) 1027 if (sdp->detached)
1028 return -ENODEV; 1028 return -ENODEV;
1029 if (filp->f_flags & O_NONBLOCK) { 1029 if (filp->f_flags & O_NONBLOCK) {
1030 if (sdp->device->host->shost_state == SHOST_RECOVERY) 1030 if (scsi_host_in_recovery(sdp->device->host))
1031 return -EBUSY; 1031 return -EBUSY;
1032 } else if (!scsi_block_when_processing_errors(sdp->device)) 1032 } else if (!scsi_block_when_processing_errors(sdp->device))
1033 return -EBUSY; 1033 return -EBUSY;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ce63fc8312dc..561901b1cf11 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -326,6 +326,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
326 return 0; 326 return 0;
327 327
328 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd)); 328 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
329 SCpnt->cmd_len = rq->cmd_len;
329 if (!rq->data_len) 330 if (!rq->data_len)
330 SCpnt->sc_data_direction = DMA_NONE; 331 SCpnt->sc_data_direction = DMA_NONE;
331 else if (rq_data_dir(rq) == WRITE) 332 else if (rq_data_dir(rq) == WRITE)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a93308ae9736..d001c046551b 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4206,6 +4206,7 @@ static int st_init_command(struct scsi_cmnd *SCpnt)
4206 return 0; 4206 return 0;
4207 4207
4208 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd)); 4208 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
4209 SCpnt->cmd_len = rq->cmd_len;
4209 4210
4210 if (rq_data_dir(rq) == WRITE) 4211 if (rq_data_dir(rq) == WRITE)
4211 SCpnt->sc_data_direction = DMA_TO_DEVICE; 4212 SCpnt->sc_data_direction = DMA_TO_DEVICE;
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index 78c1f36ad9b7..87ef368384fb 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -98,7 +98,7 @@ static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id, struct pt_regs *re
98{ 98{
99 struct uart_port *port = dev_id; 99 struct uart_port *port = dev_id;
100 struct tty_struct *tty = port->info->tty; 100 struct tty_struct *tty = port->info->tty;
101 unsigned int status, ch, flg, ignored = 0; 101 unsigned int status, ch, flg;
102 102
103 status = clps_readl(SYSFLG(port)); 103 status = clps_readl(SYSFLG(port));
104 while (!(status & SYSFLG_URXFE)) { 104 while (!(status & SYSFLG_URXFE)) {
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index c47c8052b486..f1fb67fe22a8 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -987,7 +987,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
987 987
988 /* remove this interface if it has been registered */ 988 /* remove this interface if it has been registered */
989 interface = dev->actconfig->interface[i]; 989 interface = dev->actconfig->interface[i];
990 if (!klist_node_attached(&interface->dev.knode_bus)) 990 if (!device_is_registered(&interface->dev))
991 continue; 991 continue;
992 dev_dbg (&dev->dev, "unregistering interface %s\n", 992 dev_dbg (&dev->dev, "unregistering interface %s\n",
993 interface->dev.bus_id); 993 interface->dev.bus_id);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 087af73a59dd..7d131509e419 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -303,7 +303,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
303 /* if interface was already added, bind now; else let 303 /* if interface was already added, bind now; else let
304 * the future device_add() bind it, bypassing probe() 304 * the future device_add() bind it, bypassing probe()
305 */ 305 */
306 if (klist_node_attached(&dev->knode_bus)) 306 if (device_is_registered(dev))
307 device_bind_driver(dev); 307 device_bind_driver(dev);
308 308
309 return 0; 309 return 0;
@@ -336,8 +336,8 @@ void usb_driver_release_interface(struct usb_driver *driver,
336 if (iface->condition != USB_INTERFACE_BOUND) 336 if (iface->condition != USB_INTERFACE_BOUND)
337 return; 337 return;
338 338
339 /* release only after device_add() */ 339 /* don't release if the interface hasn't been added yet */
340 if (klist_node_attached(&dev->knode_bus)) { 340 if (device_is_registered(dev)) {
341 iface->condition = USB_INTERFACE_UNBINDING; 341 iface->condition = USB_INTERFACE_UNBINDING;
342 device_release_driver(dev); 342 device_release_driver(dev);
343 } 343 }
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 1507738337c4..73f8c9404156 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -422,7 +422,7 @@ static inline void ep0_idle (struct pxa2xx_udc *dev)
422} 422}
423 423
424static int 424static int
425write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max) 425write_packet(volatile unsigned long *uddr, struct pxa2xx_request *req, unsigned max)
426{ 426{
427 u8 *buf; 427 u8 *buf;
428 unsigned length, count; 428 unsigned length, count;
@@ -2602,7 +2602,7 @@ static int __exit pxa2xx_udc_remove(struct device *_dev)
2602 * VBUS IRQs should probably be ignored so that the PXA device just acts 2602 * VBUS IRQs should probably be ignored so that the PXA device just acts
2603 * "dead" to USB hosts until system resume. 2603 * "dead" to USB hosts until system resume.
2604 */ 2604 */
2605static int pxa2xx_udc_suspend(struct device *dev, u32 state, u32 level) 2605static int pxa2xx_udc_suspend(struct device *dev, pm_message_t state, u32 level)
2606{ 2606{
2607 struct pxa2xx_udc *udc = dev_get_drvdata(dev); 2607 struct pxa2xx_udc *udc = dev_get_drvdata(dev);
2608 2608
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h
index d0bc396a85d5..a58f3e6e71f1 100644
--- a/drivers/usb/gadget/pxa2xx_udc.h
+++ b/drivers/usb/gadget/pxa2xx_udc.h
@@ -69,11 +69,11 @@ struct pxa2xx_ep {
69 * UDDR = UDC Endpoint Data Register (the fifo) 69 * UDDR = UDC Endpoint Data Register (the fifo)
70 * DRCM = DMA Request Channel Map 70 * DRCM = DMA Request Channel Map
71 */ 71 */
72 volatile u32 *reg_udccs; 72 volatile unsigned long *reg_udccs;
73 volatile u32 *reg_ubcr; 73 volatile unsigned long *reg_ubcr;
74 volatile u32 *reg_uddr; 74 volatile unsigned long *reg_uddr;
75#ifdef USE_DMA 75#ifdef USE_DMA
76 volatile u32 *reg_drcmr; 76 volatile unsigned long *reg_drcmr;
77#define drcmr(n) .reg_drcmr = & DRCMR ## n , 77#define drcmr(n) .reg_drcmr = & DRCMR ## n ,
78#else 78#else
79#define drcmr(n) 79#define drcmr(n)
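
The register pointers in the UDC driver switch from volatile u32 * to volatile unsigned long *. The change appears to be about type agreement with the platform's register accessors, which presumably expand to volatile unsigned long lvalues, so expressions like & DRCMR ## n naturally have unsigned long pointer type. A small illustration of the point, using a fake register so it can actually run (the REG macro and the address are stand-ins, not the PXA definitions):

    #include <stdio.h>

    static volatile unsigned long fake_reg;     /* stands in for an ioremapped register */

    #define REG(addr)   (*(volatile unsigned long *)(addr))

    int main(void)
    {
        /* &REG(...) has type "volatile unsigned long *", so the variable must match. */
        volatile unsigned long *reg = &REG(&fake_reg);

        *reg = 0x55;
        printf("0x%lx\n", fake_reg);    /* 0x55 */
        return 0;
    }
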
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index d2a1fd40dfcb..d42a15d10a46 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -782,6 +782,9 @@ retry:
782/* usb 1.1 says max 90% of a frame is available for periodic transfers. 782/* usb 1.1 says max 90% of a frame is available for periodic transfers.
783 * this driver doesn't promise that much since it's got to handle an 783 * this driver doesn't promise that much since it's got to handle an
784 * IRQ per packet; irq handling latencies also use up that time. 784 * IRQ per packet; irq handling latencies also use up that time.
785 *
786 * NOTE: the periodic schedule is a sparse tree, with the load for
787 * each branch minimized. see fig 3.5 in the OHCI spec for example.
785 */ 788 */
786#define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */ 789#define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */
787 790
@@ -843,6 +846,7 @@ static int sl811h_urb_enqueue(
843 if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)) 846 if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
844 || !HC_IS_RUNNING(hcd->state)) { 847 || !HC_IS_RUNNING(hcd->state)) {
845 retval = -ENODEV; 848 retval = -ENODEV;
849 kfree(ep);
846 goto fail; 850 goto fail;
847 } 851 }
848 852
@@ -911,8 +915,16 @@ static int sl811h_urb_enqueue(
911 case PIPE_ISOCHRONOUS: 915 case PIPE_ISOCHRONOUS:
912 case PIPE_INTERRUPT: 916 case PIPE_INTERRUPT:
913 urb->interval = ep->period; 917 urb->interval = ep->period;
914 if (ep->branch < PERIODIC_SIZE) 918 if (ep->branch < PERIODIC_SIZE) {
919 /* NOTE: the phase is correct here, but the value
920 * needs offsetting by the transfer queue depth.
921 * All current drivers ignore start_frame, so this
922 * is unlikely to ever matter...
923 */
924 urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
925 + ep->branch;
915 break; 926 break;
927 }
916 928
917 retval = balance(sl811, ep->period, ep->load); 929 retval = balance(sl811, ep->period, ep->load);
918 if (retval < 0) 930 if (retval < 0)
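
The reported urb->start_frame is the current frame number folded into the periodic table plus the branch that balance() chose for this endpoint. balance() itself is not in this hunk; in host controller drivers of this style it typically scans the candidate slots for the endpoint's period and picks the one whose most-loaded frame stays within the budget (MAX_PERIODIC_LOAD above). The standalone sketch below shows that kind of selection; the names, table size and budget are illustrative rather than sl811's exact code.

    #include <stdio.h>

    #define PERIODIC_SIZE   32
    #define MAX_LOAD        500     /* usec out of 1000, as in the comment above */

    /*
     * Pick the branch (slot index modulo the period) whose busiest frame stays
     * smallest once this endpoint's load is added; -1 if nothing fits.
     */
    static int pick_branch(const unsigned load[PERIODIC_SIZE], int period, unsigned this_load)
    {
        int slot, best = -1;
        unsigned best_worst = ~0u;

        for (slot = 0; slot < period; slot++) {
            unsigned worst = 0;
            int f;

            for (f = slot; f < PERIODIC_SIZE; f += period)
                if (load[f] > worst)
                    worst = load[f];
            if (worst + this_load > MAX_LOAD)
                continue;
            if (worst < best_worst) {
                best_worst = worst;
                best = slot;
            }
        }
        return best;
    }

    int main(void)
    {
        unsigned load[PERIODIC_SIZE] = { [0] = 400, [1] = 100 };

        printf("branch for period 2, load 150: %d\n", pick_branch(load, 2, 150)); /* 1 */
        return 0;
    }
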
@@ -1122,7 +1134,7 @@ sl811h_hub_descriptor (
1122 desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp); 1134 desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
1123 1135
1124 /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */ 1136 /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
1125 desc->bitmap[0] = 1 << 1; 1137 desc->bitmap[0] = 0 << 1;
1126 desc->bitmap[1] = ~0; 1138 desc->bitmap[1] = ~0;
1127} 1139}
1128 1140
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 7484d34780fc..6a4ffe6c3977 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -648,6 +648,13 @@ static void read_bulk_callback(struct urb *urb, struct pt_regs *regs)
648 } 648 }
649 649
650 /* 650 /*
651 * If the packet is unreasonably long, quietly drop it rather than
652 * kernel panicing by calling skb_put.
653 */
654 if (pkt_len > PEGASUS_MTU)
655 goto goon;
656
657 /*
651 * at this point we are sure pegasus->rx_skb != NULL 658 * at this point we are sure pegasus->rx_skb != NULL
652 * so we go ahead and pass up the packet. 659 * so we go ahead and pass up the packet.
653 */ 660 */
@@ -886,15 +893,17 @@ static inline void get_interrupt_interval(pegasus_t * pegasus)
886 __u8 data[2]; 893 __u8 data[2];
887 894
888 read_eprom_word(pegasus, 4, (__u16 *) data); 895 read_eprom_word(pegasus, 4, (__u16 *) data);
889 if (data[1] < 0x80) { 896 if (pegasus->usb->speed != USB_SPEED_HIGH) {
890 if (netif_msg_timer(pegasus)) 897 if (data[1] < 0x80) {
891 dev_info(&pegasus->intf->dev, 898 if (netif_msg_timer(pegasus))
892 "intr interval changed from %ums to %ums\n", 899 dev_info(&pegasus->intf->dev, "intr interval "
893 data[1], 0x80); 900 "changed from %ums to %ums\n",
894 data[1] = 0x80; 901 data[1], 0x80);
895#ifdef PEGASUS_WRITE_EEPROM 902 data[1] = 0x80;
896 write_eprom_word(pegasus, 4, *(__u16 *) data); 903#ifdef PEGASUS_WRITE_EEPROM
904 write_eprom_word(pegasus, 4, *(__u16 *) data);
897#endif 905#endif
906 }
898 } 907 }
899 pegasus->intr_interval = data[1]; 908 pegasus->intr_interval = data[1];
900} 909}
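
The EEPROM patch-up of the polling interval is now applied only when the device is not running at high speed. For a full-speed interrupt endpoint bInterval is a frame count in 1 ms units, so bumping a too-small value to 0x80 (128 ms) is a reasonable workaround; at high speed the field is an exponent in 125 us microframe units, where the same rewrite would make no sense. A small sketch of the two encodings (helper name is illustrative; bInterval is assumed to be in its valid 1..16 range):

    #include <stdio.h>

    /* Approximate interrupt-endpoint polling period in microseconds. */
    static unsigned long interval_usec(int high_speed, unsigned char bInterval)
    {
        if (high_speed)
            return 125UL << (bInterval - 1);    /* 2^(bInterval-1) microframes */
        return bInterval * 1000UL;              /* bInterval frames of 1 ms    */
    }

    int main(void)
    {
        printf("full speed, bInterval 0x80: %lu us\n", interval_usec(0, 0x80)); /* 128000 */
        printf("high speed, bInterval 4:    %lu us\n", interval_usec(1, 4));    /* 1000   */
        return 0;
    }
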
@@ -904,8 +913,9 @@ static void set_carrier(struct net_device *net)
904 pegasus_t *pegasus = netdev_priv(net); 913 pegasus_t *pegasus = netdev_priv(net);
905 u16 tmp; 914 u16 tmp;
906 915
907 if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp)) 916 if (!read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
908 return; 917 return;
918
909 if (tmp & BMSR_LSTATUS) 919 if (tmp & BMSR_LSTATUS)
910 netif_carrier_on(net); 920 netif_carrier_on(net);
911 else 921 else
@@ -1355,6 +1365,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
1355 cancel_delayed_work(&pegasus->carrier_check); 1365 cancel_delayed_work(&pegasus->carrier_check);
1356 unregister_netdev(pegasus->net); 1366 unregister_netdev(pegasus->net);
1357 usb_put_dev(interface_to_usbdev(intf)); 1367 usb_put_dev(interface_to_usbdev(intf));
1368 unlink_all_urbs(pegasus);
1358 free_all_urbs(pegasus); 1369 free_all_urbs(pegasus);
1359 free_skb_pool(pegasus); 1370 free_skb_pool(pegasus);
1360 if (pegasus->rx_skb) 1371 if (pegasus->rx_skb)
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index a4ce0008d69b..926d4c2c1600 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -16,7 +16,8 @@
16#include "usb-serial.h" 16#include "usb-serial.h"
17 17
18static struct usb_device_id id_table [] = { 18static struct usb_device_id id_table [] = {
19 { USB_DEVICE(0xf3d, 0x0112) }, 19 { USB_DEVICE(0xf3d, 0x0112) }, /* AirPrime CDMA Wireless PC Card */
20 { USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */
20 { }, 21 { },
21}; 22};
22MODULE_DEVICE_TABLE(usb, id_table); 23MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4e434cb10bb1..5a8631c8a4a7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1846,10 +1846,12 @@ static void ftdi_set_termios (struct usb_serial_port *port, struct termios *old_
1846 } else { 1846 } else {
1847 /* set the baudrate determined before */ 1847 /* set the baudrate determined before */
1848 if (change_speed(port)) { 1848 if (change_speed(port)) {
1849 err("%s urb failed to set baurdrate", __FUNCTION__); 1849 err("%s urb failed to set baudrate", __FUNCTION__);
1850 }
1851 /* Ensure RTS and DTR are raised when baudrate changed from 0 */
1852 if ((old_termios->c_cflag & CBAUD) == B0) {
1853 set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1850 } 1854 }
1851 /* Ensure RTS and DTR are raised */
1852 set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
1853 } 1855 }
1854 1856
1855 /* Set flow control */ 1857 /* Set flow control */
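
The DTR/RTS handling becomes conditional: the modem lines are reasserted only when the previous rate was B0, i.e. when the port is coming back from the "hang up" state, instead of on every termios change. That matches the usual serial convention that B0 drops the lines and leaving B0 should raise them again. A userspace-flavoured sketch of the test (helper name is made up):

    #include <stdio.h>
    #include <termios.h>

    /* True when a termios change moves the port out of the B0 (hang-up) state. */
    static int leaving_hangup(const struct termios *old_t, const struct termios *new_t)
    {
        return cfgetospeed(old_t) == B0 && cfgetospeed(new_t) != B0;
    }

    int main(void)
    {
        struct termios old_t = { 0 }, new_t = { 0 };

        cfsetospeed(&old_t, B0);
        cfsetospeed(&new_t, B9600);
        printf("%d\n", leaving_hangup(&old_t, &new_t));     /* 1 */
        return 0;
    }
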
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 92d0f925d053..4989e5740d18 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -25,6 +25,9 @@
25 2005-06-20 v0.4.1 add missing braces :-/ 25 2005-06-20 v0.4.1 add missing braces :-/
26 killed end-of-line whitespace 26 killed end-of-line whitespace
27 2005-07-15 v0.4.2 rename WLAN product to FUSION, add FUSION2 27 2005-07-15 v0.4.2 rename WLAN product to FUSION, add FUSION2
28 2005-09-10 v0.4.3 added HUAWEI E600 card and Audiovox AirCard
29 2005-09-20 v0.4.4 increased recv buffer size: the card sometimes
30 wants to send >2000 bytes.
28 31
29 Work sponsored by: Sigos GmbH, Germany <info@sigos.de> 32 Work sponsored by: Sigos GmbH, Germany <info@sigos.de>
30 33
@@ -71,15 +74,21 @@ static int option_send_setup(struct usb_serial_port *port);
71 74
72/* Vendor and product IDs */ 75/* Vendor and product IDs */
73#define OPTION_VENDOR_ID 0x0AF0 76#define OPTION_VENDOR_ID 0x0AF0
77#define HUAWEI_VENDOR_ID 0x12D1
78#define AUDIOVOX_VENDOR_ID 0x0F3D
74 79
75#define OPTION_PRODUCT_OLD 0x5000 80#define OPTION_PRODUCT_OLD 0x5000
76#define OPTION_PRODUCT_FUSION 0x6000 81#define OPTION_PRODUCT_FUSION 0x6000
77#define OPTION_PRODUCT_FUSION2 0x6300 82#define OPTION_PRODUCT_FUSION2 0x6300
83#define HUAWEI_PRODUCT_E600 0x1001
84#define AUDIOVOX_PRODUCT_AIRCARD 0x0112
78 85
79static struct usb_device_id option_ids[] = { 86static struct usb_device_id option_ids[] = {
80 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_OLD) }, 87 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_OLD) },
81 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION) }, 88 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION) },
82 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION2) }, 89 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION2) },
90 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) },
91 { USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) },
83 { } /* Terminating entry */ 92 { } /* Terminating entry */
84}; 93};
85 94
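
Supporting the Huawei E600 and the Audiovox AirCard is purely a matter of listing their vendor/product pairs; the USB core matches the table exported through MODULE_DEVICE_TABLE and binds the driver. For reference, the skeleton such an addition follows is shown below; it is a fragment, not a complete module, and the IDs are invented for the example.

    #include <linux/module.h>
    #include <linux/usb.h>

    #define EXAMPLE_VENDOR_ID       0x1234  /* made-up IDs, illustration only */
    #define EXAMPLE_PRODUCT_CARD    0x0001

    static struct usb_device_id example_ids[] = {
        { USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_CARD) },
        { }                             /* Terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_ids);
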
@@ -132,7 +141,7 @@ static int debug;
132 141
133#define N_IN_URB 4 142#define N_IN_URB 4
134#define N_OUT_URB 1 143#define N_OUT_URB 1
135#define IN_BUFLEN 1024 144#define IN_BUFLEN 4096
136#define OUT_BUFLEN 128 145#define OUT_BUFLEN 128
137 146
138struct option_port_private { 147struct option_port_private {
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 31ee13eef7af..773ae11b4a19 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -650,6 +650,7 @@ config FB_NVIDIA
650 select FB_CFB_FILLRECT 650 select FB_CFB_FILLRECT
651 select FB_CFB_COPYAREA 651 select FB_CFB_COPYAREA
652 select FB_CFB_IMAGEBLIT 652 select FB_CFB_IMAGEBLIT
653 select FB_SOFT_CURSOR
653 help 654 help
654 This driver supports graphics boards with the nVidia chips, TNT 655 This driver supports graphics boards with the nVidia chips, TNT
655 and newer. For very old chipsets, such as the RIVA128, then use 656 and newer. For very old chipsets, such as the RIVA128, then use
diff --git a/drivers/video/aty/xlinit.c b/drivers/video/aty/xlinit.c
index 92643af12581..a085cbf74ecb 100644
--- a/drivers/video/aty/xlinit.c
+++ b/drivers/video/aty/xlinit.c
@@ -174,7 +174,7 @@ int atyfb_xl_init(struct fb_info *info)
174 const struct xl_card_cfg_t * card = &card_cfg[xl_card]; 174 const struct xl_card_cfg_t * card = &card_cfg[xl_card];
175 struct atyfb_par *par = (struct atyfb_par *) info->par; 175 struct atyfb_par *par = (struct atyfb_par *) info->par;
176 union aty_pll pll; 176 union aty_pll pll;
177 int i, err; 177 int err;
178 u32 temp; 178 u32 temp;
179 179
180 aty_st_8(CONFIG_STAT0, 0x85, par); 180 aty_st_8(CONFIG_STAT0, 0x85, par);
@@ -252,9 +252,14 @@ int atyfb_xl_init(struct fb_info *info)
252 aty_st_le32(0xEC, 0x00000000, par); 252 aty_st_le32(0xEC, 0x00000000, par);
253 aty_st_le32(0xFC, 0x00000000, par); 253 aty_st_le32(0xFC, 0x00000000, par);
254 254
255 for (i=0; i<sizeof(lcd_tbl)/sizeof(lcd_tbl_t); i++) { 255#if defined (CONFIG_FB_ATY_GENERIC_LCD)
256 aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par); 256 {
257 int i;
258
259 for (i = 0; i < ARRAY_SIZE(lcd_tbl); i++)
260 aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
257 } 261 }
262#endif
258 263
259 aty_st_le16(CONFIG_STAT0, 0x00A4, par); 264 aty_st_le16(CONFIG_STAT0, 0x00A4, par);
260 mdelay(10); 265 mdelay(10);
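
Besides hiding the LCD register writes behind CONFIG_FB_ATY_GENERIC_LCD, the loop bound switches from an open-coded sizeof division to ARRAY_SIZE, the idiomatic way to get an element count. The macro amounts to the following (shown standalone; it only works on true arrays, not on pointers):

    #include <stdio.h>

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
        int regs[7];

        printf("%zu entries\n", ARRAY_SIZE(regs));  /* 7 */
        return 0;
    }
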
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index cfa61b512de0..0b6af00d197e 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -272,11 +272,11 @@ static void fb_cvt_convert_to_mode(struct fb_cvt_data *cvt,
272{ 272{
273 mode->refresh = cvt->f_refresh; 273 mode->refresh = cvt->f_refresh;
274 mode->pixclock = KHZ2PICOS(cvt->pixclock/1000); 274 mode->pixclock = KHZ2PICOS(cvt->pixclock/1000);
275 mode->left_margin = cvt->h_front_porch; 275 mode->left_margin = cvt->h_back_porch;
276 mode->right_margin = cvt->h_back_porch; 276 mode->right_margin = cvt->h_front_porch;
277 mode->hsync_len = cvt->hsync; 277 mode->hsync_len = cvt->hsync;
278 mode->upper_margin = cvt->v_front_porch; 278 mode->upper_margin = cvt->v_back_porch;
279 mode->lower_margin = cvt->v_back_porch; 279 mode->lower_margin = cvt->v_front_porch;
280 mode->vsync_len = cvt->vsync; 280 mode->vsync_len = cvt->vsync;
281 281
282 mode->sync &= ~(FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT); 282 mode->sync &= ~(FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT);
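
The swap follows from how fbdev names its margins: left_margin and upper_margin are the back porch (the gap between the end of the sync pulse and the start of active video), while right_margin and lower_margin are the front porch (active video to the start of sync). CVT reports the porches under their own names, so front and back have to be crossed over when filling the mode. A compact illustration with stand-in structs (field names mirror the two conventions; the numbers are arbitrary):

    #include <stdio.h>

    /* Minimal stand-ins for the CVT result and the fbdev timing fields. */
    struct cvt_timing { unsigned h_front_porch, h_back_porch, hsync; };
    struct fb_timing  { unsigned left_margin, right_margin, hsync_len; };

    /*
     * One scanline, left to right:
     *   active video | front porch | hsync | back porch | active video ...
     * fbdev counts left_margin from the hsync side, i.e. it is the back porch.
     */
    static void cvt_to_fb(const struct cvt_timing *cvt, struct fb_timing *fb)
    {
        fb->left_margin  = cvt->h_back_porch;   /* back porch  */
        fb->right_margin = cvt->h_front_porch;  /* front porch */
        fb->hsync_len    = cvt->hsync;
    }

    int main(void)
    {
        struct cvt_timing cvt = { .h_front_porch = 48, .h_back_porch = 80, .hsync = 32 };
        struct fb_timing fb;

        cvt_to_fb(&cvt, &fb);
        printf("left=%u right=%u hsync=%u\n", fb.left_margin, fb.right_margin, fb.hsync_len);
        return 0;
    }
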
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 3620de0f252e..a7f020ada630 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -893,7 +893,7 @@ static int nvidiafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
893 int i, set = cursor->set; 893 int i, set = cursor->set;
894 u16 fg, bg; 894 u16 fg, bg;
895 895
896 if (!hwcur || cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS) 896 if (cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
897 return -ENXIO; 897 return -ENXIO;
898 898
899 NVShowHideCursor(par, 0); 899 NVShowHideCursor(par, 0);
@@ -1356,6 +1356,9 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
1356 info->pixmap.size = 8 * 1024; 1356 info->pixmap.size = 8 * 1024;
1357 info->pixmap.flags = FB_PIXMAP_SYSTEM; 1357 info->pixmap.flags = FB_PIXMAP_SYSTEM;
1358 1358
1359 if (!hwcur)
1360 info->fbops->fb_cursor = soft_cursor;
1361
1359 info->var.accel_flags = (!noaccel); 1362 info->var.accel_flags = (!noaccel);
1360 1363
1361 switch (par->Architecture) { 1364 switch (par->Architecture) {