author    Paul Mackerras <paulus@samba.org>    2006-05-19 01:02:42 -0400
committer Paul Mackerras <paulus@samba.org>    2006-05-19 01:02:42 -0400
commit    3c06da5ae5358e9d325d541a053e1059e9654bcc (patch)
tree      04c953cc82fe57cff248ac523095cd4f0d9611a7 /drivers
parent    4d1f3f25d9c303d1ce63b42cc94c54ac0ab2e950 (diff)
parent    a54c9d30dbb06391ec4422aaf0e1dc2c8c53bd3e (diff)
Merge ../linux-2.6
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/base/class.c | 32
-rw-r--r-- drivers/block/ub.c | 18
-rw-r--r-- drivers/char/Kconfig | 2
-rw-r--r-- drivers/char/pcmcia/cm4000_cs.c | 10
-rw-r--r-- drivers/char/pcmcia/cm4040_cs.c | 11
-rw-r--r-- drivers/char/rio/host.h | 9
-rw-r--r-- drivers/char/rio/rioboot.c | 1
-rw-r--r-- drivers/char/rio/rioctrl.c | 43
-rw-r--r-- drivers/char/rio/rioioctl.h | 56
-rw-r--r-- drivers/char/tpm/Kconfig | 2
-rw-r--r-- drivers/char/tpm/tpm.h | 2
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 28
-rw-r--r-- drivers/i2c/busses/scx200_acb.c | 16
-rw-r--r-- drivers/ide/legacy/ide-cs.c | 1
-rw-r--r-- drivers/ieee1394/ohci1394.c | 2
-rw-r--r-- drivers/ieee1394/sbp2.c | 209
-rw-r--r-- drivers/ieee1394/sbp2.h | 18
-rw-r--r-- drivers/infiniband/core/cm.c | 12
-rw-r--r-- drivers/infiniband/core/mad.c | 47
-rw-r--r-- drivers/infiniband/core/mad_priv.h | 5
-rw-r--r-- drivers/infiniband/core/mad_rmpp.c | 20
-rw-r--r-- drivers/infiniband/core/sysfs.c | 2
-rw-r--r-- drivers/infiniband/core/ucm.c | 12
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_driver.c | 7
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_cq.c | 41
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_dev.h | 2
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_mr.c | 15
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.h | 22
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_qp.c | 31
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_srq.c | 23
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 4
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 195
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.h | 4
-rw-r--r-- drivers/isdn/capi/capi.c | 1
-rw-r--r-- drivers/isdn/gigaset/usb-gigaset.c | 4
-rw-r--r-- drivers/leds/Kconfig | 7
-rw-r--r-- drivers/leds/led-class.c | 9
-rw-r--r-- drivers/leds/ledtrig-timer.c | 17
-rw-r--r-- drivers/message/fusion/mptbase.c | 63
-rw-r--r-- drivers/message/fusion/mptbase.h | 10
-rw-r--r-- drivers/message/fusion/mptfc.c | 134
-rw-r--r-- drivers/message/fusion/mptsas.c | 99
-rw-r--r-- drivers/message/fusion/mptscsih.c | 50
-rw-r--r-- drivers/message/fusion/mptspi.c | 68
-rw-r--r-- drivers/net/au1000_eth.c | 18
-rw-r--r-- drivers/net/b44.c | 28
-rw-r--r-- drivers/net/dl2k.c | 13
-rw-r--r-- drivers/net/irda/Makefile | 2
-rw-r--r-- drivers/net/irda/irda-usb.c | 2
-rw-r--r-- drivers/net/irda/sir-dev.h | 13
-rw-r--r-- drivers/net/irda/sir_dev.c | 315
-rw-r--r-- drivers/net/irda/sir_kthread.c | 508
-rw-r--r-- drivers/net/irda/smsc-ircc2.c | 14
-rw-r--r-- drivers/net/ixp2000/enp2611.c | 13
-rw-r--r-- drivers/net/ixp2000/pm3386.c | 30
-rw-r--r-- drivers/net/ixp2000/pm3386.h | 1
-rw-r--r-- drivers/net/ne.c | 31
-rw-r--r-- drivers/net/phy/mdio_bus.c | 4
-rw-r--r-- drivers/net/sis900.c | 1
-rw-r--r-- drivers/net/sky2.c | 235
-rw-r--r-- drivers/net/sky2.h | 3
-rw-r--r-- drivers/net/spider_net.c | 12
-rw-r--r-- drivers/net/spider_net.h | 2
-rw-r--r-- drivers/net/sungem_phy.c | 45
-rw-r--r-- drivers/net/sungem_phy.h | 1
-rw-r--r-- drivers/net/tg3.c | 11
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_main.c | 45
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_main.h | 6
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_phy.c | 2
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 7
-rw-r--r-- drivers/pci/quirks.c | 16
-rw-r--r-- drivers/pcmcia/pcmcia_ioctl.c | 23
-rw-r--r-- drivers/rtc/rtc-sa1100.c | 6
-rw-r--r-- drivers/s390/net/lcs.c | 2
-rw-r--r-- drivers/sbus/char/openprom.c | 15
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 1
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_pci.c | 12
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 30
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 95
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 18
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 22
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 33
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 134
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 68
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/megaraid.c | 1
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 59
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.h | 7
-rw-r--r-- drivers/scsi/megaraid/megaraid_mm.c | 6
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 19
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 2
-rw-r--r-- drivers/scsi/scsi_lib.c | 27
-rw-r--r-- drivers/scsi/sim710.c | 2
-rw-r--r-- drivers/serial/8250.c | 74
-rw-r--r-- drivers/serial/8250_au1x00.c | 5
-rw-r--r-- drivers/serial/serial_core.c | 123
-rw-r--r-- drivers/spi/Kconfig | 8
-rw-r--r-- drivers/spi/Makefile | 1
-rw-r--r-- drivers/spi/pxa2xx_spi.c | 1467
-rw-r--r-- drivers/spi/spi.c | 7
-rw-r--r-- drivers/spi/spi_bitbang.c | 104
-rw-r--r-- drivers/usb/atm/speedtch.c | 2
-rw-r--r-- drivers/usb/atm/usbatm.c | 8
-rw-r--r-- drivers/usb/core/hcd.c | 13
-rw-r--r-- drivers/usb/core/hub.c | 23
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 2
-rw-r--r-- drivers/usb/input/hid-core.c | 4
-rw-r--r-- drivers/usb/misc/emi26.c | 4
-rw-r--r-- drivers/usb/misc/emi62.c | 4
-rw-r--r-- drivers/usb/net/pegasus.c | 20
-rw-r--r-- drivers/usb/serial/Kconfig | 10
-rw-r--r-- drivers/usb/serial/Makefile | 1
-rw-r--r-- drivers/usb/serial/airprime.c | 1
-rw-r--r-- drivers/usb/serial/ark3116.c | 465
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio.h | 9
-rw-r--r-- drivers/usb/serial/generic.c | 1
-rw-r--r-- drivers/usb/serial/omninet.c | 12
-rw-r--r-- drivers/usb/serial/usb-serial.c | 19
-rw-r--r-- drivers/video/backlight/backlight.c | 18
-rw-r--r-- drivers/video/backlight/lcd.c | 32
-rw-r--r-- drivers/video/logo/Makefile | 2
126 files changed, 4119 insertions, 1655 deletions
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0e71dff327cd..b1ea4df85c7d 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -456,6 +456,35 @@ static void class_device_remove_attrs(struct class_device * cd)
456 } 456 }
457} 457}
458 458
459static int class_device_add_groups(struct class_device * cd)
460{
461 int i;
462 int error = 0;
463
464 if (cd->groups) {
465 for (i = 0; cd->groups[i]; i++) {
466 error = sysfs_create_group(&cd->kobj, cd->groups[i]);
467 if (error) {
468 while (--i >= 0)
469 sysfs_remove_group(&cd->kobj, cd->groups[i]);
470 goto out;
471 }
472 }
473 }
474out:
475 return error;
476}
477
478static void class_device_remove_groups(struct class_device * cd)
479{
480 int i;
481 if (cd->groups) {
482 for (i = 0; cd->groups[i]; i++) {
483 sysfs_remove_group(&cd->kobj, cd->groups[i]);
484 }
485 }
486}
487
459static ssize_t show_dev(struct class_device *class_dev, char *buf) 488static ssize_t show_dev(struct class_device *class_dev, char *buf)
460{ 489{
461 return print_dev_t(buf, class_dev->devt); 490 return print_dev_t(buf, class_dev->devt);
@@ -559,6 +588,8 @@ int class_device_add(struct class_device *class_dev)
559 class_name); 588 class_name);
560 } 589 }
561 590
591 class_device_add_groups(class_dev);
592
562 kobject_uevent(&class_dev->kobj, KOBJ_ADD); 593 kobject_uevent(&class_dev->kobj, KOBJ_ADD);
563 594
564 /* notify any interfaces this device is now here */ 595 /* notify any interfaces this device is now here */
@@ -672,6 +703,7 @@ void class_device_del(struct class_device *class_dev)
672 if (class_dev->devt_attr) 703 if (class_dev->devt_attr)
673 class_device_remove_file(class_dev, class_dev->devt_attr); 704 class_device_remove_file(class_dev, class_dev->devt_attr);
674 class_device_remove_attrs(class_dev); 705 class_device_remove_attrs(class_dev);
706 class_device_remove_groups(class_dev);
675 707
676 kobject_uevent(&class_dev->kobj, KOBJ_REMOVE); 708 kobject_uevent(&class_dev->kobj, KOBJ_REMOVE);
677 kobject_del(&class_dev->kobj); 709 kobject_del(&class_dev->kobj);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index f73446f580df..c688c25992e4 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -536,6 +536,9 @@ static void ub_cleanup(struct ub_dev *sc)
536 kfree(lun); 536 kfree(lun);
537 } 537 }
538 538
539 usb_set_intfdata(sc->intf, NULL);
540 usb_put_intf(sc->intf);
541 usb_put_dev(sc->dev);
539 kfree(sc); 542 kfree(sc);
540} 543}
541 544
@@ -2221,7 +2224,12 @@ static int ub_probe(struct usb_interface *intf,
2221 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; 2224 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2222 usb_set_intfdata(intf, sc); 2225 usb_set_intfdata(intf, sc);
2223 usb_get_dev(sc->dev); 2226 usb_get_dev(sc->dev);
2224 // usb_get_intf(sc->intf); /* Do we need this? */ 2227 /*
2228 * Since we give the interface struct to the block level through
2229 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
2230 * oopses on close after a disconnect (kernels 2.6.16 and up).
2231 */
2232 usb_get_intf(sc->intf);
2225 2233
2226 snprintf(sc->name, 12, DRV_NAME "(%d.%d)", 2234 snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2227 sc->dev->bus->busnum, sc->dev->devnum); 2235 sc->dev->bus->busnum, sc->dev->devnum);
@@ -2286,7 +2294,7 @@ static int ub_probe(struct usb_interface *intf,
2286 2294
2287err_dev_desc: 2295err_dev_desc:
2288 usb_set_intfdata(intf, NULL); 2296 usb_set_intfdata(intf, NULL);
2289 // usb_put_intf(sc->intf); 2297 usb_put_intf(sc->intf);
2290 usb_put_dev(sc->dev); 2298 usb_put_dev(sc->dev);
2291 kfree(sc); 2299 kfree(sc);
2292err_core: 2300err_core:
@@ -2461,12 +2469,6 @@ static void ub_disconnect(struct usb_interface *intf)
2461 * and no URBs left in transit. 2469 * and no URBs left in transit.
2462 */ 2470 */
2463 2471
2464 usb_set_intfdata(intf, NULL);
2465 // usb_put_intf(sc->intf);
2466 sc->intf = NULL;
2467 usb_put_dev(sc->dev);
2468 sc->dev = NULL;
2469
2470 ub_put(sc); 2472 ub_put(sc);
2471} 2473}
2472 2474
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 402296670d3a..78d928f9d9f1 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -291,7 +291,7 @@ config SX
291 291
292config RIO 292config RIO
293 tristate "Specialix RIO system support" 293 tristate "Specialix RIO system support"
294 depends on SERIAL_NONSTANDARD && !64BIT 294 depends on SERIAL_NONSTANDARD
295 help 295 help
296 This is a driver for the Specialix RIO, a smart serial card which 296 This is a driver for the Specialix RIO, a smart serial card which
297 drives an outboard box that can support up to 128 ports. Product 297 drives an outboard box that can support up to 128 ports. Product
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 02114a0bd0d9..128b2632512d 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1981,10 +1981,6 @@ static int __init cmm_init(void)
1981 if (!cmm_class) 1981 if (!cmm_class)
1982 return -1; 1982 return -1;
1983 1983
1984 rc = pcmcia_register_driver(&cm4000_driver);
1985 if (rc < 0)
1986 return rc;
1987
1988 major = register_chrdev(0, DEVICE_NAME, &cm4000_fops); 1984 major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
1989 if (major < 0) { 1985 if (major < 0) {
1990 printk(KERN_WARNING MODULE_NAME 1986 printk(KERN_WARNING MODULE_NAME
@@ -1992,6 +1988,12 @@ static int __init cmm_init(void)
1992 return -1; 1988 return -1;
1993 } 1989 }
1994 1990
1991 rc = pcmcia_register_driver(&cm4000_driver);
1992 if (rc < 0) {
1993 unregister_chrdev(major, DEVICE_NAME);
1994 return rc;
1995 }
1996
1995 return 0; 1997 return 0;
1996} 1998}
1997 1999
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 29efa64580a8..47a8465bf95b 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -724,16 +724,19 @@ static int __init cm4040_init(void)
724 if (!cmx_class) 724 if (!cmx_class)
725 return -1; 725 return -1;
726 726
727 rc = pcmcia_register_driver(&reader_driver);
728 if (rc < 0)
729 return rc;
730
731 major = register_chrdev(0, DEVICE_NAME, &reader_fops); 727 major = register_chrdev(0, DEVICE_NAME, &reader_fops);
732 if (major < 0) { 728 if (major < 0) {
733 printk(KERN_WARNING MODULE_NAME 729 printk(KERN_WARNING MODULE_NAME
734 ": could not get major number\n"); 730 ": could not get major number\n");
735 return -1; 731 return -1;
736 } 732 }
733
734 rc = pcmcia_register_driver(&reader_driver);
735 if (rc < 0) {
736 unregister_chrdev(major, DEVICE_NAME);
737 return rc;
738 }
739
737 return 0; 740 return 0;
738} 741}
739 742
diff --git a/drivers/char/rio/host.h b/drivers/char/rio/host.h
index 3ec73d1a279a..179cdbea712b 100644
--- a/drivers/char/rio/host.h
+++ b/drivers/char/rio/host.h
@@ -33,12 +33,6 @@
33#ifndef __rio_host_h__ 33#ifndef __rio_host_h__
34#define __rio_host_h__ 34#define __rio_host_h__
35 35
36#ifdef SCCS_LABELS
37#ifndef lint
38static char *_host_h_sccs_ = "@(#)host.h 1.2";
39#endif
40#endif
41
42/* 36/*
43** the host structure - one per host card in the system. 37** the host structure - one per host card in the system.
44*/ 38*/
@@ -77,9 +71,6 @@ struct Host {
77#define RC_STARTUP 1 71#define RC_STARTUP 1
78#define RC_RUNNING 2 72#define RC_RUNNING 2
79#define RC_STUFFED 3 73#define RC_STUFFED 3
80#define RC_SOMETHING 4
81#define RC_SOMETHING_NEW 5
82#define RC_SOMETHING_ELSE 6
83#define RC_READY 7 74#define RC_READY 7
84#define RUN_STATE 7 75#define RUN_STATE 7
85/* 76/*
diff --git a/drivers/char/rio/rioboot.c b/drivers/char/rio/rioboot.c
index acda9326c2ef..290143addd34 100644
--- a/drivers/char/rio/rioboot.c
+++ b/drivers/char/rio/rioboot.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/termios.h> 35#include <linux/termios.h>
36#include <linux/serial.h> 36#include <linux/serial.h>
37#include <linux/vmalloc.h>
37#include <asm/semaphore.h> 38#include <asm/semaphore.h>
38#include <linux/generic_serial.h> 39#include <linux/generic_serial.h>
39#include <linux/errno.h> 40#include <linux/errno.h>
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c
index d31aba62bb7f..75b2557c37ec 100644
--- a/drivers/char/rio/rioctrl.c
+++ b/drivers/char/rio/rioctrl.c
@@ -1394,14 +1394,17 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
1394 return RIO_FAIL; 1394 return RIO_FAIL;
1395 } 1395 }
1396 1396
1397 if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) { 1397 if ((PortP->InUse == (typeof(PortP->InUse))-1) ||
1398 rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum); 1398 !(CmdBlkP = RIOGetCmdBlk())) {
1399 rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block "
1400 "for command %d on port %d\n", Cmd, PortP->PortNum);
1399 return RIO_FAIL; 1401 return RIO_FAIL;
1400 } 1402 }
1401 1403
1402 rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", CmdBlkP, PortP->InUse); 1404 rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n",
1405 CmdBlkP, PortP->InUse);
1403 1406
1404 PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0]; 1407 PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0];
1405 1408
1406 CmdBlkP->Packet.src_unit = 0; 1409 CmdBlkP->Packet.src_unit = 0;
1407 if (PortP->SecondBlock) 1410 if (PortP->SecondBlock)
@@ -1425,38 +1428,46 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
1425 1428
1426 switch (Cmd) { 1429 switch (Cmd) {
1427 case MEMDUMP: 1430 case MEMDUMP:
1428 rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p (addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); 1431 rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p "
1432 "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
1429 PktCmdP->SubCommand = MEMDUMP; 1433 PktCmdP->SubCommand = MEMDUMP;
1430 PktCmdP->SubAddr = SubCmd.Addr; 1434 PktCmdP->SubAddr = SubCmd.Addr;
1431 break; 1435 break;
1432 case FCLOSE: 1436 case FCLOSE:
1433 rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", CmdBlkP); 1437 rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n",
1438 CmdBlkP);
1434 break; 1439 break;
1435 case READ_REGISTER: 1440 case READ_REGISTER:
1436 rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk %p\n", (int) SubCmd.Addr, CmdBlkP); 1441 rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) "
1442 "command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
1437 PktCmdP->SubCommand = READ_REGISTER; 1443 PktCmdP->SubCommand = READ_REGISTER;
1438 PktCmdP->SubAddr = SubCmd.Addr; 1444 PktCmdP->SubAddr = SubCmd.Addr;
1439 break; 1445 break;
1440 case RESUME: 1446 case RESUME:
1441 rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", CmdBlkP); 1447 rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n",
1448 CmdBlkP);
1442 break; 1449 break;
1443 case RFLUSH: 1450 case RFLUSH:
1444 rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", CmdBlkP); 1451 rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n",
1452 CmdBlkP);
1445 CmdBlkP->PostFuncP = RIORFlushEnable; 1453 CmdBlkP->PostFuncP = RIORFlushEnable;
1446 break; 1454 break;
1447 case SUSPEND: 1455 case SUSPEND:
1448 rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", CmdBlkP); 1456 rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n",
1457 CmdBlkP);
1449 break; 1458 break;
1450 1459
1451 case MGET: 1460 case MGET:
1452 rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", CmdBlkP); 1461 rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n",
1462 CmdBlkP);
1453 break; 1463 break;
1454 1464
1455 case MSET: 1465 case MSET:
1456 case MBIC: 1466 case MBIC:
1457 case MBIS: 1467 case MBIS:
1458 CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; 1468 CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
1459 rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk %p\n", CmdBlkP); 1469 rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command "
1470 "blk %p\n", CmdBlkP);
1460 break; 1471 break;
1461 1472
1462 case WFLUSH: 1473 case WFLUSH:
@@ -1465,12 +1476,14 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
1465 ** allowed then we should not bother sending any more to the 1476 ** allowed then we should not bother sending any more to the
1466 ** RTA. 1477 ** RTA.
1467 */ 1478 */
1468 if ((int) ((char) PortP->WflushFlag) == (int) -1) { 1479 if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) {
1469 rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!"); 1480 rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, "
1481 "WflushFlag about to wrap!");
1470 RIOFreeCmdBlk(CmdBlkP); 1482 RIOFreeCmdBlk(CmdBlkP);
1471 return (RIO_FAIL); 1483 return (RIO_FAIL);
1472 } else { 1484 } else {
1473 rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk %p\n", CmdBlkP); 1485 rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command "
1486 "blk %p\n", CmdBlkP);
1474 CmdBlkP->PostFuncP = RIOWFlushMark; 1487 CmdBlkP->PostFuncP = RIOWFlushMark;
1475 } 1488 }
1476 break; 1489 break;
diff --git a/drivers/char/rio/rioioctl.h b/drivers/char/rio/rioioctl.h
index 14b83fae75c8..e8af5b30519e 100644
--- a/drivers/char/rio/rioioctl.h
+++ b/drivers/char/rio/rioioctl.h
@@ -33,10 +33,6 @@
33#ifndef __rioioctl_h__ 33#ifndef __rioioctl_h__
34#define __rioioctl_h__ 34#define __rioioctl_h__
35 35
36#ifdef SCCS_LABELS
37static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2";
38#endif
39
40/* 36/*
41** RIO device driver - user ioctls and associated structures. 37** RIO device driver - user ioctls and associated structures.
42*/ 38*/
@@ -44,55 +40,13 @@ static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2";
44struct portStats { 40struct portStats {
45 int port; 41 int port;
46 int gather; 42 int gather;
47 ulong txchars; 43 unsigned long txchars;
48 ulong rxchars; 44 unsigned long rxchars;
49 ulong opens; 45 unsigned long opens;
50 ulong closes; 46 unsigned long closes;
51 ulong ioctls; 47 unsigned long ioctls;
52}; 48};
53 49
54
55#define rIOC ('r'<<8)
56#define TCRIOSTATE (rIOC | 1)
57#define TCRIOXPON (rIOC | 2)
58#define TCRIOXPOFF (rIOC | 3)
59#define TCRIOXPCPS (rIOC | 4)
60#define TCRIOXPRINT (rIOC | 5)
61#define TCRIOIXANYON (rIOC | 6)
62#define TCRIOIXANYOFF (rIOC | 7)
63#define TCRIOIXONON (rIOC | 8)
64#define TCRIOIXONOFF (rIOC | 9)
65#define TCRIOMBIS (rIOC | 10)
66#define TCRIOMBIC (rIOC | 11)
67#define TCRIOTRIAD (rIOC | 12)
68#define TCRIOTSTATE (rIOC | 13)
69
70/*
71** 15.10.1998 ARG - ESIL 0761 part fix
72** Add RIO ioctls for manipulating RTS and CTS flow control, (as LynxOS
73** appears to not support hardware flow control).
74*/
75#define TCRIOCTSFLOWEN (rIOC | 14) /* enable CTS flow control */
76#define TCRIOCTSFLOWDIS (rIOC | 15) /* disable CTS flow control */
77#define TCRIORTSFLOWEN (rIOC | 16) /* enable RTS flow control */
78#define TCRIORTSFLOWDIS (rIOC | 17) /* disable RTS flow control */
79
80/*
81** 09.12.1998 ARG - ESIL 0776 part fix
82** Definition for 'RIOC' also appears in daemon.h, so we'd better do a
83** #ifndef here first.
84** 'RIO_QUICK_CHECK' also #define'd here as this ioctl is now
85** allowed to be used by customers.
86**
87** 05.02.1999 ARG -
88** This is what I've decied to do with ioctls etc., which are intended to be
89** invoked from users applications :
90** Anything that needs to be defined here will be removed from daemon.h, that
91** way it won't end up having to be defined/maintained in two places. The only
92** consequence of this is that this file should now be #include'd by daemon.h
93**
94** 'stats' ioctls now #define'd here as they are to be used by customers.
95*/
96#define RIOC ('R'<<8)|('i'<<16)|('o'<<24) 50#define RIOC ('R'<<8)|('i'<<16)|('o'<<24)
97 51
98#define RIO_QUICK_CHECK (RIOC | 105) 52#define RIO_QUICK_CHECK (RIOC | 105)
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 1efde3b27619..fe00c7dfb649 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -22,7 +22,7 @@ config TCG_TPM
22 22
23config TCG_TIS 23config TCG_TIS
24 tristate "TPM Interface Specification 1.2 Interface" 24 tristate "TPM Interface Specification 1.2 Interface"
25 depends on TCG_TPM 25 depends on TCG_TPM && PNPACPI
26 ---help--- 26 ---help---
27 If you have a TPM security chip that is compliant with the 27 If you have a TPM security chip that is compliant with the
28 TCG TIS 1.2 TPM specification say Yes and it will be accessible 28 TCG TIS 1.2 TPM specification say Yes and it will be accessible
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 54a4c804e25f..050ced247f68 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -140,7 +140,7 @@ extern int tpm_pm_resume(struct device *);
140extern struct dentry ** tpm_bios_log_setup(char *); 140extern struct dentry ** tpm_bios_log_setup(char *);
141extern void tpm_bios_log_teardown(struct dentry **); 141extern void tpm_bios_log_teardown(struct dentry **);
142#else 142#else
143static inline struct dentry* tpm_bios_log_setup(char *name) 143static inline struct dentry ** tpm_bios_log_setup(char *name)
144{ 144{
145 return NULL; 145 return NULL;
146} 146}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index b9cae9a238bb..f621168f38ae 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -55,7 +55,7 @@ enum tis_int_flags {
55}; 55};
56 56
57enum tis_defaults { 57enum tis_defaults {
58 TIS_MEM_BASE = 0xFED4000, 58 TIS_MEM_BASE = 0xFED40000,
59 TIS_MEM_LEN = 0x5000, 59 TIS_MEM_LEN = 0x5000,
60 TIS_SHORT_TIMEOUT = 750, /* ms */ 60 TIS_SHORT_TIMEOUT = 750, /* ms */
61 TIS_LONG_TIMEOUT = 2000, /* 2 sec */ 61 TIS_LONG_TIMEOUT = 2000, /* 2 sec */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 956d121cb161..3e6ffcaa5af4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -74,6 +74,8 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
74static DEFINE_MUTEX (dbs_mutex); 74static DEFINE_MUTEX (dbs_mutex);
75static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); 75static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
76 76
77static struct workqueue_struct *dbs_workq;
78
77struct dbs_tuners { 79struct dbs_tuners {
78 unsigned int sampling_rate; 80 unsigned int sampling_rate;
79 unsigned int sampling_down_factor; 81 unsigned int sampling_down_factor;
@@ -364,23 +366,29 @@ static void do_dbs_timer(void *data)
364 mutex_lock(&dbs_mutex); 366 mutex_lock(&dbs_mutex);
365 for_each_online_cpu(i) 367 for_each_online_cpu(i)
366 dbs_check_cpu(i); 368 dbs_check_cpu(i);
367 schedule_delayed_work(&dbs_work, 369 queue_delayed_work(dbs_workq, &dbs_work,
368 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 370 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
369 mutex_unlock(&dbs_mutex); 371 mutex_unlock(&dbs_mutex);
370} 372}
371 373
372static inline void dbs_timer_init(void) 374static inline void dbs_timer_init(void)
373{ 375{
374 INIT_WORK(&dbs_work, do_dbs_timer, NULL); 376 INIT_WORK(&dbs_work, do_dbs_timer, NULL);
375 schedule_delayed_work(&dbs_work, 377 if (!dbs_workq)
376 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 378 dbs_workq = create_singlethread_workqueue("ondemand");
379 if (!dbs_workq) {
380 printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
381 return;
382 }
383 queue_delayed_work(dbs_workq, &dbs_work,
384 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
377 return; 385 return;
378} 386}
379 387
380static inline void dbs_timer_exit(void) 388static inline void dbs_timer_exit(void)
381{ 389{
382 cancel_delayed_work(&dbs_work); 390 if (dbs_workq)
383 return; 391 cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
384} 392}
385 393
386static int cpufreq_governor_dbs(struct cpufreq_policy *policy, 394static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -489,8 +497,12 @@ static int __init cpufreq_gov_dbs_init(void)
489 497
490static void __exit cpufreq_gov_dbs_exit(void) 498static void __exit cpufreq_gov_dbs_exit(void)
491{ 499{
492 /* Make sure that the scheduled work is indeed not running */ 500 /* Make sure that the scheduled work is indeed not running.
493 flush_scheduled_work(); 501 Assumes the timer has been cancelled first. */
502 if (dbs_workq) {
503 flush_workqueue(dbs_workq);
504 destroy_workqueue(dbs_workq);
505 }
494 506
495 cpufreq_unregister_governor(&cpufreq_gov_dbs); 507 cpufreq_unregister_governor(&cpufreq_gov_dbs);
496} 508}
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 8bd305e47f0d..a140e4536a4e 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -133,6 +133,9 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status)
133 133
134 outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); 134 outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1);
135 outb(ACBST_STASTR | ACBST_NEGACK, ACBST); 135 outb(ACBST_STASTR | ACBST_NEGACK, ACBST);
136
137 /* Reset the status register */
138 outb(0, ACBST);
136 return; 139 return;
137 } 140 }
138 141
@@ -228,6 +231,10 @@ static void scx200_acb_poll(struct scx200_acb_iface *iface)
228 timeout = jiffies + POLL_TIMEOUT; 231 timeout = jiffies + POLL_TIMEOUT;
229 while (time_before(jiffies, timeout)) { 232 while (time_before(jiffies, timeout)) {
230 status = inb(ACBST); 233 status = inb(ACBST);
234
235 /* Reset the status register to avoid the hang */
236 outb(0, ACBST);
237
231 if ((status & (ACBST_SDAST|ACBST_BER|ACBST_NEGACK)) != 0) { 238 if ((status & (ACBST_SDAST|ACBST_BER|ACBST_NEGACK)) != 0) {
232 scx200_acb_machine(iface, status); 239 scx200_acb_machine(iface, status);
233 return; 240 return;
@@ -415,7 +422,6 @@ static int __init scx200_acb_create(const char *text, int base, int index)
415 struct scx200_acb_iface *iface; 422 struct scx200_acb_iface *iface;
416 struct i2c_adapter *adapter; 423 struct i2c_adapter *adapter;
417 int rc; 424 int rc;
418 char description[64];
419 425
420 iface = kzalloc(sizeof(*iface), GFP_KERNEL); 426 iface = kzalloc(sizeof(*iface), GFP_KERNEL);
421 if (!iface) { 427 if (!iface) {
@@ -434,10 +440,7 @@ static int __init scx200_acb_create(const char *text, int base, int index)
434 440
435 mutex_init(&iface->mutex); 441 mutex_init(&iface->mutex);
436 442
437 snprintf(description, sizeof(description), "%s ACCESS.bus [%s]", 443 if (!request_region(base, 8, adapter->name)) {
438 text, adapter->name);
439
440 if (request_region(base, 8, description) == 0) {
441 printk(KERN_ERR NAME ": can't allocate io 0x%x-0x%x\n", 444 printk(KERN_ERR NAME ": can't allocate io 0x%x-0x%x\n",
442 base, base + 8-1); 445 base, base + 8-1);
443 rc = -EBUSY; 446 rc = -EBUSY;
@@ -524,6 +527,9 @@ static int __init scx200_acb_init(void)
524 } else if (pci_dev_present(divil_pci)) 527 } else if (pci_dev_present(divil_pci))
525 rc = scx200_add_cs553x(); 528 rc = scx200_add_cs553x();
526 529
530 /* If at least one bus was created, init must succeed */
531 if (scx200_acb_list)
532 return 0;
527 return rc; 533 return rc;
528} 534}
529 535
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 4961f1e764a7..602797a44208 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -392,6 +392,7 @@ static struct pcmcia_device_id ide_ids[] = {
392 PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), 392 PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
393 PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), 393 PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
394 PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), 394 PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
395 PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
395 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), 396 PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
396 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), 397 PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
397 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), 398 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 19222878aae9..11f13778f139 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
553 * register content. 553 * register content.
554 * To actually enable physical responses is the job of our interrupt 554 * To actually enable physical responses is the job of our interrupt
555 * handler which programs the physical request filter. */ 555 * handler which programs the physical request filter. */
556 reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000); 556 reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);
557 557
558 DBGMSG("physUpperBoundOffset=%08x", 558 DBGMSG("physUpperBoundOffset=%08x",
559 reg_read(ohci, OHCI1394_PhyUpperBound)); 559 reg_read(ohci, OHCI1394_PhyUpperBound));
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f4206604db03..8a23fb54c693 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -42,6 +42,7 @@
42#include <linux/kernel.h> 42#include <linux/kernel.h>
43#include <linux/list.h> 43#include <linux/list.h>
44#include <linux/string.h> 44#include <linux/string.h>
45#include <linux/stringify.h>
45#include <linux/slab.h> 46#include <linux/slab.h>
46#include <linux/interrupt.h> 47#include <linux/interrupt.h>
47#include <linux/fs.h> 48#include <linux/fs.h>
@@ -117,7 +118,8 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default
117 */ 118 */
118static int max_sectors = SBP2_MAX_SECTORS; 119static int max_sectors = SBP2_MAX_SECTORS;
119module_param(max_sectors, int, 0444); 120module_param(max_sectors, int, 0444);
120MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)"); 121MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = "
122 __stringify(SBP2_MAX_SECTORS) ")");
121 123
122/* 124/*
123 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should 125 * Exclusive login to sbp2 device? In most cases, the sbp2 driver should
@@ -135,18 +137,45 @@ module_param(exclusive_login, int, 0644);
135MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"); 137MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)");
136 138
137/* 139/*
138 * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on 140 * If any of the following workarounds is required for your device to work,
139 * if your sbp2 device is not properly handling the SCSI inquiry command. 141 * please submit the kernel messages logged by sbp2 to the linux1394-devel
140 * This hack makes the inquiry look more like a typical MS Windows inquiry 142 * mailing list.
141 * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
142 * 143 *
143 * If force_inquiry_hack=1 is required for your device to work, 144 * - 128kB max transfer
144 * please submit the logged sbp2_firmware_revision value of this device to 145 * Limit transfer size. Necessary for some old bridges.
145 * the linux1394-devel mailing list. 146 *
147 * - 36 byte inquiry
148 * When scsi_mod probes the device, let the inquiry command look like that
149 * from MS Windows.
150 *
151 * - skip mode page 8
152 * Suppress sending of mode_sense for mode page 8 if the device pretends to
153 * support the SCSI Primary Block commands instead of Reduced Block Commands.
154 *
155 * - fix capacity
156 * Tell sd_mod to correct the last sector number reported by read_capacity.
157 * Avoids access beyond actual disk limits on devices with an off-by-one bug.
158 * Don't use this with devices which don't have this bug.
159 *
160 * - override internal blacklist
161 * Instead of adding to the built-in blacklist, use only the workarounds
162 * specified in the module load parameter.
163 * Useful if a blacklist entry interfered with a non-broken device.
146 */ 164 */
165static int sbp2_default_workarounds;
166module_param_named(workarounds, sbp2_default_workarounds, int, 0644);
167MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
168 ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
169 ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
170 ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
171 ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
172 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
173 ", or a combination)");
174
175/* legacy parameter */
147static int force_inquiry_hack; 176static int force_inquiry_hack;
148module_param(force_inquiry_hack, int, 0644); 177module_param(force_inquiry_hack, int, 0644);
149MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)"); 178MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'");
150 179
151/* 180/*
152 * Export information about protocols/devices supported by this driver. 181 * Export information about protocols/devices supported by this driver.
@@ -266,14 +295,55 @@ static struct hpsb_protocol_driver sbp2_driver = {
266}; 295};
267 296
268/* 297/*
269 * List of device firmwares that require the inquiry hack. 298 * List of devices with known bugs.
270 * Yields a few false positives but did not break other devices so far. 299 *
300 * The firmware_revision field, masked with 0xffff00, is the best indicator
301 * for the type of bridge chip of a device. It yields a few false positives
302 * but this did not break correctly behaving devices so far.
271 */ 303 */
272static u32 sbp2_broken_inquiry_list[] = { 304static const struct {
273 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */ 305 u32 firmware_revision;
274 /* DViCO Momobay CX-1 */ 306 u32 model_id;
275 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */ 307 unsigned workarounds;
276 /* QPS Fire DVDBurner */ 308} sbp2_workarounds_table[] = {
309 /* TSB42AA9 */ {
310 .firmware_revision = 0x002800,
311 .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
312 SBP2_WORKAROUND_MODE_SENSE_8,
313 },
314 /* Initio bridges, actually only needed for some older ones */ {
315 .firmware_revision = 0x000200,
316 .workarounds = SBP2_WORKAROUND_INQUIRY_36,
317 },
318 /* Symbios bridge */ {
319 .firmware_revision = 0xa0b800,
320 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
321 },
322 /*
323 * Note about the following Apple iPod blacklist entries:
324 *
325 * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
326 * matching logic treats 0 as a wildcard, we cannot match this ID
327 * without rewriting the matching routine. Fortunately these iPods
328 * do not feature the read_capacity bug according to one report.
329 * Read_capacity behaviour as well as model_id could change due to
330 * Apple-supplied firmware updates though.
331 */
332 /* iPod 4th generation */ {
333 .firmware_revision = 0x0a2700,
334 .model_id = 0x000021,
335 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
336 },
337 /* iPod mini */ {
338 .firmware_revision = 0x0a2700,
339 .model_id = 0x000023,
340 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
341 },
342 /* iPod Photo */ {
343 .firmware_revision = 0x0a2700,
344 .model_id = 0x00007e,
345 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
346 }
277}; 347};
278 348
279/************************************** 349/**************************************
@@ -765,11 +835,16 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
765 835
766 /* Register the status FIFO address range. We could use the same FIFO 836 /* Register the status FIFO address range. We could use the same FIFO
767 * for targets at different nodes. However we need different FIFOs per 837 * for targets at different nodes. However we need different FIFOs per
768 * target in order to support multi-unit devices. */ 838 * target in order to support multi-unit devices.
839 * The FIFO is located out of the local host controller's physical range
840 * but, if possible, within the posted write area. Status writes will
841 * then be performed as unified transactions. This slightly reduces
842 * bandwidth usage, and some Prolific based devices seem to require it.
843 */
769 scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( 844 scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
770 &sbp2_highlevel, ud->ne->host, &sbp2_ops, 845 &sbp2_highlevel, ud->ne->host, &sbp2_ops,
771 sizeof(struct sbp2_status_block), sizeof(quadlet_t), 846 sizeof(struct sbp2_status_block), sizeof(quadlet_t),
772 ~0ULL, ~0ULL); 847 0x010000000000ULL, CSR1212_ALL_SPACE_END);
773 if (!scsi_id->status_fifo_addr) { 848 if (!scsi_id->status_fifo_addr) {
774 SBP2_ERR("failed to allocate status FIFO address range"); 849 SBP2_ERR("failed to allocate status FIFO address range");
775 goto failed_alloc; 850 goto failed_alloc;
@@ -1450,7 +1525,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1450 struct csr1212_dentry *dentry; 1525 struct csr1212_dentry *dentry;
1451 u64 management_agent_addr; 1526 u64 management_agent_addr;
1452 u32 command_set_spec_id, command_set, unit_characteristics, 1527 u32 command_set_spec_id, command_set, unit_characteristics,
1453 firmware_revision, workarounds; 1528 firmware_revision;
1529 unsigned workarounds;
1454 int i; 1530 int i;
1455 1531
1456 SBP2_DEBUG_ENTER(); 1532 SBP2_DEBUG_ENTER();
@@ -1506,12 +1582,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1506 case SBP2_FIRMWARE_REVISION_KEY: 1582 case SBP2_FIRMWARE_REVISION_KEY:
1507 /* Firmware revision */ 1583 /* Firmware revision */
1508 firmware_revision = kv->value.immediate; 1584 firmware_revision = kv->value.immediate;
1509 if (force_inquiry_hack) 1585 SBP2_DEBUG("sbp2_firmware_revision = %x",
1510 SBP2_INFO("sbp2_firmware_revision = %x", 1586 (unsigned int)firmware_revision);
1511 (unsigned int)firmware_revision);
1512 else
1513 SBP2_DEBUG("sbp2_firmware_revision = %x",
1514 (unsigned int)firmware_revision);
1515 break; 1587 break;
1516 1588
1517 default: 1589 default:
@@ -1519,41 +1591,44 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1519 } 1591 }
1520 } 1592 }
1521 1593
1522 /* This is the start of our broken device checking. We try to hack 1594 workarounds = sbp2_default_workarounds;
1523 * around oddities and known defects. */ 1595 if (force_inquiry_hack) {
1524 workarounds = 0x0; 1596 SBP2_WARN("force_inquiry_hack is deprecated. "
1597 "Use parameter 'workarounds' instead.");
1598 workarounds |= SBP2_WORKAROUND_INQUIRY_36;
1599 }
1525 1600
1526 /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a 1601 if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
1527 * bridge with 128KB max transfer size limitation. For sanity, we 1602 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
1528 * only voice this when the current max_sectors setting 1603 if (sbp2_workarounds_table[i].firmware_revision &&
1529 * exceeds the 128k limit. By default, that is not the case. 1604 sbp2_workarounds_table[i].firmware_revision !=
1530 * 1605 (firmware_revision & 0xffff00))
1531 * It would be really nice if we could detect this before the scsi 1606 continue;
1532 * host gets initialized. That way we can down-force the 1607 if (sbp2_workarounds_table[i].model_id &&
1533 * max_sectors to account for it. That is not currently 1608 sbp2_workarounds_table[i].model_id != ud->model_id)
1534 * possible. */ 1609 continue;
1535 if ((firmware_revision & 0xffff00) == 1610 workarounds |= sbp2_workarounds_table[i].workarounds;
1536 SBP2_128KB_BROKEN_FIRMWARE && 1611 break;
1537 (max_sectors * 512) > (128*1024)) {
1538 SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.",
1539 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
1540 SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!",
1541 max_sectors);
1542 workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER;
1543 }
1544
1545 /* Check for a blacklisted set of devices that require us to force
1546 * a 36 byte host inquiry. This can be overriden as a module param
1547 * (to force all hosts). */
1548 for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
1549 if ((firmware_revision & 0xffff00) ==
1550 sbp2_broken_inquiry_list[i]) {
1551 SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
1552 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid));
1553 workarounds |= SBP2_BREAKAGE_INQUIRY_HACK;
1554 break; /* No need to continue. */
1555 } 1612 }
1556 } 1613
1614 if (workarounds)
1615 SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x "
1616 "(firmware_revision 0x%06x, vendor_id 0x%06x,"
1617 " model_id 0x%06x)",
1618 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1619 workarounds, firmware_revision,
1620 ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
1621 ud->model_id);
1622
1623 /* We would need one SCSI host template for each target to adjust
1624 * max_sectors on the fly, therefore warn only. */
1625 if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
1626 (max_sectors * 512) > (128 * 1024))
1627 SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB "
1628 "max transfer size. WARNING: Current max_sectors "
1629 "setting is larger than 128KB (%d sectors)",
1630 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1631 max_sectors);
1557 1632
1558 /* If this is a logical unit directory entry, process the parent 1633 /* If this is a logical unit directory entry, process the parent
1559 * to get the values. */ 1634 * to get the values. */
@@ -2447,19 +2522,25 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
2447 2522
2448 scsi_id->sdev = sdev; 2523 scsi_id->sdev = sdev;
2449 2524
2450 if (force_inquiry_hack || 2525 if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36)
2451 scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
2452 sdev->inquiry_len = 36; 2526 sdev->inquiry_len = 36;
2453 sdev->skip_ms_page_8 = 1;
2454 }
2455 return 0; 2527 return 0;
2456} 2528}
2457 2529
2458static int sbp2scsi_slave_configure(struct scsi_device *sdev) 2530static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2459{ 2531{
2532 struct scsi_id_instance_data *scsi_id =
2533 (struct scsi_id_instance_data *)sdev->host->hostdata[0];
2534
2460 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2535 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2461 sdev->use_10_for_rw = 1; 2536 sdev->use_10_for_rw = 1;
2462 sdev->use_10_for_ms = 1; 2537 sdev->use_10_for_ms = 1;
2538
2539 if (sdev->type == TYPE_DISK &&
2540 scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
2541 sdev->skip_ms_page_8 = 1;
2542 if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
2543 sdev->fix_capacity = 1;
2463 return 0; 2544 return 0;
2464} 2545}
2465 2546
@@ -2603,7 +2684,9 @@ static int sbp2_module_init(void)
2603 scsi_driver_template.cmd_per_lun = 1; 2684 scsi_driver_template.cmd_per_lun = 1;
2604 } 2685 }
2605 2686
2606 /* Set max sectors (module load option). Default is 255 sectors. */ 2687 if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS &&
2688 (max_sectors * 512) > (128 * 1024))
2689 max_sectors = 128 * 1024 / 512;
2607 scsi_driver_template.max_sectors = max_sectors; 2690 scsi_driver_template.max_sectors = max_sectors;
2608 2691
2609 /* Register our high level driver with 1394 stack */ 2692 /* Register our high level driver with 1394 stack */
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index e2d357a9ea3a..f4ccc9d0fba4 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -227,11 +227,6 @@ struct sbp2_status_block {
227#define SBP2_SW_VERSION_ENTRY 0x00010483 227#define SBP2_SW_VERSION_ENTRY 0x00010483
228 228
229/* 229/*
230 * Other misc defines
231 */
232#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
233
234/*
235 * SCSI specific stuff 230 * SCSI specific stuff
236 */ 231 */
237 232
@@ -239,6 +234,13 @@ struct sbp2_status_block {
239#define SBP2_MAX_SECTORS 255 /* Max sectors supported */ 234#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
240#define SBP2_MAX_CMDS 8 /* This should be safe */ 235#define SBP2_MAX_CMDS 8 /* This should be safe */
241 236
237/* Flags for detected oddities and brokeness */
238#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
239#define SBP2_WORKAROUND_INQUIRY_36 0x2
240#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
241#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
242#define SBP2_WORKAROUND_OVERRIDE 0x100
243
242/* This is the two dma types we use for cmd_dma below */ 244/* This is the two dma types we use for cmd_dma below */
243enum cmd_dma_types { 245enum cmd_dma_types {
244 CMD_DMA_NONE, 246 CMD_DMA_NONE,
@@ -268,10 +270,6 @@ struct sbp2_command_info {
268 270
269}; 271};
270 272
271/* A list of flags for detected oddities and brokeness. */
272#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
273#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
274
275struct sbp2scsi_host_info; 273struct sbp2scsi_host_info;
276 274
277/* 275/*
@@ -345,7 +343,7 @@ struct scsi_id_instance_data {
345 struct Scsi_Host *scsi_host; 343 struct Scsi_Host *scsi_host;
346 344
347 /* Device specific workarounds/brokeness */ 345 /* Device specific workarounds/brokeness */
348 u32 workarounds; 346 unsigned workarounds;
349}; 347};
350 348
351/* Sbp2 host data structure (one per IEEE1394 host) */ 349/* Sbp2 host data structure (one per IEEE1394 host) */
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 7cfedb8d9bcd..86fee43502cd 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,8 @@
34 * 34 *
35 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $ 35 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
36 */ 36 */
37
38#include <linux/completion.h>
37#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
38#include <linux/err.h> 40#include <linux/err.h>
39#include <linux/idr.h> 41#include <linux/idr.h>
@@ -122,7 +124,7 @@ struct cm_id_private {
122 struct rb_node service_node; 124 struct rb_node service_node;
123 struct rb_node sidr_id_node; 125 struct rb_node sidr_id_node;
124 spinlock_t lock; /* Do not acquire inside cm.lock */ 126 spinlock_t lock; /* Do not acquire inside cm.lock */
125 wait_queue_head_t wait; 127 struct completion comp;
126 atomic_t refcount; 128 atomic_t refcount;
127 129
128 struct ib_mad_send_buf *msg; 130 struct ib_mad_send_buf *msg;
@@ -159,7 +161,7 @@ static void cm_work_handler(void *data);
159static inline void cm_deref_id(struct cm_id_private *cm_id_priv) 161static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
160{ 162{
161 if (atomic_dec_and_test(&cm_id_priv->refcount)) 163 if (atomic_dec_and_test(&cm_id_priv->refcount))
162 wake_up(&cm_id_priv->wait); 164 complete(&cm_id_priv->comp);
163} 165}
164 166
165static int cm_alloc_msg(struct cm_id_private *cm_id_priv, 167static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
@@ -559,7 +561,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
559 goto error; 561 goto error;
560 562
561 spin_lock_init(&cm_id_priv->lock); 563 spin_lock_init(&cm_id_priv->lock);
562 init_waitqueue_head(&cm_id_priv->wait); 564 init_completion(&cm_id_priv->comp);
563 INIT_LIST_HEAD(&cm_id_priv->work_list); 565 INIT_LIST_HEAD(&cm_id_priv->work_list);
564 atomic_set(&cm_id_priv->work_count, -1); 566 atomic_set(&cm_id_priv->work_count, -1);
565 atomic_set(&cm_id_priv->refcount, 1); 567 atomic_set(&cm_id_priv->refcount, 1);
@@ -724,8 +726,8 @@ retest:
724 } 726 }
725 727
726 cm_free_id(cm_id->local_id); 728 cm_free_id(cm_id->local_id);
727 atomic_dec(&cm_id_priv->refcount); 729 cm_deref_id(cm_id_priv);
728 wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount)); 730 wait_for_completion(&cm_id_priv->comp);
729 while ((work = cm_dequeue_work(cm_id_priv)) != NULL) 731 while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
730 cm_free_work(work); 732 cm_free_work(work);
731 if (cm_id_priv->private_data && cm_id_priv->private_data_len) 733 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 469b6923a2e2..5ad41a64314c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -352,7 +352,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
352 INIT_WORK(&mad_agent_priv->local_work, local_completions, 352 INIT_WORK(&mad_agent_priv->local_work, local_completions,
353 mad_agent_priv); 353 mad_agent_priv);
354 atomic_set(&mad_agent_priv->refcount, 1); 354 atomic_set(&mad_agent_priv->refcount, 1);
355 init_waitqueue_head(&mad_agent_priv->wait); 355 init_completion(&mad_agent_priv->comp);
356 356
357 return &mad_agent_priv->agent; 357 return &mad_agent_priv->agent;
358 358
@@ -467,7 +467,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
467 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; 467 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
468 mad_snoop_priv->agent.port_num = port_num; 468 mad_snoop_priv->agent.port_num = port_num;
469 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; 469 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
470 init_waitqueue_head(&mad_snoop_priv->wait); 470 init_completion(&mad_snoop_priv->comp);
471 mad_snoop_priv->snoop_index = register_snoop_agent( 471 mad_snoop_priv->snoop_index = register_snoop_agent(
472 &port_priv->qp_info[qpn], 472 &port_priv->qp_info[qpn],
473 mad_snoop_priv); 473 mad_snoop_priv);
@@ -486,6 +486,18 @@ error1:
486} 486}
487EXPORT_SYMBOL(ib_register_mad_snoop); 487EXPORT_SYMBOL(ib_register_mad_snoop);
488 488
489static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
490{
491 if (atomic_dec_and_test(&mad_agent_priv->refcount))
492 complete(&mad_agent_priv->comp);
493}
494
495static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
496{
497 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
498 complete(&mad_snoop_priv->comp);
499}
500
489static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) 501static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
490{ 502{
491 struct ib_mad_port_private *port_priv; 503 struct ib_mad_port_private *port_priv;
@@ -509,9 +521,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
509 flush_workqueue(port_priv->wq); 521 flush_workqueue(port_priv->wq);
510 ib_cancel_rmpp_recvs(mad_agent_priv); 522 ib_cancel_rmpp_recvs(mad_agent_priv);
511 523
512 atomic_dec(&mad_agent_priv->refcount); 524 deref_mad_agent(mad_agent_priv);
513 wait_event(mad_agent_priv->wait, 525 wait_for_completion(&mad_agent_priv->comp);
514 !atomic_read(&mad_agent_priv->refcount));
515 526
516 kfree(mad_agent_priv->reg_req); 527 kfree(mad_agent_priv->reg_req);
517 ib_dereg_mr(mad_agent_priv->agent.mr); 528 ib_dereg_mr(mad_agent_priv->agent.mr);
@@ -529,9 +540,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
529 atomic_dec(&qp_info->snoop_count); 540 atomic_dec(&qp_info->snoop_count);
530 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 541 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
531 542
532 atomic_dec(&mad_snoop_priv->refcount); 543 deref_snoop_agent(mad_snoop_priv);
533 wait_event(mad_snoop_priv->wait, 544 wait_for_completion(&mad_snoop_priv->comp);
534 !atomic_read(&mad_snoop_priv->refcount));
535 545
536 kfree(mad_snoop_priv); 546 kfree(mad_snoop_priv);
537} 547}
@@ -600,8 +610,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info,
600 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 610 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
601 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, 611 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
602 send_buf, mad_send_wc); 612 send_buf, mad_send_wc);
603 if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 613 deref_snoop_agent(mad_snoop_priv);
604 wake_up(&mad_snoop_priv->wait);
605 spin_lock_irqsave(&qp_info->snoop_lock, flags); 614 spin_lock_irqsave(&qp_info->snoop_lock, flags);
606 } 615 }
607 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 616 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -626,8 +635,7 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
626 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 635 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
627 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, 636 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
628 mad_recv_wc); 637 mad_recv_wc);
629 if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 638 deref_snoop_agent(mad_snoop_priv);
630 wake_up(&mad_snoop_priv->wait);
631 spin_lock_irqsave(&qp_info->snoop_lock, flags); 639 spin_lock_irqsave(&qp_info->snoop_lock, flags);
632 } 640 }
633 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 641 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -968,8 +976,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
968 976
969 free_send_rmpp_list(mad_send_wr); 977 free_send_rmpp_list(mad_send_wr);
970 kfree(send_buf->mad); 978 kfree(send_buf->mad);
971 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 979 deref_mad_agent(mad_agent_priv);
972 wake_up(&mad_agent_priv->wait);
973} 980}
974EXPORT_SYMBOL(ib_free_send_mad); 981EXPORT_SYMBOL(ib_free_send_mad);
975 982
@@ -1757,8 +1764,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1757 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, 1764 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1758 mad_recv_wc); 1765 mad_recv_wc);
1759 if (!mad_recv_wc) { 1766 if (!mad_recv_wc) {
1760 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1767 deref_mad_agent(mad_agent_priv);
1761 wake_up(&mad_agent_priv->wait);
1762 return; 1768 return;
1763 } 1769 }
1764 } 1770 }
@@ -1770,8 +1776,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1770 if (!mad_send_wr) { 1776 if (!mad_send_wr) {
1771 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1777 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1772 ib_free_recv_mad(mad_recv_wc); 1778 ib_free_recv_mad(mad_recv_wc);
1773 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1779 deref_mad_agent(mad_agent_priv);
1774 wake_up(&mad_agent_priv->wait);
1775 return; 1780 return;
1776 } 1781 }
1777 ib_mark_mad_done(mad_send_wr); 1782 ib_mark_mad_done(mad_send_wr);
@@ -1790,8 +1795,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1790 } else { 1795 } else {
1791 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, 1796 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1792 mad_recv_wc); 1797 mad_recv_wc);
1793 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1798 deref_mad_agent(mad_agent_priv);
1794 wake_up(&mad_agent_priv->wait);
1795 } 1799 }
1796} 1800}
1797 1801
@@ -2021,8 +2025,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2021 mad_send_wc); 2025 mad_send_wc);
2022 2026
2023 /* Release reference on agent taken when sending */ 2027 /* Release reference on agent taken when sending */
2024 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 2028 deref_mad_agent(mad_agent_priv);
2025 wake_up(&mad_agent_priv->wait);
2026 return; 2029 return;
2027done: 2030done:
2028 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2031 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
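
Note: the mad.c hunks above replace every open-coded "if (atomic_dec_and_test(&...->refcount)) wake_up(&...->wait)" pair with a call to deref_mad_agent() or deref_snoop_agent(). The helper bodies are not part of this excerpt; going by the struct completion fields introduced in mad_priv.h below and the analogous deref_rmpp_recv() added in mad_rmpp.c further down, a minimal sketch of the pattern (field and function names taken from the surrounding hunks) would be:

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	/* The last reference signals whoever is waiting in the teardown path. */
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

The teardown side then drops its own reference and sleeps until every outstanding user is gone, which is exactly the shape of the destroy_rmpp_recv() and ib_ucm_destroy_id() changes further down:

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);
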
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 6c9c133d71ef..b4fa28d3160f 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -37,6 +37,7 @@
37#ifndef __IB_MAD_PRIV_H__ 37#ifndef __IB_MAD_PRIV_H__
38#define __IB_MAD_PRIV_H__ 38#define __IB_MAD_PRIV_H__
39 39
40#include <linux/completion.h>
40#include <linux/pci.h> 41#include <linux/pci.h>
41#include <linux/kthread.h> 42#include <linux/kthread.h>
42#include <linux/workqueue.h> 43#include <linux/workqueue.h>
@@ -108,7 +109,7 @@ struct ib_mad_agent_private {
108 struct list_head rmpp_list; 109 struct list_head rmpp_list;
109 110
110 atomic_t refcount; 111 atomic_t refcount;
111 wait_queue_head_t wait; 112 struct completion comp;
112}; 113};
113 114
114struct ib_mad_snoop_private { 115struct ib_mad_snoop_private {
@@ -117,7 +118,7 @@ struct ib_mad_snoop_private {
117 int snoop_index; 118 int snoop_index;
118 int mad_snoop_flags; 119 int mad_snoop_flags;
119 atomic_t refcount; 120 atomic_t refcount;
120 wait_queue_head_t wait; 121 struct completion comp;
121}; 122};
122 123
123struct ib_mad_send_wr_private { 124struct ib_mad_send_wr_private {
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index dfd4e588ce03..d4704e054e30 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -49,7 +49,7 @@ struct mad_rmpp_recv {
49 struct list_head list; 49 struct list_head list;
50 struct work_struct timeout_work; 50 struct work_struct timeout_work;
51 struct work_struct cleanup_work; 51 struct work_struct cleanup_work;
52 wait_queue_head_t wait; 52 struct completion comp;
53 enum rmpp_state state; 53 enum rmpp_state state;
54 spinlock_t lock; 54 spinlock_t lock;
55 atomic_t refcount; 55 atomic_t refcount;
@@ -69,10 +69,16 @@ struct mad_rmpp_recv {
69 u8 method; 69 u8 method;
70}; 70};
71 71
72static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
73{
74 if (atomic_dec_and_test(&rmpp_recv->refcount))
75 complete(&rmpp_recv->comp);
76}
77
72static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) 78static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
73{ 79{
74 atomic_dec(&rmpp_recv->refcount); 80 deref_rmpp_recv(rmpp_recv);
75 wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount)); 81 wait_for_completion(&rmpp_recv->comp);
76 ib_destroy_ah(rmpp_recv->ah); 82 ib_destroy_ah(rmpp_recv->ah);
77 kfree(rmpp_recv); 83 kfree(rmpp_recv);
78} 84}
@@ -253,7 +259,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
253 goto error; 259 goto error;
254 260
255 rmpp_recv->agent = agent; 261 rmpp_recv->agent = agent;
256 init_waitqueue_head(&rmpp_recv->wait); 262 init_completion(&rmpp_recv->comp);
257 INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); 263 INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
258 INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); 264 INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
259 spin_lock_init(&rmpp_recv->lock); 265 spin_lock_init(&rmpp_recv->lock);
@@ -279,12 +285,6 @@ error: kfree(rmpp_recv);
279 return NULL; 285 return NULL;
280} 286}
281 287
282static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
283{
284 if (atomic_dec_and_test(&rmpp_recv->refcount))
285 wake_up(&rmpp_recv->wait);
286}
287
288static struct mad_rmpp_recv * 288static struct mad_rmpp_recv *
289find_rmpp_recv(struct ib_mad_agent_private *agent, 289find_rmpp_recv(struct ib_mad_agent_private *agent,
290 struct ib_mad_recv_wc *mad_recv_wc) 290 struct ib_mad_recv_wc *mad_recv_wc)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 15121cb5a1f6..21f9282c1b25 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
336 switch (width) { 336 switch (width) {
337 case 4: 337 case 4:
338 ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> 338 ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
339 (offset % 4)) & 0xf); 339 (4 - (offset % 8))) & 0xf);
340 break; 340 break;
341 case 8: 341 case 8:
342 ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); 342 ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
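
Note: the one-line sysfs.c fix above corrects which nibble a 4-bit PMA counter is read from. The counters live in out_mad->data[] starting at byte 40, and a 4-bit field whose bit offset is a multiple of 8 sits in the high nibble of its byte, so the shift must be 4 when offset % 8 == 0 and 0 when offset % 8 == 4; the old expression (offset % 4) is always 0 for nibble-aligned offsets and therefore always read the low nibble. A small illustrative helper (not part of the patch) with the corrected arithmetic:

static u8 pma_get_nibble(const u8 *data, unsigned int offset)
{
	/* offset is a bit offset into the counter block at data[40];
	 * fields are laid out most-significant-bit first within a byte. */
	return (data[40 + offset / 8] >> (4 - (offset % 8))) & 0xf;
}
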
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f6a05965a4e8..9164a09b6ccd 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -32,6 +32,8 @@
32 * 32 *
33 * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $ 33 * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
34 */ 34 */
35
36#include <linux/completion.h>
35#include <linux/init.h> 37#include <linux/init.h>
36#include <linux/fs.h> 38#include <linux/fs.h>
37#include <linux/module.h> 39#include <linux/module.h>
@@ -72,7 +74,7 @@ struct ib_ucm_file {
72 74
73struct ib_ucm_context { 75struct ib_ucm_context {
74 int id; 76 int id;
75 wait_queue_head_t wait; 77 struct completion comp;
76 atomic_t ref; 78 atomic_t ref;
77 int events_reported; 79 int events_reported;
78 80
@@ -138,7 +140,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
138static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) 140static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
139{ 141{
140 if (atomic_dec_and_test(&ctx->ref)) 142 if (atomic_dec_and_test(&ctx->ref))
141 wake_up(&ctx->wait); 143 complete(&ctx->comp);
142} 144}
143 145
144static inline int ib_ucm_new_cm_id(int event) 146static inline int ib_ucm_new_cm_id(int event)
@@ -178,7 +180,7 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
178 return NULL; 180 return NULL;
179 181
180 atomic_set(&ctx->ref, 1); 182 atomic_set(&ctx->ref, 1);
181 init_waitqueue_head(&ctx->wait); 183 init_completion(&ctx->comp);
182 ctx->file = file; 184 ctx->file = file;
183 INIT_LIST_HEAD(&ctx->events); 185 INIT_LIST_HEAD(&ctx->events);
184 186
@@ -586,8 +588,8 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
586 if (IS_ERR(ctx)) 588 if (IS_ERR(ctx))
587 return PTR_ERR(ctx); 589 return PTR_ERR(ctx);
588 590
589 atomic_dec(&ctx->ref); 591 ib_ucm_ctx_put(ctx);
590 wait_event(ctx->wait, !atomic_read(&ctx->ref)); 592 wait_for_completion(&ctx->comp);
591 593
592 /* No new events will be generated after destroying the cm_id. */ 594 /* No new events will be generated after destroying the cm_id. */
593 ib_destroy_cm_id(ctx->cm_id); 595 ib_destroy_cm_id(ctx->cm_id);
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 398add4d4cb1..3697edafd6d2 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -116,10 +116,9 @@ static int __devinit ipath_init_one(struct pci_dev *,
116#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10 116#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
117 117
118static const struct pci_device_id ipath_pci_tbl[] = { 118static const struct pci_device_id ipath_pci_tbl[] = {
119 {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, 119 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
120 PCI_DEVICE_ID_INFINIPATH_HT)}, 120 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
121 {PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, 121 { 0, }
122 PCI_DEVICE_ID_INFINIPATH_PE800)},
123}; 122};
124 123
125MODULE_DEVICE_TABLE(pci, ipath_pci_tbl); 124MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
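
Note: besides reflowing the entries onto single lines, the ipath hunk adds the "{ 0, }" sentinel that PCI ID tables require: pci_match_id() keeps walking entries until it finds an all-zero one, so an unterminated table lets the match loop run past the end of the array. The resulting table, restated without the two-column diff rendering:

static const struct pci_device_id ipath_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
	{ 0, }		/* terminator */
};
MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
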
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 312cf90731ea..205854e9c662 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
238 spin_lock(&dev->cq_table.lock); 238 spin_lock(&dev->cq_table.lock);
239 239
240 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); 240 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
241
242 if (cq) 241 if (cq)
243 atomic_inc(&cq->refcount); 242 ++cq->refcount;
243
244 spin_unlock(&dev->cq_table.lock); 244 spin_unlock(&dev->cq_table.lock);
245 245
246 if (!cq) { 246 if (!cq) {
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
254 if (cq->ibcq.event_handler) 254 if (cq->ibcq.event_handler)
255 cq->ibcq.event_handler(&event, cq->ibcq.cq_context); 255 cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
256 256
257 if (atomic_dec_and_test(&cq->refcount)) 257 spin_lock(&dev->cq_table.lock);
258 if (!--cq->refcount)
258 wake_up(&cq->wait); 259 wake_up(&cq->wait);
260 spin_unlock(&dev->cq_table.lock);
259} 261}
260 262
261static inline int is_recv_cqe(struct mthca_cqe *cqe) 263static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe)
267 return !(cqe->is_send & 0x80); 269 return !(cqe->is_send & 0x80);
268} 270}
269 271
270void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, 272void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
271 struct mthca_srq *srq) 273 struct mthca_srq *srq)
272{ 274{
273 struct mthca_cq *cq;
274 struct mthca_cqe *cqe; 275 struct mthca_cqe *cqe;
275 u32 prod_index; 276 u32 prod_index;
276 int nfreed = 0; 277 int nfreed = 0;
277 278
278 spin_lock_irq(&dev->cq_table.lock);
279 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
280 if (cq)
281 atomic_inc(&cq->refcount);
282 spin_unlock_irq(&dev->cq_table.lock);
283
284 if (!cq)
285 return;
286
287 spin_lock_irq(&cq->lock); 279 spin_lock_irq(&cq->lock);
288 280
289 /* 281 /*
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
301 293
302 if (0) 294 if (0)
303 mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", 295 mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
304 qpn, cqn, cq->cons_index, prod_index); 296 qpn, cq->cqn, cq->cons_index, prod_index);
305 297
306 /* 298 /*
307 * Now sweep backwards through the CQ, removing CQ entries 299 * Now sweep backwards through the CQ, removing CQ entries
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
325 } 317 }
326 318
327 spin_unlock_irq(&cq->lock); 319 spin_unlock_irq(&cq->lock);
328 if (atomic_dec_and_test(&cq->refcount))
329 wake_up(&cq->wait);
330} 320}
331 321
332void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) 322void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
821 } 811 }
822 812
823 spin_lock_init(&cq->lock); 813 spin_lock_init(&cq->lock);
824 atomic_set(&cq->refcount, 1); 814 cq->refcount = 1;
825 init_waitqueue_head(&cq->wait); 815 init_waitqueue_head(&cq->wait);
826 816
827 memset(cq_context, 0, sizeof *cq_context); 817 memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +886,17 @@ err_out:
896 return err; 886 return err;
897} 887}
898 888
889static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
890{
891 int c;
892
893 spin_lock_irq(&dev->cq_table.lock);
894 c = cq->refcount;
895 spin_unlock_irq(&dev->cq_table.lock);
896
897 return c;
898}
899
899void mthca_free_cq(struct mthca_dev *dev, 900void mthca_free_cq(struct mthca_dev *dev,
900 struct mthca_cq *cq) 901 struct mthca_cq *cq)
901{ 902{
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev,
929 spin_lock_irq(&dev->cq_table.lock); 930 spin_lock_irq(&dev->cq_table.lock);
930 mthca_array_clear(&dev->cq_table.cq, 931 mthca_array_clear(&dev->cq_table.cq,
931 cq->cqn & (dev->limits.num_cqs - 1)); 932 cq->cqn & (dev->limits.num_cqs - 1));
933 --cq->refcount;
932 spin_unlock_irq(&dev->cq_table.lock); 934 spin_unlock_irq(&dev->cq_table.lock);
933 935
934 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) 936 if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev,
936 else 938 else
937 synchronize_irq(dev->pdev->irq); 939 synchronize_irq(dev->pdev->irq);
938 940
939 atomic_dec(&cq->refcount); 941 wait_event(cq->wait, !get_cq_refcount(dev, cq));
940 wait_event(cq->wait, !atomic_read(&cq->refcount));
941 942
942 if (cq->is_kernel) { 943 if (cq->is_kernel) {
943 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); 944 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
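
Note: the mthca_cq.c hunks convert cq->refcount from an atomic_t to a plain int that is only touched under dev->cq_table.lock: mthca_cq_event() increments and decrements it with the lock held, mthca_free_cq() drops the table's reference in the same critical section that clears the array entry, and the new get_cq_refcount() helper samples the count under the lock for wait_event(). Pieced together from the hunks above, the destroy sequence now reads:

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	--cq->refcount;			/* drop the cq_table's reference */
	spin_unlock_irq(&dev->cq_table.lock);

	wait_event(cq->wait, !get_cq_refcount(dev, cq));

The same transformation is applied to QPs and SRQs in mthca_qp.c and mthca_srq.c below, matching the updated locking comment in mthca_provider.h.
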
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4c1dcb4c1822..f8160b8de090 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev,
496void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); 496void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
497void mthca_cq_event(struct mthca_dev *dev, u32 cqn, 497void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
498 enum ib_event_type event_type); 498 enum ib_event_type event_type);
499void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, 499void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
500 struct mthca_srq *srq); 500 struct mthca_srq *srq);
501void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); 501void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
502int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); 502int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 25e1c1db9a40..a486dec1707e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
761 761
762int __devinit mthca_init_mr_table(struct mthca_dev *dev) 762int __devinit mthca_init_mr_table(struct mthca_dev *dev)
763{ 763{
764 unsigned long addr;
764 int err, i; 765 int err, i;
765 766
766 err = mthca_alloc_init(&dev->mr_table.mpt_alloc, 767 err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
796 goto err_fmr_mpt; 797 goto err_fmr_mpt;
797 } 798 }
798 799
800 addr = pci_resource_start(dev->pdev, 4) +
801 ((pci_resource_len(dev->pdev, 4) - 1) &
802 dev->mr_table.mpt_base);
803
799 dev->mr_table.tavor_fmr.mpt_base = 804 dev->mr_table.tavor_fmr.mpt_base =
800 ioremap(dev->mr_table.mpt_base, 805 ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));
801 (1 << i) * sizeof (struct mthca_mpt_entry));
802 806
803 if (!dev->mr_table.tavor_fmr.mpt_base) { 807 if (!dev->mr_table.tavor_fmr.mpt_base) {
804 mthca_warn(dev, "MPT ioremap for FMR failed.\n"); 808 mthca_warn(dev, "MPT ioremap for FMR failed.\n");
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
806 goto err_fmr_mpt; 810 goto err_fmr_mpt;
807 } 811 }
808 812
813 addr = pci_resource_start(dev->pdev, 4) +
814 ((pci_resource_len(dev->pdev, 4) - 1) &
815 dev->mr_table.mtt_base);
816
809 dev->mr_table.tavor_fmr.mtt_base = 817 dev->mr_table.tavor_fmr.mtt_base =
810 ioremap(dev->mr_table.mtt_base, 818 ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE);
811 (1 << i) * MTHCA_MTT_SEG_SIZE);
812 if (!dev->mr_table.tavor_fmr.mtt_base) { 819 if (!dev->mr_table.tavor_fmr.mtt_base) {
813 mthca_warn(dev, "MTT ioremap for FMR failed.\n"); 820 mthca_warn(dev, "MTT ioremap for FMR failed.\n");
814 err = -ENOMEM; 821 err = -ENOMEM;
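
Note: the mthca_mr.c change stops passing dev->mr_table.mpt_base/mtt_base straight to ioremap() and instead treats them as offsets inside PCI BAR 4. BAR lengths are powers of two, so (pci_resource_len() - 1) works as a mask and the expression amounts to "BAR 4 start + (base modulo BAR 4 size)". The address computation, pulled out into an illustrative helper that is not in the patch:

static unsigned long mthca_fmr_bar4_addr(struct pci_dev *pdev,
					 unsigned long base)
{
	/* Mask base down to an offset within BAR 4 and add the BAR's
	 * start, producing a CPU-visible address fit for ioremap(). */
	return pci_resource_start(pdev, 4) +
		((pci_resource_len(pdev, 4) - 1) & base);
}
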
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 6676a786d690..179a8f610d0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -139,11 +139,12 @@ struct mthca_ah {
139 * a qp may be locked, with the send cq locked first. No other 139 * a qp may be locked, with the send cq locked first. No other
140 * nesting should be done. 140 * nesting should be done.
141 * 141 *
142 * Each struct mthca_cq/qp also has an atomic_t ref count. The 142 * Each struct mthca_cq/qp also has an ref count, protected by the
143 * pointer from the cq/qp_table to the struct counts as one reference. 143 * corresponding table lock. The pointer from the cq/qp_table to the
144 * This reference also is good for access through the consumer API, so 144 * struct counts as one reference. This reference also is good for
145 * modifying the CQ/QP etc doesn't need to take another reference. 145 * access through the consumer API, so modifying the CQ/QP etc doesn't
146 * Access because of a completion being polled does need a reference. 146 * need to take another reference. Access to a QP because of a
147 * completion being polled does not need a reference either.
147 * 148 *
148 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the 149 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
149 * destroy function to sleep on. 150 * destroy function to sleep on.
@@ -159,8 +160,9 @@ struct mthca_ah {
159 * - decrement ref count; if zero, wake up waiters 160 * - decrement ref count; if zero, wake up waiters
160 * 161 *
161 * To destroy a CQ/QP, we can do the following: 162 * To destroy a CQ/QP, we can do the following:
162 * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock 163 * - lock cq/qp_table
163 * - decrement ref count 164 * - remove pointer and decrement ref count
165 * - unlock cq/qp_table lock
164 * - wait_event until ref count is zero 166 * - wait_event until ref count is zero
165 * 167 *
166 * It is the consumer's responsibilty to make sure that no QP 168 * It is the consumer's responsibilty to make sure that no QP
@@ -197,7 +199,7 @@ struct mthca_cq_resize {
197struct mthca_cq { 199struct mthca_cq {
198 struct ib_cq ibcq; 200 struct ib_cq ibcq;
199 spinlock_t lock; 201 spinlock_t lock;
200 atomic_t refcount; 202 int refcount;
201 int cqn; 203 int cqn;
202 u32 cons_index; 204 u32 cons_index;
203 struct mthca_cq_buf buf; 205 struct mthca_cq_buf buf;
@@ -217,7 +219,7 @@ struct mthca_cq {
217struct mthca_srq { 219struct mthca_srq {
218 struct ib_srq ibsrq; 220 struct ib_srq ibsrq;
219 spinlock_t lock; 221 spinlock_t lock;
220 atomic_t refcount; 222 int refcount;
221 int srqn; 223 int srqn;
222 int max; 224 int max;
223 int max_gs; 225 int max_gs;
@@ -254,7 +256,7 @@ struct mthca_wq {
254 256
255struct mthca_qp { 257struct mthca_qp {
256 struct ib_qp ibqp; 258 struct ib_qp ibqp;
257 atomic_t refcount; 259 int refcount;
258 u32 qpn; 260 u32 qpn;
259 int is_direct; 261 int is_direct;
260 u8 port; /* for SQP and memfree use only */ 262 u8 port; /* for SQP and memfree use only */
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f37b0e367323..19765f6f8d58 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
240 spin_lock(&dev->qp_table.lock); 240 spin_lock(&dev->qp_table.lock);
241 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); 241 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
242 if (qp) 242 if (qp)
243 atomic_inc(&qp->refcount); 243 ++qp->refcount;
244 spin_unlock(&dev->qp_table.lock); 244 spin_unlock(&dev->qp_table.lock);
245 245
246 if (!qp) { 246 if (!qp) {
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
257 if (qp->ibqp.event_handler) 257 if (qp->ibqp.event_handler)
258 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); 258 qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
259 259
260 if (atomic_dec_and_test(&qp->refcount)) 260 spin_lock(&dev->qp_table.lock);
261 if (!--qp->refcount)
261 wake_up(&qp->wait); 262 wake_up(&qp->wait);
263 spin_unlock(&dev->qp_table.lock);
262} 264}
263 265
264static int to_mthca_state(enum ib_qp_state ib_state) 266static int to_mthca_state(enum ib_qp_state ib_state)
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
833 * entries and reinitialize the QP. 835 * entries and reinitialize the QP.
834 */ 836 */
835 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { 837 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
836 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, 838 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
837 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 839 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
838 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 840 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
839 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, 841 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
840 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 842 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
841 843
842 mthca_wq_init(&qp->sq); 844 mthca_wq_init(&qp->sq);
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1096 int ret; 1098 int ret;
1097 int i; 1099 int i;
1098 1100
1099 atomic_set(&qp->refcount, 1); 1101 qp->refcount = 1;
1100 init_waitqueue_head(&qp->wait); 1102 init_waitqueue_head(&qp->wait);
1101 qp->state = IB_QPS_RESET; 1103 qp->state = IB_QPS_RESET;
1102 qp->atomic_rd_en = 0; 1104 qp->atomic_rd_en = 0;
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
1318 return err; 1320 return err;
1319} 1321}
1320 1322
1323static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
1324{
1325 int c;
1326
1327 spin_lock_irq(&dev->qp_table.lock);
1328 c = qp->refcount;
1329 spin_unlock_irq(&dev->qp_table.lock);
1330
1331 return c;
1332}
1333
1321void mthca_free_qp(struct mthca_dev *dev, 1334void mthca_free_qp(struct mthca_dev *dev,
1322 struct mthca_qp *qp) 1335 struct mthca_qp *qp)
1323{ 1336{
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev,
1339 spin_lock(&dev->qp_table.lock); 1352 spin_lock(&dev->qp_table.lock);
1340 mthca_array_clear(&dev->qp_table.qp, 1353 mthca_array_clear(&dev->qp_table.qp,
1341 qp->qpn & (dev->limits.num_qps - 1)); 1354 qp->qpn & (dev->limits.num_qps - 1));
1355 --qp->refcount;
1342 spin_unlock(&dev->qp_table.lock); 1356 spin_unlock(&dev->qp_table.lock);
1343 1357
1344 if (send_cq != recv_cq) 1358 if (send_cq != recv_cq)
1345 spin_unlock(&recv_cq->lock); 1359 spin_unlock(&recv_cq->lock);
1346 spin_unlock_irq(&send_cq->lock); 1360 spin_unlock_irq(&send_cq->lock);
1347 1361
1348 atomic_dec(&qp->refcount); 1362 wait_event(qp->wait, !get_qp_refcount(dev, qp));
1349 wait_event(qp->wait, !atomic_read(&qp->refcount));
1350 1363
1351 if (qp->state != IB_QPS_RESET) 1364 if (qp->state != IB_QPS_RESET)
1352 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, 1365 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev,
1358 * unref the mem-free tables and free the QPN in our table. 1371 * unref the mem-free tables and free the QPN in our table.
1359 */ 1372 */
1360 if (!qp->ibqp.uobject) { 1373 if (!qp->ibqp.uobject) {
1361 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, 1374 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
1362 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1375 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1363 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 1376 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
1364 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, 1377 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
1365 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1378 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1366 1379
1367 mthca_free_memfree(dev, qp); 1380 mthca_free_memfree(dev, qp);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index adcaf85355ae..1ea433291fa7 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
241 goto err_out_mailbox; 241 goto err_out_mailbox;
242 242
243 spin_lock_init(&srq->lock); 243 spin_lock_init(&srq->lock);
244 atomic_set(&srq->refcount, 1); 244 srq->refcount = 1;
245 init_waitqueue_head(&srq->wait); 245 init_waitqueue_head(&srq->wait);
246 246
247 if (mthca_is_memfree(dev)) 247 if (mthca_is_memfree(dev))
@@ -308,6 +308,17 @@ err_out:
308 return err; 308 return err;
309} 309}
310 310
311static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
312{
313 int c;
314
315 spin_lock_irq(&dev->srq_table.lock);
316 c = srq->refcount;
317 spin_unlock_irq(&dev->srq_table.lock);
318
319 return c;
320}
321
311void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) 322void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
312{ 323{
313 struct mthca_mailbox *mailbox; 324 struct mthca_mailbox *mailbox;
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
329 spin_lock_irq(&dev->srq_table.lock); 340 spin_lock_irq(&dev->srq_table.lock);
330 mthca_array_clear(&dev->srq_table.srq, 341 mthca_array_clear(&dev->srq_table.srq,
331 srq->srqn & (dev->limits.num_srqs - 1)); 342 srq->srqn & (dev->limits.num_srqs - 1));
343 --srq->refcount;
332 spin_unlock_irq(&dev->srq_table.lock); 344 spin_unlock_irq(&dev->srq_table.lock);
333 345
334 atomic_dec(&srq->refcount); 346 wait_event(srq->wait, !get_srq_refcount(dev, srq));
335 wait_event(srq->wait, !atomic_read(&srq->refcount));
336 347
337 if (!srq->ibsrq.uobject) { 348 if (!srq->ibsrq.uobject) {
338 mthca_free_srq_buf(dev, srq); 349 mthca_free_srq_buf(dev, srq);
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
414 spin_lock(&dev->srq_table.lock); 425 spin_lock(&dev->srq_table.lock);
415 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); 426 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
416 if (srq) 427 if (srq)
417 atomic_inc(&srq->refcount); 428 ++srq->refcount;
418 spin_unlock(&dev->srq_table.lock); 429 spin_unlock(&dev->srq_table.lock);
419 430
420 if (!srq) { 431 if (!srq) {
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
431 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); 442 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
432 443
433out: 444out:
434 if (atomic_dec_and_test(&srq->refcount)) 445 spin_lock(&dev->srq_table.lock);
446 if (!--srq->refcount)
435 wake_up(&srq->wait); 447 wake_up(&srq->wait);
448 spin_unlock(&dev->srq_table.lock);
436} 449}
437 450
438/* 451/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4ca175553f9f..f887780e8093 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
158 if (priv->pkey == pkey) { 158 if (priv->pkey == pkey) {
159 unregister_netdev(priv->dev); 159 unregister_netdev(priv->dev);
160 ipoib_dev_cleanup(priv->dev); 160 ipoib_dev_cleanup(priv->dev);
161
162 list_del(&priv->list); 161 list_del(&priv->list);
163 162 free_netdev(priv->dev);
164 kfree(priv);
165 163
166 ret = 0; 164 ret = 0;
167 break; 165 break;
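
Note: in ipoib_vlan_delete() the private structure is allocated together with the net_device it belongs to (it is the netdev_priv() area), so kfree(priv) released an interior pointer and left the surrounding net_device behind; the hunk switches to free_netdev(priv->dev), the documented counterpart of alloc_netdev(). The general pairing, as a brief sketch with a hypothetical private type:

	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));	/* my_priv is illustrative */
	if (!dev)
		return -ENOMEM;
	/* ... register, use, then unregister the interface ... */
	unregister_netdev(dev);
	free_netdev(dev);		/* never kfree(netdev_priv(dev)) */
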
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5bb55742ada6..c32ce4348e1b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target)
409 } 409 }
410} 410}
411 411
412static void srp_unmap_data(struct scsi_cmnd *scmnd,
413 struct srp_target_port *target,
414 struct srp_request *req)
415{
416 struct scatterlist *scat;
417 int nents;
418
419 if (!scmnd->request_buffer ||
420 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
421 scmnd->sc_data_direction != DMA_FROM_DEVICE))
422 return;
423
424 /*
425 * This handling of non-SG commands can be killed when the
426 * SCSI midlayer no longer generates non-SG commands.
427 */
428 if (likely(scmnd->use_sg)) {
429 nents = scmnd->use_sg;
430 scat = scmnd->request_buffer;
431 } else {
432 nents = 1;
433 scat = &req->fake_sg;
434 }
435
436 dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
437 scmnd->sc_data_direction);
438}
439
412static int srp_reconnect_target(struct srp_target_port *target) 440static int srp_reconnect_target(struct srp_target_port *target)
413{ 441{
414 struct ib_cm_id *new_cm_id; 442 struct ib_cm_id *new_cm_id;
@@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
455 list_for_each_entry(req, &target->req_queue, list) { 483 list_for_each_entry(req, &target->req_queue, list) {
456 req->scmnd->result = DID_RESET << 16; 484 req->scmnd->result = DID_RESET << 16;
457 req->scmnd->scsi_done(req->scmnd); 485 req->scmnd->scsi_done(req->scmnd);
486 srp_unmap_data(req->scmnd, target, req);
458 } 487 }
459 488
460 target->rx_head = 0; 489 target->rx_head = 0;
461 target->tx_head = 0; 490 target->tx_head = 0;
462 target->tx_tail = 0; 491 target->tx_tail = 0;
463 target->req_head = 0; 492 INIT_LIST_HEAD(&target->free_reqs);
464 for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
465 target->req_ring[i].next = i + 1;
466 target->req_ring[SRP_SQ_SIZE - 1].next = -1;
467 INIT_LIST_HEAD(&target->req_queue); 493 INIT_LIST_HEAD(&target->req_queue);
494 for (i = 0; i < SRP_SQ_SIZE; ++i)
495 list_add_tail(&target->req_ring[i].list, &target->free_reqs);
468 496
469 ret = srp_connect_target(target); 497 ret = srp_connect_target(target);
470 if (ret) 498 if (ret)
@@ -589,40 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
589 return len; 617 return len;
590} 618}
591 619
592static void srp_unmap_data(struct scsi_cmnd *scmnd, 620static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
593 struct srp_target_port *target,
594 struct srp_request *req)
595{
596 struct scatterlist *scat;
597 int nents;
598
599 if (!scmnd->request_buffer ||
600 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
601 scmnd->sc_data_direction != DMA_FROM_DEVICE))
602 return;
603
604 /*
605 * This handling of non-SG commands can be killed when the
606 * SCSI midlayer no longer generates non-SG commands.
607 */
608 if (likely(scmnd->use_sg)) {
609 nents = scmnd->use_sg;
610 scat = scmnd->request_buffer;
611 } else {
612 nents = 1;
613 scat = &req->fake_sg;
614 }
615
616 dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
617 scmnd->sc_data_direction);
618}
619
620static void srp_remove_req(struct srp_target_port *target, struct srp_request *req,
621 int index)
622{ 621{
623 list_del(&req->list); 622 srp_unmap_data(req->scmnd, target, req);
624 req->next = target->req_head; 623 list_move_tail(&req->list, &target->free_reqs);
625 target->req_head = index;
626} 624}
627 625
628static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) 626static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -647,7 +645,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
647 req->tsk_status = rsp->data[3]; 645 req->tsk_status = rsp->data[3];
648 complete(&req->done); 646 complete(&req->done);
649 } else { 647 } else {
650 scmnd = req->scmnd; 648 scmnd = req->scmnd;
651 if (!scmnd) 649 if (!scmnd)
652 printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", 650 printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
653 (unsigned long long) rsp->tag); 651 (unsigned long long) rsp->tag);
@@ -665,14 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
665 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) 663 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
666 scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); 664 scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
667 665
668 srp_unmap_data(scmnd, target, req);
669
670 if (!req->tsk_mgmt) { 666 if (!req->tsk_mgmt) {
671 req->scmnd = NULL;
672 scmnd->host_scribble = (void *) -1L; 667 scmnd->host_scribble = (void *) -1L;
673 scmnd->scsi_done(scmnd); 668 scmnd->scsi_done(scmnd);
674 669
675 srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT); 670 srp_remove_req(target, req);
676 } else 671 } else
677 req->cmd_done = 1; 672 req->cmd_done = 1;
678 } 673 }
@@ -859,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
859 struct srp_request *req; 854 struct srp_request *req;
860 struct srp_iu *iu; 855 struct srp_iu *iu;
861 struct srp_cmd *cmd; 856 struct srp_cmd *cmd;
862 long req_index;
863 int len; 857 int len;
864 858
865 if (target->state == SRP_TARGET_CONNECTING) 859 if (target->state == SRP_TARGET_CONNECTING)
@@ -879,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
879 dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, 873 dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
880 SRP_MAX_IU_LEN, DMA_TO_DEVICE); 874 SRP_MAX_IU_LEN, DMA_TO_DEVICE);
881 875
882 req_index = target->req_head; 876 req = list_entry(target->free_reqs.next, struct srp_request, list);
883 877
884 scmnd->scsi_done = done; 878 scmnd->scsi_done = done;
885 scmnd->result = 0; 879 scmnd->result = 0;
886 scmnd->host_scribble = (void *) req_index; 880 scmnd->host_scribble = (void *) (long) req->index;
887 881
888 cmd = iu->buf; 882 cmd = iu->buf;
889 memset(cmd, 0, sizeof *cmd); 883 memset(cmd, 0, sizeof *cmd);
890 884
891 cmd->opcode = SRP_CMD; 885 cmd->opcode = SRP_CMD;
892 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); 886 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
893 cmd->tag = req_index; 887 cmd->tag = req->index;
894 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 888 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
895 889
896 req = &target->req_ring[req_index];
897
898 req->scmnd = scmnd; 890 req->scmnd = scmnd;
899 req->cmd = iu; 891 req->cmd = iu;
900 req->cmd_done = 0; 892 req->cmd_done = 0;
@@ -919,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
919 goto err_unmap; 911 goto err_unmap;
920 } 912 }
921 913
922 target->req_head = req->next; 914 list_move_tail(&req->list, &target->req_queue);
923 list_add_tail(&req->list, &target->req_queue);
924 915
925 return 0; 916 return 0;
926 917
@@ -1143,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1143 return 0; 1134 return 0;
1144} 1135}
1145 1136
1146static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) 1137static int srp_send_tsk_mgmt(struct srp_target_port *target,
1138 struct srp_request *req, u8 func)
1147{ 1139{
1148 struct srp_target_port *target = host_to_target(scmnd->device->host);
1149 struct srp_request *req;
1150 struct srp_iu *iu; 1140 struct srp_iu *iu;
1151 struct srp_tsk_mgmt *tsk_mgmt; 1141 struct srp_tsk_mgmt *tsk_mgmt;
1152 int req_index;
1153 int ret = FAILED;
1154 1142
1155 spin_lock_irq(target->scsi_host->host_lock); 1143 spin_lock_irq(target->scsi_host->host_lock);
1156 1144
1157 if (target->state == SRP_TARGET_DEAD || 1145 if (target->state == SRP_TARGET_DEAD ||
1158 target->state == SRP_TARGET_REMOVED) { 1146 target->state == SRP_TARGET_REMOVED) {
1159 scmnd->result = DID_BAD_TARGET << 16; 1147 req->scmnd->result = DID_BAD_TARGET << 16;
1160 goto out; 1148 goto out;
1161 } 1149 }
1162 1150
1163 if (scmnd->host_scribble == (void *) -1L)
1164 goto out;
1165
1166 req_index = (long) scmnd->host_scribble;
1167 printk(KERN_ERR "Abort for req_index %d\n", req_index);
1168
1169 req = &target->req_ring[req_index];
1170 init_completion(&req->done); 1151 init_completion(&req->done);
1171 1152
1172 iu = __srp_get_tx_iu(target); 1153 iu = __srp_get_tx_iu(target);
@@ -1177,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
1177 memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 1158 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1178 1159
1179 tsk_mgmt->opcode = SRP_TSK_MGMT; 1160 tsk_mgmt->opcode = SRP_TSK_MGMT;
1180 tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48); 1161 tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
1181 tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT; 1162 tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
1182 tsk_mgmt->tsk_mgmt_func = func; 1163 tsk_mgmt->tsk_mgmt_func = func;
1183 tsk_mgmt->task_tag = req_index; 1164 tsk_mgmt->task_tag = req->index;
1184 1165
1185 if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) 1166 if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
1186 goto out; 1167 goto out;
@@ -1188,37 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
1188 req->tsk_mgmt = iu; 1169 req->tsk_mgmt = iu;
1189 1170
1190 spin_unlock_irq(target->scsi_host->host_lock); 1171 spin_unlock_irq(target->scsi_host->host_lock);
1172
1191 if (!wait_for_completion_timeout(&req->done, 1173 if (!wait_for_completion_timeout(&req->done,
1192 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 1174 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1193 return FAILED; 1175 return -1;
1194 spin_lock_irq(target->scsi_host->host_lock);
1195 1176
1196 if (req->cmd_done) { 1177 return 0;
1197 srp_remove_req(target, req, req_index);
1198 scmnd->scsi_done(scmnd);
1199 } else if (!req->tsk_status) {
1200 srp_remove_req(target, req, req_index);
1201 scmnd->result = DID_ABORT << 16;
1202 ret = SUCCESS;
1203 }
1204 1178
1205out: 1179out:
1206 spin_unlock_irq(target->scsi_host->host_lock); 1180 spin_unlock_irq(target->scsi_host->host_lock);
1207 return ret; 1181 return -1;
1182}
1183
1184static int srp_find_req(struct srp_target_port *target,
1185 struct scsi_cmnd *scmnd,
1186 struct srp_request **req)
1187{
1188 if (scmnd->host_scribble == (void *) -1L)
1189 return -1;
1190
1191 *req = &target->req_ring[(long) scmnd->host_scribble];
1192
1193 return 0;
1208} 1194}
1209 1195
1210static int srp_abort(struct scsi_cmnd *scmnd) 1196static int srp_abort(struct scsi_cmnd *scmnd)
1211{ 1197{
1198 struct srp_target_port *target = host_to_target(scmnd->device->host);
1199 struct srp_request *req;
1200 int ret = SUCCESS;
1201
1212 printk(KERN_ERR "SRP abort called\n"); 1202 printk(KERN_ERR "SRP abort called\n");
1213 1203
1214 return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK); 1204 if (srp_find_req(target, scmnd, &req))
1205 return FAILED;
1206 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
1207 return FAILED;
1208
1209 spin_lock_irq(target->scsi_host->host_lock);
1210
1211 if (req->cmd_done) {
1212 srp_remove_req(target, req);
1213 scmnd->scsi_done(scmnd);
1214 } else if (!req->tsk_status) {
1215 srp_remove_req(target, req);
1216 scmnd->result = DID_ABORT << 16;
1217 } else
1218 ret = FAILED;
1219
1220 spin_unlock_irq(target->scsi_host->host_lock);
1221
1222 return ret;
1215} 1223}
1216 1224
1217static int srp_reset_device(struct scsi_cmnd *scmnd) 1225static int srp_reset_device(struct scsi_cmnd *scmnd)
1218{ 1226{
1227 struct srp_target_port *target = host_to_target(scmnd->device->host);
1228 struct srp_request *req, *tmp;
1229
1219 printk(KERN_ERR "SRP reset_device called\n"); 1230 printk(KERN_ERR "SRP reset_device called\n");
1220 1231
1221 return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET); 1232 if (srp_find_req(target, scmnd, &req))
1233 return FAILED;
1234 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
1235 return FAILED;
1236 if (req->tsk_status)
1237 return FAILED;
1238
1239 spin_lock_irq(target->scsi_host->host_lock);
1240
1241 list_for_each_entry_safe(req, tmp, &target->req_queue, list)
1242 if (req->scmnd->device == scmnd->device) {
1243 req->scmnd->result = DID_RESET << 16;
1244 scmnd->scsi_done(scmnd);
1245 srp_remove_req(target, req);
1246 }
1247
1248 spin_unlock_irq(target->scsi_host->host_lock);
1249
1250 return SUCCESS;
1222} 1251}
1223 1252
1224static int srp_reset_host(struct scsi_cmnd *scmnd) 1253static int srp_reset_host(struct scsi_cmnd *scmnd)
@@ -1518,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev,
1518 1547
1519 INIT_WORK(&target->work, srp_reconnect_work, target); 1548 INIT_WORK(&target->work, srp_reconnect_work, target);
1520 1549
1521 for (i = 0; i < SRP_SQ_SIZE - 1; ++i) 1550 INIT_LIST_HEAD(&target->free_reqs);
1522 target->req_ring[i].next = i + 1;
1523 target->req_ring[SRP_SQ_SIZE - 1].next = -1;
1524 INIT_LIST_HEAD(&target->req_queue); 1551 INIT_LIST_HEAD(&target->req_queue);
1552 for (i = 0; i < SRP_SQ_SIZE; ++i) {
1553 target->req_ring[i].index = i;
1554 list_add_tail(&target->req_ring[i].list, &target->free_reqs);
1555 }
1525 1556
1526 ret = srp_parse_options(buf, target); 1557 ret = srp_parse_options(buf, target);
1527 if (ret) 1558 if (ret)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index bd7f7c3115de..c5cd43aae860 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -101,7 +101,7 @@ struct srp_request {
101 */ 101 */
102 struct scatterlist fake_sg; 102 struct scatterlist fake_sg;
103 struct completion done; 103 struct completion done;
104 short next; 104 short index;
105 u8 cmd_done; 105 u8 cmd_done;
106 u8 tsk_status; 106 u8 tsk_status;
107}; 107};
@@ -133,7 +133,7 @@ struct srp_target_port {
133 unsigned tx_tail; 133 unsigned tx_tail;
134 struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; 134 struct srp_iu *tx_ring[SRP_SQ_SIZE + 1];
135 135
136 int req_head; 136 struct list_head free_reqs;
137 struct list_head req_queue; 137 struct list_head req_queue;
138 struct srp_request req_ring[SRP_SQ_SIZE]; 138 struct srp_request req_ring[SRP_SQ_SIZE];
139 139
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 9b493f0becc4..173c899a1fb4 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1499,7 +1499,6 @@ static int __init capi_init(void)
1499 printk(KERN_ERR "capi20: unable to get major %d\n", capi_major); 1499 printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
1500 return major_ret; 1500 return major_ret;
1501 } 1501 }
1502 capi_major = major_ret;
1503 capi_class = class_create(THIS_MODULE, "capi"); 1502 capi_class = class_create(THIS_MODULE, "capi");
1504 if (IS_ERR(capi_class)) { 1503 if (IS_ERR(capi_class)) {
1505 unregister_chrdev(capi_major, "capi20"); 1504 unregister_chrdev(capi_major, "capi20");
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index bfb73fd5077e..d86ab68114b0 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -710,8 +710,8 @@ static int gigaset_probe(struct usb_interface *interface,
710 retval = -ENODEV; //FIXME 710 retval = -ENODEV; //FIXME
711 711
712 /* See if the device offered us matches what we can accept */ 712 /* See if the device offered us matches what we can accept */
713 if ((le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID)) || 713 if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
714 (le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID))) 714 (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
715 return -ENODEV; 715 return -ENODEV;
716 716
717 /* this starts to become ascii art... */ 717 /* this starts to become ascii art... */
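
Note: the usb-gigaset fix is a misplaced parenthesis: the old code applied le16_to_cpu() to the result of each comparison, so the raw little-endian idVendor/idProduct fields were compared against CPU-order constants — a harmless no-op on little-endian hosts but a wrong match on big-endian ones. Convert first, then compare, as the corrected lines do:

	if (le16_to_cpu(udev->descriptor.idVendor)  != USB_M105_VENDOR_ID ||
	    le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)
		return -ENODEV;
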
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 3f5b64794542..626506234b76 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -4,8 +4,11 @@ menu "LED devices"
4config NEW_LEDS 4config NEW_LEDS
5 bool "LED Support" 5 bool "LED Support"
6 help 6 help
7 Say Y to enable Linux LED support. This is not related to standard 7 Say Y to enable Linux LED support. This allows control of supported
8 keyboard LEDs which are controlled via the input system. 8 LEDs from both userspace and optionally, by kernel events (triggers).
9
10 This is not related to standard keyboard LEDs which are controlled
11 via the input system.
9 12
10config LEDS_CLASS 13config LEDS_CLASS
11 tristate "LED Class Support" 14 tristate "LED Class Support"
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index b0b5d05fadd6..c75d0ef1609c 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -19,6 +19,7 @@
19#include <linux/sysdev.h> 19#include <linux/sysdev.h>
20#include <linux/timer.h> 20#include <linux/timer.h>
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/ctype.h>
22#include <linux/leds.h> 23#include <linux/leds.h>
23#include "leds.h" 24#include "leds.h"
24 25
@@ -43,9 +44,13 @@ static ssize_t led_brightness_store(struct class_device *dev,
43 ssize_t ret = -EINVAL; 44 ssize_t ret = -EINVAL;
44 char *after; 45 char *after;
45 unsigned long state = simple_strtoul(buf, &after, 10); 46 unsigned long state = simple_strtoul(buf, &after, 10);
47 size_t count = after - buf;
46 48
47 if (after - buf > 0) { 49 if (*after && isspace(*after))
48 ret = after - buf; 50 count++;
51
52 if (count == size) {
53 ret = count;
49 led_set_brightness(led_cdev, state); 54 led_set_brightness(led_cdev, state);
50 } 55 }
51 56
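
Note: the led-class.c change tightens the sysfs brightness store so that the parsed length must account for the entire write: count is the number of characters consumed by simple_strtoul(), one trailing whitespace character (the newline that echo appends) is tolerated, and the value is only applied when count equals the full buffer size, so trailing garbage is rejected instead of silently accepted. The accepted-input check, restated from the right-hand column:

	char *after;
	unsigned long state = simple_strtoul(buf, &after, 10);
	size_t count = after - buf;

	if (*after && isspace(*after))
		count++;		/* allow a single trailing '\n' */

	if (count == size) {
		ret = count;
		led_set_brightness(led_cdev, state);
	}

ledtrig-timer.c receives the same treatment for its delay_on and delay_off attributes in the next file.
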
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index f484b5d6dbf8..fbf141ef46ec 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -20,6 +20,7 @@
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/sysdev.h> 21#include <linux/sysdev.h>
22#include <linux/timer.h> 22#include <linux/timer.h>
23#include <linux/ctype.h>
23#include <linux/leds.h> 24#include <linux/leds.h>
24#include "leds.h" 25#include "leds.h"
25 26
@@ -69,11 +70,15 @@ static ssize_t led_delay_on_store(struct class_device *dev, const char *buf,
69 int ret = -EINVAL; 70 int ret = -EINVAL;
70 char *after; 71 char *after;
71 unsigned long state = simple_strtoul(buf, &after, 10); 72 unsigned long state = simple_strtoul(buf, &after, 10);
73 size_t count = after - buf;
72 74
73 if (after - buf > 0) { 75 if (*after && isspace(*after))
76 count++;
77
78 if (count == size) {
74 timer_data->delay_on = state; 79 timer_data->delay_on = state;
75 mod_timer(&timer_data->timer, jiffies + 1); 80 mod_timer(&timer_data->timer, jiffies + 1);
76 ret = after - buf; 81 ret = count;
77 } 82 }
78 83
79 return ret; 84 return ret;
@@ -97,11 +102,15 @@ static ssize_t led_delay_off_store(struct class_device *dev, const char *buf,
97 int ret = -EINVAL; 102 int ret = -EINVAL;
98 char *after; 103 char *after;
99 unsigned long state = simple_strtoul(buf, &after, 10); 104 unsigned long state = simple_strtoul(buf, &after, 10);
105 size_t count = after - buf;
106
107 if (*after && isspace(*after))
108 count++;
100 109
101 if (after - buf > 0) { 110 if (count == size) {
102 timer_data->delay_off = state; 111 timer_data->delay_off = state;
103 mod_timer(&timer_data->timer, jiffies + 1); 112 mod_timer(&timer_data->timer, jiffies + 1);
104 ret = after - buf; 113 ret = count;
105 } 114 }
106 115
107 return ret; 116 return ret;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 266414ca2814..9080853fe283 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1189,7 +1189,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1189 ioc->diagPending = 0; 1189 ioc->diagPending = 0;
1190 spin_lock_init(&ioc->diagLock); 1190 spin_lock_init(&ioc->diagLock);
1191 spin_lock_init(&ioc->fc_rescan_work_lock); 1191 spin_lock_init(&ioc->fc_rescan_work_lock);
1192 spin_lock_init(&ioc->fc_rport_lock);
1193 spin_lock_init(&ioc->initializing_hba_lock); 1192 spin_lock_init(&ioc->initializing_hba_lock);
1194 1193
1195 /* Initialize the event logging. 1194 /* Initialize the event logging.
@@ -5736,11 +5735,13 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
5736 return rc; 5735 return rc;
5737} 5736}
5738 5737
5738# define EVENT_DESCR_STR_SZ 100
5739
5739/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5740/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5740static void 5741static void
5741EventDescriptionStr(u8 event, u32 evData0, char *evStr) 5742EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5742{ 5743{
5743 char *ds; 5744 char *ds = NULL;
5744 5745
5745 switch(event) { 5746 switch(event) {
5746 case MPI_EVENT_NONE: 5747 case MPI_EVENT_NONE:
@@ -5777,9 +5778,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5777 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) 5778 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
5778 ds = "Loop State(LIP) Change"; 5779 ds = "Loop State(LIP) Change";
5779 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) 5780 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
5780 ds = "Loop State(LPE) Change"; /* ??? */ 5781 ds = "Loop State(LPE) Change"; /* ??? */
5781 else 5782 else
5782 ds = "Loop State(LPB) Change"; /* ??? */ 5783 ds = "Loop State(LPB) Change"; /* ??? */
5783 break; 5784 break;
5784 case MPI_EVENT_LOGOUT: 5785 case MPI_EVENT_LOGOUT:
5785 ds = "Logout"; 5786 ds = "Logout";
@@ -5841,27 +5842,32 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5841 break; 5842 break;
5842 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 5843 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
5843 { 5844 {
5844 char buf[50];
5845 u8 id = (u8)(evData0); 5845 u8 id = (u8)(evData0);
5846 u8 ReasonCode = (u8)(evData0 >> 16); 5846 u8 ReasonCode = (u8)(evData0 >> 16);
5847 switch (ReasonCode) { 5847 switch (ReasonCode) {
5848 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 5848 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
5849 sprintf(buf,"SAS Device Status Change: Added: id=%d", id); 5849 snprintf(evStr, EVENT_DESCR_STR_SZ,
5850 "SAS Device Status Change: Added: id=%d", id);
5850 break; 5851 break;
5851 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 5852 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
5852 sprintf(buf,"SAS Device Status Change: Deleted: id=%d", id); 5853 snprintf(evStr, EVENT_DESCR_STR_SZ,
5854 "SAS Device Status Change: Deleted: id=%d", id);
5853 break; 5855 break;
5854 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 5856 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
5855 sprintf(buf,"SAS Device Status Change: SMART Data: id=%d", id); 5857 snprintf(evStr, EVENT_DESCR_STR_SZ,
5858 "SAS Device Status Change: SMART Data: id=%d",
5859 id);
5856 break; 5860 break;
5857 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 5861 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
5858 sprintf(buf,"SAS Device Status Change: No Persistancy Added: id=%d", id); 5862 snprintf(evStr, EVENT_DESCR_STR_SZ,
5863 "SAS Device Status Change: No Persistancy "
5864 "Added: id=%d", id);
5859 break; 5865 break;
5860 default: 5866 default:
5861 sprintf(buf,"SAS Device Status Change: Unknown: id=%d", id); 5867 snprintf(evStr, EVENT_DESCR_STR_SZ,
5862 break; 5868 "SAS Device Status Change: Unknown: id=%d", id);
5869 break;
5863 } 5870 }
5864 ds = buf;
5865 break; 5871 break;
5866 } 5872 }
5867 case MPI_EVENT_ON_BUS_TIMER_EXPIRED: 5873 case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
@@ -5878,41 +5884,46 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5878 break; 5884 break;
5879 case MPI_EVENT_SAS_PHY_LINK_STATUS: 5885 case MPI_EVENT_SAS_PHY_LINK_STATUS:
5880 { 5886 {
5881 char buf[50];
5882 u8 LinkRates = (u8)(evData0 >> 8); 5887 u8 LinkRates = (u8)(evData0 >> 8);
5883 u8 PhyNumber = (u8)(evData0); 5888 u8 PhyNumber = (u8)(evData0);
5884 LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >> 5889 LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >>
5885 MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT; 5890 MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT;
5886 switch (LinkRates) { 5891 switch (LinkRates) {
5887 case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN: 5892 case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN:
5888 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5893 snprintf(evStr, EVENT_DESCR_STR_SZ,
5894 "SAS PHY Link Status: Phy=%d:"
5889 " Rate Unknown",PhyNumber); 5895 " Rate Unknown",PhyNumber);
5890 break; 5896 break;
5891 case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED: 5897 case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED:
5892 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5898 snprintf(evStr, EVENT_DESCR_STR_SZ,
5899 "SAS PHY Link Status: Phy=%d:"
5893 " Phy Disabled",PhyNumber); 5900 " Phy Disabled",PhyNumber);
5894 break; 5901 break;
5895 case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION: 5902 case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION:
5896 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5903 snprintf(evStr, EVENT_DESCR_STR_SZ,
5904 "SAS PHY Link Status: Phy=%d:"
5897 " Failed Speed Nego",PhyNumber); 5905 " Failed Speed Nego",PhyNumber);
5898 break; 5906 break;
5899 case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE: 5907 case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE:
5900 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5908 snprintf(evStr, EVENT_DESCR_STR_SZ,
5909 "SAS PHY Link Status: Phy=%d:"
5901 " Sata OOB Completed",PhyNumber); 5910 " Sata OOB Completed",PhyNumber);
5902 break; 5911 break;
5903 case MPI_EVENT_SAS_PLS_LR_RATE_1_5: 5912 case MPI_EVENT_SAS_PLS_LR_RATE_1_5:
5904 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5913 snprintf(evStr, EVENT_DESCR_STR_SZ,
5914 "SAS PHY Link Status: Phy=%d:"
5905 " Rate 1.5 Gbps",PhyNumber); 5915 " Rate 1.5 Gbps",PhyNumber);
5906 break; 5916 break;
5907 case MPI_EVENT_SAS_PLS_LR_RATE_3_0: 5917 case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
5908 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5918 snprintf(evStr, EVENT_DESCR_STR_SZ,
5919 "SAS PHY Link Status: Phy=%d:"
5909 " Rate 3.0 Gpbs",PhyNumber); 5920 " Rate 3.0 Gpbs",PhyNumber);
5910 break; 5921 break;
5911 default: 5922 default:
5912 sprintf(buf,"SAS PHY Link Status: Phy=%d", PhyNumber); 5923 snprintf(evStr, EVENT_DESCR_STR_SZ,
5924 "SAS PHY Link Status: Phy=%d", PhyNumber);
5913 break; 5925 break;
5914 } 5926 }
5915 ds = buf;
5916 break; 5927 break;
5917 } 5928 }
5918 case MPI_EVENT_SAS_DISCOVERY_ERROR: 5929 case MPI_EVENT_SAS_DISCOVERY_ERROR:
@@ -5921,9 +5932,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5921 case MPI_EVENT_IR_RESYNC_UPDATE: 5932 case MPI_EVENT_IR_RESYNC_UPDATE:
5922 { 5933 {
5923 u8 resync_complete = (u8)(evData0 >> 16); 5934 u8 resync_complete = (u8)(evData0 >> 16);
5924 char buf[40]; 5935 snprintf(evStr, EVENT_DESCR_STR_SZ,
5925 sprintf(buf,"IR Resync Update: Complete = %d:",resync_complete); 5936 "IR Resync Update: Complete = %d:",resync_complete);
5926 ds = buf;
5927 break; 5937 break;
5928 } 5938 }
5929 case MPI_EVENT_IR2: 5939 case MPI_EVENT_IR2:
@@ -5976,7 +5986,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5976 ds = "Unknown"; 5986 ds = "Unknown";
5977 break; 5987 break;
5978 } 5988 }
5979 strcpy(evStr,ds); 5989 if (ds)
5990 strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
5980} 5991}
5981 5992
5982/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5993/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5998,7 +6009,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5998 int ii; 6009 int ii;
5999 int r = 0; 6010 int r = 0;
6000 int handlers = 0; 6011 int handlers = 0;
6001 char evStr[100]; 6012 char evStr[EVENT_DESCR_STR_SZ];
6002 u8 event; 6013 u8 event;
6003 6014
6004 /* 6015 /*
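
Note: the mptbase.c rework closes a potential overflow in EventDescriptionStr(): the old code sprintf()ed into 40- and 50-byte stack buffers and then strcpy()ed into the caller's evStr with no length check, while the new code formats straight into evStr bounded by EVENT_DESCR_STR_SZ (100, matching the evStr[] declared in ProcessEventNotification()), initializes ds to NULL and only copies it when a static description string was selected. One caveat worth noting: strncpy() does not NUL-terminate when the source is as long as the limit, though every static string used here is far shorter than 100 bytes. The bounded pattern, in one call taken from the hunks above:

	snprintf(evStr, EVENT_DESCR_STR_SZ,
		 "IR Resync Update: Complete = %d:", resync_complete);
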
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index be7e8501b53c..f673cca507e1 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.03.08" 79#define MPT_LINUX_VERSION_COMMON "3.03.09"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.08" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.09"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
@@ -489,7 +489,6 @@ typedef struct _RaidCfgData {
489 489
490#define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */ 490#define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */
491#define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */ 491#define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */
492#define MPT_RPORT_INFO_FLAGS_MAPPED_VDEV 0x04 /* target mapped in vdev */
493 492
494/* 493/*
495 * data allocated for each fc rport device 494 * data allocated for each fc rport device
@@ -501,7 +500,6 @@ struct mptfc_rport_info
501 struct scsi_target *starget; 500 struct scsi_target *starget;
502 FCDevicePage0_t pg0; 501 FCDevicePage0_t pg0;
503 u8 flags; 502 u8 flags;
504 u8 remap_needed;
505}; 503};
506 504
507/* 505/*
@@ -628,11 +626,11 @@ typedef struct _MPT_ADAPTER
628 struct work_struct mptscsih_persistTask; 626 struct work_struct mptscsih_persistTask;
629 627
630 struct list_head fc_rports; 628 struct list_head fc_rports;
631 spinlock_t fc_rport_lock; /* list and ri flags */
632 spinlock_t fc_rescan_work_lock; 629 spinlock_t fc_rescan_work_lock;
633 int fc_rescan_work_count; 630 int fc_rescan_work_count;
634 struct work_struct fc_rescan_work; 631 struct work_struct fc_rescan_work;
635 632 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
633 struct workqueue_struct *fc_rescan_work_q;
636} MPT_ADAPTER; 634} MPT_ADAPTER;
637 635
638/* 636/*
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b343f2a68b1c..856487741ef4 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -341,9 +341,6 @@ mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
341 rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low; 341 rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low;
342 rid->port_id = pg0->PortIdentifier; 342 rid->port_id = pg0->PortIdentifier;
343 rid->roles = FC_RPORT_ROLE_UNKNOWN; 343 rid->roles = FC_RPORT_ROLE_UNKNOWN;
344 rid->roles |= FC_RPORT_ROLE_FCP_TARGET;
345 if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
346 rid->roles |= FC_RPORT_ROLE_FCP_INITIATOR;
347 344
348 return 0; 345 return 0;
349} 346}
@@ -355,15 +352,18 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
355 struct fc_rport *rport; 352 struct fc_rport *rport;
356 struct mptfc_rport_info *ri; 353 struct mptfc_rport_info *ri;
357 int new_ri = 1; 354 int new_ri = 1;
358 u64 pn; 355 u64 pn, nn;
359 unsigned long flags;
360 VirtTarget *vtarget; 356 VirtTarget *vtarget;
357 u32 roles = FC_RPORT_ROLE_UNKNOWN;
361 358
362 if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0) 359 if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
363 return; 360 return;
364 361
362 roles |= FC_RPORT_ROLE_FCP_TARGET;
363 if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
364 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
365
365 /* scan list looking for a match */ 366 /* scan list looking for a match */
366 spin_lock_irqsave(&ioc->fc_rport_lock, flags);
367 list_for_each_entry(ri, &ioc->fc_rports, list) { 367 list_for_each_entry(ri, &ioc->fc_rports, list) {
368 pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; 368 pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
369 if (pn == rport_ids.port_name) { /* match */ 369 if (pn == rport_ids.port_name) { /* match */
@@ -373,11 +373,9 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
373 } 373 }
374 } 374 }
375 if (new_ri) { /* allocate one */ 375 if (new_ri) { /* allocate one */
376 spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
377 ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL); 376 ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
378 if (!ri) 377 if (!ri)
379 return; 378 return;
380 spin_lock_irqsave(&ioc->fc_rport_lock, flags);
381 list_add_tail(&ri->list, &ioc->fc_rports); 379 list_add_tail(&ri->list, &ioc->fc_rports);
382 } 380 }
383 381
@@ -387,14 +385,11 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
387 /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */ 385 /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */
388 if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) { 386 if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
389 ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED; 387 ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
390 spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
391 rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); 388 rport = fc_remote_port_add(ioc->sh, channel, &rport_ids);
392 spin_lock_irqsave(&ioc->fc_rport_lock, flags);
393 if (rport) { 389 if (rport) {
394 ri->rport = rport; 390 ri->rport = rport;
395 if (new_ri) /* may have been reset by user */ 391 if (new_ri) /* may have been reset by user */
396 rport->dev_loss_tmo = mptfc_dev_loss_tmo; 392 rport->dev_loss_tmo = mptfc_dev_loss_tmo;
397 *((struct mptfc_rport_info **)rport->dd_data) = ri;
398 /* 393 /*
399 * if already mapped, remap here. If not mapped, 394 * if already mapped, remap here. If not mapped,
400 * target_alloc will allocate vtarget and map, 395 * target_alloc will allocate vtarget and map,
@@ -406,16 +401,21 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
406 vtarget->target_id = pg0->CurrentTargetID; 401 vtarget->target_id = pg0->CurrentTargetID;
407 vtarget->bus_id = pg0->CurrentBus; 402 vtarget->bus_id = pg0->CurrentBus;
408 } 403 }
409 ri->remap_needed = 0;
410 } 404 }
405 *((struct mptfc_rport_info **)rport->dd_data) = ri;
406 /* scan will be scheduled once rport becomes a target */
407 fc_remote_port_rolechg(rport,roles);
408
409 pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
410 nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
411 dfcprintk ((MYIOC_s_INFO_FMT 411 dfcprintk ((MYIOC_s_INFO_FMT
412 "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, " 412 "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
413 "rport tid %d, tmo %d\n", 413 "rport tid %d, tmo %d\n",
414 ioc->name, 414 ioc->name,
415 ioc->sh->host_no, 415 ioc->sh->host_no,
416 pg0->PortIdentifier, 416 pg0->PortIdentifier,
417 pg0->WWNN, 417 (unsigned long long)nn,
418 pg0->WWPN, 418 (unsigned long long)pn,
419 pg0->CurrentTargetID, 419 pg0->CurrentTargetID,
420 ri->rport->scsi_target_id, 420 ri->rport->scsi_target_id,
421 ri->rport->dev_loss_tmo)); 421 ri->rport->dev_loss_tmo));
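
The debug-print change above builds the 64-bit WWPN/WWNN out of the two 32-bit halves held in the page-0 data and casts to unsigned long long for %llx, instead of handing the raw structure fields to the format string. A standalone sketch of that assembly; the two-word struct here is an illustrative stand-in for the driver's WWN layout:

    #include <stdio.h>
    #include <stdint.h>

    /* illustrative stand-in for the High/Low WWN words in FCDevicePage0_t */
    struct wwn {
            uint32_t High;
            uint32_t Low;
    };

    int main(void)
    {
            struct wwn wwpn = { 0x20000011, 0xc61234ab };
            uint64_t pn = (uint64_t)wwpn.High << 32 | (uint64_t)wwpn.Low;

            /* the cast keeps %llx portable across 32- and 64-bit builds */
            printf("port_name = %llx\n", (unsigned long long)pn);
            return 0;
    }
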
@@ -425,8 +425,6 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
425 ri = NULL; 425 ri = NULL;
426 } 426 }
427 } 427 }
428 spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
429
430} 428}
431 429
432/* 430/*
@@ -476,7 +474,6 @@ mptfc_target_alloc(struct scsi_target *starget)
476 vtarget->target_id = ri->pg0.CurrentTargetID; 474 vtarget->target_id = ri->pg0.CurrentTargetID;
477 vtarget->bus_id = ri->pg0.CurrentBus; 475 vtarget->bus_id = ri->pg0.CurrentBus;
478 ri->starget = starget; 476 ri->starget = starget;
479 ri->remap_needed = 0;
480 rc = 0; 477 rc = 0;
481 } 478 }
482 } 479 }
@@ -502,10 +499,10 @@ mptfc_slave_alloc(struct scsi_device *sdev)
502 VirtDevice *vdev; 499 VirtDevice *vdev;
503 struct scsi_target *starget; 500 struct scsi_target *starget;
504 struct fc_rport *rport; 501 struct fc_rport *rport;
505 unsigned long flags;
506 502
507 503
508 rport = starget_to_rport(scsi_target(sdev)); 504 starget = scsi_target(sdev);
505 rport = starget_to_rport(starget);
509 506
510 if (!rport || fc_remote_port_chkready(rport)) 507 if (!rport || fc_remote_port_chkready(rport))
511 return -ENXIO; 508 return -ENXIO;
@@ -519,10 +516,8 @@ mptfc_slave_alloc(struct scsi_device *sdev)
519 return -ENOMEM; 516 return -ENOMEM;
520 } 517 }
521 518
522 spin_lock_irqsave(&hd->ioc->fc_rport_lock,flags);
523 519
524 sdev->hostdata = vdev; 520 sdev->hostdata = vdev;
525 starget = scsi_target(sdev);
526 vtarget = starget->hostdata; 521 vtarget = starget->hostdata;
527 522
528 if (vtarget->num_luns == 0) { 523 if (vtarget->num_luns == 0) {
@@ -535,14 +530,16 @@ mptfc_slave_alloc(struct scsi_device *sdev)
535 vdev->vtarget = vtarget; 530 vdev->vtarget = vtarget;
536 vdev->lun = sdev->lun; 531 vdev->lun = sdev->lun;
537 532
538 spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags);
539
540 vtarget->num_luns++; 533 vtarget->num_luns++;
541 534
535
542#ifdef DMPT_DEBUG_FC 536#ifdef DMPT_DEBUG_FC
543 { 537 {
538 u64 nn, pn;
544 struct mptfc_rport_info *ri; 539 struct mptfc_rport_info *ri;
545 ri = *((struct mptfc_rport_info **)rport->dd_data); 540 ri = *((struct mptfc_rport_info **)rport->dd_data);
541 pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
542 nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
546 dfcprintk ((MYIOC_s_INFO_FMT 543 dfcprintk ((MYIOC_s_INFO_FMT
547 "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, " 544 "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
548 "CurrentTargetID %d, %x %llx %llx\n", 545 "CurrentTargetID %d, %x %llx %llx\n",
@@ -550,7 +547,9 @@ mptfc_slave_alloc(struct scsi_device *sdev)
550 sdev->host->host_no, 547 sdev->host->host_no,
551 vtarget->num_luns, 548 vtarget->num_luns,
552 sdev->id, ri->pg0.CurrentTargetID, 549 sdev->id, ri->pg0.CurrentTargetID,
553 ri->pg0.PortIdentifier, ri->pg0.WWPN, ri->pg0.WWNN)); 550 ri->pg0.PortIdentifier,
551 (unsigned long long)pn,
552 (unsigned long long)nn));
554 } 553 }
555#endif 554#endif
556 555
@@ -570,11 +569,31 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
570 done(SCpnt); 569 done(SCpnt);
571 return 0; 570 return 0;
572 } 571 }
572
573 /* dd_data is null until finished adding target */
573 ri = *((struct mptfc_rport_info **)rport->dd_data); 574 ri = *((struct mptfc_rport_info **)rport->dd_data);
574 if (unlikely(ri->remap_needed)) 575 if (unlikely(!ri)) {
575 return SCSI_MLQUEUE_HOST_BUSY; 576 dfcprintk ((MYIOC_s_INFO_FMT
577 "mptfc_qcmd.%d: %d:%d, dd_data is null.\n",
578 ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name,
579 ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no,
580 SCpnt->device->id,SCpnt->device->lun));
581 SCpnt->result = DID_IMM_RETRY << 16;
582 done(SCpnt);
583 return 0;
584 }
576 585
577 return mptscsih_qcmd(SCpnt,done); 586 err = mptscsih_qcmd(SCpnt,done);
587#ifdef DMPT_DEBUG_FC
588 if (unlikely(err)) {
589 dfcprintk ((MYIOC_s_INFO_FMT
590 "mptfc_qcmd.%d: %d:%d, mptscsih_qcmd returns non-zero.\n",
591 ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name,
592 ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no,
593 SCpnt->device->id,SCpnt->device->lun));
594 }
595#endif
596 return err;
578} 597}
579 598
580static void 599static void
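
mptfc_qcmd() now tolerates the window where the remote port exists but rport->dd_data has not been filled in yet: the command is completed with DID_IMM_RETRY so the midlayer resubmits it, rather than the driver dereferencing a NULL pointer. A kernel-style fragment sketching just that guard, assuming the driver's headers; the function name is illustrative and the done() convention follows the old queuecommand prototype used in this file:

    /* Sketch: retry the command while dd_data is still NULL
     * (rport registered but not yet fully set up as a target). */
    static int example_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
    {
            struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
            struct mptfc_rport_info *ri;

            ri = *((struct mptfc_rport_info **)rport->dd_data);
            if (unlikely(!ri)) {
                    SCpnt->result = DID_IMM_RETRY << 16;    /* midlayer resubmits */
                    done(SCpnt);
                    return 0;
            }

            return mptscsih_qcmd(SCpnt, done);
    }
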
@@ -615,18 +634,17 @@ mptfc_rescan_devices(void *arg)
615 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 634 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
616 int ii; 635 int ii;
617 int work_to_do; 636 int work_to_do;
637 u64 pn;
618 unsigned long flags; 638 unsigned long flags;
619 struct mptfc_rport_info *ri; 639 struct mptfc_rport_info *ri;
620 640
621 do { 641 do {
622 /* start by tagging all ports as missing */ 642 /* start by tagging all ports as missing */
623 spin_lock_irqsave(&ioc->fc_rport_lock,flags);
624 list_for_each_entry(ri, &ioc->fc_rports, list) { 643 list_for_each_entry(ri, &ioc->fc_rports, list) {
625 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { 644 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
626 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; 645 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
627 } 646 }
628 } 647 }
629 spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
630 648
631 /* 649 /*
632 * now rescan devices known to adapter, 650 * now rescan devices known to adapter,
@@ -639,33 +657,24 @@ mptfc_rescan_devices(void *arg)
639 } 657 }
640 658
641 /* delete devices still missing */ 659 /* delete devices still missing */
642 spin_lock_irqsave(&ioc->fc_rport_lock, flags);
643 list_for_each_entry(ri, &ioc->fc_rports, list) { 660 list_for_each_entry(ri, &ioc->fc_rports, list) {
644 /* if newly missing, delete it */ 661 /* if newly missing, delete it */
645 if ((ri->flags & (MPT_RPORT_INFO_FLAGS_REGISTERED | 662 if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
646 MPT_RPORT_INFO_FLAGS_MISSING))
647 == (MPT_RPORT_INFO_FLAGS_REGISTERED |
648 MPT_RPORT_INFO_FLAGS_MISSING)) {
649 663
650 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| 664 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
651 MPT_RPORT_INFO_FLAGS_MISSING); 665 MPT_RPORT_INFO_FLAGS_MISSING);
652 ri->remap_needed = 1; 666 fc_remote_port_delete(ri->rport); /* won't sleep */
653 fc_remote_port_delete(ri->rport);
654 /*
655 * remote port not really deleted 'cause
656 * binding is by WWPN and driver only
657 * registers FCP_TARGETs but cannot trust
658 * data structures.
659 */
660 ri->rport = NULL; 667 ri->rport = NULL;
668
669 pn = (u64)ri->pg0.WWPN.High << 32 |
670 (u64)ri->pg0.WWPN.Low;
661 dfcprintk ((MYIOC_s_INFO_FMT 671 dfcprintk ((MYIOC_s_INFO_FMT
662 "mptfc_rescan.%d: %llx deleted\n", 672 "mptfc_rescan.%d: %llx deleted\n",
663 ioc->name, 673 ioc->name,
664 ioc->sh->host_no, 674 ioc->sh->host_no,
665 ri->pg0.WWPN)); 675 (unsigned long long)pn));
666 } 676 }
667 } 677 }
668 spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
669 678
670 /* 679 /*
671 * allow multiple passes as target state 680 * allow multiple passes as target state
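
The rescan loop above follows a mark-and-sweep pattern that no longer needs fc_rport_lock: every registered entry is first tagged MISSING, the adapter is rescanned (which clears the tag on ports that are still present), and whatever is still tagged afterwards is handed to fc_remote_port_delete(). A compact sketch of the same idea with an illustrative flag set and list, not the driver's exact structures:

    #include <linux/kernel.h>
    #include <linux/list.h>

    #define RI_REGISTERED   0x01    /* illustrative flag values */
    #define RI_MISSING      0x02

    struct port_info {
            struct list_head list;
            unsigned int flags;
    };

    /* Phase 1: assume everything registered is gone until proven otherwise. */
    static void mark_all_missing(struct list_head *ports)
    {
            struct port_info *pi;

            list_for_each_entry(pi, ports, list)
                    if (pi->flags & RI_REGISTERED)
                            pi->flags |= RI_MISSING;
    }

    /* Phase 2, after the rescan cleared RI_MISSING on live ports:
     * drop whatever was not seen again. */
    static void sweep_missing(struct list_head *ports)
    {
            struct port_info *pi;

            list_for_each_entry(pi, ports, list) {
                    if (pi->flags & RI_MISSING) {
                            pi->flags &= ~(RI_REGISTERED | RI_MISSING);
                            /* transport-level delete, e.g. fc_remote_port_delete(),
                             * would go here */
                    }
            }
    }
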
@@ -870,10 +879,23 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
870 goto out_mptfc_probe; 879 goto out_mptfc_probe;
871 } 880 }
872 881
873 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { 882 /* initialize workqueue */
874 mptfc_init_host_attr(ioc,ii); 883
875 mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); 884 snprintf(ioc->fc_rescan_work_q_name, KOBJ_NAME_LEN, "mptfc_wq_%d",
876 } 885 sh->host_no);
886 ioc->fc_rescan_work_q =
887 create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
888 if (!ioc->fc_rescan_work_q)
889 goto out_mptfc_probe;
890
891 /*
892 * scan for rports -
893 * by doing it via the workqueue, some locking is eliminated
894 */
895
896 ioc->fc_rescan_work_count = 1;
897 queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
898 flush_workqueue(ioc->fc_rescan_work_q);
877 899
878 return 0; 900 return 0;
879 901
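
Instead of walking the ports synchronously in probe, the driver now names and creates a single-threaded workqueue per host, queues the first rescan on it and flushes it so the scan has finished before probe returns. A kernel-style sketch of that sequence, assuming the driver's headers; the work handler and error handling are illustrative, and INIT_WORK() is the three-argument form current in this kernel series:

    static void example_rescan(void *arg);      /* work handler, takes the adapter */

    static int example_probe_tail(MPT_ADAPTER *ioc, struct Scsi_Host *sh)
    {
            /* one serialized context per host for all rescan work */
            snprintf(ioc->fc_rescan_work_q_name, KOBJ_NAME_LEN,
                     "mptfc_wq_%d", sh->host_no);
            ioc->fc_rescan_work_q =
                    create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
            if (!ioc->fc_rescan_work_q)
                    return -ENOMEM;

            INIT_WORK(&ioc->fc_rescan_work, example_rescan, ioc);

            /* kick the initial scan and wait for it before probe returns */
            ioc->fc_rescan_work_count = 1;
            queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
            flush_workqueue(ioc->fc_rescan_work_q);
            return 0;
    }
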
@@ -949,8 +971,18 @@ mptfc_init(void)
949static void __devexit 971static void __devexit
950mptfc_remove(struct pci_dev *pdev) 972mptfc_remove(struct pci_dev *pdev)
951{ 973{
952 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 974 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
953 struct mptfc_rport_info *p, *n; 975 struct mptfc_rport_info *p, *n;
976 struct workqueue_struct *work_q;
977 unsigned long flags;
978
979 /* destroy workqueue */
980 if ((work_q=ioc->fc_rescan_work_q)) {
981 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
982 ioc->fc_rescan_work_q = NULL;
983 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
984 destroy_workqueue(work_q);
985 }
954 986
955 fc_remove_host(ioc->sh); 987 fc_remove_host(ioc->sh);
956 988
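
mptfc_remove() tears the queue down in the order its users expect: the pointer is cleared under fc_rescan_work_lock first, so the event and reset paths (which test the pointer under the same lock before queueing) can no longer submit work, and only then is the queue destroyed. A minimal sketch of that shutdown ordering, assuming the driver's adapter structure:

    static void example_shutdown_rescan_q(MPT_ADAPTER *ioc)
    {
            struct workqueue_struct *work_q;
            unsigned long flags;

            /* step 1: make the queue unreachable for new submitters */
            spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
            work_q = ioc->fc_rescan_work_q;
            ioc->fc_rescan_work_q = NULL;
            spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);

            /* step 2: flush and free it; nobody can queue to it any more */
            if (work_q)
                    destroy_workqueue(work_q);
    }
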
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index e9716b10acea..af6ec553ff7c 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -91,6 +91,7 @@ enum mptsas_hotplug_action {
91 MPTSAS_DEL_DEVICE, 91 MPTSAS_DEL_DEVICE,
92 MPTSAS_ADD_RAID, 92 MPTSAS_ADD_RAID,
93 MPTSAS_DEL_RAID, 93 MPTSAS_DEL_RAID,
94 MPTSAS_IGNORE_EVENT,
94}; 95};
95 96
96struct mptsas_hotplug_event { 97struct mptsas_hotplug_event {
@@ -298,6 +299,26 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
298 return rc; 299 return rc;
299} 300}
300 301
302/*
303 * Returns true if there is a scsi end device
304 */
305static inline int
306mptsas_is_end_device(struct mptsas_devinfo * attached)
307{
308 if ((attached->handle) &&
309 (attached->device_info &
310 MPI_SAS_DEVICE_INFO_END_DEVICE) &&
311 ((attached->device_info &
312 MPI_SAS_DEVICE_INFO_SSP_TARGET) |
313 (attached->device_info &
314 MPI_SAS_DEVICE_INFO_STP_TARGET) |
315 (attached->device_info &
316 MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
317 return 1;
318 else
319 return 0;
320}
321
301static int 322static int
302mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, 323mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
303 u32 form, u32 form_specific) 324 u32 form, u32 form_specific)
@@ -872,7 +893,11 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
872 SasDevicePage0_t *buffer; 893 SasDevicePage0_t *buffer;
873 dma_addr_t dma_handle; 894 dma_addr_t dma_handle;
874 __le64 sas_address; 895 __le64 sas_address;
875 int error; 896 int error=0;
897
898 if (ioc->sas_discovery_runtime &&
899 mptsas_is_end_device(device_info))
900 goto out;
876 901
877 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; 902 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
878 hdr.ExtPageLength = 0; 903 hdr.ExtPageLength = 0;
@@ -1009,7 +1034,11 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1009 CONFIGPARMS cfg; 1034 CONFIGPARMS cfg;
1010 SasExpanderPage1_t *buffer; 1035 SasExpanderPage1_t *buffer;
1011 dma_addr_t dma_handle; 1036 dma_addr_t dma_handle;
1012 int error; 1037 int error=0;
1038
1039 if (ioc->sas_discovery_runtime &&
1040 mptsas_is_end_device(&phy_info->attached))
1041 goto out;
1013 1042
1014 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; 1043 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1015 hdr.ExtPageLength = 0; 1044 hdr.ExtPageLength = 0;
@@ -1068,26 +1097,6 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1068 return error; 1097 return error;
1069} 1098}
1070 1099
1071/*
1072 * Returns true if there is a scsi end device
1073 */
1074static inline int
1075mptsas_is_end_device(struct mptsas_devinfo * attached)
1076{
1077 if ((attached->handle) &&
1078 (attached->device_info &
1079 MPI_SAS_DEVICE_INFO_END_DEVICE) &&
1080 ((attached->device_info &
1081 MPI_SAS_DEVICE_INFO_SSP_TARGET) |
1082 (attached->device_info &
1083 MPI_SAS_DEVICE_INFO_STP_TARGET) |
1084 (attached->device_info &
1085 MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
1086 return 1;
1087 else
1088 return 0;
1089}
1090
1091static void 1100static void
1092mptsas_parse_device_info(struct sas_identify *identify, 1101mptsas_parse_device_info(struct sas_identify *identify,
1093 struct mptsas_devinfo *device_info) 1102 struct mptsas_devinfo *device_info)
@@ -1737,6 +1746,9 @@ mptsas_hotplug_work(void *arg)
1737 break; 1746 break;
1738 case MPTSAS_ADD_DEVICE: 1747 case MPTSAS_ADD_DEVICE:
1739 1748
1749 if (ev->phys_disk_num_valid)
1750 mpt_findImVolumes(ioc);
1751
1740 /* 1752 /*
1741 * Refresh sas device pg0 data 1753 * Refresh sas device pg0 data
1742 */ 1754 */
@@ -1868,6 +1880,9 @@ mptsas_hotplug_work(void *arg)
1868 scsi_device_put(sdev); 1880 scsi_device_put(sdev);
1869 mpt_findImVolumes(ioc); 1881 mpt_findImVolumes(ioc);
1870 break; 1882 break;
1883 case MPTSAS_IGNORE_EVENT:
1884 default:
1885 break;
1871 } 1886 }
1872 1887
1873 kfree(ev); 1888 kfree(ev);
@@ -1940,7 +1955,8 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1940 EVENT_DATA_RAID *raid_event_data) 1955 EVENT_DATA_RAID *raid_event_data)
1941{ 1956{
1942 struct mptsas_hotplug_event *ev; 1957 struct mptsas_hotplug_event *ev;
1943 RAID_VOL0_STATUS * volumeStatus; 1958 int status = le32_to_cpu(raid_event_data->SettingsStatus);
1959 int state = (status >> 8) & 0xff;
1944 1960
1945 if (ioc->bus_type != SAS) 1961 if (ioc->bus_type != SAS)
1946 return; 1962 return;
@@ -1955,6 +1971,7 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1955 INIT_WORK(&ev->work, mptsas_hotplug_work, ev); 1971 INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
1956 ev->ioc = ioc; 1972 ev->ioc = ioc;
1957 ev->id = raid_event_data->VolumeID; 1973 ev->id = raid_event_data->VolumeID;
1974 ev->event_type = MPTSAS_IGNORE_EVENT;
1958 1975
1959 switch (raid_event_data->ReasonCode) { 1976 switch (raid_event_data->ReasonCode) {
1960 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: 1977 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
@@ -1966,6 +1983,25 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1966 ev->phys_disk_num = raid_event_data->PhysDiskNum; 1983 ev->phys_disk_num = raid_event_data->PhysDiskNum;
1967 ev->event_type = MPTSAS_DEL_DEVICE; 1984 ev->event_type = MPTSAS_DEL_DEVICE;
1968 break; 1985 break;
1986 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
1987 switch (state) {
1988 case MPI_PD_STATE_ONLINE:
1989 ioc->raid_data.isRaid = 1;
1990 ev->phys_disk_num_valid = 1;
1991 ev->phys_disk_num = raid_event_data->PhysDiskNum;
1992 ev->event_type = MPTSAS_ADD_DEVICE;
1993 break;
1994 case MPI_PD_STATE_MISSING:
1995 case MPI_PD_STATE_NOT_COMPATIBLE:
1996 case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
1997 case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
1998 case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
1999 ev->event_type = MPTSAS_DEL_DEVICE;
2000 break;
2001 default:
2002 break;
2003 }
2004 break;
1969 case MPI_EVENT_RAID_RC_VOLUME_DELETED: 2005 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
1970 ev->event_type = MPTSAS_DEL_RAID; 2006 ev->event_type = MPTSAS_DEL_RAID;
1971 break; 2007 break;
@@ -1973,11 +2009,18 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1973 ev->event_type = MPTSAS_ADD_RAID; 2009 ev->event_type = MPTSAS_ADD_RAID;
1974 break; 2010 break;
1975 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: 2011 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
1976 volumeStatus = (RAID_VOL0_STATUS *) & 2012 switch (state) {
1977 raid_event_data->SettingsStatus; 2013 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
1978 ev->event_type = (volumeStatus->State == 2014 case MPI_RAIDVOL0_STATUS_STATE_MISSING:
1979 MPI_RAIDVOL0_STATUS_STATE_FAILED) ? 2015 ev->event_type = MPTSAS_DEL_RAID;
1980 MPTSAS_DEL_RAID : MPTSAS_ADD_RAID; 2016 break;
2017 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
2018 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
2019 ev->event_type = MPTSAS_ADD_RAID;
2020 break;
2021 default:
2022 break;
2023 }
1981 break; 2024 break;
1982 default: 2025 default:
1983 break; 2026 break;
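
Both RAID reason codes are now decoded the same way: the little-endian SettingsStatus word is converted once, the state lives in bits 8-15, and a switch over the MPI_PD_STATE_* / MPI_RAIDVOL0_STATUS_STATE_* values replaces the cast to a status structure. A standalone sketch of just the byte extraction; the constants below are placeholders, not the MPI definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define EX_STATE_FAILED 0x03    /* placeholder value for the demonstration */

    static unsigned int state_from_status(uint32_t status)
    {
            /* in the driver: status = le32_to_cpu(raid_event_data->SettingsStatus) */
            return (status >> 8) & 0xff;    /* state byte: bits 8..15 */
    }

    int main(void)
    {
            uint32_t raw = 0x00000300;      /* example word: state byte = 0x03 */

            printf("state = %#x (failed? %s)\n", state_from_status(raw),
                   state_from_status(raw) == EX_STATE_FAILED ? "yes" : "no");
            return 0;
    }
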
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 3729062db317..84fa271eb8f4 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -632,7 +632,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
632 632
633 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ 633 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
634 /* Spoof to SCSI Selection Timeout! */ 634 /* Spoof to SCSI Selection Timeout! */
635 sc->result = DID_NO_CONNECT << 16; 635 if (ioc->bus_type != FC)
636 sc->result = DID_NO_CONNECT << 16;
637 /* else fibre, just stall until rescan event */
638 else
639 sc->result = DID_REQUEUE << 16;
636 640
637 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) 641 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
638 hd->sel_timeout[pScsiReq->TargetID]++; 642 hd->sel_timeout[pScsiReq->TargetID]++;
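
For FC the "device not there" IOC status is now mapped to DID_REQUEUE rather than DID_NO_CONNECT, so commands stall and get retried until the rescan event sorts the rport out instead of failing outright. The SCSI result carries the host byte in bits 16-23; a tiny kernel-style fragment of that mapping, with an illustrative bus-type flag:

    /* Sketch: pick the host byte for a "device not there" completion. */
    static void set_not_there_result(struct scsi_cmnd *sc, int is_fc)
    {
            if (!is_fc)
                    sc->result = DID_NO_CONNECT << 16;      /* fail to the midlayer */
            else
                    sc->result = DID_REQUEUE << 16;         /* stall until rescan */
    }
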
@@ -877,7 +881,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
877 struct scsi_cmnd *sc; 881 struct scsi_cmnd *sc;
878 882
879 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n", 883 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
880 vdevice->target_id, vdevice->lun, max)); 884 vdevice->vtarget->target_id, vdevice->lun, max));
881 885
882 for (ii=0; ii < max; ii++) { 886 for (ii=0; ii < max; ii++) {
883 if ((sc = hd->ScsiLookup[ii]) != NULL) { 887 if ((sc = hd->ScsiLookup[ii]) != NULL) {
@@ -1645,7 +1649,6 @@ int
1645mptscsih_abort(struct scsi_cmnd * SCpnt) 1649mptscsih_abort(struct scsi_cmnd * SCpnt)
1646{ 1650{
1647 MPT_SCSI_HOST *hd; 1651 MPT_SCSI_HOST *hd;
1648 MPT_ADAPTER *ioc;
1649 MPT_FRAME_HDR *mf; 1652 MPT_FRAME_HDR *mf;
1650 u32 ctx2abort; 1653 u32 ctx2abort;
1651 int scpnt_idx; 1654 int scpnt_idx;
@@ -1663,14 +1666,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1663 return FAILED; 1666 return FAILED;
1664 } 1667 }
1665 1668
1666 ioc = hd->ioc;
1667 if (hd->resetPending) {
1668 return FAILED;
1669 }
1670
1671 if (hd->timeouts < -1)
1672 hd->timeouts++;
1673
1674 /* Find this command 1669 /* Find this command
1675 */ 1670 */
1676 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { 1671 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
@@ -1684,6 +1679,13 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1684 return SUCCESS; 1679 return SUCCESS;
1685 } 1680 }
1686 1681
1682 if (hd->resetPending) {
1683 return FAILED;
1684 }
1685
1686 if (hd->timeouts < -1)
1687 hd->timeouts++;
1688
1687 printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n", 1689 printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
1688 hd->ioc->name, SCpnt); 1690 hd->ioc->name, SCpnt);
1689 scsi_print_command(SCpnt); 1691 scsi_print_command(SCpnt);
@@ -1703,7 +1705,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1703 vdev = SCpnt->device->hostdata; 1705 vdev = SCpnt->device->hostdata;
1704 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 1706 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
1705 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, 1707 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun,
1706 ctx2abort, mptscsih_get_tm_timeout(ioc)); 1708 ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
1707 1709
1708 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", 1710 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
1709 hd->ioc->name, 1711 hd->ioc->name,
@@ -2521,15 +2523,15 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2521 2523
2522 /* 7. FC: Rescan for blocked rports which might have returned. 2524 /* 7. FC: Rescan for blocked rports which might have returned.
2523 */ 2525 */
2524 else if (ioc->bus_type == FC) { 2526 if (ioc->bus_type == FC) {
2525 int work_count;
2526 unsigned long flags;
2527
2528 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 2527 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
2529 work_count = ++ioc->fc_rescan_work_count; 2528 if (ioc->fc_rescan_work_q) {
2529 if (ioc->fc_rescan_work_count++ == 0) {
2530 queue_work(ioc->fc_rescan_work_q,
2531 &ioc->fc_rescan_work);
2532 }
2533 }
2530 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 2534 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
2531 if (work_count == 1)
2532 schedule_work(&ioc->fc_rescan_work);
2533 } 2535 }
2534 dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name)); 2536 dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name));
2535 2537
@@ -2544,7 +2546,6 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2544{ 2546{
2545 MPT_SCSI_HOST *hd; 2547 MPT_SCSI_HOST *hd;
2546 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; 2548 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
2547 int work_count;
2548 unsigned long flags; 2549 unsigned long flags;
2549 2550
2550 devtverboseprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", 2551 devtverboseprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
@@ -2569,10 +2570,13 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2569 2570
2570 case MPI_EVENT_RESCAN: /* 06 */ 2571 case MPI_EVENT_RESCAN: /* 06 */
2571 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 2572 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
2572 work_count = ++ioc->fc_rescan_work_count; 2573 if (ioc->fc_rescan_work_q) {
2574 if (ioc->fc_rescan_work_count++ == 0) {
2575 queue_work(ioc->fc_rescan_work_q,
2576 &ioc->fc_rescan_work);
2577 }
2578 }
2573 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 2579 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
2574 if (work_count == 1)
2575 schedule_work(&ioc->fc_rescan_work);
2576 break; 2580 break;
2577 2581
2578 /* 2582 /*
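
Both the reset handler and the MPI_EVENT_RESCAN path now queue the rescan only while the dedicated queue still exists, and only when the pending counter goes from zero to non-zero, all under fc_rescan_work_lock; the old schedule_work()-outside-the-lock variant could race with adapter teardown. A sketch of that guarded submission, mirroring the driver's fields and keeping the post-increment test as in the patch:

    static void example_request_rescan(MPT_ADAPTER *ioc)
    {
            unsigned long flags;

            spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
            if (ioc->fc_rescan_work_q) {                    /* NULL once remove() ran */
                    if (ioc->fc_rescan_work_count++ == 0)   /* only the first request */
                            queue_work(ioc->fc_rescan_work_q,
                                       &ioc->fc_rescan_work);
            }
            spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
    }
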
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 09c745b19cc8..f2a4d382ea19 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -783,6 +783,70 @@ static struct pci_device_id mptspi_pci_table[] = {
783}; 783};
784MODULE_DEVICE_TABLE(pci, mptspi_pci_table); 784MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
785 785
786
787/*
788 * renegotiate for a given target
789 */
790static void
791mptspi_dv_renegotiate_work(void *data)
792{
793 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
794 struct _MPT_SCSI_HOST *hd = wqw->hd;
795 struct scsi_device *sdev;
796
797 kfree(wqw);
798
799 shost_for_each_device(sdev, hd->ioc->sh)
800 mptspi_dv_device(hd, sdev);
801}
802
803static void
804mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
805{
806 struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
807
808 if (!wqw)
809 return;
810
811 INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
812 wqw->hd = hd;
813
814 schedule_work(&wqw->work);
815}
816
817/*
818 * spi module reset handler
819 */
820static int
821mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
822{
823 struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
824 int rc;
825
826 rc = mptscsih_ioc_reset(ioc, reset_phase);
827
828 if (reset_phase == MPT_IOC_POST_RESET)
829 mptspi_dv_renegotiate(hd);
830
831 return rc;
832}
833
834/*
835 * spi module resume handler
836 */
837static int
838mptspi_resume(struct pci_dev *pdev)
839{
840 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
841 struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
842 int rc;
843
844 rc = mptscsih_resume(pdev);
845 mptspi_dv_renegotiate(hd);
846
847 return rc;
848}
849
786/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 850/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
787/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 851/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
788/* 852/*
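
The new SPI renegotiation path wraps the host pointer in a small kmalloc()ed work item, schedules it, and has the handler free the wrapper before doing the slow per-device domain validation, so the reset and resume callers return immediately. A sketch of that wrapper pattern using the three-argument INIT_WORK() of this kernel series; the work_queue_wrapper layout is assumed from its use in the hunk above:

    struct work_queue_wrapper {
            struct work_struct work;
            struct _MPT_SCSI_HOST *hd;
    };

    static void example_renegotiate_work(void *data)
    {
            struct work_queue_wrapper *wqw = data;
            struct _MPT_SCSI_HOST *hd = wqw->hd;

            kfree(wqw);                     /* wrapper is single-use */
            /* ... slow per-device work runs here, in process context ... */
            (void)hd;
    }

    static void example_renegotiate(struct _MPT_SCSI_HOST *hd)
    {
            struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);

            if (!wqw)                       /* best effort: silently skip */
                    return;

            INIT_WORK(&wqw->work, example_renegotiate_work, wqw);
            wqw->hd = hd;
            schedule_work(&wqw->work);
    }
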
@@ -1032,7 +1096,7 @@ static struct pci_driver mptspi_driver = {
1032 .shutdown = mptscsih_shutdown, 1096 .shutdown = mptscsih_shutdown,
1033#ifdef CONFIG_PM 1097#ifdef CONFIG_PM
1034 .suspend = mptscsih_suspend, 1098 .suspend = mptscsih_suspend,
1035 .resume = mptscsih_resume, 1099 .resume = mptspi_resume,
1036#endif 1100#endif
1037}; 1101};
1038 1102
@@ -1061,7 +1125,7 @@ mptspi_init(void)
1061 ": Registered for IOC event notifications\n")); 1125 ": Registered for IOC event notifications\n"));
1062 } 1126 }
1063 1127
1064 if (mpt_reset_register(mptspiDoneCtx, mptscsih_ioc_reset) == 0) { 1128 if (mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset) == 0) {
1065 dprintk((KERN_INFO MYNAM 1129 dprintk((KERN_INFO MYNAM
1066 ": Registered for IOC reset notifications\n")); 1130 ": Registered for IOC reset notifications\n"));
1067 } 1131 }
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 1363083b4d83..14dbad14afb6 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -52,6 +52,7 @@
52#include <linux/mii.h> 52#include <linux/mii.h>
53#include <linux/skbuff.h> 53#include <linux/skbuff.h>
54#include <linux/delay.h> 54#include <linux/delay.h>
55#include <linux/crc32.h>
55#include <asm/mipsregs.h> 56#include <asm/mipsregs.h>
56#include <asm/irq.h> 57#include <asm/irq.h>
57#include <asm/io.h> 58#include <asm/io.h>
@@ -2070,23 +2071,6 @@ static void au1000_tx_timeout(struct net_device *dev)
2070 netif_wake_queue(dev); 2071 netif_wake_queue(dev);
2071} 2072}
2072 2073
2073
2074static unsigned const ethernet_polynomial = 0x04c11db7U;
2075static inline u32 ether_crc(int length, unsigned char *data)
2076{
2077 int crc = -1;
2078
2079 while(--length >= 0) {
2080 unsigned char current_octet = *data++;
2081 int bit;
2082 for (bit = 0; bit < 8; bit++, current_octet >>= 1)
2083 crc = (crc << 1) ^
2084 ((crc < 0) ^ (current_octet & 1) ?
2085 ethernet_polynomial : 0);
2086 }
2087 return crc;
2088}
2089
2090static void set_rx_mode(struct net_device *dev) 2074static void set_rx_mode(struct net_device *dev)
2091{ 2075{
2092 struct au1000_private *aup = (struct au1000_private *) dev->priv; 2076 struct au1000_private *aup = (struct au1000_private *) dev->priv;
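
The open-coded bitwise CRC above is dropped in favour of ether_crc() from <linux/crc32.h>, which the file now includes; multicast filtering keeps using the top bits of the same CRC-32 value. A sketch of a typical hash computation with that helper; the 6-bit hash width and 64-entry bitmap are illustrative rather than au1000-specific:

    #include <linux/crc32.h>
    #include <linux/etherdevice.h>

    /* Hash a multicast MAC address into a 64-entry filter bitmap. */
    static u64 example_mc_hash_bit(const unsigned char addr[ETH_ALEN])
    {
            u32 crc = ether_crc(ETH_ALEN, addr);

            return 1ULL << (crc >> 26);     /* top 6 bits select the bucket */
    }
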
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 3d306681919e..d8233e0b7899 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -650,9 +650,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
650 650
651 /* Hardware bug work-around, the chip is unable to do PCI DMA 651 /* Hardware bug work-around, the chip is unable to do PCI DMA
652 to/from anything above 1GB :-( */ 652 to/from anything above 1GB :-( */
653 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { 653 if (dma_mapping_error(mapping) ||
654 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
654 /* Sigh... */ 655 /* Sigh... */
655 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 656 if (!dma_mapping_error(mapping))
657 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
656 dev_kfree_skb_any(skb); 658 dev_kfree_skb_any(skb);
657 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA); 659 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
658 if (skb == NULL) 660 if (skb == NULL)
@@ -660,8 +662,10 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
660 mapping = pci_map_single(bp->pdev, skb->data, 662 mapping = pci_map_single(bp->pdev, skb->data,
661 RX_PKT_BUF_SZ, 663 RX_PKT_BUF_SZ,
662 PCI_DMA_FROMDEVICE); 664 PCI_DMA_FROMDEVICE);
663 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { 665 if (dma_mapping_error(mapping) ||
664 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 666 mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
667 if (!dma_mapping_error(mapping))
668 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
665 dev_kfree_skb_any(skb); 669 dev_kfree_skb_any(skb);
666 return -ENOMEM; 670 return -ENOMEM;
667 } 671 }
@@ -967,9 +971,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
967 } 971 }
968 972
969 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); 973 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
970 if (mapping + len > B44_DMA_MASK) { 974 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
971 /* Chip can't handle DMA to/from >1GB, use bounce buffer */ 975 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
972 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); 976 if (!dma_mapping_error(mapping))
977 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
973 978
974 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, 979 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
975 GFP_ATOMIC|GFP_DMA); 980 GFP_ATOMIC|GFP_DMA);
@@ -978,8 +983,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
978 983
979 mapping = pci_map_single(bp->pdev, bounce_skb->data, 984 mapping = pci_map_single(bp->pdev, bounce_skb->data,
980 len, PCI_DMA_TODEVICE); 985 len, PCI_DMA_TODEVICE);
981 if (mapping + len > B44_DMA_MASK) { 986 if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
982 pci_unmap_single(bp->pdev, mapping, 987 if (!dma_mapping_error(mapping))
988 pci_unmap_single(bp->pdev, mapping,
983 len, PCI_DMA_TODEVICE); 989 len, PCI_DMA_TODEVICE);
984 dev_kfree_skb_any(bounce_skb); 990 dev_kfree_skb_any(bounce_skb);
985 goto err_out; 991 goto err_out;
@@ -1203,7 +1209,8 @@ static int b44_alloc_consistent(struct b44 *bp)
1203 DMA_TABLE_BYTES, 1209 DMA_TABLE_BYTES,
1204 DMA_BIDIRECTIONAL); 1210 DMA_BIDIRECTIONAL);
1205 1211
1206 if (rx_ring_dma + size > B44_DMA_MASK) { 1212 if (dma_mapping_error(rx_ring_dma) ||
1213 rx_ring_dma + size > B44_DMA_MASK) {
1207 kfree(rx_ring); 1214 kfree(rx_ring);
1208 goto out_err; 1215 goto out_err;
1209 } 1216 }
@@ -1229,7 +1236,8 @@ static int b44_alloc_consistent(struct b44 *bp)
1229 DMA_TABLE_BYTES, 1236 DMA_TABLE_BYTES,
1230 DMA_TO_DEVICE); 1237 DMA_TO_DEVICE);
1231 1238
1232 if (tx_ring_dma + size > B44_DMA_MASK) { 1239 if (dma_mapping_error(tx_ring_dma) ||
1240 tx_ring_dma + size > B44_DMA_MASK) {
1233 kfree(tx_ring); 1241 kfree(tx_ring);
1234 goto out_err; 1242 goto out_err;
1235 } 1243 }
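
Every pci_map_single() in b44 is now checked with dma_mapping_error() before the existing above-1 GB bounce test, and the unmap is only issued for mappings that actually succeeded. A kernel-style sketch of that check, using the single-argument dma_mapping_error() of this kernel series and the driver's own buffer-size and mask constants; the helper name is illustrative:

    static int example_map_rx_buffer(struct b44 *bp, struct sk_buff *skb,
                                     dma_addr_t *out)
    {
            dma_addr_t mapping = pci_map_single(bp->pdev, skb->data,
                                                RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

            if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                    /* only a successful mapping may be unmapped */
                    if (!dma_mapping_error(mapping))
                            pci_unmap_single(bp->pdev, mapping,
                                             RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                    return -ENOMEM;
            }

            *out = mapping;
            return 0;
    }
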
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 1f3627470c95..038447fb5c5e 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -53,6 +53,7 @@
53#define DRV_VERSION "v1.17b" 53#define DRV_VERSION "v1.17b"
54#define DRV_RELDATE "2006/03/10" 54#define DRV_RELDATE "2006/03/10"
55#include "dl2k.h" 55#include "dl2k.h"
56#include <linux/dma-mapping.h>
56 57
57static char version[] __devinitdata = 58static char version[] __devinitdata =
58 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; 59 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
@@ -765,7 +766,7 @@ rio_free_tx (struct net_device *dev, int irq)
765 break; 766 break;
766 skb = np->tx_skbuff[entry]; 767 skb = np->tx_skbuff[entry];
767 pci_unmap_single (np->pdev, 768 pci_unmap_single (np->pdev,
768 np->tx_ring[entry].fraginfo & 0xffffffffffff, 769 np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
769 skb->len, PCI_DMA_TODEVICE); 770 skb->len, PCI_DMA_TODEVICE);
770 if (irq) 771 if (irq)
771 dev_kfree_skb_irq (skb); 772 dev_kfree_skb_irq (skb);
@@ -893,7 +894,7 @@ receive_packet (struct net_device *dev)
893 /* Small skbuffs for short packets */ 894 /* Small skbuffs for short packets */
894 if (pkt_len > copy_thresh) { 895 if (pkt_len > copy_thresh) {
895 pci_unmap_single (np->pdev, 896 pci_unmap_single (np->pdev,
896 desc->fraginfo & 0xffffffffffff, 897 desc->fraginfo & DMA_48BIT_MASK,
897 np->rx_buf_sz, 898 np->rx_buf_sz,
898 PCI_DMA_FROMDEVICE); 899 PCI_DMA_FROMDEVICE);
899 skb_put (skb = np->rx_skbuff[entry], pkt_len); 900 skb_put (skb = np->rx_skbuff[entry], pkt_len);
@@ -901,7 +902,7 @@ receive_packet (struct net_device *dev)
901 } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { 902 } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
902 pci_dma_sync_single_for_cpu(np->pdev, 903 pci_dma_sync_single_for_cpu(np->pdev,
903 desc->fraginfo & 904 desc->fraginfo &
904 0xffffffffffff, 905 DMA_48BIT_MASK,
905 np->rx_buf_sz, 906 np->rx_buf_sz,
906 PCI_DMA_FROMDEVICE); 907 PCI_DMA_FROMDEVICE);
907 skb->dev = dev; 908 skb->dev = dev;
@@ -913,7 +914,7 @@ receive_packet (struct net_device *dev)
913 skb_put (skb, pkt_len); 914 skb_put (skb, pkt_len);
914 pci_dma_sync_single_for_device(np->pdev, 915 pci_dma_sync_single_for_device(np->pdev,
915 desc->fraginfo & 916 desc->fraginfo &
916 0xffffffffffff, 917 DMA_48BIT_MASK,
917 np->rx_buf_sz, 918 np->rx_buf_sz,
918 PCI_DMA_FROMDEVICE); 919 PCI_DMA_FROMDEVICE);
919 } 920 }
@@ -1800,7 +1801,7 @@ rio_close (struct net_device *dev)
1800 skb = np->rx_skbuff[i]; 1801 skb = np->rx_skbuff[i];
1801 if (skb) { 1802 if (skb) {
1802 pci_unmap_single(np->pdev, 1803 pci_unmap_single(np->pdev,
1803 np->rx_ring[i].fraginfo & 0xffffffffffff, 1804 np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
1804 skb->len, PCI_DMA_FROMDEVICE); 1805 skb->len, PCI_DMA_FROMDEVICE);
1805 dev_kfree_skb (skb); 1806 dev_kfree_skb (skb);
1806 np->rx_skbuff[i] = NULL; 1807 np->rx_skbuff[i] = NULL;
@@ -1810,7 +1811,7 @@ rio_close (struct net_device *dev)
1810 skb = np->tx_skbuff[i]; 1811 skb = np->tx_skbuff[i];
1811 if (skb) { 1812 if (skb) {
1812 pci_unmap_single(np->pdev, 1813 pci_unmap_single(np->pdev,
1813 np->tx_ring[i].fraginfo & 0xffffffffffff, 1814 np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
1814 skb->len, PCI_DMA_TODEVICE); 1815 skb->len, PCI_DMA_TODEVICE);
1815 dev_kfree_skb (skb); 1816 dev_kfree_skb (skb);
1816 np->tx_skbuff[i] = NULL; 1817 np->tx_skbuff[i] = NULL;
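
dl2k keeps the DMA address in the low 48 bits of the descriptor's fraginfo field; the patch replaces the bare 0xffffffffffff literal with DMA_48BIT_MASK from the newly included <linux/dma-mapping.h>. A tiny standalone sketch of that masking; the descriptor layout and upper-bit contents are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* 48-bit address mask, the same value the old literal spelled out */
    #define DMA_48BIT_MASK 0x0000ffffffffffffULL

    int main(void)
    {
            /* upper bits of fraginfo carry flags/length, low 48 the bus address */
            uint64_t fraginfo = 0xabcd000012345678ULL;
            uint64_t bus_addr = fraginfo & DMA_48BIT_MASK;

            printf("bus address = %#llx\n", (unsigned long long)bus_addr);
            return 0;
    }
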
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 27ab75f20799..c1ce2398efea 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -46,4 +46,4 @@ obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
46obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o 46obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
47 47
48# The SIR helper module 48# The SIR helper module
49sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o 49sir-dev-objs := sir_dev.o sir_dongle.o
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 96bdb73c2283..cd87593e4e8a 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1778,7 +1778,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1778 1778
1779 if (self->needspatch) { 1779 if (self->needspatch) {
1780 ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0), 1780 ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
1781 0x02, 0x40, 0, 0, 0, 0, msecs_to_jiffies(500)); 1781 0x02, 0x40, 0, 0, NULL, 0, 500);
1782 if (ret < 0) { 1782 if (ret < 0) {
1783 IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret); 1783 IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret);
1784 goto err_out_3; 1784 goto err_out_3;
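
The irda-usb fix passes a proper NULL for the unused data stage and a timeout of 500, since usb_control_msg() takes its timeout in milliseconds in this kernel series, not jiffies. A sketch of a zero-length vendor control request with that signature; the request and requesttype values are copied from the hunk, everything else is illustrative:

    #include <linux/usb.h>

    /* Sketch: zero-length vendor request; timeout argument is in milliseconds. */
    static int example_send_patch_trigger(struct usb_device *udev)
    {
            return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                   0x02,    /* bRequest, as in the hunk above */
                                   0x40,    /* bmRequestType: vendor, host-to-device */
                                   0, 0,    /* wValue, wIndex */
                                   NULL, 0, /* no data stage */
                                   500);    /* 500 ms */
    }
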
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f69fb4cec76f..9fa294a546d6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -15,23 +15,14 @@
15#define IRDA_SIR_H 15#define IRDA_SIR_H
16 16
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/workqueue.h>
18 19
19#include <net/irda/irda.h> 20#include <net/irda/irda.h>
20#include <net/irda/irda_device.h> // iobuff_t 21#include <net/irda/irda_device.h> // iobuff_t
21 22
22/* FIXME: unify irda_request with sir_fsm! */
23
24struct irda_request {
25 struct list_head lh_request;
26 unsigned long pending;
27 void (*func)(void *);
28 void *data;
29 struct timer_list timer;
30};
31
32struct sir_fsm { 23struct sir_fsm {
33 struct semaphore sem; 24 struct semaphore sem;
34 struct irda_request rq; 25 struct work_struct work;
35 unsigned state, substate; 26 unsigned state, substate;
36 int param; 27 int param;
37 int result; 28 int result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ea7c9464d46a..3b5854d10c17 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -23,6 +23,298 @@
23 23
24#include "sir-dev.h" 24#include "sir-dev.h"
25 25
26
27static struct workqueue_struct *irda_sir_wq;
28
29/* STATE MACHINE */
30
31/* substate handler of the config-fsm to handle the cases where we want
32 * to wait for transmit completion before changing the port configuration
33 */
34
35static int sirdev_tx_complete_fsm(struct sir_dev *dev)
36{
37 struct sir_fsm *fsm = &dev->fsm;
38 unsigned next_state, delay;
39 unsigned bytes_left;
40
41 do {
42 next_state = fsm->substate; /* default: stay in current substate */
43 delay = 0;
44
45 switch(fsm->substate) {
46
47 case SIRDEV_STATE_WAIT_XMIT:
48 if (dev->drv->chars_in_buffer)
49 bytes_left = dev->drv->chars_in_buffer(dev);
50 else
51 bytes_left = 0;
52 if (!bytes_left) {
53 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
54 break;
55 }
56
57 if (dev->speed > 115200)
58 delay = (bytes_left*8*10000) / (dev->speed/100);
59 else if (dev->speed > 0)
60 delay = (bytes_left*10*10000) / (dev->speed/100);
61 else
62 delay = 0;
63 /* expected delay (usec) until remaining bytes are sent */
64 if (delay < 100) {
65 udelay(delay);
66 delay = 0;
67 break;
68 }
69 /* sleep some longer delay (msec) */
70 delay = (delay+999) / 1000;
71 break;
72
73 case SIRDEV_STATE_WAIT_UNTIL_SENT:
74 /* block until underlaying hardware buffer are empty */
75 if (dev->drv->wait_until_sent)
76 dev->drv->wait_until_sent(dev);
77 next_state = SIRDEV_STATE_TX_DONE;
78 break;
79
80 case SIRDEV_STATE_TX_DONE:
81 return 0;
82
83 default:
84 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
85 return -EINVAL;
86 }
87 fsm->substate = next_state;
88 } while (delay == 0);
89 return delay;
90}
91
92/*
93 * Function sirdev_config_fsm
94 *
95 * State machine to handle the configuration of the device (and attached dongle, if any).
96 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
97 * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
98 * long. Instead, for longer delays we start a timer to reschedule us later.
99 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
100 * Both must be unlocked/restarted on completion - but only on final exit.
101 */
102
103static void sirdev_config_fsm(void *data)
104{
105 struct sir_dev *dev = data;
106 struct sir_fsm *fsm = &dev->fsm;
107 int next_state;
108 int ret = -1;
109 unsigned delay;
110
111 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
112
113 do {
114 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
115 __FUNCTION__, fsm->state, fsm->substate);
116
117 next_state = fsm->state;
118 delay = 0;
119
120 switch(fsm->state) {
121
122 case SIRDEV_STATE_DONGLE_OPEN:
123 if (dev->dongle_drv != NULL) {
124 ret = sirdev_put_dongle(dev);
125 if (ret) {
126 fsm->result = -EINVAL;
127 next_state = SIRDEV_STATE_ERROR;
128 break;
129 }
130 }
131
132 /* Initialize dongle */
133 ret = sirdev_get_dongle(dev, fsm->param);
134 if (ret) {
135 fsm->result = ret;
136 next_state = SIRDEV_STATE_ERROR;
137 break;
138 }
139
140 /* Dongles are powered through the modem control lines which
141 * were just set during open. Before resetting, let's wait for
142 * the power to stabilize. This is what some dongle drivers did
143 * in open before, while others didn't - should be safe anyway.
144 */
145
146 delay = 50;
147 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
148 next_state = SIRDEV_STATE_DONGLE_RESET;
149
150 fsm->param = 9600;
151
152 break;
153
154 case SIRDEV_STATE_DONGLE_CLOSE:
155 /* shouldn't we just treat this as success=? */
156 if (dev->dongle_drv == NULL) {
157 fsm->result = -EINVAL;
158 next_state = SIRDEV_STATE_ERROR;
159 break;
160 }
161
162 ret = sirdev_put_dongle(dev);
163 if (ret) {
164 fsm->result = ret;
165 next_state = SIRDEV_STATE_ERROR;
166 break;
167 }
168 next_state = SIRDEV_STATE_DONE;
169 break;
170
171 case SIRDEV_STATE_SET_DTR_RTS:
172 ret = sirdev_set_dtr_rts(dev,
173 (fsm->param&0x02) ? TRUE : FALSE,
174 (fsm->param&0x01) ? TRUE : FALSE);
175 next_state = SIRDEV_STATE_DONE;
176 break;
177
178 case SIRDEV_STATE_SET_SPEED:
179 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
180 next_state = SIRDEV_STATE_DONGLE_CHECK;
181 break;
182
183 case SIRDEV_STATE_DONGLE_CHECK:
184 ret = sirdev_tx_complete_fsm(dev);
185 if (ret < 0) {
186 fsm->result = ret;
187 next_state = SIRDEV_STATE_ERROR;
188 break;
189 }
190 if ((delay=ret) != 0)
191 break;
192
193 if (dev->dongle_drv) {
194 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
195 next_state = SIRDEV_STATE_DONGLE_RESET;
196 }
197 else {
198 dev->speed = fsm->param;
199 next_state = SIRDEV_STATE_PORT_SPEED;
200 }
201 break;
202
203 case SIRDEV_STATE_DONGLE_RESET:
204 if (dev->dongle_drv->reset) {
205 ret = dev->dongle_drv->reset(dev);
206 if (ret < 0) {
207 fsm->result = ret;
208 next_state = SIRDEV_STATE_ERROR;
209 break;
210 }
211 }
212 else
213 ret = 0;
214 if ((delay=ret) == 0) {
215 /* set serial port according to dongle default speed */
216 if (dev->drv->set_speed)
217 dev->drv->set_speed(dev, dev->speed);
218 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
219 next_state = SIRDEV_STATE_DONGLE_SPEED;
220 }
221 break;
222
223 case SIRDEV_STATE_DONGLE_SPEED:
224 if (dev->dongle_drv->reset) {
225 ret = dev->dongle_drv->set_speed(dev, fsm->param);
226 if (ret < 0) {
227 fsm->result = ret;
228 next_state = SIRDEV_STATE_ERROR;
229 break;
230 }
231 }
232 else
233 ret = 0;
234 if ((delay=ret) == 0)
235 next_state = SIRDEV_STATE_PORT_SPEED;
236 break;
237
238 case SIRDEV_STATE_PORT_SPEED:
239 /* Finally we are ready to change the serial port speed */
240 if (dev->drv->set_speed)
241 dev->drv->set_speed(dev, dev->speed);
242 dev->new_speed = 0;
243 next_state = SIRDEV_STATE_DONE;
244 break;
245
246 case SIRDEV_STATE_DONE:
247 /* Signal network layer so it can send more frames */
248 netif_wake_queue(dev->netdev);
249 next_state = SIRDEV_STATE_COMPLETE;
250 break;
251
252 default:
253 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
254 fsm->result = -EINVAL;
255 /* fall thru */
256
257 case SIRDEV_STATE_ERROR:
258 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
259
260#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
261 netif_stop_queue(dev->netdev);
262#else
263 netif_wake_queue(dev->netdev);
264#endif
265 /* fall thru */
266
267 case SIRDEV_STATE_COMPLETE:
268 /* config change finished, so we are not busy any longer */
269 sirdev_enable_rx(dev);
270 up(&fsm->sem);
271 return;
272 }
273 fsm->state = next_state;
274 } while(!delay);
275
276 queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
277}
278
279/* schedule some device configuration task for execution by kIrDAd
280 * on behalf of the above state machine.
281 * can be called from process or interrupt/tasklet context.
282 */
283
284int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
285{
286 struct sir_fsm *fsm = &dev->fsm;
287
288 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
289
290 if (down_trylock(&fsm->sem)) {
291 if (in_interrupt() || in_atomic() || irqs_disabled()) {
292 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
293 return -EWOULDBLOCK;
294 } else
295 down(&fsm->sem);
296 }
297
298 if (fsm->state == SIRDEV_STATE_DEAD) {
299 /* race with sirdev_close should never happen */
300 IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
301 up(&fsm->sem);
302 return -ESTALE; /* or better EPIPE? */
303 }
304
305 netif_stop_queue(dev->netdev);
306 atomic_set(&dev->enable_rx, 0);
307
308 fsm->state = initial_state;
309 fsm->param = param;
310 fsm->result = 0;
311
312 INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
313 queue_work(irda_sir_wq, &fsm->work);
314 return 0;
315}
316
317
26/***************************************************************************/ 318/***************************************************************************/
27 319
28void sirdev_enable_rx(struct sir_dev *dev) 320void sirdev_enable_rx(struct sir_dev *dev)
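
The block added above replaces sir_kthread.c's hand-rolled kIrDAd thread, request list and timer with one module-wide single-threaded workqueue: the config state machine runs as a work item and, whenever it has to wait, reschedules itself with queue_delayed_work() instead of arming a private timer. A condensed sketch of that self-rescheduling shape, using the work APIs of this kernel series; the handler body is elided and names are shortened for illustration:

    static struct workqueue_struct *example_wq;

    static void example_fsm(void *data)
    {
            struct sir_dev *dev = data;
            unsigned delay = 0;

            /* run as many states as possible; a state that must wait sets delay */
            /* ... state handling elided ... */

            if (delay)                      /* come back later on the same queue */
                    queue_delayed_work(example_wq, &dev->fsm.work,
                                       msecs_to_jiffies(delay));
            else
                    up(&dev->fsm.sem);      /* final exit: release the config lock */
    }

    static int __init example_init(void)
    {
            example_wq = create_singlethread_workqueue("irda_sir_wq");
            return example_wq ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(example_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
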
@@ -619,10 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
619 spin_lock_init(&dev->tx_lock); 911 spin_lock_init(&dev->tx_lock);
620 init_MUTEX(&dev->fsm.sem); 912 init_MUTEX(&dev->fsm.sem);
621 913
622 INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
623 dev->fsm.rq.pending = 0;
624 init_timer(&dev->fsm.rq.timer);
625
626 dev->drv = drv; 914 dev->drv = drv;
627 dev->netdev = ndev; 915 dev->netdev = ndev;
628 916
@@ -682,3 +970,22 @@ int sirdev_put_instance(struct sir_dev *dev)
682} 970}
683EXPORT_SYMBOL(sirdev_put_instance); 971EXPORT_SYMBOL(sirdev_put_instance);
684 972
973static int __init sir_wq_init(void)
974{
975 irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
976 if (!irda_sir_wq)
977 return -ENOMEM;
978 return 0;
979}
980
981static void __exit sir_wq_exit(void)
982{
983 destroy_workqueue(irda_sir_wq);
984}
985
986module_init(sir_wq_init);
987module_exit(sir_wq_exit);
988
989MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
990MODULE_DESCRIPTION("IrDA SIR core");
991MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
deleted file mode 100644
index e3904d6bfecd..000000000000
--- a/drivers/net/irda/sir_kthread.c
+++ /dev/null
@@ -1,508 +0,0 @@
1/*********************************************************************
2 *
3 * sir_kthread.c: dedicated thread to process scheduled
4 * sir device setup requests
5 *
6 * Copyright (c) 2002 Martin Diehl
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 ********************************************************************/
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/version.h>
18#include <linux/init.h>
19#include <linux/smp_lock.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
22
23#include <net/irda/irda.h>
24
25#include "sir-dev.h"
26
27/**************************************************************************
28 *
29 * kIrDAd kernel thread and config state machine
30 *
31 */
32
33struct irda_request_queue {
34 struct list_head request_list;
35 spinlock_t lock;
36 task_t *thread;
37 struct completion exit;
38 wait_queue_head_t kick, done;
39 atomic_t num_pending;
40};
41
42static struct irda_request_queue irda_rq_queue;
43
44static int irda_queue_request(struct irda_request *rq)
45{
46 int ret = 0;
47 unsigned long flags;
48
49 if (!test_and_set_bit(0, &rq->pending)) {
50 spin_lock_irqsave(&irda_rq_queue.lock, flags);
51 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
52 wake_up(&irda_rq_queue.kick);
53 atomic_inc(&irda_rq_queue.num_pending);
54 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
55 ret = 1;
56 }
57 return ret;
58}
59
60static void irda_request_timer(unsigned long data)
61{
62 struct irda_request *rq = (struct irda_request *)data;
63 unsigned long flags;
64
65 spin_lock_irqsave(&irda_rq_queue.lock, flags);
66 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
67 wake_up(&irda_rq_queue.kick);
68 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
69}
70
71static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
72{
73 int ret = 0;
74 struct timer_list *timer = &rq->timer;
75
76 if (!test_and_set_bit(0, &rq->pending)) {
77 timer->expires = jiffies + delay;
78 timer->function = irda_request_timer;
79 timer->data = (unsigned long)rq;
80 atomic_inc(&irda_rq_queue.num_pending);
81 add_timer(timer);
82 ret = 1;
83 }
84 return ret;
85}
86
87static void run_irda_queue(void)
88{
89 unsigned long flags;
90 struct list_head *entry, *tmp;
91 struct irda_request *rq;
92
93 spin_lock_irqsave(&irda_rq_queue.lock, flags);
94 list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
95 rq = list_entry(entry, struct irda_request, lh_request);
96 list_del_init(entry);
97 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
98
99 clear_bit(0, &rq->pending);
100 rq->func(rq->data);
101
102 if (atomic_dec_and_test(&irda_rq_queue.num_pending))
103 wake_up(&irda_rq_queue.done);
104
105 spin_lock_irqsave(&irda_rq_queue.lock, flags);
106 }
107 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
108}
109
110static int irda_thread(void *startup)
111{
112 DECLARE_WAITQUEUE(wait, current);
113
114 daemonize("kIrDAd");
115
116 irda_rq_queue.thread = current;
117
118 complete((struct completion *)startup);
119
120 while (irda_rq_queue.thread != NULL) {
121
122 /* We use TASK_INTERRUPTIBLE, rather than
123 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
124 * change ; he told me that it is safe, because "signal
125 * blocking is now handled in daemonize()", he added
126 * that the problem is that "uninterruptible sleep
127 * contributes to load average", making user worry.
128 * Jean II */
129 set_task_state(current, TASK_INTERRUPTIBLE);
130 add_wait_queue(&irda_rq_queue.kick, &wait);
131 if (list_empty(&irda_rq_queue.request_list))
132 schedule();
133 else
134 __set_task_state(current, TASK_RUNNING);
135 remove_wait_queue(&irda_rq_queue.kick, &wait);
136
137 /* make swsusp happy with our thread */
138 try_to_freeze();
139
140 run_irda_queue();
141 }
142
143#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
144 reparent_to_init();
145#endif
146 complete_and_exit(&irda_rq_queue.exit, 0);
147 /* never reached */
148 return 0;
149}
150
151
152static void flush_irda_queue(void)
153{
154 if (atomic_read(&irda_rq_queue.num_pending)) {
155
156 DECLARE_WAITQUEUE(wait, current);
157
158 if (!list_empty(&irda_rq_queue.request_list))
159 run_irda_queue();
160
161 set_task_state(current, TASK_UNINTERRUPTIBLE);
162 add_wait_queue(&irda_rq_queue.done, &wait);
163 if (atomic_read(&irda_rq_queue.num_pending))
164 schedule();
165 else
166 __set_task_state(current, TASK_RUNNING);
167 remove_wait_queue(&irda_rq_queue.done, &wait);
168 }
169}
170
171/* substate handler of the config-fsm to handle the cases where we want
172 * to wait for transmit completion before changing the port configuration
173 */
174
175static int irda_tx_complete_fsm(struct sir_dev *dev)
176{
177 struct sir_fsm *fsm = &dev->fsm;
178 unsigned next_state, delay;
179 unsigned bytes_left;
180
181 do {
182 next_state = fsm->substate; /* default: stay in current substate */
183 delay = 0;
184
185 switch(fsm->substate) {
186
187 case SIRDEV_STATE_WAIT_XMIT:
188 if (dev->drv->chars_in_buffer)
189 bytes_left = dev->drv->chars_in_buffer(dev);
190 else
191 bytes_left = 0;
192 if (!bytes_left) {
193 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
194 break;
195 }
196
197 if (dev->speed > 115200)
198 delay = (bytes_left*8*10000) / (dev->speed/100);
199 else if (dev->speed > 0)
200 delay = (bytes_left*10*10000) / (dev->speed/100);
201 else
202 delay = 0;
203 /* expected delay (usec) until remaining bytes are sent */
204 if (delay < 100) {
205 udelay(delay);
206 delay = 0;
207 break;
208 }
209 /* sleep some longer delay (msec) */
210 delay = (delay+999) / 1000;
211 break;
212
213 case SIRDEV_STATE_WAIT_UNTIL_SENT:
214 /* block until underlaying hardware buffer are empty */
215 if (dev->drv->wait_until_sent)
216 dev->drv->wait_until_sent(dev);
217 next_state = SIRDEV_STATE_TX_DONE;
218 break;
219
220 case SIRDEV_STATE_TX_DONE:
221 return 0;
222
223 default:
224 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
225 return -EINVAL;
226 }
227 fsm->substate = next_state;
228 } while (delay == 0);
229 return delay;
230}
231
232/*
233 * Function irda_config_fsm
234 *
235 * State machine to handle the configuration of the device (and attached dongle, if any).
236 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
237 * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
238 * long. Instead, for longer delays we start a timer to reschedule us later.
239 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
240 * Both must be unlocked/restarted on completion - but only on final exit.
241 */
242
243static void irda_config_fsm(void *data)
244{
245 struct sir_dev *dev = data;
246 struct sir_fsm *fsm = &dev->fsm;
247 int next_state;
248 int ret = -1;
249 unsigned delay;
250
251 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
252
253 do {
254 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
255 __FUNCTION__, fsm->state, fsm->substate);
256
257 next_state = fsm->state;
258 delay = 0;
259
260 switch(fsm->state) {
261
262 case SIRDEV_STATE_DONGLE_OPEN:
263 if (dev->dongle_drv != NULL) {
264 ret = sirdev_put_dongle(dev);
265 if (ret) {
266 fsm->result = -EINVAL;
267 next_state = SIRDEV_STATE_ERROR;
268 break;
269 }
270 }
271
272 /* Initialize dongle */
273 ret = sirdev_get_dongle(dev, fsm->param);
274 if (ret) {
275 fsm->result = ret;
276 next_state = SIRDEV_STATE_ERROR;
277 break;
278 }
279
280 /* Dongles are powered through the modem control lines which
281 * were just set during open. Before resetting, let's wait for
282 * the power to stabilize. This is what some dongle drivers did
283 * in open before, while others didn't - should be safe anyway.
284 */
285
286 delay = 50;
287 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
288 next_state = SIRDEV_STATE_DONGLE_RESET;
289
290 fsm->param = 9600;
291
292 break;
293
294 case SIRDEV_STATE_DONGLE_CLOSE:
295			/* shouldn't we just treat this as success? */
296 if (dev->dongle_drv == NULL) {
297 fsm->result = -EINVAL;
298 next_state = SIRDEV_STATE_ERROR;
299 break;
300 }
301
302 ret = sirdev_put_dongle(dev);
303 if (ret) {
304 fsm->result = ret;
305 next_state = SIRDEV_STATE_ERROR;
306 break;
307 }
308 next_state = SIRDEV_STATE_DONE;
309 break;
310
311 case SIRDEV_STATE_SET_DTR_RTS:
312 ret = sirdev_set_dtr_rts(dev,
313 (fsm->param&0x02) ? TRUE : FALSE,
314 (fsm->param&0x01) ? TRUE : FALSE);
315 next_state = SIRDEV_STATE_DONE;
316 break;
317
318 case SIRDEV_STATE_SET_SPEED:
319 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
320 next_state = SIRDEV_STATE_DONGLE_CHECK;
321 break;
322
323 case SIRDEV_STATE_DONGLE_CHECK:
324 ret = irda_tx_complete_fsm(dev);
325 if (ret < 0) {
326 fsm->result = ret;
327 next_state = SIRDEV_STATE_ERROR;
328 break;
329 }
330 if ((delay=ret) != 0)
331 break;
332
333 if (dev->dongle_drv) {
334 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
335 next_state = SIRDEV_STATE_DONGLE_RESET;
336 }
337 else {
338 dev->speed = fsm->param;
339 next_state = SIRDEV_STATE_PORT_SPEED;
340 }
341 break;
342
343 case SIRDEV_STATE_DONGLE_RESET:
344 if (dev->dongle_drv->reset) {
345 ret = dev->dongle_drv->reset(dev);
346 if (ret < 0) {
347 fsm->result = ret;
348 next_state = SIRDEV_STATE_ERROR;
349 break;
350 }
351 }
352 else
353 ret = 0;
354 if ((delay=ret) == 0) {
355 /* set serial port according to dongle default speed */
356 if (dev->drv->set_speed)
357 dev->drv->set_speed(dev, dev->speed);
358 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
359 next_state = SIRDEV_STATE_DONGLE_SPEED;
360 }
361 break;
362
363 case SIRDEV_STATE_DONGLE_SPEED:
364			if (dev->dongle_drv->set_speed) {
365 ret = dev->dongle_drv->set_speed(dev, fsm->param);
366 if (ret < 0) {
367 fsm->result = ret;
368 next_state = SIRDEV_STATE_ERROR;
369 break;
370 }
371 }
372 else
373 ret = 0;
374 if ((delay=ret) == 0)
375 next_state = SIRDEV_STATE_PORT_SPEED;
376 break;
377
378 case SIRDEV_STATE_PORT_SPEED:
379 /* Finally we are ready to change the serial port speed */
380 if (dev->drv->set_speed)
381 dev->drv->set_speed(dev, dev->speed);
382 dev->new_speed = 0;
383 next_state = SIRDEV_STATE_DONE;
384 break;
385
386 case SIRDEV_STATE_DONE:
387 /* Signal network layer so it can send more frames */
388 netif_wake_queue(dev->netdev);
389 next_state = SIRDEV_STATE_COMPLETE;
390 break;
391
392 default:
393 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
394 fsm->result = -EINVAL;
395 /* fall thru */
396
397 case SIRDEV_STATE_ERROR:
398 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
399
400#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
401 netif_stop_queue(dev->netdev);
402#else
403 netif_wake_queue(dev->netdev);
404#endif
405 /* fall thru */
406
407 case SIRDEV_STATE_COMPLETE:
408 /* config change finished, so we are not busy any longer */
409 sirdev_enable_rx(dev);
410 up(&fsm->sem);
411 return;
412 }
413 fsm->state = next_state;
414 } while(!delay);
415
416 irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
417}
418
419/* schedule some device configuration task for execution by kIrDAd
420 * on behalf of the above state machine.
421 * can be called from process or interrupt/tasklet context.
422 */
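/* Sketch of a typical caller (e.g. a speed-change helper in sir_dev.c):
 *
 *	err = sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 57600);
 *
 * param carries the requested speed here; for SIRDEV_STATE_SET_DTR_RTS,
 * bit 1 of param selects DTR and bit 0 selects RTS, as decoded above.
 * -EWOULDBLOCK means the fsm was busy while we were in atomic context,
 * -EAGAIN means queueing the request to kIrDAd failed.
 */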
423
424int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
425{
426 struct sir_fsm *fsm = &dev->fsm;
427 int xmit_was_down;
428
429 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
430
431 if (down_trylock(&fsm->sem)) {
432 if (in_interrupt() || in_atomic() || irqs_disabled()) {
433 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
434 return -EWOULDBLOCK;
435 } else
436 down(&fsm->sem);
437 }
438
439 if (fsm->state == SIRDEV_STATE_DEAD) {
440 /* race with sirdev_close should never happen */
441		IRDA_ERROR("%s(), instance is stale!\n", __FUNCTION__);
442 up(&fsm->sem);
443 return -ESTALE; /* or better EPIPE? */
444 }
445
446 xmit_was_down = netif_queue_stopped(dev->netdev);
447 netif_stop_queue(dev->netdev);
448 atomic_set(&dev->enable_rx, 0);
449
450 fsm->state = initial_state;
451 fsm->param = param;
452 fsm->result = 0;
453
454 INIT_LIST_HEAD(&fsm->rq.lh_request);
455 fsm->rq.pending = 0;
456 fsm->rq.func = irda_config_fsm;
457 fsm->rq.data = dev;
458
459 if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */
460 atomic_set(&dev->enable_rx, 1);
461 if (!xmit_was_down)
462 netif_wake_queue(dev->netdev);
463 up(&fsm->sem);
464 return -EAGAIN;
465 }
466 return 0;
467}
468
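/* Start the shared kIrDAd worker at module load. The startup completion
 * passed to kernel_thread() is expected to be signalled by irda_thread()
 * (defined above) once it has recorded itself in irda_rq_queue.thread,
 * so the request queue is ready to accept work before module_init returns.
 */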
469static int __init irda_thread_create(void)
470{
471 struct completion startup;
472 int pid;
473
474 spin_lock_init(&irda_rq_queue.lock);
475 irda_rq_queue.thread = NULL;
476 INIT_LIST_HEAD(&irda_rq_queue.request_list);
477 init_waitqueue_head(&irda_rq_queue.kick);
478 init_waitqueue_head(&irda_rq_queue.done);
479 atomic_set(&irda_rq_queue.num_pending, 0);
480
481 init_completion(&startup);
482 pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
483 if (pid <= 0)
484 return -EAGAIN;
485 else
486 wait_for_completion(&startup);
487
488 return 0;
489}
490
491static void __exit irda_thread_join(void)
492{
493 if (irda_rq_queue.thread) {
494 flush_irda_queue();
495 init_completion(&irda_rq_queue.exit);
496 irda_rq_queue.thread = NULL;
497 wake_up(&irda_rq_queue.kick);
498 wait_for_completion(&irda_rq_queue.exit);
499 }
500}
501
502module_init(irda_thread_create);
503module_exit(irda_thread_join);
504
505MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
506MODULE_DESCRIPTION("IrDA SIR core");
507MODULE_LICENSE("GPL");
508
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 58f76cefbc83..a4674044bd6f 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -54,6 +54,7 @@
54#include <linux/rtnetlink.h> 54#include <linux/rtnetlink.h>
55#include <linux/serial_reg.h> 55#include <linux/serial_reg.h>
56#include <linux/dma-mapping.h> 56#include <linux/dma-mapping.h>
57#include <linux/pnp.h>
57#include <linux/platform_device.h> 58#include <linux/platform_device.h>
58 59
59#include <asm/io.h> 60#include <asm/io.h>
@@ -358,6 +359,16 @@ static inline void register_bank(int iobase, int bank)
358 iobase + IRCC_MASTER); 359 iobase + IRCC_MASTER);
359} 360}
360 361
362#ifdef CONFIG_PNP
363/* PNP hotplug support */
364static const struct pnp_device_id smsc_ircc_pnp_table[] = {
365 { .id = "SMCf010", .driver_data = 0 },
366 /* and presumably others */
367 { }
368};
369MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
370#endif
371
361 372
362/******************************************************************************* 373/*******************************************************************************
363 * 374 *
@@ -2072,7 +2083,8 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
2072 2083
2073/* PROBING 2084/* PROBING
2074 * 2085 *
2075 * 2086 * REVISIT we can be told about the device by PNP, and should use that info
2087 * instead of probing hardware and creating a platform_device ...
2076 */ 2088 */
2077 2089
2078static int __init smsc_ircc_look_for_chips(void) 2090static int __init smsc_ircc_look_for_chips(void)
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index 6f7dce8eba51..b67f586d7392 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy)
149 int status; 149 int status;
150 150
151 dev = nds[i]; 151 dev = nds[i];
152 if (dev == NULL)
153 continue;
152 154
153 status = pm3386_is_link_up(i); 155 status = pm3386_is_link_up(i);
154 if (status && !netif_carrier_ok(dev)) { 156 if (status && !netif_carrier_ok(dev)) {
@@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up)
191 193
192static int __init enp2611_init_module(void) 194static int __init enp2611_init_module(void)
193{ 195{
196 int ports;
194 int i; 197 int i;
195 198
196 if (!machine_is_enp2611()) 199 if (!machine_is_enp2611())
@@ -199,7 +202,8 @@ static int __init enp2611_init_module(void)
199 caleb_reset(); 202 caleb_reset();
200 pm3386_reset(); 203 pm3386_reset();
201 204
202 for (i = 0; i < 3; i++) { 205 ports = pm3386_port_count();
206 for (i = 0; i < ports; i++) {
203 nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); 207 nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
204 if (nds[i] == NULL) { 208 if (nds[i] == NULL) {
205 while (--i >= 0) 209 while (--i >= 0)
@@ -215,9 +219,10 @@ static int __init enp2611_init_module(void)
215 219
216 ixp2400_msf_init(&enp2611_msf_parameters); 220 ixp2400_msf_init(&enp2611_msf_parameters);
217 221
218 if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) { 222 if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
219 for (i = 0; i < 3; i++) 223 for (i = 0; i < ports; i++)
220 free_netdev(nds[i]); 224 if (nds[i])
225 free_netdev(nds[i]);
221 return -EINVAL; 226 return -EINVAL;
222 } 227 }
223 228
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
index 5c7ab7564053..5224651c9aac 100644
--- a/drivers/net/ixp2000/pm3386.c
+++ b/drivers/net/ixp2000/pm3386.c
@@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
86 pm3386_reg_write(port >> 1, reg, value); 86 pm3386_reg_write(port >> 1, reg, value);
87} 87}
88 88
89int pm3386_secondary_present(void)
90{
91 return pm3386_reg_read(1, 0) == 0x3386;
92}
89 93
90void pm3386_reset(void) 94void pm3386_reset(void)
91{ 95{
92 u8 mac[3][6]; 96 u8 mac[3][6];
97 int secondary;
98
99 secondary = pm3386_secondary_present();
93 100
94 /* Save programmed MAC addresses. */ 101 /* Save programmed MAC addresses. */
95 pm3386_get_mac(0, mac[0]); 102 pm3386_get_mac(0, mac[0]);
96 pm3386_get_mac(1, mac[1]); 103 pm3386_get_mac(1, mac[1]);
97 pm3386_get_mac(2, mac[2]); 104 if (secondary)
105 pm3386_get_mac(2, mac[2]);
98 106
99 /* Assert analog and digital reset. */ 107 /* Assert analog and digital reset. */
100 pm3386_reg_write(0, 0x002, 0x0060); 108 pm3386_reg_write(0, 0x002, 0x0060);
101 pm3386_reg_write(1, 0x002, 0x0060); 109 if (secondary)
110 pm3386_reg_write(1, 0x002, 0x0060);
102 mdelay(1); 111 mdelay(1);
103 112
104 /* Deassert analog reset. */ 113 /* Deassert analog reset. */
105 pm3386_reg_write(0, 0x002, 0x0062); 114 pm3386_reg_write(0, 0x002, 0x0062);
106 pm3386_reg_write(1, 0x002, 0x0062); 115 if (secondary)
116 pm3386_reg_write(1, 0x002, 0x0062);
107 mdelay(10); 117 mdelay(10);
108 118
109 /* Deassert digital reset. */ 119 /* Deassert digital reset. */
110 pm3386_reg_write(0, 0x002, 0x0063); 120 pm3386_reg_write(0, 0x002, 0x0063);
111 pm3386_reg_write(1, 0x002, 0x0063); 121 if (secondary)
122 pm3386_reg_write(1, 0x002, 0x0063);
112 mdelay(10); 123 mdelay(10);
113 124
114 /* Restore programmed MAC addresses. */ 125 /* Restore programmed MAC addresses. */
115 pm3386_set_mac(0, mac[0]); 126 pm3386_set_mac(0, mac[0]);
116 pm3386_set_mac(1, mac[1]); 127 pm3386_set_mac(1, mac[1]);
117 pm3386_set_mac(2, mac[2]); 128 if (secondary)
129 pm3386_set_mac(2, mac[2]);
118 130
119 /* Disable carrier on all ports. */ 131 /* Disable carrier on all ports. */
120 pm3386_set_carrier(0, 0); 132 pm3386_set_carrier(0, 0);
121 pm3386_set_carrier(1, 0); 133 pm3386_set_carrier(1, 0);
122 pm3386_set_carrier(2, 0); 134 if (secondary)
135 pm3386_set_carrier(2, 0);
123} 136}
124 137
125static u16 swaph(u16 x) 138static u16 swaph(u16 x)
@@ -127,6 +140,11 @@ static u16 swaph(u16 x)
127 return ((x << 8) | (x >> 8)) & 0xffff; 140 return ((x << 8) | (x >> 8)) & 0xffff;
128} 141}
129 142
143int pm3386_port_count(void)
144{
145 return 2 + pm3386_secondary_present();
146}
147
130void pm3386_init_port(int port) 148void pm3386_init_port(int port)
131{ 149{
132 int pm = port >> 1; 150 int pm = port >> 1;
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
index fe92bb056ac4..cc4183dca911 100644
--- a/drivers/net/ixp2000/pm3386.h
+++ b/drivers/net/ixp2000/pm3386.h
@@ -13,6 +13,7 @@
13#define __PM3386_H 13#define __PM3386_H
14 14
15void pm3386_reset(void); 15void pm3386_reset(void);
16int pm3386_port_count(void);
16void pm3386_init_port(int port); 17void pm3386_init_port(int port);
17void pm3386_get_mac(int port, u8 *mac); 18void pm3386_get_mac(int port, u8 *mac);
18void pm3386_set_mac(int port, u8 *mac); 19void pm3386_set_mac(int port, u8 *mac);
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 93c494bcd18d..b32765215f75 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -139,8 +139,9 @@ bad_clone_list[] __initdata = {
139 139
140#if defined(CONFIG_PLAT_MAPPI) 140#if defined(CONFIG_PLAT_MAPPI)
141# define DCR_VAL 0x4b 141# define DCR_VAL 0x4b
142#elif defined(CONFIG_PLAT_OAKS32R) 142#elif defined(CONFIG_PLAT_OAKS32R) || \
143# define DCR_VAL 0x48 143 defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
144# define DCR_VAL 0x48 /* 8-bit mode */
144#else 145#else
145# define DCR_VAL 0x49 146# define DCR_VAL 0x49
146#endif 147#endif
@@ -396,10 +397,22 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
396 /* We must set the 8390 for word mode. */ 397 /* We must set the 8390 for word mode. */
397 outb_p(DCR_VAL, ioaddr + EN0_DCFG); 398 outb_p(DCR_VAL, ioaddr + EN0_DCFG);
398 start_page = NESM_START_PG; 399 start_page = NESM_START_PG;
399 stop_page = NESM_STOP_PG; 400
401 /*
402 * Realtek RTL8019AS datasheet says that the PSTOP register
403 * shouldn't exceed 0x60 in 8-bit mode.
404 * This chip can be identified by reading the signature from
405 * the remote byte count registers (otherwise write-only)...
406 */
407 if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */
408 inb(ioaddr + EN0_RCNTLO) == 0x50 &&
409 inb(ioaddr + EN0_RCNTHI) == 0x70)
410 stop_page = 0x60;
411 else
412 stop_page = NESM_STOP_PG;
400 } else { 413 } else {
401 start_page = NE1SM_START_PG; 414 start_page = NE1SM_START_PG;
402 stop_page = NE1SM_STOP_PG; 415 stop_page = NE1SM_STOP_PG;
403 } 416 }
404 417
405#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) 418#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R)
@@ -509,15 +522,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
509 ei_status.name = name; 522 ei_status.name = name;
510 ei_status.tx_start_page = start_page; 523 ei_status.tx_start_page = start_page;
511 ei_status.stop_page = stop_page; 524 ei_status.stop_page = stop_page;
512#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
513 wordlength = 1;
514#endif
515 525
516#ifdef CONFIG_PLAT_OAKS32R 526 /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */
517 ei_status.word16 = 0; 527 ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01));
518#else
519 ei_status.word16 = (wordlength == 2);
520#endif
521 528
522 ei_status.rx_start_page = start_page + TX_PAGES; 529 ei_status.rx_start_page = start_page + TX_PAGES;
523#ifdef PACKETBUF_MEMSIZE 530#ifdef PACKETBUF_MEMSIZE
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 459443b572ce..1b236bdf6b92 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -60,8 +60,10 @@ int mdiobus_register(struct mii_bus *bus)
60 for (i = 0; i < PHY_MAX_ADDR; i++) { 60 for (i = 0; i < PHY_MAX_ADDR; i++) {
61 struct phy_device *phydev; 61 struct phy_device *phydev;
62 62
63 if (bus->phy_mask & (1 << i)) 63 if (bus->phy_mask & (1 << i)) {
64 bus->phy_map[i] = NULL;
64 continue; 65 continue;
66 }
65 67
66 phydev = get_phy_device(bus, i); 68 phydev = get_phy_device(bus, i);
67 69
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index b82191d2bee1..f5a3bf4d959a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -127,6 +127,7 @@ static const struct mii_chip_info {
127} mii_chip_table[] = { 127} mii_chip_table[] = {
128 { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, 128 { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
129 { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, 129 { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
130 { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN },
130 { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, 131 { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN },
131 { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, 132 { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN },
132 { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, 133 { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 227df9876a2c..62be6d99d05c 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
51#include "sky2.h" 51#include "sky2.h"
52 52
53#define DRV_NAME "sky2" 53#define DRV_NAME "sky2"
54#define DRV_VERSION "1.2" 54#define DRV_VERSION "1.3"
55#define PFX DRV_NAME " " 55#define PFX DRV_NAME " "
56 56
57/* 57/*
@@ -79,6 +79,8 @@
79#define NAPI_WEIGHT 64 79#define NAPI_WEIGHT 64
80#define PHY_RETRIES 1000 80#define PHY_RETRIES 1000
81 81
82#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
83
82static const u32 default_msg = 84static const u32 default_msg =
83 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 85 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
84 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR 86 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
@@ -96,6 +98,10 @@ static int disable_msi = 0;
96module_param(disable_msi, int, 0); 98module_param(disable_msi, int, 0);
97MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); 99MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
98 100
101static int idle_timeout = 100;
102module_param(idle_timeout, int, 0);
103MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)");
104
99static const struct pci_device_id sky2_id_table[] = { 105static const struct pci_device_id sky2_id_table[] = {
100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, 106 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, 107 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -122,6 +128,7 @@ MODULE_DEVICE_TABLE(pci, sky2_id_table);
122/* Avoid conditionals by using array */ 128/* Avoid conditionals by using array */
123static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; 129static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
124static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; 130static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
131static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
125 132
126/* This driver supports yukon2 chipset only */ 133/* This driver supports yukon2 chipset only */
127static const char *yukon2_name[] = { 134static const char *yukon2_name[] = {
@@ -298,7 +305,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
298 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 305 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
299 u16 ctrl, ct1000, adv, pg, ledctrl, ledover; 306 u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
300 307
301 if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) { 308 if (sky2->autoneg == AUTONEG_ENABLE &&
309 (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
302 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 310 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
303 311
304 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 312 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -326,7 +334,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
326 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); 334 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
327 335
328 if (sky2->autoneg == AUTONEG_ENABLE && 336 if (sky2->autoneg == AUTONEG_ENABLE &&
329 hw->chip_id == CHIP_ID_YUKON_XL) { 337 (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
330 ctrl &= ~PHY_M_PC_DSC_MSK; 338 ctrl &= ~PHY_M_PC_DSC_MSK;
331 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; 339 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
332 } 340 }
@@ -442,10 +450,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
442 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 450 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
443 451
444 /* set LED Function Control register */ 452 /* set LED Function Control register */
445 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ 453 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
446 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ 454 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
447 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ 455 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
448 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ 456 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
457 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
449 458
450 /* set Polarity Control register */ 459 /* set Polarity Control register */
451 gm_phy_write(hw, port, PHY_MARV_PHY_STAT, 460 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
@@ -459,6 +468,25 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
459 /* restore page register */ 468 /* restore page register */
460 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 469 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
461 break; 470 break;
471 case CHIP_ID_YUKON_EC_U:
472 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
473
474 /* select page 3 to access LED control register */
475 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
476
477 /* set LED Function Control register */
478 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
479 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
480 PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
481 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
482 PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
483
484 /* set Blink Rate in LED Timer Control Register */
485 gm_phy_write(hw, port, PHY_MARV_INT_MASK,
486 ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
487 /* restore page register */
488 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
489 break;
462 490
463 default: 491 default:
464 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ 492 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
@@ -467,19 +495,21 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
467 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); 495 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
468 } 496 }
469 497
470 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { 498 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
471 /* apply fixes in PHY AFE */ 499 /* apply fixes in PHY AFE */
472 gm_phy_write(hw, port, 22, 255); 500 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
501 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
502
473 /* increase differential signal amplitude in 10BASE-T */ 503 /* increase differential signal amplitude in 10BASE-T */
474 gm_phy_write(hw, port, 24, 0xaa99); 504 gm_phy_write(hw, port, 0x18, 0xaa99);
475 gm_phy_write(hw, port, 23, 0x2011); 505 gm_phy_write(hw, port, 0x17, 0x2011);
476 506
477 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ 507 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
478 gm_phy_write(hw, port, 24, 0xa204); 508 gm_phy_write(hw, port, 0x18, 0xa204);
479 gm_phy_write(hw, port, 23, 0x2002); 509 gm_phy_write(hw, port, 0x17, 0x2002);
480 510
481 /* set page register to 0 */ 511 /* set page register to 0 */
482 gm_phy_write(hw, port, 22, 0); 512 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
483 } else { 513 } else {
484 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 514 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
485 515
@@ -553,6 +583,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
553 583
554 if (sky2->duplex == DUPLEX_FULL) 584 if (sky2->duplex == DUPLEX_FULL)
555 reg |= GM_GPCR_DUP_FULL; 585 reg |= GM_GPCR_DUP_FULL;
586
587 /* turn off pause in 10/100mbps half duplex */
588 else if (sky2->speed != SPEED_1000 &&
589 hw->chip_id != CHIP_ID_YUKON_EC_U)
590 sky2->tx_pause = sky2->rx_pause = 0;
556 } else 591 } else
557 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 592 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
558 593
@@ -719,7 +754,7 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
719{ 754{
720 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod; 755 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
721 756
722 sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE; 757 sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
723 return le; 758 return le;
724} 759}
725 760
@@ -735,7 +770,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
735static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) 770static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
736{ 771{
737 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; 772 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
738 sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE; 773 sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
739 return le; 774 return le;
740} 775}
741 776
@@ -985,8 +1020,19 @@ static int sky2_up(struct net_device *dev)
985 struct sky2_hw *hw = sky2->hw; 1020 struct sky2_hw *hw = sky2->hw;
986 unsigned port = sky2->port; 1021 unsigned port = sky2->port;
987 u32 ramsize, rxspace, imask; 1022 u32 ramsize, rxspace, imask;
988 int err = -ENOMEM; 1023 int err;
1024 struct net_device *otherdev = hw->dev[sky2->port^1];
989 1025
1026 /* Block bringing up both ports at the same time on a dual port card.
1027	 * There is an unfixed bug where the receiver gets confused and picks up
1028 * packets out of order. Until this is fixed, prevent data corruption.
1029 */
1030 if (otherdev && netif_running(otherdev)) {
1031 printk(KERN_INFO PFX "dual port support is disabled.\n");
1032 return -EBUSY;
1033 }
1034
1035 err = -ENOMEM;
990 if (netif_msg_ifup(sky2)) 1036 if (netif_msg_ifup(sky2))
991 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 1037 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
992 1038
@@ -1050,7 +1096,7 @@ static int sky2_up(struct net_device *dev)
1050 1096
1051 /* Enable interrupts from phy/mac for port */ 1097 /* Enable interrupts from phy/mac for port */
1052 imask = sky2_read32(hw, B0_IMSK); 1098 imask = sky2_read32(hw, B0_IMSK);
1053 imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1099 imask |= portirq_msk[port];
1054 sky2_write32(hw, B0_IMSK, imask); 1100 sky2_write32(hw, B0_IMSK, imask);
1055 1101
1056 return 0; 1102 return 0;
@@ -1078,7 +1124,7 @@ err_out:
1078/* Modular subtraction in ring */ 1124/* Modular subtraction in ring */
1079static inline int tx_dist(unsigned tail, unsigned head) 1125static inline int tx_dist(unsigned tail, unsigned head)
1080{ 1126{
1081 return (head - tail) % TX_RING_SIZE; 1127 return (head - tail) & (TX_RING_SIZE - 1);
1082} 1128}
1083 1129
1084/* Number of list elements available for next tx */ 1130/* Number of list elements available for next tx */
@@ -1255,7 +1301,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1255 le->opcode = OP_BUFFER | HW_OWNER; 1301 le->opcode = OP_BUFFER | HW_OWNER;
1256 1302
1257 fre = sky2->tx_ring 1303 fre = sky2->tx_ring
1258 + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE; 1304 + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
1259 pci_unmap_addr_set(fre, mapaddr, mapping); 1305 pci_unmap_addr_set(fre, mapaddr, mapping);
1260 } 1306 }
1261 1307
@@ -1315,7 +1361,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1315 1361
1316 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1362 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1317 struct tx_ring_info *fre; 1363 struct tx_ring_info *fre;
1318 fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; 1364 fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
1319 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), 1365 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
1320 skb_shinfo(skb)->frags[i].size, 1366 skb_shinfo(skb)->frags[i].size,
1321 PCI_DMA_TODEVICE); 1367 PCI_DMA_TODEVICE);
@@ -1401,7 +1447,7 @@ static int sky2_down(struct net_device *dev)
1401 1447
1402 /* Disable port IRQ */ 1448 /* Disable port IRQ */
1403 imask = sky2_read32(hw, B0_IMSK); 1449 imask = sky2_read32(hw, B0_IMSK);
1404 imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1450 imask &= ~portirq_msk[port];
1405 sky2_write32(hw, B0_IMSK, imask); 1451 sky2_write32(hw, B0_IMSK, imask);
1406 1452
1407 /* turn off LED's */ 1453 /* turn off LED's */
@@ -1498,17 +1544,26 @@ static void sky2_link_up(struct sky2_port *sky2)
1498 sky2_write8(hw, SK_REG(port, LNK_LED_REG), 1544 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1499 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); 1545 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1500 1546
1501 if (hw->chip_id == CHIP_ID_YUKON_XL) { 1547 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
1502 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 1548 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1549 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
1550
1551 switch(sky2->speed) {
1552 case SPEED_10:
1553 led |= PHY_M_LEDC_INIT_CTRL(7);
1554 break;
1555
1556 case SPEED_100:
1557 led |= PHY_M_LEDC_STA1_CTRL(7);
1558 break;
1559
1560 case SPEED_1000:
1561 led |= PHY_M_LEDC_STA0_CTRL(7);
1562 break;
1563 }
1503 1564
1504 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 1565 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
1505 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ 1566 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led);
1506 PHY_M_LEDC_INIT_CTRL(sky2->speed ==
1507 SPEED_10 ? 7 : 0) |
1508 PHY_M_LEDC_STA1_CTRL(sky2->speed ==
1509 SPEED_100 ? 7 : 0) |
1510 PHY_M_LEDC_STA0_CTRL(sky2->speed ==
1511 SPEED_1000 ? 7 : 0));
1512 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 1567 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
1513 } 1568 }
1514 1569
@@ -1583,7 +1638,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1583 sky2->speed = sky2_phy_speed(hw, aux); 1638 sky2->speed = sky2_phy_speed(hw, aux);
1584 1639
1585 /* Pause bits are offset (9..8) */ 1640 /* Pause bits are offset (9..8) */
1586 if (hw->chip_id == CHIP_ID_YUKON_XL) 1641 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
1587 aux >>= 6; 1642 aux >>= 6;
1588 1643
1589 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0; 1644 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
@@ -1859,35 +1914,28 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
1859static int sky2_status_intr(struct sky2_hw *hw, int to_do) 1914static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1860{ 1915{
1861 int work_done = 0; 1916 int work_done = 0;
1917 u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1862 1918
1863 rmb(); 1919 rmb();
1864 1920
1865 for(;;) { 1921 while (hw->st_idx != hwidx) {
1866 struct sky2_status_le *le = hw->st_le + hw->st_idx; 1922 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1867 struct net_device *dev; 1923 struct net_device *dev;
1868 struct sky2_port *sky2; 1924 struct sky2_port *sky2;
1869 struct sk_buff *skb; 1925 struct sk_buff *skb;
1870 u32 status; 1926 u32 status;
1871 u16 length; 1927 u16 length;
1872 u8 link, opcode;
1873
1874 opcode = le->opcode;
1875 if (!opcode)
1876 break;
1877 opcode &= ~HW_OWNER;
1878 1928
1879 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; 1929 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
1880 le->opcode = 0;
1881 1930
1882 link = le->link; 1931 BUG_ON(le->link >= 2);
1883 BUG_ON(link >= 2); 1932 dev = hw->dev[le->link];
1884 dev = hw->dev[link];
1885 1933
1886 sky2 = netdev_priv(dev); 1934 sky2 = netdev_priv(dev);
1887 length = le->length; 1935 length = le->length;
1888 status = le->status; 1936 status = le->status;
1889 1937
1890 switch (opcode) { 1938 switch (le->opcode & ~HW_OWNER) {
1891 case OP_RXSTAT: 1939 case OP_RXSTAT:
1892 skb = sky2_receive(sky2, length, status); 1940 skb = sky2_receive(sky2, length, status);
1893 if (!skb) 1941 if (!skb)
@@ -1927,7 +1975,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1927 1975
1928 case OP_TXINDEXLE: 1976 case OP_TXINDEXLE:
1929 /* TX index reports status for both ports */ 1977 /* TX index reports status for both ports */
1930 sky2_tx_done(hw->dev[0], status & 0xffff); 1978 BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
1979 sky2_tx_done(hw->dev[0], status & 0xfff);
1931 if (hw->dev[1]) 1980 if (hw->dev[1])
1932 sky2_tx_done(hw->dev[1], 1981 sky2_tx_done(hw->dev[1],
1933 ((status >> 24) & 0xff) 1982 ((status >> 24) & 0xff)
@@ -1937,8 +1986,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1937 default: 1986 default:
1938 if (net_ratelimit()) 1987 if (net_ratelimit())
1939 printk(KERN_WARNING PFX 1988 printk(KERN_WARNING PFX
1940 "unknown status opcode 0x%x\n", opcode); 1989 "unknown status opcode 0x%x\n", le->opcode);
1941 break; 1990 goto exit_loop;
1942 } 1991 }
1943 } 1992 }
1944 1993
@@ -2089,12 +2138,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2089 */ 2138 */
2090static void sky2_idle(unsigned long arg) 2139static void sky2_idle(unsigned long arg)
2091{ 2140{
2092 struct net_device *dev = (struct net_device *) arg; 2141 struct sky2_hw *hw = (struct sky2_hw *) arg;
2142 struct net_device *dev = hw->dev[0];
2093 2143
2094 local_irq_disable();
2095 if (__netif_rx_schedule_prep(dev)) 2144 if (__netif_rx_schedule_prep(dev))
2096 __netif_rx_schedule(dev); 2145 __netif_rx_schedule(dev);
2097 local_irq_enable(); 2146
2147 mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
2098} 2148}
2099 2149
2100 2150
@@ -2105,65 +2155,46 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2105 int work_done = 0; 2155 int work_done = 0;
2106 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2156 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2107 2157
2108 restart_poll: 2158 if (status & Y2_IS_HW_ERR)
2109 if (unlikely(status & ~Y2_IS_STAT_BMU)) { 2159 sky2_hw_intr(hw);
2110 if (status & Y2_IS_HW_ERR)
2111 sky2_hw_intr(hw);
2112
2113 if (status & Y2_IS_IRQ_PHY1)
2114 sky2_phy_intr(hw, 0);
2115 2160
2116 if (status & Y2_IS_IRQ_PHY2) 2161 if (status & Y2_IS_IRQ_PHY1)
2117 sky2_phy_intr(hw, 1); 2162 sky2_phy_intr(hw, 0);
2118 2163
2119 if (status & Y2_IS_IRQ_MAC1) 2164 if (status & Y2_IS_IRQ_PHY2)
2120 sky2_mac_intr(hw, 0); 2165 sky2_phy_intr(hw, 1);
2121 2166
2122 if (status & Y2_IS_IRQ_MAC2) 2167 if (status & Y2_IS_IRQ_MAC1)
2123 sky2_mac_intr(hw, 1); 2168 sky2_mac_intr(hw, 0);
2124 2169
2125 if (status & Y2_IS_CHK_RX1) 2170 if (status & Y2_IS_IRQ_MAC2)
2126 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); 2171 sky2_mac_intr(hw, 1);
2127 2172
2128 if (status & Y2_IS_CHK_RX2) 2173 if (status & Y2_IS_CHK_RX1)
2129 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); 2174 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
2130 2175
2131 if (status & Y2_IS_CHK_TXA1) 2176 if (status & Y2_IS_CHK_RX2)
2132 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); 2177 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
2133 2178
2134 if (status & Y2_IS_CHK_TXA2) 2179 if (status & Y2_IS_CHK_TXA1)
2135 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); 2180 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
2136 }
2137
2138 if (status & Y2_IS_STAT_BMU) {
2139 work_done += sky2_status_intr(hw, work_limit - work_done);
2140 *budget -= work_done;
2141 dev0->quota -= work_done;
2142 2181
2143 if (work_done >= work_limit) 2182 if (status & Y2_IS_CHK_TXA2)
2144 return 1; 2183 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
2145 2184
2185 if (status & Y2_IS_STAT_BMU)
2146 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 2186 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2147 }
2148 2187
2149 mod_timer(&hw->idle_timer, jiffies + HZ); 2188 work_done = sky2_status_intr(hw, work_limit);
2189 *budget -= work_done;
2190 dev0->quota -= work_done;
2150 2191
2151 local_irq_disable(); 2192 if (work_done >= work_limit)
2152 __netif_rx_complete(dev0); 2193 return 1;
2153 2194
2154 status = sky2_read32(hw, B0_Y2_SP_LISR); 2195 netif_rx_complete(dev0);
2155
2156 if (unlikely(status)) {
2157 /* More work pending, try and keep going */
2158 if (__netif_rx_schedule_prep(dev0)) {
2159 __netif_rx_reschedule(dev0, work_done);
2160 status = sky2_read32(hw, B0_Y2_SP_EISR);
2161 local_irq_enable();
2162 goto restart_poll;
2163 }
2164 }
2165 2196
2166 local_irq_enable(); 2197 status = sky2_read32(hw, B0_Y2_SP_LISR);
2167 return 0; 2198 return 0;
2168} 2199}
2169 2200
@@ -2244,13 +2275,6 @@ static int __devinit sky2_reset(struct sky2_hw *hw)
2244 return -EOPNOTSUPP; 2275 return -EOPNOTSUPP;
2245 } 2276 }
2246 2277
2247 /* This chip is new and not tested yet */
2248 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
2249 pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n",
2250 pci_name(hw->pdev));
2251 pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
2252 }
2253
2254 /* disable ASF */ 2278 /* disable ASF */
2255 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2279 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2256 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2280 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -3302,7 +3326,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3302 3326
3303 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3327 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3304 3328
3305 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) dev); 3329 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
3330 if (idle_timeout > 0)
3331 mod_timer(&hw->idle_timer,
3332 jiffies + msecs_to_jiffies(idle_timeout));
3306 3333
3307 pci_set_drvdata(pdev, hw); 3334 pci_set_drvdata(pdev, hw);
3308 3335
@@ -3342,6 +3369,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3342 del_timer_sync(&hw->idle_timer); 3369 del_timer_sync(&hw->idle_timer);
3343 3370
3344 sky2_write32(hw, B0_IMSK, 0); 3371 sky2_write32(hw, B0_IMSK, 0);
3372 synchronize_irq(hw->pdev->irq);
3373
3345 dev0 = hw->dev[0]; 3374 dev0 = hw->dev[0];
3346 dev1 = hw->dev[1]; 3375 dev1 = hw->dev[1];
3347 if (dev1) 3376 if (dev1)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b026f5653f04..8012994c9b93 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -378,6 +378,9 @@ enum {
378 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ 378 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
379 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ 379 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
380 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ 380 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
381
382 CHIP_REV_YU_EC_U_A0 = 0,
383 CHIP_REV_YU_EC_U_A1 = 1,
381}; 384};
382 385
383/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ 386/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 43f5e86fc559..394339d5e87c 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1652,6 +1652,8 @@ spider_net_enable_card(struct spider_net_card *card)
1652 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, 1652 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1653 1653
1654 { SPIDER_NET_GMRWOLCTRL, 0 }, 1654 { SPIDER_NET_GMRWOLCTRL, 0 },
1655 { SPIDER_NET_GTESTMD, 0x10000000 },
1656 { SPIDER_NET_GTTQMSK, 0x00400040 },
1655 { SPIDER_NET_GTESTMD, 0 }, 1657 { SPIDER_NET_GTESTMD, 0 },
1656 1658
1657 { SPIDER_NET_GMACINTEN, 0 }, 1659 { SPIDER_NET_GMACINTEN, 0 },
@@ -1792,15 +1794,7 @@ spider_net_setup_phy(struct spider_net_card *card)
1792 if (phy->def->ops->setup_forced) 1794 if (phy->def->ops->setup_forced)
1793 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL); 1795 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
1794 1796
1795 /* the following two writes could be moved to sungem_phy.c */ 1797 phy->def->ops->enable_fiber(phy);
1796 /* enable fiber mode */
1797 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020);
1798 /* LEDs active in both modes, autosense prio = fiber */
1799 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
1800
1801 /* switch off fibre autoneg */
1802 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
1803 spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
1804 1798
1805 phy->def->ops->read_link(phy); 1799 phy->def->ops->read_link(phy);
1806 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name, 1800 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 5922b529a048..3b8d951cf73c 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -120,6 +120,8 @@ extern char spider_net_driver_name[];
120#define SPIDER_NET_GMRUAFILnR 0x00000500 120#define SPIDER_NET_GMRUAFILnR 0x00000500
121#define SPIDER_NET_GMRUA0FIL15R 0x00000578 121#define SPIDER_NET_GMRUA0FIL15R 0x00000578
122 122
123#define SPIDER_NET_GTTQMSK 0x00000934
124
123/* RX DMA controller registers, all 0x00000a.. are for DMA controller A, 125/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
124 * 0x00000b.. for DMA controller B, etc. */ 126 * 0x00000b.. for DMA controller B, etc. */
125#define SPIDER_NET_GDADCHA 0x00000a00 127#define SPIDER_NET_GDADCHA 0x00000a00
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 046371ee5bbe..b2ddd5e79303 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -329,6 +329,30 @@ static int bcm5421_init(struct mii_phy* phy)
329 return 0; 329 return 0;
330} 330}
331 331
332static int bcm5421_enable_fiber(struct mii_phy* phy)
333{
334 /* enable fiber mode */
335 phy_write(phy, MII_NCONFIG, 0x9020);
336 /* LEDs active in both modes, autosense prio = fiber */
337 phy_write(phy, MII_NCONFIG, 0x945f);
338
339 /* switch off fibre autoneg */
340 phy_write(phy, MII_NCONFIG, 0xfc01);
341 phy_write(phy, 0x0b, 0x0004);
342
343 return 0;
344}
345
346static int bcm5461_enable_fiber(struct mii_phy* phy)
347{
348 phy_write(phy, MII_NCONFIG, 0xfc0c);
349 phy_write(phy, MII_BMCR, 0x4140);
350 phy_write(phy, MII_NCONFIG, 0xfc0b);
351 phy_write(phy, MII_BMCR, 0x0140);
352
353 return 0;
354}
355
332static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) 356static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
333{ 357{
334 u16 ctl, adv; 358 u16 ctl, adv;
@@ -762,6 +786,7 @@ static struct mii_phy_ops bcm5421_phy_ops = {
762 .setup_forced = bcm54xx_setup_forced, 786 .setup_forced = bcm54xx_setup_forced,
763 .poll_link = genmii_poll_link, 787 .poll_link = genmii_poll_link,
764 .read_link = bcm54xx_read_link, 788 .read_link = bcm54xx_read_link,
789 .enable_fiber = bcm5421_enable_fiber,
765}; 790};
766 791
767static struct mii_phy_def bcm5421_phy_def = { 792static struct mii_phy_def bcm5421_phy_def = {
@@ -792,6 +817,25 @@ static struct mii_phy_def bcm5421k2_phy_def = {
792 .ops = &bcm5421k2_phy_ops 817 .ops = &bcm5421k2_phy_ops
793}; 818};
794 819
820static struct mii_phy_ops bcm5461_phy_ops = {
821 .init = bcm5421_init,
822 .suspend = generic_suspend,
823 .setup_aneg = bcm54xx_setup_aneg,
824 .setup_forced = bcm54xx_setup_forced,
825 .poll_link = genmii_poll_link,
826 .read_link = bcm54xx_read_link,
827 .enable_fiber = bcm5461_enable_fiber,
828};
829
830static struct mii_phy_def bcm5461_phy_def = {
831 .phy_id = 0x002060c0,
832 .phy_id_mask = 0xfffffff0,
833 .name = "BCM5461",
834 .features = MII_GBIT_FEATURES,
835 .magic_aneg = 1,
836 .ops = &bcm5461_phy_ops
837};
838
795/* Broadcom BCM 5462 built-in Vesta */ 839/* Broadcom BCM 5462 built-in Vesta */
796static struct mii_phy_ops bcm5462V_phy_ops = { 840static struct mii_phy_ops bcm5462V_phy_ops = {
797 .init = bcm5421_init, 841 .init = bcm5421_init,
@@ -857,6 +901,7 @@ static struct mii_phy_def* mii_phy_table[] = {
857 &bcm5411_phy_def, 901 &bcm5411_phy_def,
858 &bcm5421_phy_def, 902 &bcm5421_phy_def,
859 &bcm5421k2_phy_def, 903 &bcm5421k2_phy_def,
904 &bcm5461_phy_def,
860 &bcm5462V_phy_def, 905 &bcm5462V_phy_def,
861 &marvell_phy_def, 906 &marvell_phy_def,
862 &genmii_phy_def, 907 &genmii_phy_def,
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
index 430544496c52..69e125197fcf 100644
--- a/drivers/net/sungem_phy.h
+++ b/drivers/net/sungem_phy.h
@@ -12,6 +12,7 @@ struct mii_phy_ops
12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd); 12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
13 int (*poll_link)(struct mii_phy *phy); 13 int (*poll_link)(struct mii_phy *phy);
14 int (*read_link)(struct mii_phy *phy); 14 int (*read_link)(struct mii_phy *phy);
15 int (*enable_fiber)(struct mii_phy *phy);
15}; 16};
16 17
17/* Structure used to statically define an mii/gii based PHY */ 18/* Structure used to statically define an mii/gii based PHY */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index beeb612be98f..e1b33a25a25f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7653,21 +7653,23 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7653 cmd->supported |= (SUPPORTED_1000baseT_Half | 7653 cmd->supported |= (SUPPORTED_1000baseT_Half |
7654 SUPPORTED_1000baseT_Full); 7654 SUPPORTED_1000baseT_Full);
7655 7655
7656 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) 7656 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7657 cmd->supported |= (SUPPORTED_100baseT_Half | 7657 cmd->supported |= (SUPPORTED_100baseT_Half |
7658 SUPPORTED_100baseT_Full | 7658 SUPPORTED_100baseT_Full |
7659 SUPPORTED_10baseT_Half | 7659 SUPPORTED_10baseT_Half |
7660 SUPPORTED_10baseT_Full | 7660 SUPPORTED_10baseT_Full |
7661 SUPPORTED_MII); 7661 SUPPORTED_MII);
7662 else 7662 cmd->port = PORT_TP;
7663 } else {
7663 cmd->supported |= SUPPORTED_FIBRE; 7664 cmd->supported |= SUPPORTED_FIBRE;
7665 cmd->port = PORT_FIBRE;
7666 }
7664 7667
7665 cmd->advertising = tp->link_config.advertising; 7668 cmd->advertising = tp->link_config.advertising;
7666 if (netif_running(dev)) { 7669 if (netif_running(dev)) {
7667 cmd->speed = tp->link_config.active_speed; 7670 cmd->speed = tp->link_config.active_speed;
7668 cmd->duplex = tp->link_config.active_duplex; 7671 cmd->duplex = tp->link_config.active_duplex;
7669 } 7672 }
7670 cmd->port = 0;
7671 cmd->phy_address = PHY_ADDR; 7673 cmd->phy_address = PHY_ADDR;
7672 cmd->transceiver = 0; 7674 cmd->transceiver = 0;
7673 cmd->autoneg = tp->link_config.autoneg; 7675 cmd->autoneg = tp->link_config.autoneg;
@@ -8454,6 +8456,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8454 8456
8455 tx_len = 1514; 8457 tx_len = 1514;
8456 skb = dev_alloc_skb(tx_len); 8458 skb = dev_alloc_skb(tx_len);
8459 if (!skb)
8460 return -ENOMEM;
8461
8457 tx_data = skb_put(skb, tx_len); 8462 tx_data = skb_put(skb, tx_len);
8458 memcpy(tx_data, tp->dev->dev_addr, 6); 8463 memcpy(tx_data, tp->dev->dev_addr, 6);
8459 memset(tx_data + 6, 0x0, 8); 8464 memset(tx_data + 6, 0x0, 8);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 9a06e61df0a2..e2982a83ae42 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -939,9 +939,9 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
939 return 0; 939 return 0;
940} 940}
941 941
942static void bcm43xx_geo_init(struct bcm43xx_private *bcm) 942static int bcm43xx_geo_init(struct bcm43xx_private *bcm)
943{ 943{
944 struct ieee80211_geo geo; 944 struct ieee80211_geo *geo;
945 struct ieee80211_channel *chan; 945 struct ieee80211_channel *chan;
946 int have_a = 0, have_bg = 0; 946 int have_a = 0, have_bg = 0;
947 int i; 947 int i;
@@ -949,7 +949,10 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
949 struct bcm43xx_phyinfo *phy; 949 struct bcm43xx_phyinfo *phy;
950 const char *iso_country; 950 const char *iso_country;
951 951
952 memset(&geo, 0, sizeof(geo)); 952 geo = kzalloc(sizeof(*geo), GFP_KERNEL);
953 if (!geo)
954 return -ENOMEM;
955
953 for (i = 0; i < bcm->nr_80211_available; i++) { 956 for (i = 0; i < bcm->nr_80211_available; i++) {
954 phy = &(bcm->core_80211_ext[i].phy); 957 phy = &(bcm->core_80211_ext[i].phy);
955 switch (phy->type) { 958 switch (phy->type) {
@@ -967,31 +970,36 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
967 iso_country = bcm43xx_locale_iso(bcm->sprom.locale); 970 iso_country = bcm43xx_locale_iso(bcm->sprom.locale);
968 971
969 if (have_a) { 972 if (have_a) {
970 for (i = 0, channel = 0; channel < 201; channel++) { 973 for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL;
971 chan = &geo.a[i++]; 974 channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) {
975 chan = &geo->a[i++];
972 chan->freq = bcm43xx_channel_to_freq_a(channel); 976 chan->freq = bcm43xx_channel_to_freq_a(channel);
973 chan->channel = channel; 977 chan->channel = channel;
974 } 978 }
975 geo.a_channels = i; 979 geo->a_channels = i;
976 } 980 }
977 if (have_bg) { 981 if (have_bg) {
978 for (i = 0, channel = 1; channel < 15; channel++) { 982 for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL;
979 chan = &geo.bg[i++]; 983 channel <= IEEE80211_24GHZ_MAX_CHANNEL; channel++) {
984 chan = &geo->bg[i++];
980 chan->freq = bcm43xx_channel_to_freq_bg(channel); 985 chan->freq = bcm43xx_channel_to_freq_bg(channel);
981 chan->channel = channel; 986 chan->channel = channel;
982 } 987 }
983 geo.bg_channels = i; 988 geo->bg_channels = i;
984 } 989 }
985 memcpy(geo.name, iso_country, 2); 990 memcpy(geo->name, iso_country, 2);
986 if (0 /*TODO: Outdoor use only */) 991 if (0 /*TODO: Outdoor use only */)
987 geo.name[2] = 'O'; 992 geo->name[2] = 'O';
988 else if (0 /*TODO: Indoor use only */) 993 else if (0 /*TODO: Indoor use only */)
989 geo.name[2] = 'I'; 994 geo->name[2] = 'I';
990 else 995 else
991 geo.name[2] = ' '; 996 geo->name[2] = ' ';
992 geo.name[3] = '\0'; 997 geo->name[3] = '\0';
998
999 ieee80211_set_geo(bcm->ieee, geo);
1000 kfree(geo);
993 1001
994 ieee80211_set_geo(bcm->ieee, &geo); 1002 return 0;
995} 1003}
996 1004
997/* DummyTransmission function, as documented on 1005/* DummyTransmission function, as documented on
@@ -3479,16 +3487,17 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
3479 goto err_80211_unwind; 3487 goto err_80211_unwind;
3480 bcm43xx_wireless_core_disable(bcm); 3488 bcm43xx_wireless_core_disable(bcm);
3481 } 3489 }
3490 err = bcm43xx_geo_init(bcm);
3491 if (err)
3492 goto err_80211_unwind;
3482 bcm43xx_pctl_set_crystal(bcm, 0); 3493 bcm43xx_pctl_set_crystal(bcm, 0);
3483 3494
3484 /* Set the MAC address in the networking subsystem */ 3495 /* Set the MAC address in the networking subsystem */
3485 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) 3496 if (is_valid_ether_addr(bcm->sprom.et1macaddr))
3486 memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6); 3497 memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6);
3487 else 3498 else
3488 memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6); 3499 memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6);
3489 3500
3490 bcm43xx_geo_init(bcm);
3491
3492 snprintf(bcm->nick, IW_ESSID_MAX_SIZE, 3501 snprintf(bcm->nick, IW_ESSID_MAX_SIZE,
3493 "Broadcom %04X", bcm->chip_id); 3502 "Broadcom %04X", bcm->chip_id);
3494 3503
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index eca79a38594a..30a202b258b5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -118,12 +118,14 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm,
118static inline 118static inline
119int bcm43xx_is_valid_channel_a(u8 channel) 119int bcm43xx_is_valid_channel_a(u8 channel)
120{ 120{
121 return (channel <= 200); 121 return (channel >= IEEE80211_52GHZ_MIN_CHANNEL
122 && channel <= IEEE80211_52GHZ_MAX_CHANNEL);
122} 123}
123static inline 124static inline
124int bcm43xx_is_valid_channel_bg(u8 channel) 125int bcm43xx_is_valid_channel_bg(u8 channel)
125{ 126{
126 return (channel >= 1 && channel <= 14); 127 return (channel >= IEEE80211_24GHZ_MIN_CHANNEL
128 && channel <= IEEE80211_24GHZ_MAX_CHANNEL);
127} 129}
128static inline 130static inline
129int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm, 131int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index 33137165727f..b0abac515530 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1287,7 +1287,7 @@ static void bcm43xx_phy_initg(struct bcm43xx_private *bcm)
1287 if (radio->revision == 8) 1287 if (radio->revision == 8)
1288 bcm43xx_phy_write(bcm, 0x0805, 0x3230); 1288 bcm43xx_phy_write(bcm, 0x0805, 0x3230);
1289 bcm43xx_phy_init_pctl(bcm); 1289 bcm43xx_phy_init_pctl(bcm);
1290 if (bcm->chip_id == 0x4306 && bcm->chip_package != 2) { 1290 if (bcm->chip_id == 0x4306 && bcm->chip_package == 2) {
1291 bcm43xx_phy_write(bcm, 0x0429, 1291 bcm43xx_phy_write(bcm, 0x0429,
1292 bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF); 1292 bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF);
1293 bcm43xx_phy_write(bcm, 0x04C3, 1293 bcm43xx_phy_write(bcm, 0x04C3,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 3edbb481a0a0..b45063974ae9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -182,8 +182,11 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
182 mode = BCM43xx_INITIAL_IWMODE; 182 mode = BCM43xx_INITIAL_IWMODE;
183 183
184 bcm43xx_lock_mmio(bcm, flags); 184 bcm43xx_lock_mmio(bcm, flags);
185 if (bcm->ieee->iw_mode != mode) 185 if (bcm->initialized) {
186 bcm43xx_set_iwmode(bcm, mode); 186 if (bcm->ieee->iw_mode != mode)
187 bcm43xx_set_iwmode(bcm, mode);
188 } else
189 bcm->ieee->iw_mode = mode;
187 bcm43xx_unlock_mmio(bcm, flags); 190 bcm43xx_unlock_mmio(bcm, flags);
188 191
189 return 0; 192 return 0;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 19e2b174d33c..d378478612fb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -634,6 +634,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vi
  * non-x86 architectures (yes Via exists on PPC among other places),
  * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
  * interrupts delivered properly.
+ *
+ * Some of the on-chip devices are actually '586 devices' so they are
+ * listed here.
  */
 static void quirk_via_irq(struct pci_dev *dev)
 {
@@ -648,6 +651,10 @@ static void quirk_via_irq(struct pci_dev *dev)
 		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
 	}
 }
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq);
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq);
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq);
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq);
@@ -895,6 +902,7 @@ static void __init k8t_sound_hostbridge(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge);
 
+#ifndef CONFIG_ACPI_SLEEP
 /*
  * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
  * is not activated. The myth is that Asus said that they do not want the
@@ -906,8 +914,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_ho
  * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
  * becomes necessary to do this tweak in two steps -- I've chosen the Host
  * bridge as trigger.
+ *
+ * Actually, leaving it unhidden and not redoing the quirk over suspend2ram
+ * will cause thermal management to break down, and causing machine to
+ * overheat.
  */
-static int __initdata asus_hides_smbus = 0;
+static int __initdata asus_hides_smbus;
 
 static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
 {
@@ -1050,6 +1062,8 @@ static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 );
 
+#endif
+
 /*
  * SiS 96x south bridge: BIOS typically hides SMBus device...
  */
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index c53db7ceda5e..738b1ef595a3 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -426,7 +426,7 @@ static int ds_open(struct inode *inode, struct file *file)
 
 	if (!warning_printed) {
 		printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl "
-			"usage.\n");
+			"usage from process: %s.\n", current->comm);
 		printk(KERN_INFO "pcmcia: This interface will soon be removed from "
 			"the kernel; please expect breakage unless you upgrade "
 			"to new tools.\n");
@@ -601,8 +601,12 @@ static int ds_ioctl(struct inode * inode, struct file * file,
 			ret = CS_BAD_ARGS;
 		else {
 			struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function);
-			ret = pccard_get_configuration_info(s, p_dev, &buf->config);
-			pcmcia_put_dev(p_dev);
+			if (p_dev == NULL)
+				ret = CS_BAD_ARGS;
+			else {
+				ret = pccard_get_configuration_info(s, p_dev, &buf->config);
+				pcmcia_put_dev(p_dev);
+			}
 		}
 		break;
 	case DS_GET_FIRST_TUPLE:
@@ -632,8 +636,12 @@ static int ds_ioctl(struct inode * inode, struct file * file,
 			ret = CS_BAD_ARGS;
 		else {
 			struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);
-			ret = pccard_get_status(s, p_dev, &buf->status);
-			pcmcia_put_dev(p_dev);
+			if (p_dev == NULL)
+				ret = CS_BAD_ARGS;
+			else {
+				ret = pccard_get_status(s, p_dev, &buf->status);
+				pcmcia_put_dev(p_dev);
+			}
 		}
 		break;
 	case DS_VALIDATE_CIS:
@@ -665,9 +673,10 @@ static int ds_ioctl(struct inode * inode, struct file * file,
 		if (!(buf->conf_reg.Function &&
 		     (buf->conf_reg.Function >= s->functions))) {
 			struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function);
-			if (p_dev)
+			if (p_dev) {
 				ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg);
 				pcmcia_put_dev(p_dev);
+			}
 		}
 		break;
 	case DS_GET_FIRST_REGION:
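The three pcmcia_ioctl.c hunks above share one pattern: a lookup that may return NULL is now checked before it is dereferenced, and the reference taken by the lookup is dropped only on the paths that actually obtained it. A minimal sketch of that shape, reusing the helper names from the hunk (the surrounding ioctl plumbing is omitted):

	struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function);

	if (p_dev == NULL) {
		ret = CS_BAD_ARGS;	/* lookup failed: nothing to use, nothing to put */
	} else {
		ret = pccard_get_status(s, p_dev, &buf->status);
		pcmcia_put_dev(p_dev);	/* balance the reference taken by the lookup */
	}

Keeping the put inside the same branch as the get is what prevents both the NULL dereference and a reference-count imbalance.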
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index a23ec54989f6..2bc8aad47219 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -178,9 +178,9 @@ static int sa1100_rtc_open(struct device *dev)
178 return 0; 178 return 0;
179 179
180 fail_pi: 180 fail_pi:
181 free_irq(IRQ_RTCAlrm, NULL); 181 free_irq(IRQ_RTCAlrm, dev);
182 fail_ai: 182 fail_ai:
183 free_irq(IRQ_RTC1Hz, NULL); 183 free_irq(IRQ_RTC1Hz, dev);
184 fail_ui: 184 fail_ui:
185 return ret; 185 return ret;
186} 186}
@@ -295,7 +295,7 @@ static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
295 295
296static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) 296static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
297{ 297{
298 seq_printf(seq, "trim/divider\t: 0x%08x\n", RTTR); 298 seq_printf(seq, "trim/divider\t: 0x%08lx\n", RTTR);
299 seq_printf(seq, "alarm_IRQ\t: %s\n", 299 seq_printf(seq, "alarm_IRQ\t: %s\n",
300 (RTSR & RTSR_ALE) ? "yes" : "no" ); 300 (RTSR & RTSR_ALE) ? "yes" : "no" );
301 seq_printf(seq, "update_IRQ\t: %s\n", 301 seq_printf(seq, "update_IRQ\t: %s\n",
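The free_irq() change above matters because the second argument is the dev_id cookie: free_irq() must receive the same pointer that was passed to request_irq(), since that cookie is how the kernel identifies which handler to detach (and, on a shared line, the only way). A sketch of the matching pair, assuming the 2.6.16-era API; the handler, flags and name string here are illustrative, not quoted from the driver:

	ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, SA_INTERRUPT,
			  "rtc 1Hz", dev);	/* dev is the dev_id cookie */
	if (ret)
		return ret;

	/* later, on the error or teardown path */
	free_irq(IRQ_RTC1Hz, dev);		/* must pass the same cookie, not NULL */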
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 5d6b7a57b02f..e65da921a827 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1348,7 +1348,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 		index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa)
 			- channel->ccws;
 		if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) ||
-		    (irb->scsw.cstat | SCHN_STAT_PCI))
+		    (irb->scsw.cstat & SCHN_STAT_PCI))
 			/* Bloody io subsystem tells us lies about cpa... */
 			index = (index - 1) & (LCS_NUM_BUFFS - 1);
 		while (channel->io_idx != index) {
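The one-character lcs.c fix above is worth spelling out: OR-ing a status word with a flag yields a non-zero value whenever the flag itself is non-zero, so the old test fired on every interrupt; AND-ing isolates the one bit actually being asked about. A self-contained illustration (the flag value is chosen arbitrarily here, not the real s390 definition):

#include <stdio.h>

#define SCHN_STAT_PCI 0x80	/* illustrative value only */

int main(void)
{
	unsigned int cstat = 0;	/* channel status with the PCI bit clear */

	if (cstat | SCHN_STAT_PCI)	/* buggy: always true for a non-zero flag */
		printf("'|' test fires even though the bit is clear\n");

	if (cstat & SCHN_STAT_PCI)	/* fixed: true only when the bit is set */
		printf("'&' test fires\n");
	else
		printf("'&' test correctly stays quiet\n");

	return 0;
}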
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 383a95f34a0d..239e108b8ed1 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -392,13 +392,16 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
 			return -ENOMEM;
 		}
 
-		prom_getproperty(op.op_nodeid, str, tmp, len);
-
-		tmp[len] = '\0';
+		cnt = prom_getproperty(op.op_nodeid, str, tmp, len);
+		if (cnt <= 0) {
+			error = -EINVAL;
+		} else {
+			tmp[len] = '\0';
 
-		if (__copy_to_user(argp, &op, sizeof(op)) != 0
-		    || copy_to_user(op.op_buf, tmp, len) != 0)
+			if (__copy_to_user(argp, &op, sizeof(op)) != 0 ||
+			    copy_to_user(op.op_buf, tmp, len) != 0)
 				error = -EFAULT;
+		}
 
 		kfree(tmp);
 		kfree(str);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index cb30d9c1153d..0c9c2f400bf6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -219,6 +219,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			ahc->flags |= AHC_39BIT_ADDRESSING;
 		} else {
 			if (dma_set_mask(dev, DMA_32BIT_MASK)) {
+				ahc_free(ahc);
 				printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
 				return (-ENODEV);
 			}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 5f586140e057..3adecef21783 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -2036,12 +2036,12 @@ ahc_pci_resume(struct ahc_softc *ahc)
 	 * that the OS doesn't know about and rely on our chip
 	 * reset handler to handle the rest.
 	 */
-	ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4,
-			     ahc->bus_softc.pci_softc.devconfig);
-	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1,
-			     ahc->bus_softc.pci_softc.command);
-	ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1,
-			     ahc->bus_softc.pci_softc.csize_lattime);
+	ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
+			     ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
+	ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
+			     ahc->bus_softc.pci_softc.command, /*bytes*/1);
+	ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
+			     ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
 	if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
 		struct seeprom_descriptor sd;
 		u_int sxfrctl1;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 0a8ad37ae899..2e9be83a697f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -739,7 +739,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 {
 	struct viosrp_adapter_info *req;
 	struct srp_event_struct *evt_struct;
+	dma_addr_t addr;
 
 	evt_struct = get_event_struct(&hostdata->pool);
 	if (!evt_struct) {
 		printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
@@ -757,10 +758,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 
 	req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
 	req->common.length = sizeof(hostdata->madapter_info);
-	req->buffer = dma_map_single(hostdata->dev,
+	req->buffer = addr = dma_map_single(hostdata->dev,
 				     &hostdata->madapter_info,
 				     sizeof(hostdata->madapter_info),
 				     DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(req->buffer)) {
 		printk(KERN_ERR
@@ -770,8 +771,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
 		return;
 	}
 
-	if (ibmvscsi_send_srp_event(evt_struct, hostdata))
+	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
 		printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
+		dma_unmap_single(hostdata->dev,
+				 addr,
+				 sizeof(hostdata->madapter_info),
+				 DMA_BIDIRECTIONAL);
+	}
 };
 
 /**
@@ -1259,6 +1265,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 {
 	struct viosrp_host_config *host_config;
 	struct srp_event_struct *evt_struct;
+	dma_addr_t addr;
 	int rc;
 
 	evt_struct = get_event_struct(&hostdata->pool);
@@ -1279,8 +1286,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 	memset(host_config, 0x00, sizeof(*host_config));
 	host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
 	host_config->common.length = length;
-	host_config->buffer = dma_map_single(hostdata->dev, buffer, length,
-					     DMA_BIDIRECTIONAL);
+	host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
+						    length,
+						    DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(host_config->buffer)) {
 		printk(KERN_ERR
@@ -1291,11 +1299,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
 	init_completion(&evt_struct->comp);
 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
-	if (rc == 0) {
+	if (rc == 0)
 		wait_for_completion(&evt_struct->comp);
-		dma_unmap_single(hostdata->dev, host_config->buffer,
-				 length, DMA_BIDIRECTIONAL);
-	}
+	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
 
 	return rc;
 }
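Both ibmvscsi functions touched above fix the same kind of leak: the dma_addr_t returned by dma_map_single() is kept in a local so the buffer can still be unmapped when the request is never completed (send failure) or after the structure it was stored in has been handed off. A condensed sketch of the pattern, using the same calls as the patch and assuming the 2.6-era single-argument dma_mapping_error():

	dma_addr_t addr;

	req->buffer = addr = dma_map_single(hostdata->dev,
					    &hostdata->madapter_info,
					    sizeof(hostdata->madapter_info),
					    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(req->buffer))
		return;		/* nothing was mapped, nothing to undo */

	if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
		/* the completion handler will never run, so unmap here */
		dma_unmap_single(hostdata->dev, addr,
				 sizeof(hostdata->madapter_info),
				 DMA_BIDIRECTIONAL);
	}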
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fad607b2e6f4..ee22173fce43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -27,7 +27,6 @@ void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
 		   uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 8932b1be2b60..41cf5d3ea6ce 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -113,6 +113,7 @@ struct lpfc_nodelist {
 #define NLP_NPR_ADISC      0x2000000	/* Issue ADISC when dq'ed from
 					   NPR list */
 #define NLP_DELAY_REMOVE   0x4000000	/* Defer removal till end of DSM */
+#define NLP_NODEV_REMOVE   0x8000000	/* Defer removal till discovery ends */
 
 /* Defines for list searchs */
 #define NLP_SEARCH_MAPPED    0x1	/* search mapped */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4813beaaca8f..283b7d824c34 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -302,10 +302,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
 		goto fail_free_mbox;
 
-	/*
-	 * set_slim mailbox command needs to execute first,
-	 * queue this command to be processed later.
-	 */
 	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
 	mbox->context2 = ndlp;
 
@@ -781,25 +777,26 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	if (disc && phba->num_disc_nodes) {
 		/* Check to see if there are more PLOGIs to be sent */
 		lpfc_more_plogi(phba);
-	}
 
 		if (phba->num_disc_nodes == 0) {
 			spin_lock_irq(phba->host->host_lock);
 			phba->fc_flag &= ~FC_NDISC_ACTIVE;
 			spin_unlock_irq(phba->host->host_lock);
 
 			lpfc_can_disctmo(phba);
 			if (phba->fc_flag & FC_RSCN_MODE) {
-				/* Check to see if more RSCNs came in while we were
-				 * processing this one.
-				 */
-				if ((phba->fc_rscn_id_cnt == 0) &&
-				    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
-					spin_lock_irq(phba->host->host_lock);
-					phba->fc_flag &= ~FC_RSCN_MODE;
-					spin_unlock_irq(phba->host->host_lock);
-				} else {
-					lpfc_els_handle_rscn(phba);
+				/*
+				 * Check to see if more RSCNs came in while
+				 * we were processing this one.
+				 */
+				if ((phba->fc_rscn_id_cnt == 0) &&
+				    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
+					spin_lock_irq(phba->host->host_lock);
+					phba->fc_flag &= ~FC_RSCN_MODE;
+					spin_unlock_irq(phba->host->host_lock);
+				} else {
+					lpfc_els_handle_rscn(phba);
+				}
 				}
 			}
 		}
@@ -1263,7 +1260,7 @@ lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 	psli = &phba->sli;
 	pring = &psli->ring[LPFC_ELS_RING];
 
-	cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
+	cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
 	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_LOGO);
 	if (!elsiocb)
@@ -1451,22 +1448,23 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
 				 * PLOGIs to be sent
 				 */
 				lpfc_more_plogi(phba);
-			}
 
 			if (phba->num_disc_nodes == 0) {
 				phba->fc_flag &= ~FC_NDISC_ACTIVE;
 				lpfc_can_disctmo(phba);
 				if (phba->fc_flag & FC_RSCN_MODE) {
-					/* Check to see if more RSCNs
-					 * came in while we were
-					 * processing this one.
-					 */
-					if((phba->fc_rscn_id_cnt==0) &&
-					   (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
-						phba->fc_flag &= ~FC_RSCN_MODE;
-					}
-					else {
-						lpfc_els_handle_rscn(phba);
+					/*
+					 * Check to see if more RSCNs
+					 * came in while we were
+					 * processing this one.
+					 */
+					if((phba->fc_rscn_id_cnt==0) &&
+					   !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
+						phba->fc_flag &= ~FC_RSCN_MODE;
+					}
+					else {
+						lpfc_els_handle_rscn(phba);
+					}
 				}
 			}
 		}
@@ -1872,9 +1870,6 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	if (mbox) {
 		if ((rspiocb->iocb.ulpStatus == 0)
 		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
-			/* set_slim mailbox command needs to execute first,
-			 * queue this command to be processed later.
-			 */
 			lpfc_unreg_rpi(phba, ndlp);
 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
 			mbox->context2 = ndlp;
@@ -1920,6 +1915,7 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;
+	ELS_PKT *els_pkt_ptr;
 
 	psli = &phba->sli;
 	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
@@ -1958,6 +1954,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
 		pcmd += sizeof (uint32_t);
 		memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
 		break;
+	case ELS_CMD_PRLO:
+		cmdsize = sizeof (uint32_t) + sizeof (PRLO);
+		elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
+		if (!elsiocb)
+			return 1;
+
+		icmd = &elsiocb->iocb;
+		icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
+		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+		       sizeof (uint32_t) + sizeof (PRLO));
+		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
+		els_pkt_ptr = (ELS_PKT *) pcmd;
+		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
+		break;
 	default:
 		return 1;
 	}
@@ -2498,7 +2511,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
 	/* If we are about to begin discovery, just ACC the RSCN.
 	 * Discovery processing will satisfy it.
 	 */
-	if (phba->hba_state < LPFC_NS_QRY) {
+	if (phba->hba_state <= LPFC_NS_QRY) {
 		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
 				 newnode);
 		return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 6721e679df62..adb086009ae0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -311,8 +311,8 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
 	evtp->evt_arg2  = arg2;
 	evtp->evt       = evt;
 
-	list_add_tail(&evtp->evt_listp, &phba->work_list);
 	spin_lock_irq(phba->host->host_lock);
+	list_add_tail(&evtp->evt_listp, &phba->work_list);
 	if (phba->work_wait)
 		wake_up(phba->work_wait);
 	spin_unlock_irq(phba->host->host_lock);
@@ -1071,10 +1071,6 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
 	/* initialize static port data */
 	rport->maxframe_size = ndlp->nlp_maxframe;
 	rport->supported_classes = ndlp->nlp_class_sup;
-	if ((rport->scsi_target_id != -1) &&
-	    (rport->scsi_target_id < MAX_FCP_TARGET)) {
-		ndlp->nlp_sid = rport->scsi_target_id;
-	}
 	rdata = rport->dd_data;
 	rdata->pnode = ndlp;
 
@@ -1087,6 +1083,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
 	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
 		fc_remote_port_rolechg(rport, rport_ids.roles);
 
+	if ((rport->scsi_target_id != -1) &&
+	    (rport->scsi_target_id < MAX_FCP_TARGET)) {
+		ndlp->nlp_sid = rport->scsi_target_id;
+	}
 
 	return;
 }
@@ -1238,6 +1238,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 				       evt_listp);
 
 		}
+		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
 		nlp->nlp_type |= NLP_FC_NODE;
 		break;
 	case NLP_MAPPED_LIST:
@@ -1258,6 +1259,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
 				       evt_listp);
 
 		}
+		nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
 		break;
 	case NLP_NPR_LIST:
 		nlp->nlp_flag |= list;
@@ -1402,6 +1404,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
 		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
 			return 1;
 	case CMD_ELS_REQUEST64_CR:
+		if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+			return 1;
 	case CMD_XMIT_ELS_RSP64_CX:
 		if (iocb->context1 == (uint8_t *) ndlp)
 			return 1;
@@ -1901,10 +1905,8 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
 			 */
 			if (ndlp->nlp_flag & NLP_DELAY_TMO)
 				lpfc_cancel_retry_delay_tmo(phba, ndlp);
-		} else {
-			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+		} else
 			ndlp = NULL;
-		}
 	} else {
 		flg = ndlp->nlp_flag & NLP_LIST_MASK;
 		if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 54d04188f7cc..eedf98801366 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -449,6 +449,7 @@ struct serv_parm { /* Structure is in Big Endian format */
 #define ELS_CMD_RRQ       0x12000000
 #define ELS_CMD_PRLI      0x20100014
 #define ELS_CMD_PRLO      0x21100014
+#define ELS_CMD_PRLO_ACC  0x02100014
 #define ELS_CMD_PDISC     0x50000000
 #define ELS_CMD_FDISC     0x51000000
 #define ELS_CMD_ADISC     0x52000000
@@ -484,6 +485,7 @@ struct serv_parm { /* Structure is in Big Endian format */
 #define ELS_CMD_RRQ       0x12
 #define ELS_CMD_PRLI      0x14001020
 #define ELS_CMD_PRLO      0x14001021
+#define ELS_CMD_PRLO_ACC  0x14001002
 #define ELS_CMD_PDISC     0x50
 #define ELS_CMD_FDISC     0x51
 #define ELS_CMD_ADISC     0x52
@@ -1539,6 +1541,7 @@ typedef struct {
 
 #define FLAGS_TOPOLOGY_FAILOVER     0x0400	/* Bit 10 */
 #define FLAGS_LINK_SPEED            0x0800	/* Bit 11 */
+#define FLAGS_IMED_ABORT            0x04000	/* Bit 14 */
 
 	uint32_t link_speed;
 #define LINK_SPEED_AUTO 0       /* Auto selection */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 66d5d003555d..908d0f27706f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -294,15 +294,6 @@ lpfc_config_port_post(struct lpfc_hba * phba)
 		}
 	}
 
-	/* This should turn on DELAYED ABTS for ELS timeouts */
-	lpfc_set_slim(phba, pmb, 0x052198, 0x1);
-	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-		phba->hba_state = LPFC_HBA_ERROR;
-		mempool_free( pmb, phba->mbox_mem_pool);
-		return -EIO;
-	}
-
-
 	lpfc_read_config(phba, pmb);
 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
 		lpfc_printf_log(phba,
@@ -804,7 +795,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
 		int max_speed;
 		char * ports;
 		char * bus;
-	} m;
+	} m = {"<Unknown>", 0, "", ""};
 
 	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
 	ports = (hdrtype == 0x80) ? "2-port " : "";
@@ -1627,7 +1618,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 
 	error = lpfc_alloc_sysfs_attr(phba);
 	if (error)
-		goto out_kthread_stop;
+		goto out_remove_host;
 
 	error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
 			    LPFC_DRIVER_NAME, phba);
@@ -1644,8 +1635,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
 
 	error = lpfc_sli_hba_setup(phba);
-	if (error)
+	if (error) {
+		error = -ENODEV;
 		goto out_free_irq;
+	}
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		spin_lock_irq(phba->host->host_lock);
@@ -1700,6 +1693,9 @@ out_free_irq:
 	free_irq(phba->pcidev->irq, phba);
 out_free_sysfs_attr:
 	lpfc_free_sysfs_attr(phba);
+out_remove_host:
+	fc_remove_host(phba->host);
+	scsi_remove_host(phba->host);
 out_kthread_stop:
 	kthread_stop(phba->worker_thread);
 out_free_iocbq:
@@ -1721,12 +1717,14 @@ out_iounmap_slim:
 out_idr_remove:
 	idr_remove(&lpfc_hba_index, phba->brd_no);
 out_put_host:
+	phba->host = NULL;
 	scsi_host_put(host);
 out_release_regions:
 	pci_release_regions(pdev);
 out_disable_device:
 	pci_disable_device(pdev);
 out:
+	pci_set_drvdata(pdev, NULL);
 	return error;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index c585e2b2e589..e42f22aaf71b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -200,6 +200,9 @@ lpfc_init_link(struct lpfc_hba * phba,
 		break;
 	}
 
+	/* Enable asynchronous ABTS responses from firmware */
+	mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
+
 	/* NEW_FEATURE
 	 * Setting up the link speed
 	 */
@@ -292,36 +295,6 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
 	return;
 }
 
-/***********************************************/
-
-/*  command to write slim                      */
-/***********************************************/
-void
-lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
-	      uint32_t value)
-{
-	MAILBOX_t *mb;
-
-	mb = &pmb->mb;
-	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-
-	/* addr = 0x090597 is AUTO ABTS disable for ELS commands */
-	/* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
-
-	/*
-	 * Always turn on DELAYED ABTS for ELS timeouts
-	 */
-	if ((addr == 0x052198) && (value == 0))
-		value = 1;
-
-	mb->un.varWords[0] = addr;
-	mb->un.varWords[1] = value;
-
-	mb->mbxCommand = MBX_SET_SLIM;
-	mb->mbxOwner = OWN_HOST;
-	return;
-}
-
 /**********************************************/
 /*  lpfc_read_nv  Issue a READ CONFIG         */
 /*                mailbox command             */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3d77bd999b70..27d60ad897cd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -465,14 +465,18 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
 static int
 lpfc_rcv_logo(struct lpfc_hba * phba,
 	      struct lpfc_nodelist * ndlp,
-	      struct lpfc_iocbq *cmdiocb)
+	      struct lpfc_iocbq *cmdiocb,
+	      uint32_t els_cmd)
 {
 	/* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
 	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
 	 * PLOGIs during LOGO storms from a device.
 	 */
 	ndlp->nlp_flag |= NLP_LOGO_ACC;
-	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+	if (els_cmd == ELS_CMD_PRLO)
+		lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+	else
+		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
 
 	if (!(ndlp->nlp_type & NLP_FABRIC) ||
 	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
@@ -681,7 +685,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
 	/* software abort outstanding PLOGI */
 	lpfc_els_abort(phba, ndlp, 1);
 
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
 
@@ -788,10 +792,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
 			if (lpfc_reg_login
 			    (phba, irsp->un.elsreq64.remoteID,
 			     (uint8_t *) sp, mbox, 0) == 0) {
-				/* set_slim mailbox command needs to
-				 * execute first, queue this command to
-				 * be processed later.
-				 */
 				switch (ndlp->nlp_DID) {
 				case NameServer_DID:
 					mbox->mbox_cmpl =
@@ -832,11 +832,17 @@ static uint32_t
 lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
 {
-	/* software abort outstanding PLOGI */
-	lpfc_els_abort(phba, ndlp, 1);
+	if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		return ndlp->nlp_state;
+	}
+	else {
+		/* software abort outstanding PLOGI */
+		lpfc_els_abort(phba, ndlp, 1);
 
 		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		return NLP_STE_FREED_NODE;
+	}
 }
 
 static uint32_t
@@ -851,7 +857,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
 	ndlp->nlp_state = NLP_STE_NPR_NODE;
 	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
 	spin_lock_irq(phba->host->host_lock);
-	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(phba->host->host_lock);
 
 	return ndlp->nlp_state;
@@ -905,7 +911,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
 	/* software abort outstanding ADISC */
 	lpfc_els_abort(phba, ndlp, 0);
 
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
 
@@ -932,7 +938,7 @@ lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
 	/* Treat like rcv logo */
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
 	return ndlp->nlp_state;
 }
 
@@ -987,11 +993,17 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
 			   struct lpfc_nodelist * ndlp, void *arg,
 			   uint32_t evt)
 {
-	/* software abort outstanding ADISC */
-	lpfc_els_abort(phba, ndlp, 1);
+	if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		return ndlp->nlp_state;
+	}
+	else {
+		/* software abort outstanding ADISC */
+		lpfc_els_abort(phba, ndlp, 1);
 
 		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		return NLP_STE_FREED_NODE;
+	}
 }
 
 static uint32_t
@@ -1006,7 +1018,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
 	ndlp->nlp_state = NLP_STE_NPR_NODE;
 	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
 	spin_lock_irq(phba->host->host_lock);
-	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	ndlp->nlp_flag |= NLP_NPR_ADISC;
 	spin_unlock_irq(phba->host->host_lock);
 
@@ -1048,7 +1060,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
 
@@ -1073,7 +1085,7 @@ lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
 	struct lpfc_iocbq *cmdiocb;
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
-	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
 	return ndlp->nlp_state;
 }
 
@@ -1133,8 +1145,14 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
 			      struct lpfc_nodelist * ndlp, void *arg,
 			      uint32_t evt)
 {
-	lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-	return NLP_STE_FREED_NODE;
+	if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		return ndlp->nlp_state;
+	}
+	else {
+		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+		return NLP_STE_FREED_NODE;
+	}
 }
 
 static uint32_t
@@ -1146,7 +1164,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
 	ndlp->nlp_state = NLP_STE_NPR_NODE;
 	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
 	spin_lock_irq(phba->host->host_lock);
-	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(phba->host->host_lock);
 	return ndlp->nlp_state;
 }
@@ -1186,7 +1204,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
 	/* Software abort outstanding PRLI before sending acc */
 	lpfc_els_abort(phba, ndlp, 1);
 
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
 
@@ -1214,7 +1232,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
 	struct lpfc_iocbq *cmdiocb;
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
-	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
 	return ndlp->nlp_state;
 }
 
@@ -1278,11 +1296,17 @@ static uint32_t
 lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
 {
-	/* software abort outstanding PRLI */
-	lpfc_els_abort(phba, ndlp, 1);
+	if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		return ndlp->nlp_state;
+	}
+	else {
+		/* software abort outstanding PLOGI */
+		lpfc_els_abort(phba, ndlp, 1);
 
 		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 		return NLP_STE_FREED_NODE;
+	}
 }
 
 
@@ -1313,7 +1337,7 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
 	ndlp->nlp_state = NLP_STE_NPR_NODE;
 	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
 	spin_lock_irq(phba->host->host_lock);
-	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(phba->host->host_lock);
 	return ndlp->nlp_state;
 }
@@ -1351,7 +1375,7 @@ lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
 
@@ -1375,7 +1399,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
-	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
 	return ndlp->nlp_state;
 }
 
@@ -1386,7 +1410,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
 	ndlp->nlp_state = NLP_STE_NPR_NODE;
 	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	lpfc_disc_set_adisc(phba, ndlp);
 
 	return ndlp->nlp_state;
@@ -1424,7 +1448,7 @@ lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
 
@@ -1456,7 +1480,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
 	spin_unlock_irq(phba->host->host_lock);
 
 	/* Treat like rcv logo */
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
 	return ndlp->nlp_state;
 }
 
@@ -1469,7 +1493,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
 	ndlp->nlp_state = NLP_STE_NPR_NODE;
 	lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
 	spin_lock_irq(phba->host->host_lock);
-	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(phba->host->host_lock);
 	lpfc_disc_set_adisc(phba, ndlp);
 	return ndlp->nlp_state;
@@ -1551,7 +1575,7 @@ lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 
-	lpfc_rcv_logo(phba, ndlp, cmdiocb);
+	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
 	return ndlp->nlp_state;
 }
 
@@ -1617,9 +1641,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
 			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
 {
 	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus) {
+		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+		return NLP_STE_FREED_NODE;
+	}
 	return ndlp->nlp_state;
 }
 
@@ -1628,9 +1659,16 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
 			struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
 {
 	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+		return NLP_STE_FREED_NODE;
+	}
 	return ndlp->nlp_state;
 }
 
@@ -1649,9 +1687,16 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
 			 uint32_t evt)
 {
 	struct lpfc_iocbq *cmdiocb, *rspiocb;
+	IOCB_t *irsp;
 
 	cmdiocb = (struct lpfc_iocbq *) arg;
 	rspiocb = cmdiocb->context_un.rsp_iocb;
+
+	irsp = &rspiocb->iocb;
+	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+		return NLP_STE_FREED_NODE;
+	}
 	return ndlp->nlp_state;
 }
 
@@ -1668,7 +1713,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
 
 	if (!mb->mbxStatus)
 		ndlp->nlp_rpi = mb->un.varWords[0];
-
+	else {
+		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
+			lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+			return NLP_STE_FREED_NODE;
+		}
+	}
 	return ndlp->nlp_state;
 }
 
@@ -1677,6 +1727,10 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba,
 			struct lpfc_nodelist * ndlp, void *arg,
 			uint32_t evt)
 {
+	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+		return ndlp->nlp_state;
+	}
 	lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
 	return NLP_STE_FREED_NODE;
 }
@@ -1687,7 +1741,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
			   uint32_t evt)
 {
 	spin_lock_irq(phba->host->host_lock);
-	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
 	spin_unlock_irq(phba->host->host_lock);
 	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
 		lpfc_cancel_retry_delay_tmo(phba, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f93799873721..7dc4c2e6bed2 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -629,8 +629,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 	struct lpfc_iocbq *piocbq;
 	IOCB_t *piocb;
 	struct fcp_cmnd *fcp_cmnd;
-	struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
-	struct lpfc_rport_data *rdata = scsi_dev->hostdata;
+	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 
 	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
@@ -665,56 +664,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
 		piocb->ulpTimeout = lpfc_cmd->timeout;
 	}
 
-	lpfc_cmd->rdata = rdata;
-
-	switch (task_mgmt_cmd) {
-	case FCP_LUN_RESET:
-		/* Issue LUN Reset to TGT <num> LUN <num> */
-		lpfc_printf_log(phba,
-				KERN_INFO,
-				LOG_FCP,
-				"%d:0703 Issue LUN Reset to TGT %d LUN %d "
-				"Data: x%x x%x\n",
-				phba->brd_no,
-				scsi_dev->id, scsi_dev->lun,
-				ndlp->nlp_rpi, ndlp->nlp_flag);
-
-		break;
-	case FCP_ABORT_TASK_SET:
-		/* Issue Abort Task Set to TGT <num> LUN <num> */
-		lpfc_printf_log(phba,
-				KERN_INFO,
-				LOG_FCP,
-				"%d:0701 Issue Abort Task Set to TGT %d LUN %d "
-				"Data: x%x x%x\n",
-				phba->brd_no,
-				scsi_dev->id, scsi_dev->lun,
-				ndlp->nlp_rpi, ndlp->nlp_flag);
-
-		break;
-	case FCP_TARGET_RESET:
-		/* Issue Target Reset to TGT <num> */
-		lpfc_printf_log(phba,
-				KERN_INFO,
-				LOG_FCP,
-				"%d:0702 Issue Target Reset to TGT %d "
-				"Data: x%x x%x\n",
-				phba->brd_no,
-				scsi_dev->id, ndlp->nlp_rpi,
-				ndlp->nlp_flag);
-		break;
-	}
-
 	return (1);
 }
 
 static int
-lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
+lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
+		    unsigned tgt_id, struct lpfc_rport_data *rdata)
 {
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
 
+	lpfc_cmd->rdata = rdata;
 	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
 	if (!ret)
 		return FAILED;
@@ -726,6 +687,13 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
 	if (!iocbqrsp)
 		return FAILED;
 
+	/* Issue Target Reset to TGT <num> */
+	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+			"%d:0702 Issue Target Reset to TGT %d "
+			"Data: x%x x%x\n",
+			phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
+			rdata->pnode->nlp_flag);
+
 	ret = lpfc_sli_issue_iocb_wait(phba,
 				       &phba->sli.ring[phba->sli.fcp_ring],
 				       iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -1021,6 +989,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	lpfc_cmd->pCmd = cmnd;
 	lpfc_cmd->timeout = 60;
 	lpfc_cmd->scsi_hba = phba;
+	lpfc_cmd->rdata = rdata;
 
 	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
 	if (!ret)
@@ -1033,6 +1002,11 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 	if (iocbqrsp == NULL)
 		goto out_free_scsi_buf;
 
+	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
+			"%d:0703 Issue LUN Reset to TGT %d LUN %d "
+			"Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
+			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
+
 	ret = lpfc_sli_issue_iocb_wait(phba,
 				       &phba->sli.ring[phba->sli.fcp_ring],
 				       iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -1104,7 +1078,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 	int match;
 	int ret = FAILED, i, err_count = 0;
 	int cnt, loopcnt;
-	unsigned int midlayer_id = 0;
 	struct lpfc_scsi_buf * lpfc_cmd;
 
 	lpfc_block_requests(phba);
@@ -1124,7 +1097,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 	 * targets known to the driver. Should any target reset
 	 * fail, this routine returns failure to the midlayer.
 	 */
-	midlayer_id = cmnd->device->id;
 	for (i = 0; i < MAX_FCP_TARGET; i++) {
 		/* Search the mapped list for this target ID */
 		match = 0;
@@ -1137,9 +1109,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 		if (!match)
 			continue;
 
-		lpfc_cmd->pCmd->device->id = i;
-		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
-		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
+		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
+					  i, ndlp->rport->dd_data);
 		if (ret != SUCCESS) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 				"%d:0713 Bus Reset on target %d failed\n",
@@ -1158,7 +1129,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 	 * the targets. Unfortunately, some targets do not abide by
 	 * this forcing the driver to double check.
 	 */
-	cmnd->device->id = midlayer_id;
 	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
 				0, 0, LPFC_CTX_HOST);
 	if (cnt)
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4cf1366108b7..6b737568b831 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.1.4"
+#define LPFC_DRIVER_VERSION "8.1.6"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 80b68a2481b3..de35ffe2f79d 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4471,7 +4471,6 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4471{ 4471{
4472 Scsi_Cmnd *scmd; 4472 Scsi_Cmnd *scmd;
4473 struct scsi_device *sdev; 4473 struct scsi_device *sdev;
4474 unsigned long flags = 0;
4475 scb_t *scb; 4474 scb_t *scb;
4476 int rval; 4475 int rval;
4477 4476
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c11e5ce6865e..bec1424eda85 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mbox.c 12 * FILE : megaraid_mbox.c
13 * Version : v2.20.4.7 (Nov 14 2005) 13 * Version : v2.20.4.8 (Apr 11 2006)
14 * 14 *
15 * Authors: 15 * Authors:
16 * Atul Mukker <Atul.Mukker@lsil.com> 16 * Atul Mukker <Atul.Mukker@lsil.com>
@@ -2278,6 +2278,7 @@ megaraid_mbox_dpc(unsigned long devp)
2278 unsigned long flags; 2278 unsigned long flags;
2279 uint8_t c; 2279 uint8_t c;
2280 int status; 2280 int status;
2281 uioc_t *kioc;
2281 2282
2282 2283
2283 if (!adapter) return; 2284 if (!adapter) return;
@@ -2320,6 +2321,9 @@ megaraid_mbox_dpc(unsigned long devp)
2320 // remove from local clist 2321 // remove from local clist
2321 list_del_init(&scb->list); 2322 list_del_init(&scb->list);
2322 2323
2324 kioc = (uioc_t *)scb->gp;
2325 kioc->status = 0;
2326
2323 megaraid_mbox_mm_done(adapter, scb); 2327 megaraid_mbox_mm_done(adapter, scb);
2324 2328
2325 continue; 2329 continue;
@@ -2636,6 +2640,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2636 int recovery_window; 2640 int recovery_window;
2637 int recovering; 2641 int recovering;
2638 int i; 2642 int i;
2643 uioc_t *kioc;
2639 2644
2640 adapter = SCP2ADAPTER(scp); 2645 adapter = SCP2ADAPTER(scp);
2641 raid_dev = ADAP2RAIDDEV(adapter); 2646 raid_dev = ADAP2RAIDDEV(adapter);
@@ -2655,32 +2660,51 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2655 // Also, reset all the commands currently owned by the driver 2660 // Also, reset all the commands currently owned by the driver
2656 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); 2661 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2657 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { 2662 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2658
2659 list_del_init(&scb->list); // from pending list 2663 list_del_init(&scb->list); // from pending list
2660 2664
2661 con_log(CL_ANN, (KERN_WARNING 2665 if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2662 "megaraid: %ld:%d[%d:%d], reset from pending list\n", 2666 con_log(CL_ANN, (KERN_WARNING
2663 scp->serial_number, scb->sno, 2667 "megaraid: IOCTL packet with %d[%d:%d] being reset\n",
2664 scb->dev_channel, scb->dev_target)); 2668 scb->sno, scb->dev_channel, scb->dev_target));
2665 2669
2666 scp->result = (DID_RESET << 16); 2670 scb->status = -1;
2667 scp->scsi_done(scp);
2668 2671
2669 megaraid_dealloc_scb(adapter, scb); 2672 kioc = (uioc_t *)scb->gp;
2673 kioc->status = -EFAULT;
2674
2675 megaraid_mbox_mm_done(adapter, scb);
2676 } else {
2677 if (scb->scp == scp) { // Found command
2678 con_log(CL_ANN, (KERN_WARNING
2679 "megaraid: %ld:%d[%d:%d], reset from pending list\n",
2680 scp->serial_number, scb->sno,
2681 scb->dev_channel, scb->dev_target));
2682 } else {
2683 con_log(CL_ANN, (KERN_WARNING
2684 "megaraid: IO packet with %d[%d:%d] being reset\n",
2685 scb->sno, scb->dev_channel, scb->dev_target));
2686 }
2687
2688 scb->scp->result = (DID_RESET << 16);
2689 scb->scp->scsi_done(scb->scp);
2690
2691 megaraid_dealloc_scb(adapter, scb);
2692 }
2670 } 2693 }
2671 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); 2694 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2672 2695
2673 if (adapter->outstanding_cmds) { 2696 if (adapter->outstanding_cmds) {
2674 con_log(CL_ANN, (KERN_NOTICE 2697 con_log(CL_ANN, (KERN_NOTICE
2675 "megaraid: %d outstanding commands. Max wait %d sec\n", 2698 "megaraid: %d outstanding commands. Max wait %d sec\n",
2676 adapter->outstanding_cmds, MBOX_RESET_WAIT)); 2699 adapter->outstanding_cmds,
2700 (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
2677 } 2701 }
2678 2702
2679 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; 2703 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
2680 2704
2681 recovering = adapter->outstanding_cmds; 2705 recovering = adapter->outstanding_cmds;
2682 2706
2683 for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) { 2707 for (i = 0; i < recovery_window; i++) {
2684 2708
2685 megaraid_ack_sequence(adapter); 2709 megaraid_ack_sequence(adapter);
2686 2710
@@ -2689,12 +2713,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2689 con_log(CL_ANN, ( 2713 con_log(CL_ANN, (
2690 "megaraid mbox: Wait for %d commands to complete:%d\n", 2714 "megaraid mbox: Wait for %d commands to complete:%d\n",
2691 adapter->outstanding_cmds, 2715 adapter->outstanding_cmds,
2692 MBOX_RESET_WAIT - i)); 2716 (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
2693 } 2717 }
2694 2718
2695 // bailout if no recovery happended in reset time 2719 // bailout if no recovery happended in reset time
2696 if ((i == MBOX_RESET_WAIT) && 2720 if (adapter->outstanding_cmds == 0) {
2697 (recovering == adapter->outstanding_cmds)) {
2698 break; 2721 break;
2699 } 2722 }
2700 2723
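
In the megaraid_reset_handler() hunks above, the pending-list walk now tells driver-internal IOCTL packets (scb->sno >= MBOX_MAX_SCSI_CMDS) apart from mid-layer SCSI commands and completes each through its own path, while the recovery loop polls for up to MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT seconds and breaks out once no commands remain outstanding. A compact sketch of that dispatch idea, using stand-in types rather than the real megaraid structures:

#include <stdio.h>

#define MAX_SCSI_CMDS 32   /* stand-in for MBOX_MAX_SCSI_CMDS */

struct pkt { int sno; int is_done; int status; };

static void complete_ioctl(struct pkt *p) { p->status = -14; p->is_done = 1; } /* -EFAULT-ish */
static void complete_scsi(struct pkt *p)  { p->status = 0x7;  p->is_done = 1; } /* DID_RESET-ish */

static void flush_pending(struct pkt *list, int n)
{
        for (int i = 0; i < n; i++) {
                if (list[i].sno >= MAX_SCSI_CMDS)
                        complete_ioctl(&list[i]);   /* user-space IOCTL packet */
                else
                        complete_scsi(&list[i]);    /* normal mid-layer command */
        }
}

int main(void)
{
        struct pkt pend[3] = { { .sno = 1 }, { .sno = 40 }, { .sno = 2 } };

        flush_pending(pend, 3);
        for (int i = 0; i < 3; i++)
                printf("sno=%d done=%d status=%d\n",
                       pend[i].sno, pend[i].is_done, pend[i].status);
        return 0;
}
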
@@ -2918,12 +2941,13 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
2918 wmb(); 2941 wmb();
2919 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); 2942 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2920 2943
2921 for (i = 0; i < 0xFFFFF; i++) { 2944 for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
2922 if (mbox->numstatus != 0xFF) break; 2945 if (mbox->numstatus != 0xFF) break;
2923 rmb(); 2946 rmb();
2947 udelay(MBOX_SYNC_DELAY_200);
2924 } 2948 }
2925 2949
2926 if (i == 0xFFFFF) { 2950 if (i == MBOX_SYNC_WAIT_CNT) {
2927 // We may need to re-calibrate the counter 2951 // We may need to re-calibrate the counter
2928 con_log(CL_ANN, (KERN_CRIT 2952 con_log(CL_ANN, (KERN_CRIT
2929 "megaraid: fast sync command timed out\n")); 2953 "megaraid: fast sync command timed out\n"));
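
The fast-sync hunk above replaces the bare 0xFFFFF spin count with a named bound (MBOX_SYNC_WAIT_CNT) plus a fixed 200 microsecond delay per iteration, so the timeout is defined in one place and has a predictable duration. A user-space sketch of the same bounded-poll shape, assuming nothing about the real mailbox layout (usleep() stands in for udelay(), and the completion is faked):

#include <stdio.h>
#include <unistd.h>

#define SYNC_WAIT_CNT   0xFFFF   /* stand-in for MBOX_SYNC_WAIT_CNT */
#define SYNC_DELAY_US   200      /* stand-in for MBOX_SYNC_DELAY_200 */

static volatile unsigned char numstatus = 0xFF;

static int poll_mailbox(void)
{
        unsigned int i;

        for (i = 0; i < SYNC_WAIT_CNT; i++) {
                if (numstatus != 0xFF)
                        break;                    /* firmware posted a status */
                usleep(SYNC_DELAY_US);            /* the kernel code uses udelay() */
                if (i == 10)
                        numstatus = 0;            /* simulate completion */
        }
        return (i == SYNC_WAIT_CNT) ? -1 : 0;     /* -1: timed out */
}

int main(void)
{
        printf("poll_mailbox() = %d\n", poll_mailbox());
        return 0;
}
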
@@ -3475,7 +3499,7 @@ megaraid_cmm_register(adapter_t *adapter)
3475 adp.drvr_data = (unsigned long)adapter; 3499 adp.drvr_data = (unsigned long)adapter;
3476 adp.pdev = adapter->pdev; 3500 adp.pdev = adapter->pdev;
3477 adp.issue_uioc = megaraid_mbox_mm_handler; 3501 adp.issue_uioc = megaraid_mbox_mm_handler;
3478 adp.timeout = 300; 3502 adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
3479 adp.max_kioc = MBOX_MAX_USER_CMDS; 3503 adp.max_kioc = MBOX_MAX_USER_CMDS;
3480 3504
3481 if ((rval = mraid_mm_register_adp(&adp)) != 0) { 3505 if ((rval = mraid_mm_register_adp(&adp)) != 0) {
@@ -3702,7 +3726,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3702 unsigned long flags; 3726 unsigned long flags;
3703 3727
3704 kioc = (uioc_t *)scb->gp; 3728 kioc = (uioc_t *)scb->gp;
3705 kioc->status = 0;
3706 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; 3729 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3707 mbox64->mbox32.status = scb->status; 3730 mbox64->mbox32.status = scb->status;
3708 raw_mbox = (uint8_t *)&mbox64->mbox32; 3731 raw_mbox = (uint8_t *)&mbox64->mbox32;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 882fb1a0b575..868fb0ec93e7 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
21#include "megaraid_ioctl.h" 21#include "megaraid_ioctl.h"
22 22
23 23
24#define MEGARAID_VERSION "2.20.4.7" 24#define MEGARAID_VERSION "2.20.4.8"
25#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)" 25#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)"
26 26
27 27
28/* 28/*
@@ -100,6 +100,9 @@
100#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox 100#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox
101#define MBOX_RESET_WAIT 180 // wait these many seconds in reset 101#define MBOX_RESET_WAIT 180 // wait these many seconds in reset
102#define MBOX_RESET_EXT_WAIT 120 // extended wait reset 102#define MBOX_RESET_EXT_WAIT 120 // extended wait reset
103#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode
104
105#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds
103 106
104/* 107/*
105 * maximum transfer that can happen through the firmware commands issued 108 * maximum transfer that can happen through the firmware commands issued
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8f3ce0432295..e8f534fb336b 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -898,10 +898,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
898 898
899 adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); 899 adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
900 900
901 if (!adapter) { 901 if (!adapter)
902 rval = -ENOMEM; 902 return -ENOMEM;
903 goto memalloc_error;
904 }
905 903
906 memset(adapter, 0, sizeof(mraid_mmadp_t)); 904 memset(adapter, 0, sizeof(mraid_mmadp_t));
907 905
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 017729c59a49..584fe5d8e507 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -599,6 +599,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
599* Either SUCCESS or FAILED. 599* Either SUCCESS or FAILED.
600* 600*
601* Note: 601* Note:
602* Only return FAILED if command not returned by firmware.
602**************************************************************************/ 603**************************************************************************/
603int 604int
604qla2xxx_eh_abort(struct scsi_cmnd *cmd) 605qla2xxx_eh_abort(struct scsi_cmnd *cmd)
@@ -609,11 +610,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
609 unsigned int id, lun; 610 unsigned int id, lun;
610 unsigned long serial; 611 unsigned long serial;
611 unsigned long flags; 612 unsigned long flags;
613 int wait = 0;
612 614
613 if (!CMD_SP(cmd)) 615 if (!CMD_SP(cmd))
614 return FAILED; 616 return SUCCESS;
615 617
616 ret = FAILED; 618 ret = SUCCESS;
617 619
618 id = cmd->device->id; 620 id = cmd->device->id;
619 lun = cmd->device->lun; 621 lun = cmd->device->lun;
@@ -642,7 +644,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
642 } else { 644 } else {
643 DEBUG3(printk("%s(%ld): abort_command " 645 DEBUG3(printk("%s(%ld): abort_command "
644 "mbx success.\n", __func__, ha->host_no)); 646 "mbx success.\n", __func__, ha->host_no));
645 ret = SUCCESS; 647 wait = 1;
646 } 648 }
647 spin_lock_irqsave(&ha->hardware_lock, flags); 649 spin_lock_irqsave(&ha->hardware_lock, flags);
648 650
@@ -651,17 +653,18 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
651 spin_unlock_irqrestore(&ha->hardware_lock, flags); 653 spin_unlock_irqrestore(&ha->hardware_lock, flags);
652 654
653 /* Wait for the command to be returned. */ 655 /* Wait for the command to be returned. */
654 if (ret == SUCCESS) { 656 if (wait) {
655 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 657 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
656 qla_printk(KERN_ERR, ha, 658 qla_printk(KERN_ERR, ha,
657 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 659 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
658 "%x.\n", ha->host_no, id, lun, serial, ret); 660 "%x.\n", ha->host_no, id, lun, serial, ret);
661 ret = FAILED;
659 } 662 }
660 } 663 }
661 664
662 qla_printk(KERN_INFO, ha, 665 qla_printk(KERN_INFO, ha,
663 "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no, 666 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
664 id, lun, serial, ret); 667 ha->host_no, id, lun, wait, serial, ret);
665 668
666 return ret; 669 return ret;
667} 670}
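
The qla2xxx_eh_abort() hunk above changes the return convention: the handler now defaults to SUCCESS, tracks whether it still has to wait for the firmware to return the command in a separate wait flag, and reports FAILED only when that wait times out. A small sketch of the control flow; the fw_abort_command() and wait_for_completion_ok() helpers are made up for illustration:

#include <stdio.h>

enum { SUCCESS = 0, FAILED = 1 };

static int fw_abort_command(int sp)       { return sp == 5 ? -1 : 0; }  /* fake mailbox result */
static int wait_for_completion_ok(int sp) { return sp != 3; }           /* fake wait outcome  */

static int eh_abort(int sp_present, int sp)
{
        int ret = SUCCESS;
        int wait = 0;

        if (!sp_present)
                return SUCCESS;           /* already completed: nothing to abort */

        if (fw_abort_command(sp) == 0)
                wait = 1;                 /* mailbox accepted, wait for the command */

        if (wait && !wait_for_completion_ok(sp))
                ret = FAILED;             /* firmware never returned the command */

        return ret;
}

int main(void)
{
        printf("%d %d %d\n", eh_abort(0, 1), eh_abort(1, 2), eh_abort(1, 3));
        return 0;
}
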
@@ -1700,8 +1703,8 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1700 ha->flags.online = 0; 1703 ha->flags.online = 0;
1701 1704
1702 /* Detach interrupts */ 1705 /* Detach interrupts */
1703 if (ha->pdev->irq) 1706 if (ha->host->irq)
1704 free_irq(ha->pdev->irq, ha); 1707 free_irq(ha->host->irq, ha);
1705 1708
1706 /* release io space registers */ 1709 /* release io space registers */
1707 if (ha->iobase) 1710 if (ha->iobase)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c750d3399a97..941c1e15c899 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -56,6 +56,8 @@ static struct {
56 {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ 56 {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */
57 {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ 57 {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */
58 {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ 58 {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */
59 {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */
60 {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */
59 {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ 61 {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */
60 {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ 62 {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */
61 {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ 63 {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7b0f9a3810d2..764a8b375ead 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1067,16 +1067,29 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1067 break; 1067 break;
1068 case NOT_READY: 1068 case NOT_READY:
1069 /* 1069 /*
1070 * If the device is in the process of becoming ready, 1070 * If the device is in the process of becoming
1071 * retry. 1071 * ready, or has a temporary blockage, retry.
1072 */ 1072 */
1073 if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { 1073 if (sshdr.asc == 0x04) {
1074 scsi_requeue_command(q, cmd); 1074 switch (sshdr.ascq) {
1075 return; 1075 case 0x01: /* becoming ready */
1076 case 0x04: /* format in progress */
1077 case 0x05: /* rebuild in progress */
1078 case 0x06: /* recalculation in progress */
1079 case 0x07: /* operation in progress */
1080 case 0x08: /* Long write in progress */
1081 case 0x09: /* self test in progress */
1082 scsi_requeue_command(q, cmd);
1083 return;
1084 default:
1085 break;
1086 }
1076 } 1087 }
1077 if (!(req->flags & REQ_QUIET)) 1088 if (!(req->flags & REQ_QUIET)) {
1078 scmd_printk(KERN_INFO, cmd, 1089 scmd_printk(KERN_INFO, cmd,
1079 "Device not ready.\n"); 1090 "Device not ready: ");
1091 scsi_print_sense_hdr("", &sshdr);
1092 }
1080 scsi_end_request(cmd, 0, this_count, 1); 1093 scsi_end_request(cmd, 0, this_count, 1);
1081 return; 1094 return;
1082 case VOLUME_OVERFLOW: 1095 case VOLUME_OVERFLOW:
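
The scsi_lib.c hunk above widens the NOT_READY retry test from the single ASC 0x04 / ASCQ 0x01 pair to a whitelist of "in progress" qualifiers, and prints the sense header when the device really is not ready. A stand-alone sketch of that classification; not_ready_should_retry() is an illustrative name and the ASC/ASCQ values mirror the hunk:

#include <stdio.h>

/* Return 1 if NOT_READY with ASC 0x04 and an "operation in progress"
 * qualifier should be retried rather than failed. */
static int not_ready_should_retry(unsigned char asc, unsigned char ascq)
{
        if (asc != 0x04)
                return 0;

        switch (ascq) {
        case 0x01: /* becoming ready */
        case 0x04: /* format in progress */
        case 0x05: /* rebuild in progress */
        case 0x06: /* recalculation in progress */
        case 0x07: /* operation in progress */
        case 0x08: /* long write in progress */
        case 0x09: /* self test in progress */
                return 1;
        default:
                return 0;
        }
}

int main(void)
{
        printf("%d %d %d\n",
               not_ready_should_retry(0x04, 0x01),   /* retry */
               not_ready_should_retry(0x04, 0x02),   /* fail: needs START UNIT */
               not_ready_should_retry(0x3a, 0x00));  /* fail: medium not present */
        return 0;
}
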
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 3274ab76c8d3..255886a9ac55 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -75,7 +75,7 @@ param_setup(char *str)
75 else if(!strncmp(pos, "id:", 3)) { 75 else if(!strncmp(pos, "id:", 3)) {
76 if(slot == -1) { 76 if(slot == -1) {
77 printk(KERN_WARNING "sim710: Must specify slot for id parameter\n"); 77 printk(KERN_WARNING "sim710: Must specify slot for id parameter\n");
78 } else if(slot > MAX_SLOTS) { 78 } else if(slot >= MAX_SLOTS) {
79 printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val); 79 printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val);
80 } else { 80 } else {
81 id_array[slot] = val; 81 id_array[slot] = val;
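
The sim710 change above is the classic off-by-one: a slot used to index id_array[MAX_SLOTS] has to be rejected when it is greater than or equal to MAX_SLOTS, not only when it is greater. A tiny illustration with a made-up array size:

#include <stdio.h>

#define MAX_SLOTS 4

static int id_array[MAX_SLOTS];

static int set_id(int slot, int val)
{
        if (slot < 0 || slot >= MAX_SLOTS)   /* '>' alone would let slot == MAX_SLOTS through */
                return -1;
        id_array[slot] = val;
        return 0;
}

int main(void)
{
        printf("%d %d\n", set_id(3, 7), set_id(4, 7));  /* 0, then -1 */
        return 0;
}
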
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 674b15c78f68..bbf78aaf9e01 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -362,6 +362,40 @@ serial_out(struct uart_8250_port *up, int offset, int value)
362#define serial_inp(up, offset) serial_in(up, offset) 362#define serial_inp(up, offset) serial_in(up, offset)
363#define serial_outp(up, offset, value) serial_out(up, offset, value) 363#define serial_outp(up, offset, value) serial_out(up, offset, value)
364 364
365/* Uart divisor latch read */
366static inline int _serial_dl_read(struct uart_8250_port *up)
367{
368 return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8;
369}
370
371/* Uart divisor latch write */
372static inline void _serial_dl_write(struct uart_8250_port *up, int value)
373{
374 serial_outp(up, UART_DLL, value & 0xff);
375 serial_outp(up, UART_DLM, value >> 8 & 0xff);
376}
377
378#ifdef CONFIG_SERIAL_8250_AU1X00
 379/* Au1x00 doesn't have a standard divisor latch */
380static int serial_dl_read(struct uart_8250_port *up)
381{
382 if (up->port.iotype == UPIO_AU)
383 return __raw_readl(up->port.membase + 0x28);
384 else
385 return _serial_dl_read(up);
386}
387
388static void serial_dl_write(struct uart_8250_port *up, int value)
389{
390 if (up->port.iotype == UPIO_AU)
391 __raw_writel(value, up->port.membase + 0x28);
392 else
393 _serial_dl_write(up, value);
394}
395#else
396#define serial_dl_read(up) _serial_dl_read(up)
397#define serial_dl_write(up, value) _serial_dl_write(up, value)
398#endif
365 399
366/* 400/*
367 * For the 16C950 401 * For the 16C950
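
The 8250 additions above funnel every access to the UART_DLL/UART_DLM pair through serial_dl_read()/serial_dl_write(), so the Au1x00 case, which has no standard divisor latch, only has to override those helpers. A compilable sketch of the same accessor pattern with faked register I/O; none of these names come from the driver:

#include <stdio.h>

/* Fake register file: [0]=DLL, [1]=DLM, [2]=Au1x00-style 32-bit divisor */
static unsigned int regs[3];

static unsigned int reg_in(int off)           { return regs[off]; }
static void reg_out(int off, unsigned int v)  { regs[off] = v; }

/* Generic divisor latch accessors (two 8-bit registers). */
static int dl_read(void)        { return reg_in(0) | (reg_in(1) << 8); }
static void dl_write(int value) { reg_out(0, value & 0xff); reg_out(1, (value >> 8) & 0xff); }

/* Variant port with a single wide divisor register, selected at run time. */
static int au_port;
static int serial_dl_read(void)        { return au_port ? (int)reg_in(2) : dl_read(); }
static void serial_dl_write(int value) { if (au_port) reg_out(2, value); else dl_write(value); }

int main(void)
{
        serial_dl_write(0x1234);
        printf("std: 0x%x\n", serial_dl_read());
        au_port = 1;
        serial_dl_write(0x5678);
        printf("au:  0x%x\n", serial_dl_read());
        return 0;
}
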
@@ -494,7 +528,8 @@ static void disable_rsa(struct uart_8250_port *up)
494 */ 528 */
495static int size_fifo(struct uart_8250_port *up) 529static int size_fifo(struct uart_8250_port *up)
496{ 530{
497 unsigned char old_fcr, old_mcr, old_dll, old_dlm, old_lcr; 531 unsigned char old_fcr, old_mcr, old_lcr;
532 unsigned short old_dl;
498 int count; 533 int count;
499 534
500 old_lcr = serial_inp(up, UART_LCR); 535 old_lcr = serial_inp(up, UART_LCR);
@@ -505,10 +540,8 @@ static int size_fifo(struct uart_8250_port *up)
505 UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); 540 UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
506 serial_outp(up, UART_MCR, UART_MCR_LOOP); 541 serial_outp(up, UART_MCR, UART_MCR_LOOP);
507 serial_outp(up, UART_LCR, UART_LCR_DLAB); 542 serial_outp(up, UART_LCR, UART_LCR_DLAB);
508 old_dll = serial_inp(up, UART_DLL); 543 old_dl = serial_dl_read(up);
509 old_dlm = serial_inp(up, UART_DLM); 544 serial_dl_write(up, 0x0001);
510 serial_outp(up, UART_DLL, 0x01);
511 serial_outp(up, UART_DLM, 0x00);
512 serial_outp(up, UART_LCR, 0x03); 545 serial_outp(up, UART_LCR, 0x03);
513 for (count = 0; count < 256; count++) 546 for (count = 0; count < 256; count++)
514 serial_outp(up, UART_TX, count); 547 serial_outp(up, UART_TX, count);
@@ -519,8 +552,7 @@ static int size_fifo(struct uart_8250_port *up)
519 serial_outp(up, UART_FCR, old_fcr); 552 serial_outp(up, UART_FCR, old_fcr);
520 serial_outp(up, UART_MCR, old_mcr); 553 serial_outp(up, UART_MCR, old_mcr);
521 serial_outp(up, UART_LCR, UART_LCR_DLAB); 554 serial_outp(up, UART_LCR, UART_LCR_DLAB);
522 serial_outp(up, UART_DLL, old_dll); 555 serial_dl_write(up, old_dl);
523 serial_outp(up, UART_DLM, old_dlm);
524 serial_outp(up, UART_LCR, old_lcr); 556 serial_outp(up, UART_LCR, old_lcr);
525 557
526 return count; 558 return count;
@@ -750,8 +782,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
750 782
751 serial_outp(up, UART_LCR, 0xE0); 783 serial_outp(up, UART_LCR, 0xE0);
752 784
753 quot = serial_inp(up, UART_DLM) << 8; 785 quot = serial_dl_read(up);
754 quot += serial_inp(up, UART_DLL);
755 quot <<= 3; 786 quot <<= 3;
756 787
757 status1 = serial_in(up, 0x04); /* EXCR1 */ 788 status1 = serial_in(up, 0x04); /* EXCR1 */
@@ -759,8 +790,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
759 status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ 790 status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
760 serial_outp(up, 0x04, status1); 791 serial_outp(up, 0x04, status1);
761 792
762 serial_outp(up, UART_DLL, quot & 0xff); 793 serial_dl_write(up, quot);
763 serial_outp(up, UART_DLM, quot >> 8);
764 794
765 serial_outp(up, UART_LCR, 0); 795 serial_outp(up, UART_LCR, 0);
766 796
@@ -1862,8 +1892,7 @@ serial8250_set_termios(struct uart_port *port, struct termios *termios,
1862 serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */ 1892 serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
1863 } 1893 }
1864 1894
1865 serial_outp(up, UART_DLL, quot & 0xff); /* LS of divisor */ 1895 serial_dl_write(up, quot);
1866 serial_outp(up, UART_DLM, quot >> 8); /* MS of divisor */
1867 1896
1868 /* 1897 /*
1869 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR 1898 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
@@ -1906,6 +1935,9 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
1906 int ret = 0; 1935 int ret = 0;
1907 1936
1908 switch (up->port.iotype) { 1937 switch (up->port.iotype) {
1938 case UPIO_AU:
1939 size = 0x100000;
1940 /* fall thru */
1909 case UPIO_MEM: 1941 case UPIO_MEM:
1910 if (!up->port.mapbase) 1942 if (!up->port.mapbase)
1911 break; 1943 break;
@@ -1938,6 +1970,9 @@ static void serial8250_release_std_resource(struct uart_8250_port *up)
1938 unsigned int size = 8 << up->port.regshift; 1970 unsigned int size = 8 << up->port.regshift;
1939 1971
1940 switch (up->port.iotype) { 1972 switch (up->port.iotype) {
1973 case UPIO_AU:
1974 size = 0x100000;
1975 /* fall thru */
1941 case UPIO_MEM: 1976 case UPIO_MEM:
1942 if (!up->port.mapbase) 1977 if (!up->port.mapbase)
1943 break; 1978 break;
@@ -2200,10 +2235,17 @@ static void
2200serial8250_console_write(struct console *co, const char *s, unsigned int count) 2235serial8250_console_write(struct console *co, const char *s, unsigned int count)
2201{ 2236{
2202 struct uart_8250_port *up = &serial8250_ports[co->index]; 2237 struct uart_8250_port *up = &serial8250_ports[co->index];
2238 unsigned long flags;
2203 unsigned int ier; 2239 unsigned int ier;
2240 int locked = 1;
2204 2241
2205 touch_nmi_watchdog(); 2242 touch_nmi_watchdog();
2206 2243
2244 if (oops_in_progress) {
2245 locked = spin_trylock_irqsave(&up->port.lock, flags);
2246 } else
2247 spin_lock_irqsave(&up->port.lock, flags);
2248
2207 /* 2249 /*
2208 * First save the IER then disable the interrupts 2250 * First save the IER then disable the interrupts
2209 */ 2251 */
@@ -2221,8 +2263,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
2221 * and restore the IER 2263 * and restore the IER
2222 */ 2264 */
2223 wait_for_xmitr(up, BOTH_EMPTY); 2265 wait_for_xmitr(up, BOTH_EMPTY);
2224 up->ier |= UART_IER_THRI; 2266 serial_out(up, UART_IER, ier);
2225 serial_out(up, UART_IER, ier | UART_IER_THRI); 2267
2268 if (locked)
2269 spin_unlock_irqrestore(&up->port.lock, flags);
2226} 2270}
2227 2271
2228static int serial8250_console_setup(struct console *co, char *options) 2272static int serial8250_console_setup(struct console *co, char *options)
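
The serial8250_console_write() hunk above takes the port lock around console output but only tries the lock when an oops is in progress, so a panic raised while the lock is already held can still get its message out. A pthread sketch of that "trylock when dying, lock otherwise" shape, with oops_in_progress reduced to a plain flag:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static int oops_in_progress;

static void console_write(const char *s)
{
        int locked = 1;

        if (oops_in_progress)
                locked = (pthread_mutex_trylock(&port_lock) == 0);
        else
                pthread_mutex_lock(&port_lock);

        fputs(s, stdout);        /* the actual character output */

        if (locked)
                pthread_mutex_unlock(&port_lock);
}

int main(void)
{
        console_write("normal path\n");
        pthread_mutex_lock(&port_lock);   /* pretend the lock owner crashed */
        oops_in_progress = 1;
        console_write("oops path still prints\n");
        return 0;
}
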
diff --git a/drivers/serial/8250_au1x00.c b/drivers/serial/8250_au1x00.c
index 3d1bfd07208d..58015fd14be9 100644
--- a/drivers/serial/8250_au1x00.c
+++ b/drivers/serial/8250_au1x00.c
@@ -30,13 +30,12 @@
30 { \ 30 { \
31 .iobase = _base, \ 31 .iobase = _base, \
32 .membase = (void __iomem *)_base,\ 32 .membase = (void __iomem *)_base,\
33 .mapbase = _base, \ 33 .mapbase = CPHYSADDR(_base), \
34 .irq = _irq, \ 34 .irq = _irq, \
35 .uartclk = 0, /* filled */ \ 35 .uartclk = 0, /* filled */ \
36 .regshift = 2, \ 36 .regshift = 2, \
37 .iotype = UPIO_AU, \ 37 .iotype = UPIO_AU, \
38 .flags = UPF_SKIP_TEST | \ 38 .flags = UPF_SKIP_TEST \
39 UPF_IOREMAP, \
40 } 39 }
41 40
42static struct plat_serial8250_port au1x00_data[] = { 41static struct plat_serial8250_port au1x00_data[] = {
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index fcd7744c4253..17839e753e4c 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1500,20 +1500,18 @@ uart_block_til_ready(struct file *filp, struct uart_state *state)
1500static struct uart_state *uart_get(struct uart_driver *drv, int line) 1500static struct uart_state *uart_get(struct uart_driver *drv, int line)
1501{ 1501{
1502 struct uart_state *state; 1502 struct uart_state *state;
1503 int ret = 0;
1503 1504
1504 mutex_lock(&port_mutex);
1505 state = drv->state + line; 1505 state = drv->state + line;
1506 if (mutex_lock_interruptible(&state->mutex)) { 1506 if (mutex_lock_interruptible(&state->mutex)) {
1507 state = ERR_PTR(-ERESTARTSYS); 1507 ret = -ERESTARTSYS;
1508 goto out; 1508 goto err;
1509 } 1509 }
1510 1510
1511 state->count++; 1511 state->count++;
1512 if (!state->port) { 1512 if (!state->port || state->port->flags & UPF_DEAD) {
1513 state->count--; 1513 ret = -ENXIO;
1514 mutex_unlock(&state->mutex); 1514 goto err_unlock;
1515 state = ERR_PTR(-ENXIO);
1516 goto out;
1517 } 1515 }
1518 1516
1519 if (!state->info) { 1517 if (!state->info) {
@@ -1531,15 +1529,17 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
1531 tasklet_init(&state->info->tlet, uart_tasklet_action, 1529 tasklet_init(&state->info->tlet, uart_tasklet_action,
1532 (unsigned long)state); 1530 (unsigned long)state);
1533 } else { 1531 } else {
1534 state->count--; 1532 ret = -ENOMEM;
1535 mutex_unlock(&state->mutex); 1533 goto err_unlock;
1536 state = ERR_PTR(-ENOMEM);
1537 } 1534 }
1538 } 1535 }
1539
1540 out:
1541 mutex_unlock(&port_mutex);
1542 return state; 1536 return state;
1537
1538 err_unlock:
1539 state->count--;
1540 mutex_unlock(&state->mutex);
1541 err:
1542 return ERR_PTR(ret);
1543} 1543}
1544 1544
1545/* 1545/*
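
The reworked uart_get() above drops the driver-wide port_mutex, checks the new UPF_DEAD flag while holding the per-port mutex, and routes every failure through the err_unlock/err labels so the reference count and the mutex are always unwound the same way. A small sketch of that error-path shape with invented types (a pthread mutex stands in for the kernel mutex):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define FLAG_DEAD 0x1

struct port_state {
        pthread_mutex_t mutex;
        int count;
        int present;
        int flags;
};

static int port_get(struct port_state *state)
{
        int ret;

        if (pthread_mutex_lock(&state->mutex))
                return -EINTR;                    /* mutex_lock_interruptible() analogue */

        state->count++;
        if (!state->present || (state->flags & FLAG_DEAD)) {
                ret = -ENXIO;
                goto err_unlock;                  /* one unwind path for every failure */
        }
        return 0;                                 /* caller holds the mutex on success */

err_unlock:
        state->count--;
        pthread_mutex_unlock(&state->mutex);
        return ret;
}

int main(void)
{
        struct port_state s = { PTHREAD_MUTEX_INITIALIZER, 0, 1, FLAG_DEAD };

        printf("port_get: %d (count=%d)\n", port_get(&s), s.count);
        return 0;
}
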
@@ -1907,9 +1907,12 @@ uart_set_options(struct uart_port *port, struct console *co,
1907static void uart_change_pm(struct uart_state *state, int pm_state) 1907static void uart_change_pm(struct uart_state *state, int pm_state)
1908{ 1908{
1909 struct uart_port *port = state->port; 1909 struct uart_port *port = state->port;
1910 if (port->ops->pm) 1910
1911 port->ops->pm(port, pm_state, state->pm_state); 1911 if (state->pm_state != pm_state) {
1912 state->pm_state = pm_state; 1912 if (port->ops->pm)
1913 port->ops->pm(port, pm_state, state->pm_state);
1914 state->pm_state = pm_state;
1915 }
1913} 1916}
1914 1917
1915int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) 1918int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
@@ -2085,45 +2088,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
2085 } 2088 }
2086} 2089}
2087 2090
2088/*
2089 * This reverses the effects of uart_configure_port, hanging up the
2090 * port before removal.
2091 */
2092static void
2093uart_unconfigure_port(struct uart_driver *drv, struct uart_state *state)
2094{
2095 struct uart_port *port = state->port;
2096 struct uart_info *info = state->info;
2097
2098 if (info && info->tty)
2099 tty_vhangup(info->tty);
2100
2101 mutex_lock(&state->mutex);
2102
2103 state->info = NULL;
2104
2105 /*
2106 * Free the port IO and memory resources, if any.
2107 */
2108 if (port->type != PORT_UNKNOWN)
2109 port->ops->release_port(port);
2110
2111 /*
2112 * Indicate that there isn't a port here anymore.
2113 */
2114 port->type = PORT_UNKNOWN;
2115
2116 /*
2117 * Kill the tasklet, and free resources.
2118 */
2119 if (info) {
2120 tasklet_kill(&info->tlet);
2121 kfree(info);
2122 }
2123
2124 mutex_unlock(&state->mutex);
2125}
2126
2127static struct tty_operations uart_ops = { 2091static struct tty_operations uart_ops = {
2128 .open = uart_open, 2092 .open = uart_open,
2129 .close = uart_close, 2093 .close = uart_close,
@@ -2270,6 +2234,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
2270 state = drv->state + port->line; 2234 state = drv->state + port->line;
2271 2235
2272 mutex_lock(&port_mutex); 2236 mutex_lock(&port_mutex);
2237 mutex_lock(&state->mutex);
2273 if (state->port) { 2238 if (state->port) {
2274 ret = -EINVAL; 2239 ret = -EINVAL;
2275 goto out; 2240 goto out;
@@ -2304,7 +2269,13 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
2304 port->cons && !(port->cons->flags & CON_ENABLED)) 2269 port->cons && !(port->cons->flags & CON_ENABLED))
2305 register_console(port->cons); 2270 register_console(port->cons);
2306 2271
2272 /*
2273 * Ensure UPF_DEAD is not set.
2274 */
2275 port->flags &= ~UPF_DEAD;
2276
2307 out: 2277 out:
2278 mutex_unlock(&state->mutex);
2308 mutex_unlock(&port_mutex); 2279 mutex_unlock(&port_mutex);
2309 2280
2310 return ret; 2281 return ret;
@@ -2322,6 +2293,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
2322int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) 2293int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
2323{ 2294{
2324 struct uart_state *state = drv->state + port->line; 2295 struct uart_state *state = drv->state + port->line;
2296 struct uart_info *info;
2325 2297
2326 BUG_ON(in_interrupt()); 2298 BUG_ON(in_interrupt());
2327 2299
@@ -2332,11 +2304,48 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
2332 mutex_lock(&port_mutex); 2304 mutex_lock(&port_mutex);
2333 2305
2334 /* 2306 /*
2307 * Mark the port "dead" - this prevents any opens from
2308 * succeeding while we shut down the port.
2309 */
2310 mutex_lock(&state->mutex);
2311 port->flags |= UPF_DEAD;
2312 mutex_unlock(&state->mutex);
2313
2314 /*
2335 * Remove the devices from devfs 2315 * Remove the devices from devfs
2336 */ 2316 */
2337 tty_unregister_device(drv->tty_driver, port->line); 2317 tty_unregister_device(drv->tty_driver, port->line);
2338 2318
2339 uart_unconfigure_port(drv, state); 2319 info = state->info;
2320 if (info && info->tty)
2321 tty_vhangup(info->tty);
2322
2323 /*
2324 * All users of this port should now be disconnected from
2325 * this driver, and the port shut down. We should be the
2326 * only thread fiddling with this port from now on.
2327 */
2328 state->info = NULL;
2329
2330 /*
2331 * Free the port IO and memory resources, if any.
2332 */
2333 if (port->type != PORT_UNKNOWN)
2334 port->ops->release_port(port);
2335
2336 /*
2337 * Indicate that there isn't a port here anymore.
2338 */
2339 port->type = PORT_UNKNOWN;
2340
2341 /*
2342 * Kill the tasklet, and free resources.
2343 */
2344 if (info) {
2345 tasklet_kill(&info->tlet);
2346 kfree(info);
2347 }
2348
2340 state->port = NULL; 2349 state->port = NULL;
2341 mutex_unlock(&port_mutex); 2350 mutex_unlock(&port_mutex);
2342 2351
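
uart_remove_one_port() above now sets UPF_DEAD under the per-port mutex before hanging up users and releasing resources, and uart_get() refuses to open a dead port, which closes the race between open() and hot removal. A tiny ordering sketch along the same lines as the previous one; everything here is illustrative:

#include <pthread.h>
#include <stdio.h>

#define FLAG_DEAD 0x1

struct port_state { pthread_mutex_t mutex; int flags; int present; };

static void port_remove(struct port_state *state)
{
        /* 1. Mark the port dead so new opens bail out ... */
        pthread_mutex_lock(&state->mutex);
        state->flags |= FLAG_DEAD;
        pthread_mutex_unlock(&state->mutex);

        /* 2. ... then hang up users and release resources. */
        state->present = 0;
}

static int port_open_allowed(struct port_state *state)
{
        int ok;

        pthread_mutex_lock(&state->mutex);
        ok = state->present && !(state->flags & FLAG_DEAD);
        pthread_mutex_unlock(&state->mutex);
        return ok;
}

int main(void)
{
        struct port_state s = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

        printf("before remove: %d\n", port_open_allowed(&s));
        port_remove(&s);
        printf("after remove:  %d\n", port_open_allowed(&s));
        return 0;
}
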
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 7a75faeb0526..9ce1d01469b1 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -75,6 +75,14 @@ config SPI_BUTTERFLY
75 inexpensive battery powered microcontroller evaluation board. 75 inexpensive battery powered microcontroller evaluation board.
76 This same cable can be used to flash new firmware. 76 This same cable can be used to flash new firmware.
77 77
78config SPI_PXA2XX
79 tristate "PXA2xx SSP SPI master"
80 depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL
81 help
82 This enables using a PXA2xx SSP port as a SPI master controller.
83 The driver can be configured to use any SSP port and additional
 84 documentation can be found at Documentation/spi/pxa2xx.
85
78# 86#
79# Add new SPI master controllers in alphabetical order above this line 87# Add new SPI master controllers in alphabetical order above this line
80# 88#
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c2c87e845abf..1bca5f95de25 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_MASTER) += spi.o
13# SPI master controller drivers (bus) 13# SPI master controller drivers (bus)
14obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o 14obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
15obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 15obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
16obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
16# ... add above this line ... 17# ... add above this line ...
17 18
18# SPI protocol drivers (device/link on bus) 19# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
new file mode 100644
index 000000000000..596bf820b70c
--- /dev/null
+++ b/drivers/spi/pxa2xx_spi.c
@@ -0,0 +1,1467 @@
1/*
2 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/device.h>
22#include <linux/ioport.h>
23#include <linux/errno.h>
24#include <linux/interrupt.h>
25#include <linux/platform_device.h>
26#include <linux/dma-mapping.h>
27#include <linux/spi/spi.h>
28#include <linux/workqueue.h>
29#include <linux/errno.h>
30#include <linux/delay.h>
31
32#include <asm/io.h>
33#include <asm/irq.h>
34#include <asm/hardware.h>
35#include <asm/delay.h>
36#include <asm/dma.h>
37
38#include <asm/arch/hardware.h>
39#include <asm/arch/pxa-regs.h>
40#include <asm/arch/pxa2xx_spi.h>
41
42MODULE_AUTHOR("Stephen Street");
 43MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
44MODULE_LICENSE("GPL");
45
46#define MAX_BUSES 3
47
48#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
49#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
50#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
51
52#define DEFINE_SSP_REG(reg, off) \
53static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
54static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
55
56DEFINE_SSP_REG(SSCR0, 0x00)
57DEFINE_SSP_REG(SSCR1, 0x04)
58DEFINE_SSP_REG(SSSR, 0x08)
59DEFINE_SSP_REG(SSITR, 0x0c)
60DEFINE_SSP_REG(SSDR, 0x10)
61DEFINE_SSP_REG(SSTO, 0x28)
62DEFINE_SSP_REG(SSPSP, 0x2c)
63
64#define START_STATE ((void*)0)
65#define RUNNING_STATE ((void*)1)
66#define DONE_STATE ((void*)2)
67#define ERROR_STATE ((void*)-1)
68
69#define QUEUE_RUNNING 0
70#define QUEUE_STOPPED 1
71
72struct driver_data {
73 /* Driver model hookup */
74 struct platform_device *pdev;
75
76 /* SPI framework hookup */
77 enum pxa_ssp_type ssp_type;
78 struct spi_master *master;
79
80 /* PXA hookup */
81 struct pxa2xx_spi_master *master_info;
82
83 /* DMA setup stuff */
84 int rx_channel;
85 int tx_channel;
86 u32 *null_dma_buf;
87
88 /* SSP register addresses */
89 void *ioaddr;
90 u32 ssdr_physical;
91
92 /* SSP masks*/
93 u32 dma_cr1;
94 u32 int_cr1;
95 u32 clear_sr;
96 u32 mask_sr;
97
98 /* Driver message queue */
99 struct workqueue_struct *workqueue;
100 struct work_struct pump_messages;
101 spinlock_t lock;
102 struct list_head queue;
103 int busy;
104 int run;
105
106 /* Message Transfer pump */
107 struct tasklet_struct pump_transfers;
108
109 /* Current message transfer state info */
110 struct spi_message* cur_msg;
111 struct spi_transfer* cur_transfer;
112 struct chip_data *cur_chip;
113 size_t len;
114 void *tx;
115 void *tx_end;
116 void *rx;
117 void *rx_end;
118 int dma_mapped;
119 dma_addr_t rx_dma;
120 dma_addr_t tx_dma;
121 size_t rx_map_len;
122 size_t tx_map_len;
123 u8 n_bytes;
124 u32 dma_width;
125 int cs_change;
126 void (*write)(struct driver_data *drv_data);
127 void (*read)(struct driver_data *drv_data);
128 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
129 void (*cs_control)(u32 command);
130};
131
132struct chip_data {
133 u32 cr0;
134 u32 cr1;
135 u32 to;
136 u32 psp;
137 u32 timeout;
138 u8 n_bytes;
139 u32 dma_width;
140 u32 dma_burst_size;
141 u32 threshold;
142 u32 dma_threshold;
143 u8 enable_dma;
144 u8 bits_per_word;
145 u32 speed_hz;
146 void (*write)(struct driver_data *drv_data);
147 void (*read)(struct driver_data *drv_data);
148 void (*cs_control)(u32 command);
149};
150
151static void pump_messages(void *data);
152
153static int flush(struct driver_data *drv_data)
154{
155 unsigned long limit = loops_per_jiffy << 1;
156
157 void *reg = drv_data->ioaddr;
158
159 do {
160 while (read_SSSR(reg) & SSSR_RNE) {
161 read_SSDR(reg);
162 }
163 } while ((read_SSSR(reg) & SSSR_BSY) && limit--);
164 write_SSSR(SSSR_ROR, reg);
165
166 return limit;
167}
168
169static void restore_state(struct driver_data *drv_data)
170{
171 void *reg = drv_data->ioaddr;
172
173 /* Clear status and disable clock */
174 write_SSSR(drv_data->clear_sr, reg);
175 write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg);
176
177 /* Load the registers */
178 write_SSCR1(drv_data->cur_chip->cr1, reg);
179 write_SSCR0(drv_data->cur_chip->cr0, reg);
180 if (drv_data->ssp_type != PXA25x_SSP) {
181 write_SSTO(0, reg);
182 write_SSPSP(drv_data->cur_chip->psp, reg);
183 }
184}
185
186static void null_cs_control(u32 command)
187{
188}
189
190static void null_writer(struct driver_data *drv_data)
191{
192 void *reg = drv_data->ioaddr;
193 u8 n_bytes = drv_data->n_bytes;
194
195 while ((read_SSSR(reg) & SSSR_TNF)
196 && (drv_data->tx < drv_data->tx_end)) {
197 write_SSDR(0, reg);
198 drv_data->tx += n_bytes;
199 }
200}
201
202static void null_reader(struct driver_data *drv_data)
203{
204 void *reg = drv_data->ioaddr;
205 u8 n_bytes = drv_data->n_bytes;
206
207 while ((read_SSSR(reg) & SSSR_RNE)
208 && (drv_data->rx < drv_data->rx_end)) {
209 read_SSDR(reg);
210 drv_data->rx += n_bytes;
211 }
212}
213
214static void u8_writer(struct driver_data *drv_data)
215{
216 void *reg = drv_data->ioaddr;
217
218 while ((read_SSSR(reg) & SSSR_TNF)
219 && (drv_data->tx < drv_data->tx_end)) {
220 write_SSDR(*(u8 *)(drv_data->tx), reg);
221 ++drv_data->tx;
222 }
223}
224
225static void u8_reader(struct driver_data *drv_data)
226{
227 void *reg = drv_data->ioaddr;
228
229 while ((read_SSSR(reg) & SSSR_RNE)
230 && (drv_data->rx < drv_data->rx_end)) {
231 *(u8 *)(drv_data->rx) = read_SSDR(reg);
232 ++drv_data->rx;
233 }
234}
235
236static void u16_writer(struct driver_data *drv_data)
237{
238 void *reg = drv_data->ioaddr;
239
240 while ((read_SSSR(reg) & SSSR_TNF)
241 && (drv_data->tx < drv_data->tx_end)) {
242 write_SSDR(*(u16 *)(drv_data->tx), reg);
243 drv_data->tx += 2;
244 }
245}
246
247static void u16_reader(struct driver_data *drv_data)
248{
249 void *reg = drv_data->ioaddr;
250
251 while ((read_SSSR(reg) & SSSR_RNE)
252 && (drv_data->rx < drv_data->rx_end)) {
253 *(u16 *)(drv_data->rx) = read_SSDR(reg);
254 drv_data->rx += 2;
255 }
256}
257static void u32_writer(struct driver_data *drv_data)
258{
259 void *reg = drv_data->ioaddr;
260
261 while ((read_SSSR(reg) & SSSR_TNF)
262 && (drv_data->tx < drv_data->tx_end)) {
263 write_SSDR(*(u32 *)(drv_data->tx), reg);
264 drv_data->tx += 4;
265 }
266}
267
268static void u32_reader(struct driver_data *drv_data)
269{
270 void *reg = drv_data->ioaddr;
271
272 while ((read_SSSR(reg) & SSSR_RNE)
273 && (drv_data->rx < drv_data->rx_end)) {
274 *(u32 *)(drv_data->rx) = read_SSDR(reg);
275 drv_data->rx += 4;
276 }
277}
278
279static void *next_transfer(struct driver_data *drv_data)
280{
281 struct spi_message *msg = drv_data->cur_msg;
282 struct spi_transfer *trans = drv_data->cur_transfer;
283
284 /* Move to next transfer */
285 if (trans->transfer_list.next != &msg->transfers) {
286 drv_data->cur_transfer =
287 list_entry(trans->transfer_list.next,
288 struct spi_transfer,
289 transfer_list);
290 return RUNNING_STATE;
291 } else
292 return DONE_STATE;
293}
294
295static int map_dma_buffers(struct driver_data *drv_data)
296{
297 struct spi_message *msg = drv_data->cur_msg;
298 struct device *dev = &msg->spi->dev;
299
300 if (!drv_data->cur_chip->enable_dma)
301 return 0;
302
303 if (msg->is_dma_mapped)
304 return drv_data->rx_dma && drv_data->tx_dma;
305
306 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
307 return 0;
308
309 /* Modify setup if rx buffer is null */
310 if (drv_data->rx == NULL) {
311 *drv_data->null_dma_buf = 0;
312 drv_data->rx = drv_data->null_dma_buf;
313 drv_data->rx_map_len = 4;
314 } else
315 drv_data->rx_map_len = drv_data->len;
316
317
318 /* Modify setup if tx buffer is null */
319 if (drv_data->tx == NULL) {
320 *drv_data->null_dma_buf = 0;
321 drv_data->tx = drv_data->null_dma_buf;
322 drv_data->tx_map_len = 4;
323 } else
324 drv_data->tx_map_len = drv_data->len;
325
326 /* Stream map the rx buffer */
327 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
328 drv_data->rx_map_len,
329 DMA_FROM_DEVICE);
330 if (dma_mapping_error(drv_data->rx_dma))
331 return 0;
332
333 /* Stream map the tx buffer */
334 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
335 drv_data->tx_map_len,
336 DMA_TO_DEVICE);
337
338 if (dma_mapping_error(drv_data->tx_dma)) {
339 dma_unmap_single(dev, drv_data->rx_dma,
340 drv_data->rx_map_len, DMA_FROM_DEVICE);
341 return 0;
342 }
343
344 return 1;
345}
346
347static void unmap_dma_buffers(struct driver_data *drv_data)
348{
349 struct device *dev;
350
351 if (!drv_data->dma_mapped)
352 return;
353
354 if (!drv_data->cur_msg->is_dma_mapped) {
355 dev = &drv_data->cur_msg->spi->dev;
356 dma_unmap_single(dev, drv_data->rx_dma,
357 drv_data->rx_map_len, DMA_FROM_DEVICE);
358 dma_unmap_single(dev, drv_data->tx_dma,
359 drv_data->tx_map_len, DMA_TO_DEVICE);
360 }
361
362 drv_data->dma_mapped = 0;
363}
364
365/* caller already set message->status; dma and pio irqs are blocked */
366static void giveback(struct spi_message *message, struct driver_data *drv_data)
367{
368 struct spi_transfer* last_transfer;
369
370 last_transfer = list_entry(message->transfers.prev,
371 struct spi_transfer,
372 transfer_list);
373
374 if (!last_transfer->cs_change)
375 drv_data->cs_control(PXA2XX_CS_DEASSERT);
376
377 message->state = NULL;
378 if (message->complete)
379 message->complete(message->context);
380
381 drv_data->cur_msg = NULL;
382 drv_data->cur_transfer = NULL;
383 drv_data->cur_chip = NULL;
384 queue_work(drv_data->workqueue, &drv_data->pump_messages);
385}
386
387static int wait_ssp_rx_stall(void *ioaddr)
388{
389 unsigned long limit = loops_per_jiffy << 1;
390
391 while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--)
392 cpu_relax();
393
394 return limit;
395}
396
397static int wait_dma_channel_stop(int channel)
398{
399 unsigned long limit = loops_per_jiffy << 1;
400
401 while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--)
402 cpu_relax();
403
404 return limit;
405}
406
407static void dma_handler(int channel, void *data, struct pt_regs *regs)
408{
409 struct driver_data *drv_data = data;
410 struct spi_message *msg = drv_data->cur_msg;
411 void *reg = drv_data->ioaddr;
412 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
413 u32 trailing_sssr = 0;
414
415 if (irq_status & DCSR_BUSERR) {
416
417 /* Disable interrupts, clear status and reset DMA */
418 if (drv_data->ssp_type != PXA25x_SSP)
419 write_SSTO(0, reg);
420 write_SSSR(drv_data->clear_sr, reg);
421 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
422 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
423 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
424
425 if (flush(drv_data) == 0)
426 dev_err(&drv_data->pdev->dev,
427 "dma_handler: flush fail\n");
428
429 unmap_dma_buffers(drv_data);
430
431 if (channel == drv_data->tx_channel)
432 dev_err(&drv_data->pdev->dev,
433 "dma_handler: bad bus address on "
434 "tx channel %d, source %x target = %x\n",
435 channel, DSADR(channel), DTADR(channel));
436 else
437 dev_err(&drv_data->pdev->dev,
438 "dma_handler: bad bus address on "
439 "rx channel %d, source %x target = %x\n",
440 channel, DSADR(channel), DTADR(channel));
441
442 msg->state = ERROR_STATE;
443 tasklet_schedule(&drv_data->pump_transfers);
444 }
445
 446 /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
447 if ((drv_data->ssp_type == PXA25x_SSP)
448 && (channel == drv_data->tx_channel)
449 && (irq_status & DCSR_ENDINTR)) {
450
451 /* Wait for rx to stall */
452 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
453 dev_err(&drv_data->pdev->dev,
454 "dma_handler: ssp rx stall failed\n");
455
456 /* Clear and disable interrupts on SSP and DMA channels*/
457 write_SSSR(drv_data->clear_sr, reg);
458 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
459 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
460 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
461 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
462 dev_err(&drv_data->pdev->dev,
463 "dma_handler: dma rx channel stop failed\n");
464
465 unmap_dma_buffers(drv_data);
466
467 /* Read trailing bytes */
468 /* Calculate number of trailing bytes, read them */
469 trailing_sssr = read_SSSR(reg);
470 if ((trailing_sssr & 0xf008) != 0xf000) {
471 drv_data->rx = drv_data->rx_end -
472 (((trailing_sssr >> 12) & 0x0f) + 1);
473 drv_data->read(drv_data);
474 }
475 msg->actual_length += drv_data->len;
476
477 /* Release chip select if requested, transfer delays are
478 * handled in pump_transfers */
479 if (drv_data->cs_change)
480 drv_data->cs_control(PXA2XX_CS_DEASSERT);
481
482 /* Move to next transfer */
483 msg->state = next_transfer(drv_data);
484
485 /* Schedule transfer tasklet */
486 tasklet_schedule(&drv_data->pump_transfers);
487 }
488}
489
490static irqreturn_t dma_transfer(struct driver_data *drv_data)
491{
492 u32 irq_status;
493 u32 trailing_sssr = 0;
494 struct spi_message *msg = drv_data->cur_msg;
495 void *reg = drv_data->ioaddr;
496
497 irq_status = read_SSSR(reg) & drv_data->mask_sr;
498 if (irq_status & SSSR_ROR) {
499 /* Clear and disable interrupts on SSP and DMA channels*/
500 if (drv_data->ssp_type != PXA25x_SSP)
501 write_SSTO(0, reg);
502 write_SSSR(drv_data->clear_sr, reg);
503 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
504 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
505 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
506 unmap_dma_buffers(drv_data);
507
508 if (flush(drv_data) == 0)
509 dev_err(&drv_data->pdev->dev,
510 "dma_transfer: flush fail\n");
511
 512 dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overrun\n");
513
514 drv_data->cur_msg->state = ERROR_STATE;
515 tasklet_schedule(&drv_data->pump_transfers);
516
517 return IRQ_HANDLED;
518 }
519
520 /* Check for false positive timeout */
521 if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) {
522 write_SSSR(SSSR_TINT, reg);
523 return IRQ_HANDLED;
524 }
525
526 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
527
528 /* Clear and disable interrupts on SSP and DMA channels*/
529 if (drv_data->ssp_type != PXA25x_SSP)
530 write_SSTO(0, reg);
531 write_SSSR(drv_data->clear_sr, reg);
532 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
533 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
534 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
535
536 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
537 dev_err(&drv_data->pdev->dev,
538 "dma_transfer: dma rx channel stop failed\n");
539
540 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
541 dev_err(&drv_data->pdev->dev,
542 "dma_transfer: ssp rx stall failed\n");
543
544 unmap_dma_buffers(drv_data);
545
546 /* Calculate number of trailing bytes, read them */
547 trailing_sssr = read_SSSR(reg);
548 if ((trailing_sssr & 0xf008) != 0xf000) {
549 drv_data->rx = drv_data->rx_end -
550 (((trailing_sssr >> 12) & 0x0f) + 1);
551 drv_data->read(drv_data);
552 }
553 msg->actual_length += drv_data->len;
554
555 /* Release chip select if requested, transfer delays are
556 * handled in pump_transfers */
557 if (drv_data->cs_change)
558 drv_data->cs_control(PXA2XX_CS_DEASSERT);
559
560 /* Move to next transfer */
561 msg->state = next_transfer(drv_data);
562
563 /* Schedule transfer tasklet */
564 tasklet_schedule(&drv_data->pump_transfers);
565
566 return IRQ_HANDLED;
567 }
568
 569 /* Oops, problem detected */
570 return IRQ_NONE;
571}
572
573static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
574{
575 u32 irq_status;
576 struct spi_message *msg = drv_data->cur_msg;
577 void *reg = drv_data->ioaddr;
578 irqreturn_t handled = IRQ_NONE;
579 unsigned long limit = loops_per_jiffy << 1;
580
581 while ((irq_status = (read_SSSR(reg) & drv_data->mask_sr))) {
582
583 if (irq_status & SSSR_ROR) {
584
585 /* Clear and disable interrupts */
586 if (drv_data->ssp_type != PXA25x_SSP)
587 write_SSTO(0, reg);
588 write_SSSR(drv_data->clear_sr, reg);
589 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
590
591 if (flush(drv_data) == 0)
592 dev_err(&drv_data->pdev->dev,
593 "interrupt_transfer: flush fail\n");
594
595 dev_warn(&drv_data->pdev->dev,
 596 "interrupt_transfer: fifo overrun\n");
597
598 msg->state = ERROR_STATE;
599 tasklet_schedule(&drv_data->pump_transfers);
600
601 return IRQ_HANDLED;
602 }
603
604 /* Look for false positive timeout */
605 if ((irq_status & SSSR_TINT)
606 && (drv_data->rx < drv_data->rx_end))
607 write_SSSR(SSSR_TINT, reg);
608
609 /* Pump data */
610 drv_data->read(drv_data);
611 drv_data->write(drv_data);
612
613 if (drv_data->tx == drv_data->tx_end) {
614 /* Disable tx interrupt */
615 write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
616
617 /* PXA25x_SSP has no timeout, read trailing bytes */
618 if (drv_data->ssp_type == PXA25x_SSP) {
619 while ((read_SSSR(reg) & SSSR_BSY) && limit--)
620 drv_data->read(drv_data);
621
622 if (limit == 0)
623 dev_err(&drv_data->pdev->dev,
624 "interrupt_transfer: "
625 "trailing byte read failed\n");
626 }
627 }
628
629 if ((irq_status & SSSR_TINT)
630 || (drv_data->rx == drv_data->rx_end)) {
631
632 /* Clear timeout */
633 if (drv_data->ssp_type != PXA25x_SSP)
634 write_SSTO(0, reg);
635 write_SSSR(drv_data->clear_sr, reg);
636 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
637
 638 /* Update total bytes transferred */
639 msg->actual_length += drv_data->len;
640
641 /* Release chip select if requested, transfer delays are
642 * handled in pump_transfers */
643 if (drv_data->cs_change)
644 drv_data->cs_control(PXA2XX_CS_DEASSERT);
645
646 /* Move to next transfer */
647 msg->state = next_transfer(drv_data);
648
649 /* Schedule transfer tasklet */
650 tasklet_schedule(&drv_data->pump_transfers);
651
652 return IRQ_HANDLED;
653 }
654
655 /* We did something */
656 handled = IRQ_HANDLED;
657 }
658
659 return handled;
660}
661
662static irqreturn_t ssp_int(int irq, void *dev_id, struct pt_regs *regs)
663{
664 struct driver_data *drv_data = (struct driver_data *)dev_id;
665
666 if (!drv_data->cur_msg) {
667 dev_err(&drv_data->pdev->dev, "bad message state "
668 "in interrupt handler\n");
669 /* Never fail */
670 return IRQ_HANDLED;
671 }
672
673 return drv_data->transfer_handler(drv_data);
674}
675
676static void pump_transfers(unsigned long data)
677{
678 struct driver_data *drv_data = (struct driver_data *)data;
679 struct spi_message *message = NULL;
680 struct spi_transfer *transfer = NULL;
681 struct spi_transfer *previous = NULL;
682 struct chip_data *chip = NULL;
683 void *reg = drv_data->ioaddr;
684 u32 clk_div = 0;
685 u8 bits = 0;
686 u32 speed = 0;
687 u32 cr0;
688
689 /* Get current state information */
690 message = drv_data->cur_msg;
691 transfer = drv_data->cur_transfer;
692 chip = drv_data->cur_chip;
693
694 /* Handle for abort */
695 if (message->state == ERROR_STATE) {
696 message->status = -EIO;
697 giveback(message, drv_data);
698 return;
699 }
700
701 /* Handle end of message */
702 if (message->state == DONE_STATE) {
703 message->status = 0;
704 giveback(message, drv_data);
705 return;
706 }
707
708 /* Delay if requested at end of transfer*/
709 if (message->state == RUNNING_STATE) {
710 previous = list_entry(transfer->transfer_list.prev,
711 struct spi_transfer,
712 transfer_list);
713 if (previous->delay_usecs)
714 udelay(previous->delay_usecs);
715 }
716
717 /* Setup the transfer state based on the type of transfer */
718 if (flush(drv_data) == 0) {
719 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
720 message->status = -EIO;
721 giveback(message, drv_data);
722 return;
723 }
724 drv_data->n_bytes = chip->n_bytes;
725 drv_data->dma_width = chip->dma_width;
726 drv_data->cs_control = chip->cs_control;
727 drv_data->tx = (void *)transfer->tx_buf;
728 drv_data->tx_end = drv_data->tx + transfer->len;
729 drv_data->rx = transfer->rx_buf;
730 drv_data->rx_end = drv_data->rx + transfer->len;
731 drv_data->rx_dma = transfer->rx_dma;
732 drv_data->tx_dma = transfer->tx_dma;
733 drv_data->len = transfer->len;
734 drv_data->write = drv_data->tx ? chip->write : null_writer;
735 drv_data->read = drv_data->rx ? chip->read : null_reader;
736 drv_data->cs_change = transfer->cs_change;
737
738 /* Change speed and bit per word on a per transfer */
739 if (transfer->speed_hz || transfer->bits_per_word) {
740
741 /* Disable clock */
742 write_SSCR0(chip->cr0 & ~SSCR0_SSE, reg);
743 cr0 = chip->cr0;
744 bits = chip->bits_per_word;
745 speed = chip->speed_hz;
746
747 if (transfer->speed_hz)
748 speed = transfer->speed_hz;
749
750 if (transfer->bits_per_word)
751 bits = transfer->bits_per_word;
752
753 if (reg == SSP1_VIRT)
754 clk_div = SSP1_SerClkDiv(speed);
755 else if (reg == SSP2_VIRT)
756 clk_div = SSP2_SerClkDiv(speed);
757 else if (reg == SSP3_VIRT)
758 clk_div = SSP3_SerClkDiv(speed);
759
760 if (bits <= 8) {
761 drv_data->n_bytes = 1;
762 drv_data->dma_width = DCMD_WIDTH1;
763 drv_data->read = drv_data->read != null_reader ?
764 u8_reader : null_reader;
765 drv_data->write = drv_data->write != null_writer ?
766 u8_writer : null_writer;
767 } else if (bits <= 16) {
768 drv_data->n_bytes = 2;
769 drv_data->dma_width = DCMD_WIDTH2;
770 drv_data->read = drv_data->read != null_reader ?
771 u16_reader : null_reader;
772 drv_data->write = drv_data->write != null_writer ?
773 u16_writer : null_writer;
774 } else if (bits <= 32) {
775 drv_data->n_bytes = 4;
776 drv_data->dma_width = DCMD_WIDTH4;
777 drv_data->read = drv_data->read != null_reader ?
778 u32_reader : null_reader;
779 drv_data->write = drv_data->write != null_writer ?
780 u32_writer : null_writer;
781 }
782
783 cr0 = clk_div
784 | SSCR0_Motorola
785 | SSCR0_DataSize(bits & 0x0f)
786 | SSCR0_SSE
787 | (bits > 16 ? SSCR0_EDSS : 0);
788
789 /* Start it back up */
790 write_SSCR0(cr0, reg);
791 }
792
793 message->state = RUNNING_STATE;
794
795 /* Try to map dma buffer and do a dma transfer if successful */
796 if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
797
798 /* Ensure we have the correct interrupt handler */
799 drv_data->transfer_handler = dma_transfer;
800
801 /* Setup rx DMA Channel */
802 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
803 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
804 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
805 if (drv_data->rx == drv_data->null_dma_buf)
806 /* No target address increment */
807 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
808 | drv_data->dma_width
809 | chip->dma_burst_size
810 | drv_data->len;
811 else
812 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
813 | DCMD_FLOWSRC
814 | drv_data->dma_width
815 | chip->dma_burst_size
816 | drv_data->len;
817
818 /* Setup tx DMA Channel */
819 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
820 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
821 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
822 if (drv_data->tx == drv_data->null_dma_buf)
823 /* No source address increment */
824 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
825 | drv_data->dma_width
826 | chip->dma_burst_size
827 | drv_data->len;
828 else
829 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
830 | DCMD_FLOWTRG
831 | drv_data->dma_width
832 | chip->dma_burst_size
833 | drv_data->len;
834
835 /* Enable dma end irqs on SSP to detect end of transfer */
836 if (drv_data->ssp_type == PXA25x_SSP)
837 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
838
839 /* Fix me, need to handle cs polarity */
840 drv_data->cs_control(PXA2XX_CS_ASSERT);
841
842 /* Go baby, go */
843 write_SSSR(drv_data->clear_sr, reg);
844 DCSR(drv_data->rx_channel) |= DCSR_RUN;
845 DCSR(drv_data->tx_channel) |= DCSR_RUN;
846 if (drv_data->ssp_type != PXA25x_SSP)
847 write_SSTO(chip->timeout, reg);
848 write_SSCR1(chip->cr1
849 | chip->dma_threshold
850 | drv_data->dma_cr1,
851 reg);
852 } else {
853 /* Ensure we have the correct interrupt handler */
854 drv_data->transfer_handler = interrupt_transfer;
855
856 /* Fix me, need to handle cs polarity */
857 drv_data->cs_control(PXA2XX_CS_ASSERT);
858
859 /* Go baby, go */
860 write_SSSR(drv_data->clear_sr, reg);
861 if (drv_data->ssp_type != PXA25x_SSP)
862 write_SSTO(chip->timeout, reg);
863 write_SSCR1(chip->cr1
864 | chip->threshold
865 | drv_data->int_cr1,
866 reg);
867 }
868}
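
pump_transfers() above derives the per-access element size and the matching read/write routines from the transfer's bits_per_word (1, 2 or 4 bytes per FIFO access, up to 32 bits). A reduced sketch of just that selection step, outside any hardware context; pick_width() and the width names are invented:

#include <stdio.h>

struct xfer_cfg {
        unsigned int n_bytes;       /* bytes consumed per FIFO access */
        const char *width_name;     /* stands in for DCMD_WIDTH1/2/4  */
};

static int pick_width(unsigned int bits, struct xfer_cfg *cfg)
{
        if (bits == 0 || bits > 32)
                return -1;
        if (bits <= 8) {
                cfg->n_bytes = 1; cfg->width_name = "WIDTH1";
        } else if (bits <= 16) {
                cfg->n_bytes = 2; cfg->width_name = "WIDTH2";
        } else {
                cfg->n_bytes = 4; cfg->width_name = "WIDTH4";
        }
        return 0;
}

int main(void)
{
        struct xfer_cfg cfg;
        unsigned int bits[] = { 8, 12, 24 };

        for (unsigned int i = 0; i < 3; i++)
                if (pick_width(bits[i], &cfg) == 0)
                        printf("%u bits -> %u byte(s), %s\n",
                               bits[i], cfg.n_bytes, cfg.width_name);
        return 0;
}
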
869
870static void pump_messages(void *data)
871{
872 struct driver_data *drv_data = data;
873 unsigned long flags;
874
875 /* Lock queue and check for queue work */
876 spin_lock_irqsave(&drv_data->lock, flags);
877 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
878 drv_data->busy = 0;
879 spin_unlock_irqrestore(&drv_data->lock, flags);
880 return;
881 }
882
883 /* Make sure we are not already running a message */
884 if (drv_data->cur_msg) {
885 spin_unlock_irqrestore(&drv_data->lock, flags);
886 return;
887 }
888
889 /* Extract head of queue */
890 drv_data->cur_msg = list_entry(drv_data->queue.next,
891 struct spi_message, queue);
892 list_del_init(&drv_data->cur_msg->queue);
893 drv_data->busy = 1;
894 spin_unlock_irqrestore(&drv_data->lock, flags);
895
896	/* Initial message state */
897 drv_data->cur_msg->state = START_STATE;
898 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
899 struct spi_transfer,
900 transfer_list);
901
902 /* Setup the SSP using the per chip configuration */
903 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
904 restore_state(drv_data);
905
906 /* Mark as busy and launch transfers */
907 tasklet_schedule(&drv_data->pump_transfers);
908}
909
910static int transfer(struct spi_device *spi, struct spi_message *msg)
911{
912 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
913 unsigned long flags;
914
915 spin_lock_irqsave(&drv_data->lock, flags);
916
917 if (drv_data->run == QUEUE_STOPPED) {
918 spin_unlock_irqrestore(&drv_data->lock, flags);
919 return -ESHUTDOWN;
920 }
921
922 msg->actual_length = 0;
923 msg->status = -EINPROGRESS;
924 msg->state = START_STATE;
925
926 list_add_tail(&msg->queue, &drv_data->queue);
927
928 if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
929 queue_work(drv_data->workqueue, &drv_data->pump_messages);
930
931 spin_unlock_irqrestore(&drv_data->lock, flags);
932
933 return 0;
934}
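/*
 * A minimal usage sketch (not part of this file): protocol drivers never
 * call transfer() directly; they build an spi_message and let the SPI
 * core route it here.  This assumes the list-based message helpers
 * (spi_message_init/spi_message_add_tail) present in this tree:
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= cmd,
 *		.rx_buf	= resp,
 *		.len	= sizeof(cmd),
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);	-- queued via transfer() above
 */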
935
936static int setup(struct spi_device *spi)
937{
938 struct pxa2xx_spi_chip *chip_info = NULL;
939 struct chip_data *chip;
940 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
941 unsigned int clk_div;
942
943 if (!spi->bits_per_word)
944 spi->bits_per_word = 8;
945
946	if (drv_data->ssp_type != PXA25x_SSP
947		&& (spi->bits_per_word < 4 || spi->bits_per_word > 32))
948		return -EINVAL;
949	else if (drv_data->ssp_type == PXA25x_SSP && (spi->bits_per_word < 4 || spi->bits_per_word > 16))
950		return -EINVAL;
951
952 /* Only alloc (or use chip_info) on first setup */
953 chip = spi_get_ctldata(spi);
954 if (chip == NULL) {
955 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
956 if (!chip)
957 return -ENOMEM;
958
959 chip->cs_control = null_cs_control;
960 chip->enable_dma = 0;
961 chip->timeout = 5;
962 chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
963 chip->dma_burst_size = drv_data->master_info->enable_dma ?
964 DCMD_BURST8 : 0;
965
966 chip_info = spi->controller_data;
967 }
968
969 /* chip_info isn't always needed */
970 if (chip_info) {
971 if (chip_info->cs_control)
972 chip->cs_control = chip_info->cs_control;
973
974 chip->timeout = (chip_info->timeout_microsecs * 10000) / 2712;
975
976 chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold)
977 | SSCR1_TxTresh(chip_info->tx_threshold);
978
979 chip->enable_dma = chip_info->dma_burst_size != 0
980 && drv_data->master_info->enable_dma;
981 chip->dma_threshold = 0;
982
983 if (chip->enable_dma) {
984 if (chip_info->dma_burst_size <= 8) {
985 chip->dma_threshold = SSCR1_RxTresh(8)
986 | SSCR1_TxTresh(8);
987 chip->dma_burst_size = DCMD_BURST8;
988 } else if (chip_info->dma_burst_size <= 16) {
989 chip->dma_threshold = SSCR1_RxTresh(16)
990 | SSCR1_TxTresh(16);
991 chip->dma_burst_size = DCMD_BURST16;
992 } else {
993 chip->dma_threshold = SSCR1_RxTresh(32)
994 | SSCR1_TxTresh(32);
995 chip->dma_burst_size = DCMD_BURST32;
996 }
997 }
998
999
1000 if (chip_info->enable_loopback)
1001 chip->cr1 = SSCR1_LBM;
1002 }
1003
1004 if (drv_data->ioaddr == SSP1_VIRT)
1005 clk_div = SSP1_SerClkDiv(spi->max_speed_hz);
1006 else if (drv_data->ioaddr == SSP2_VIRT)
1007 clk_div = SSP2_SerClkDiv(spi->max_speed_hz);
1008 else if (drv_data->ioaddr == SSP3_VIRT)
1009 clk_div = SSP3_SerClkDiv(spi->max_speed_hz);
1010 else
1011 return -ENODEV;
1012 chip->speed_hz = spi->max_speed_hz;
1013
1014 chip->cr0 = clk_div
1015 | SSCR0_Motorola
1016 | SSCR0_DataSize(spi->bits_per_word & 0x0f)
1017 | SSCR0_SSE
1018 | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
1019 chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) << 4)
1020 | (((spi->mode & SPI_CPOL) != 0) << 3);
1021
1022 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
1023 if (drv_data->ssp_type != PXA25x_SSP)
1024 dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
1025 spi->bits_per_word,
1026 (CLOCK_SPEED_HZ)
1027 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
1028 spi->mode & 0x3);
1029 else
1030 dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n",
1031 spi->bits_per_word,
1032 (CLOCK_SPEED_HZ/2)
1033 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
1034 spi->mode & 0x3);
1035
1036 if (spi->bits_per_word <= 8) {
1037 chip->n_bytes = 1;
1038 chip->dma_width = DCMD_WIDTH1;
1039 chip->read = u8_reader;
1040 chip->write = u8_writer;
1041 } else if (spi->bits_per_word <= 16) {
1042 chip->n_bytes = 2;
1043 chip->dma_width = DCMD_WIDTH2;
1044 chip->read = u16_reader;
1045 chip->write = u16_writer;
1046 } else if (spi->bits_per_word <= 32) {
1047 chip->cr0 |= SSCR0_EDSS;
1048 chip->n_bytes = 4;
1049 chip->dma_width = DCMD_WIDTH4;
1050 chip->read = u32_reader;
1051 chip->write = u32_writer;
1052 } else {
1053 dev_err(&spi->dev, "invalid wordsize\n");
1054 kfree(chip);
1055 return -ENODEV;
1056 }
1057 chip->bits_per_word = spi->bits_per_word;
1058
1059 spi_set_ctldata(spi, chip);
1060
1061 return 0;
1062}
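/*
 * Hedged illustration (not part of this driver): the chip_info consumed
 * by setup() would normally come from a board file, passed through
 * spi_board_info.controller_data.  Field names follow the uses above;
 * board_cs_control is a hypothetical board-specific callback:
 *
 *	static struct pxa2xx_spi_chip board_chip_info = {
 *		.tx_threshold		= 8,
 *		.rx_threshold		= 8,
 *		.dma_burst_size		= 8,
 *		.timeout_microsecs	= 235,
 *		.cs_control		= board_cs_control,
 *	};
 */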
1063
1064static void cleanup(const struct spi_device *spi)
1065{
1066 struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
1067
1068 kfree(chip);
1069}
1070
1071static int init_queue(struct driver_data *drv_data)
1072{
1073 INIT_LIST_HEAD(&drv_data->queue);
1074 spin_lock_init(&drv_data->lock);
1075
1076 drv_data->run = QUEUE_STOPPED;
1077 drv_data->busy = 0;
1078
1079 tasklet_init(&drv_data->pump_transfers,
1080 pump_transfers, (unsigned long)drv_data);
1081
1082 INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
1083 drv_data->workqueue = create_singlethread_workqueue(
1084 drv_data->master->cdev.dev->bus_id);
1085 if (drv_data->workqueue == NULL)
1086 return -EBUSY;
1087
1088 return 0;
1089}
1090
1091static int start_queue(struct driver_data *drv_data)
1092{
1093 unsigned long flags;
1094
1095 spin_lock_irqsave(&drv_data->lock, flags);
1096
1097 if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1098 spin_unlock_irqrestore(&drv_data->lock, flags);
1099 return -EBUSY;
1100 }
1101
1102 drv_data->run = QUEUE_RUNNING;
1103 drv_data->cur_msg = NULL;
1104 drv_data->cur_transfer = NULL;
1105 drv_data->cur_chip = NULL;
1106 spin_unlock_irqrestore(&drv_data->lock, flags);
1107
1108 queue_work(drv_data->workqueue, &drv_data->pump_messages);
1109
1110 return 0;
1111}
1112
1113static int stop_queue(struct driver_data *drv_data)
1114{
1115 unsigned long flags;
1116 unsigned limit = 500;
1117 int status = 0;
1118
1119 spin_lock_irqsave(&drv_data->lock, flags);
1120
1121 /* This is a bit lame, but is optimized for the common execution path.
1122 * A wait_queue on the drv_data->busy could be used, but then the common
1123 * execution path (pump_messages) would be required to call wake_up or
1124 * friends on every SPI message. Do this instead */
1125 drv_data->run = QUEUE_STOPPED;
1126	while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
1127 spin_unlock_irqrestore(&drv_data->lock, flags);
1128 msleep(10);
1129 spin_lock_irqsave(&drv_data->lock, flags);
1130 }
1131
1132 if (!list_empty(&drv_data->queue) || drv_data->busy)
1133 status = -EBUSY;
1134
1135 spin_unlock_irqrestore(&drv_data->lock, flags);
1136
1137 return status;
1138}
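/*
 * For comparison, the wait_queue alternative that the comment above
 * rejects would look roughly like this (sketch only; queue_idle is a
 * hypothetical wait_queue_head_t member, and pump_messages() would then
 * have to wake it after every message):
 *
 *	wait_event_timeout(drv_data->queue_idle,
 *			list_empty(&drv_data->queue) && !drv_data->busy,
 *			msecs_to_jiffies(5000));
 */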
1139
1140static int destroy_queue(struct driver_data *drv_data)
1141{
1142 int status;
1143
1144 status = stop_queue(drv_data);
1145 if (status != 0)
1146 return status;
1147
1148 destroy_workqueue(drv_data->workqueue);
1149
1150 return 0;
1151}
1152
1153static int pxa2xx_spi_probe(struct platform_device *pdev)
1154{
1155 struct device *dev = &pdev->dev;
1156 struct pxa2xx_spi_master *platform_info;
1157 struct spi_master *master;
1158	struct driver_data *drv_data = NULL;
1159 struct resource *memory_resource;
1160 int irq;
1161 int status = 0;
1162
1163 platform_info = dev->platform_data;
1164
1165 if (platform_info->ssp_type == SSP_UNDEFINED) {
1166 dev_err(&pdev->dev, "undefined SSP\n");
1167 return -ENODEV;
1168 }
1169
1170 /* Allocate master with space for drv_data and null dma buffer */
1171 master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
1172 if (!master) {
1173 dev_err(&pdev->dev, "can not alloc spi_master\n");
1174 return -ENOMEM;
1175 }
1176 drv_data = spi_master_get_devdata(master);
1177 drv_data->master = master;
1178 drv_data->master_info = platform_info;
1179 drv_data->pdev = pdev;
1180
1181 master->bus_num = pdev->id;
1182 master->num_chipselect = platform_info->num_chipselect;
1183 master->cleanup = cleanup;
1184 master->setup = setup;
1185 master->transfer = transfer;
1186
1187 drv_data->ssp_type = platform_info->ssp_type;
1188	drv_data->null_dma_buf = (u32 *)ALIGN((u32)drv_data +
1189				sizeof(struct driver_data), 8);
1190
1191 /* Setup register addresses */
1192 memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1193 if (!memory_resource) {
1194 dev_err(&pdev->dev, "memory resources not defined\n");
1195 status = -ENODEV;
1196 goto out_error_master_alloc;
1197 }
1198
1199 drv_data->ioaddr = (void *)io_p2v(memory_resource->start);
1200 drv_data->ssdr_physical = memory_resource->start + 0x00000010;
1201 if (platform_info->ssp_type == PXA25x_SSP) {
1202 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
1203 drv_data->dma_cr1 = 0;
1204 drv_data->clear_sr = SSSR_ROR;
1205 drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
1206 } else {
1207 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1208 drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
1209 drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
1210 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
1211 }
1212
1213 /* Attach to IRQ */
1214 irq = platform_get_irq(pdev, 0);
1215 if (irq < 0) {
1216 dev_err(&pdev->dev, "irq resource not defined\n");
1217 status = -ENODEV;
1218 goto out_error_master_alloc;
1219 }
1220
1221 status = request_irq(irq, ssp_int, SA_INTERRUPT, dev->bus_id, drv_data);
1222 if (status < 0) {
1223 dev_err(&pdev->dev, "can not get IRQ\n");
1224 goto out_error_master_alloc;
1225 }
1226
1227 /* Setup DMA if requested */
1228 drv_data->tx_channel = -1;
1229 drv_data->rx_channel = -1;
1230 if (platform_info->enable_dma) {
1231
1232 /* Get two DMA channels (rx and tx) */
1233 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
1234 DMA_PRIO_HIGH,
1235 dma_handler,
1236 drv_data);
1237 if (drv_data->rx_channel < 0) {
1238 dev_err(dev, "problem (%d) requesting rx channel\n",
1239 drv_data->rx_channel);
1240 status = -ENODEV;
1241 goto out_error_irq_alloc;
1242 }
1243 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
1244 DMA_PRIO_MEDIUM,
1245 dma_handler,
1246 drv_data);
1247 if (drv_data->tx_channel < 0) {
1248 dev_err(dev, "problem (%d) requesting tx channel\n",
1249 drv_data->tx_channel);
1250 status = -ENODEV;
1251 goto out_error_dma_alloc;
1252 }
1253
1254 if (drv_data->ioaddr == SSP1_VIRT) {
1255 DRCMRRXSSDR = DRCMR_MAPVLD
1256 | drv_data->rx_channel;
1257 DRCMRTXSSDR = DRCMR_MAPVLD
1258 | drv_data->tx_channel;
1259 } else if (drv_data->ioaddr == SSP2_VIRT) {
1260 DRCMRRXSS2DR = DRCMR_MAPVLD
1261 | drv_data->rx_channel;
1262 DRCMRTXSS2DR = DRCMR_MAPVLD
1263 | drv_data->tx_channel;
1264 } else if (drv_data->ioaddr == SSP3_VIRT) {
1265 DRCMRRXSS3DR = DRCMR_MAPVLD
1266 | drv_data->rx_channel;
1267 DRCMRTXSS3DR = DRCMR_MAPVLD
1268 | drv_data->tx_channel;
1269 } else {
1270 dev_err(dev, "bad SSP type\n");
1271 goto out_error_dma_alloc;
1272 }
1273 }
1274
1275 /* Enable SOC clock */
1276 pxa_set_cken(platform_info->clock_enable, 1);
1277
1278 /* Load default SSP configuration */
1279 write_SSCR0(0, drv_data->ioaddr);
1280 write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr);
1281 write_SSCR0(SSCR0_SerClkDiv(2)
1282 | SSCR0_Motorola
1283 | SSCR0_DataSize(8),
1284 drv_data->ioaddr);
1285 if (drv_data->ssp_type != PXA25x_SSP)
1286 write_SSTO(0, drv_data->ioaddr);
1287 write_SSPSP(0, drv_data->ioaddr);
1288
1289	/* Initialize and start the queue */
1290 status = init_queue(drv_data);
1291 if (status != 0) {
1292 dev_err(&pdev->dev, "problem initializing queue\n");
1293 goto out_error_clock_enabled;
1294 }
1295 status = start_queue(drv_data);
1296 if (status != 0) {
1297 dev_err(&pdev->dev, "problem starting queue\n");
1298 goto out_error_clock_enabled;
1299 }
1300
1301 /* Register with the SPI framework */
1302 platform_set_drvdata(pdev, drv_data);
1303 status = spi_register_master(master);
1304 if (status != 0) {
1305 dev_err(&pdev->dev, "problem registering spi master\n");
1306 goto out_error_queue_alloc;
1307 }
1308
1309 return status;
1310
1311out_error_queue_alloc:
1312 destroy_queue(drv_data);
1313
1314out_error_clock_enabled:
1315 pxa_set_cken(platform_info->clock_enable, 0);
1316
1317out_error_dma_alloc:
1318 if (drv_data->tx_channel != -1)
1319 pxa_free_dma(drv_data->tx_channel);
1320 if (drv_data->rx_channel != -1)
1321 pxa_free_dma(drv_data->rx_channel);
1322
1323out_error_irq_alloc:
1324 free_irq(irq, drv_data);
1325
1326out_error_master_alloc:
1327 spi_master_put(master);
1328 return status;
1329}
1330
1331static int pxa2xx_spi_remove(struct platform_device *pdev)
1332{
1333 struct driver_data *drv_data = platform_get_drvdata(pdev);
1334 int irq;
1335 int status = 0;
1336
1337 if (!drv_data)
1338 return 0;
1339
1340 /* Remove the queue */
1341 status = destroy_queue(drv_data);
1342 if (status != 0)
1343 return status;
1344
1345 /* Disable the SSP at the peripheral and SOC level */
1346 write_SSCR0(0, drv_data->ioaddr);
1347 pxa_set_cken(drv_data->master_info->clock_enable, 0);
1348
1349 /* Release DMA */
1350 if (drv_data->master_info->enable_dma) {
1351 if (drv_data->ioaddr == SSP1_VIRT) {
1352 DRCMRRXSSDR = 0;
1353 DRCMRTXSSDR = 0;
1354 } else if (drv_data->ioaddr == SSP2_VIRT) {
1355 DRCMRRXSS2DR = 0;
1356 DRCMRTXSS2DR = 0;
1357 } else if (drv_data->ioaddr == SSP3_VIRT) {
1358 DRCMRRXSS3DR = 0;
1359 DRCMRTXSS3DR = 0;
1360 }
1361 pxa_free_dma(drv_data->tx_channel);
1362 pxa_free_dma(drv_data->rx_channel);
1363 }
1364
1365 /* Release IRQ */
1366 irq = platform_get_irq(pdev, 0);
1367 if (irq >= 0)
1368 free_irq(irq, drv_data);
1369
1370 /* Disconnect from the SPI framework */
1371 spi_unregister_master(drv_data->master);
1372
1373 /* Prevent double remove */
1374 platform_set_drvdata(pdev, NULL);
1375
1376 return 0;
1377}
1378
1379static void pxa2xx_spi_shutdown(struct platform_device *pdev)
1380{
1381 int status = 0;
1382
1383 if ((status = pxa2xx_spi_remove(pdev)) != 0)
1384 dev_err(&pdev->dev, "shutdown failed with %d\n", status);
1385}
1386
1387#ifdef CONFIG_PM
1388static int suspend_devices(struct device *dev, void *pm_message)
1389{
1390 pm_message_t *state = pm_message;
1391
1392 if (dev->power.power_state.event != state->event) {
1393 dev_warn(dev, "pm state does not match request\n");
1394 return -1;
1395 }
1396
1397 return 0;
1398}
1399
1400static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1401{
1402 struct driver_data *drv_data = platform_get_drvdata(pdev);
1403 int status = 0;
1404
1405	/* Check all children for their current power state */
1406 if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) {
1407 dev_warn(&pdev->dev, "suspend aborted\n");
1408 return -1;
1409 }
1410
1411 status = stop_queue(drv_data);
1412 if (status != 0)
1413 return status;
1414 write_SSCR0(0, drv_data->ioaddr);
1415 pxa_set_cken(drv_data->master_info->clock_enable, 0);
1416
1417 return 0;
1418}
1419
1420static int pxa2xx_spi_resume(struct platform_device *pdev)
1421{
1422 struct driver_data *drv_data = platform_get_drvdata(pdev);
1423 int status = 0;
1424
1425 /* Enable the SSP clock */
1426 pxa_set_cken(drv_data->master_info->clock_enable, 1);
1427
1428 /* Start the queue running */
1429 status = start_queue(drv_data);
1430 if (status != 0) {
1431 dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
1432 return status;
1433 }
1434
1435 return 0;
1436}
1437#else
1438#define pxa2xx_spi_suspend NULL
1439#define pxa2xx_spi_resume NULL
1440#endif /* CONFIG_PM */
1441
1442static struct platform_driver driver = {
1443 .driver = {
1444 .name = "pxa2xx-spi",
1445 .bus = &platform_bus_type,
1446 .owner = THIS_MODULE,
1447 },
1448 .probe = pxa2xx_spi_probe,
1449 .remove = __devexit_p(pxa2xx_spi_remove),
1450 .shutdown = pxa2xx_spi_shutdown,
1451 .suspend = pxa2xx_spi_suspend,
1452 .resume = pxa2xx_spi_resume,
1453};
1454
1455static int __init pxa2xx_spi_init(void)
1456{
1457 platform_driver_register(&driver);
1458
1459 return 0;
1460}
1461module_init(pxa2xx_spi_init);
1462
1463static void __exit pxa2xx_spi_exit(void)
1464{
1465 platform_driver_unregister(&driver);
1466}
1467module_exit(pxa2xx_spi_exit);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 94f5e8ed83a7..7a3f733051e9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master);
395int __init_or_module 395int __init_or_module
396spi_register_master(struct spi_master *master) 396spi_register_master(struct spi_master *master)
397{ 397{
398 static atomic_t dyn_bus_id = ATOMIC_INIT(0); 398 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<16) - 1);
399 struct device *dev = master->cdev.dev; 399 struct device *dev = master->cdev.dev;
400 int status = -ENODEV; 400 int status = -ENODEV;
401 int dynamic = 0; 401 int dynamic = 0;
@@ -404,7 +404,7 @@ spi_register_master(struct spi_master *master)
404 return -ENODEV; 404 return -ENODEV;
405 405
406 /* convention: dynamically assigned bus IDs count down from the max */ 406 /* convention: dynamically assigned bus IDs count down from the max */
407 if (master->bus_num == 0) { 407 if (master->bus_num < 0) {
408 master->bus_num = atomic_dec_return(&dyn_bus_id); 408 master->bus_num = atomic_dec_return(&dyn_bus_id);
409 dynamic = 1; 409 dynamic = 1;
410 } 410 }
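/*
 * Sketch of what this change means for controller drivers (assuming
 * bus_num is a signed field in this tree, as the "< 0" test requires):
 * a master that wants a dynamically assigned bus number now sets
 *
 *	master->bus_num = -1;		(was 0 before this patch)
 *
 * while bus 0 becomes an ordinary, statically numbered bus.
 */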
@@ -522,7 +522,8 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
522} 522}
523EXPORT_SYMBOL_GPL(spi_sync); 523EXPORT_SYMBOL_GPL(spi_sync);
524 524
525#define SPI_BUFSIZ (SMP_CACHE_BYTES) 525/* portable code must never pass more than 32 bytes */
526#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
526 527
527static u8 *buf; 528static u8 *buf;
528 529
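/*
 * Usage sketch for the 32-byte guarantee noted above (illustrative
 * values): spi_write_then_read() bounces both buffers through the
 * SPI_BUFSIZ scratch buffer, so portable callers keep n_tx + n_rx
 * within 32 bytes.
 *
 *	u8 cmd = 0x9f;			-- small command opcode
 *	u8 id[3];
 *
 *	status = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */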
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index f037e5593269..dd2f950b21a7 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -138,6 +138,45 @@ static unsigned bitbang_txrx_32(
138 return t->len - count; 138 return t->len - count;
139} 139}
140 140
141int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
142{
143 struct spi_bitbang_cs *cs = spi->controller_state;
144 u8 bits_per_word;
145 u32 hz;
146
147 if (t) {
148 bits_per_word = t->bits_per_word;
149 hz = t->speed_hz;
150 } else {
151 bits_per_word = 0;
152 hz = 0;
153 }
154
155 /* spi_transfer level calls that work per-word */
156 if (!bits_per_word)
157 bits_per_word = spi->bits_per_word;
158 if (bits_per_word <= 8)
159 cs->txrx_bufs = bitbang_txrx_8;
160 else if (bits_per_word <= 16)
161 cs->txrx_bufs = bitbang_txrx_16;
162 else if (bits_per_word <= 32)
163 cs->txrx_bufs = bitbang_txrx_32;
164 else
165 return -EINVAL;
166
167 /* nsecs = (clock period)/2 */
168 if (!hz)
169 hz = spi->max_speed_hz;
170 if (hz) {
171 cs->nsecs = (1000000000/2) / hz;
172 if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000))
173 return -EINVAL;
174 }
175
176 return 0;
177}
178EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);
179
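/*
 * Illustrative only: with this hook in place a protocol driver can
 * override wordsize or clock for a single transfer and have the
 * setup() values restored afterwards, e.g.
 *
 *	struct spi_transfer t = {
 *		.tx_buf		= buf,
 *		.len		= len,
 *		.bits_per_word	= 16,		(this transfer only)
 *		.speed_hz	= 500000,	(this transfer only)
 *	};
 *
 * Transfers that leave both fields zero keep the per-device settings.
 */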
141/** 180/**
142 * spi_bitbang_setup - default setup for per-word I/O loops 181 * spi_bitbang_setup - default setup for per-word I/O loops
143 */ 182 */
@@ -145,8 +184,16 @@ int spi_bitbang_setup(struct spi_device *spi)
145{ 184{
146 struct spi_bitbang_cs *cs = spi->controller_state; 185 struct spi_bitbang_cs *cs = spi->controller_state;
147 struct spi_bitbang *bitbang; 186 struct spi_bitbang *bitbang;
187 int retval;
148 188
149 if (!spi->max_speed_hz) 189 bitbang = spi_master_get_devdata(spi->master);
190
191 /* REVISIT: some systems will want to support devices using lsb-first
192 * bit encodings on the wire. In pure software that would be trivial,
193 * just bitbang_txrx_le_cphaX() routines shifting the other way, and
194 * some hardware controllers also have this support.
195 */
196 if ((spi->mode & SPI_LSB_FIRST) != 0)
150 return -EINVAL; 197 return -EINVAL;
151 198
152 if (!cs) { 199 if (!cs) {
@@ -155,32 +202,20 @@ int spi_bitbang_setup(struct spi_device *spi)
155 return -ENOMEM; 202 return -ENOMEM;
156 spi->controller_state = cs; 203 spi->controller_state = cs;
157 } 204 }
158 bitbang = spi_master_get_devdata(spi->master);
159 205
160 if (!spi->bits_per_word) 206 if (!spi->bits_per_word)
161 spi->bits_per_word = 8; 207 spi->bits_per_word = 8;
162 208
163 /* spi_transfer level calls that work per-word */
164 if (spi->bits_per_word <= 8)
165 cs->txrx_bufs = bitbang_txrx_8;
166 else if (spi->bits_per_word <= 16)
167 cs->txrx_bufs = bitbang_txrx_16;
168 else if (spi->bits_per_word <= 32)
169 cs->txrx_bufs = bitbang_txrx_32;
170 else
171 return -EINVAL;
172
173 /* per-word shift register access, in hardware or bitbanging */ 209 /* per-word shift register access, in hardware or bitbanging */
174 cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; 210 cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
175 if (!cs->txrx_word) 211 if (!cs->txrx_word)
176 return -EINVAL; 212 return -EINVAL;
177 213
178 /* nsecs = (clock period)/2 */ 214 retval = spi_bitbang_setup_transfer(spi, NULL);
179 cs->nsecs = (1000000000/2) / (spi->max_speed_hz); 215 if (retval < 0)
180 if (cs->nsecs > MAX_UDELAY_MS * 1000) 216 return retval;
181 return -EINVAL;
182 217
183 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n", 218 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
184 __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA), 219 __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
185 spi->bits_per_word, 2 * cs->nsecs); 220 spi->bits_per_word, 2 * cs->nsecs);
186 221
@@ -246,6 +281,8 @@ static void bitbang_work(void *_bitbang)
246 unsigned tmp; 281 unsigned tmp;
247 unsigned cs_change; 282 unsigned cs_change;
248 int status; 283 int status;
284 int (*setup_transfer)(struct spi_device *,
285 struct spi_transfer *);
249 286
250 m = container_of(bitbang->queue.next, struct spi_message, 287 m = container_of(bitbang->queue.next, struct spi_message,
251 queue); 288 queue);
@@ -262,6 +299,7 @@ static void bitbang_work(void *_bitbang)
262 tmp = 0; 299 tmp = 0;
263 cs_change = 1; 300 cs_change = 1;
264 status = 0; 301 status = 0;
302 setup_transfer = NULL;
265 303
266 list_for_each_entry (t, &m->transfers, transfer_list) { 304 list_for_each_entry (t, &m->transfers, transfer_list) {
267 if (bitbang->shutdown) { 305 if (bitbang->shutdown) {
@@ -269,6 +307,20 @@ static void bitbang_work(void *_bitbang)
269 break; 307 break;
270 } 308 }
271 309
310 /* override or restore speed and wordsize */
311 if (t->speed_hz || t->bits_per_word) {
312 setup_transfer = bitbang->setup_transfer;
313 if (!setup_transfer) {
314 status = -ENOPROTOOPT;
315 break;
316 }
317 }
318 if (setup_transfer) {
319 status = setup_transfer(spi, t);
320 if (status < 0)
321 break;
322 }
323
272 /* set up default clock polarity, and activate chip; 324 /* set up default clock polarity, and activate chip;
273 * this implicitly updates clock and spi modes as 325 * this implicitly updates clock and spi modes as
274 * previously recorded for this device via setup(). 326 * previously recorded for this device via setup().
@@ -325,6 +377,10 @@ static void bitbang_work(void *_bitbang)
325 m->status = status; 377 m->status = status;
326 m->complete(m->context); 378 m->complete(m->context);
327 379
380 /* restore speed and wordsize */
381 if (setup_transfer)
382 setup_transfer(spi, NULL);
383
328 /* normally deactivate chipselect ... unless no error and 384 /* normally deactivate chipselect ... unless no error and
329 * cs_change has hinted that the next message will probably 385 * cs_change has hinted that the next message will probably
330 * be for this chip too. 386 * be for this chip too.
@@ -348,6 +404,7 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
348{ 404{
349 struct spi_bitbang *bitbang; 405 struct spi_bitbang *bitbang;
350 unsigned long flags; 406 unsigned long flags;
407 int status = 0;
351 408
352 m->actual_length = 0; 409 m->actual_length = 0;
353 m->status = -EINPROGRESS; 410 m->status = -EINPROGRESS;
@@ -357,11 +414,15 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
357 return -ESHUTDOWN; 414 return -ESHUTDOWN;
358 415
359 spin_lock_irqsave(&bitbang->lock, flags); 416 spin_lock_irqsave(&bitbang->lock, flags);
360 list_add_tail(&m->queue, &bitbang->queue); 417 if (!spi->max_speed_hz)
361 queue_work(bitbang->workqueue, &bitbang->work); 418 status = -ENETDOWN;
419 else {
420 list_add_tail(&m->queue, &bitbang->queue);
421 queue_work(bitbang->workqueue, &bitbang->work);
422 }
362 spin_unlock_irqrestore(&bitbang->lock, flags); 423 spin_unlock_irqrestore(&bitbang->lock, flags);
363 424
364 return 0; 425 return status;
365} 426}
366EXPORT_SYMBOL_GPL(spi_bitbang_transfer); 427EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
367 428
@@ -406,6 +467,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
406 bitbang->use_dma = 0; 467 bitbang->use_dma = 0;
407 bitbang->txrx_bufs = spi_bitbang_bufs; 468 bitbang->txrx_bufs = spi_bitbang_bufs;
408 if (!bitbang->master->setup) { 469 if (!bitbang->master->setup) {
470 if (!bitbang->setup_transfer)
471 bitbang->setup_transfer =
472 spi_bitbang_setup_transfer;
409 bitbang->master->setup = spi_bitbang_setup; 473 bitbang->master->setup = spi_bitbang_setup;
410 bitbang->master->cleanup = spi_bitbang_cleanup; 474 bitbang->master->cleanup = spi_bitbang_cleanup;
411 } 475 }
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 7860c8a5800d..956b7a1e8af9 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -69,7 +69,7 @@ static const char speedtch_driver_name[] = "speedtch";
69#define RESUBMIT_DELAY 1000 /* milliseconds */ 69#define RESUBMIT_DELAY 1000 /* milliseconds */
70 70
71#define DEFAULT_BULK_ALTSETTING 1 71#define DEFAULT_BULK_ALTSETTING 1
72#define DEFAULT_ISOC_ALTSETTING 2 72#define DEFAULT_ISOC_ALTSETTING 3
73#define DEFAULT_DL_512_FIRST 0 73#define DEFAULT_DL_512_FIRST 0
74#define DEFAULT_ENABLE_ISOC 0 74#define DEFAULT_ENABLE_ISOC 0
75#define DEFAULT_SW_BUFFERING 0 75#define DEFAULT_SW_BUFFERING 0
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index c1211fc037d9..546249843b8e 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -99,11 +99,11 @@ static const char usbatm_driver_name[] = "usbatm";
99 99
100#define UDSL_MAX_RCV_URBS 16 100#define UDSL_MAX_RCV_URBS 16
101#define UDSL_MAX_SND_URBS 16 101#define UDSL_MAX_SND_URBS 16
102#define UDSL_MAX_BUF_SIZE 64 * 1024 /* bytes */ 102#define UDSL_MAX_BUF_SIZE 65536
103#define UDSL_DEFAULT_RCV_URBS 4 103#define UDSL_DEFAULT_RCV_URBS 4
104#define UDSL_DEFAULT_SND_URBS 4 104#define UDSL_DEFAULT_SND_URBS 4
105#define UDSL_DEFAULT_RCV_BUF_SIZE 64 * ATM_CELL_SIZE /* bytes */ 105#define UDSL_DEFAULT_RCV_BUF_SIZE 3392 /* 64 * ATM_CELL_SIZE */
106#define UDSL_DEFAULT_SND_BUF_SIZE 64 * ATM_CELL_SIZE /* bytes */ 106#define UDSL_DEFAULT_SND_BUF_SIZE 3392 /* 64 * ATM_CELL_SIZE */
107 107
108#define ATM_CELL_HEADER (ATM_CELL_SIZE - ATM_CELL_PAYLOAD) 108#define ATM_CELL_HEADER (ATM_CELL_SIZE - ATM_CELL_PAYLOAD)
109 109
@@ -135,7 +135,7 @@ MODULE_PARM_DESC(rcv_buf_bytes,
135module_param(snd_buf_bytes, uint, S_IRUGO); 135module_param(snd_buf_bytes, uint, S_IRUGO);
136MODULE_PARM_DESC(snd_buf_bytes, 136MODULE_PARM_DESC(snd_buf_bytes,
137 "Size of the buffers used for transmission, in bytes (range: 1-" 137 "Size of the buffers used for transmission, in bytes (range: 1-"
138 __MODULE_STRING(UDSL_MAX_SND_BUF_SIZE) ", default: " 138 __MODULE_STRING(UDSL_MAX_BUF_SIZE) ", default: "
139 __MODULE_STRING(UDSL_DEFAULT_SND_BUF_SIZE) ")"); 139 __MODULE_STRING(UDSL_DEFAULT_SND_BUF_SIZE) ")");
140 140
141 141
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index fbd938d4ea58..e2e00ba4e1e6 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1805,6 +1805,12 @@ int usb_add_hcd(struct usb_hcd *hcd,
1805 USB_SPEED_FULL; 1805 USB_SPEED_FULL;
1806 hcd->self.root_hub = rhdev; 1806 hcd->self.root_hub = rhdev;
1807 1807
1808 /* wakeup flag init defaults to "everything works" for root hubs,
1809 * but drivers can override it in reset() if needed, along with
1810 * recording the overall controller's system wakeup capability.
1811 */
1812 device_init_wakeup(&rhdev->dev, 1);
1813
1808 /* "reset" is misnamed; its role is now one-time init. the controller 1814 /* "reset" is misnamed; its role is now one-time init. the controller
1809 * should already have been reset (and boot firmware kicked off etc). 1815 * should already have been reset (and boot firmware kicked off etc).
1810 */ 1816 */
@@ -1813,13 +1819,6 @@ int usb_add_hcd(struct usb_hcd *hcd,
1813 goto err_hcd_driver_setup; 1819 goto err_hcd_driver_setup;
1814 } 1820 }
1815 1821
1816 /* wakeup flag init is in transition; for now we can't rely on PCI to
1817 * initialize these bits properly, so we let reset() override it.
1818 * This init should _precede_ the reset() once PCI behaves.
1819 */
1820 device_init_wakeup(&rhdev->dev,
1821 device_can_wakeup(hcd->self.controller));
1822
1823 /* NOTE: root hub and controller capabilities may not be the same */ 1822 /* NOTE: root hub and controller capabilities may not be the same */
1824 if (device_can_wakeup(hcd->self.controller) 1823 if (device_can_wakeup(hcd->self.controller)
1825 && device_can_wakeup(&hcd->self.root_hub->dev)) 1824 && device_can_wakeup(&hcd->self.root_hub->dev))
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0c87f73f2933..90b8d43c6b33 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1168,19 +1168,9 @@ static inline const char *plural(int n)
1168static int choose_configuration(struct usb_device *udev) 1168static int choose_configuration(struct usb_device *udev)
1169{ 1169{
1170 int i; 1170 int i;
1171 u16 devstatus;
1172 int bus_powered;
1173 int num_configs; 1171 int num_configs;
1174 struct usb_host_config *c, *best; 1172 struct usb_host_config *c, *best;
1175 1173
1176 /* If this fails, assume the device is bus-powered */
1177 devstatus = 0;
1178 usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
1179 le16_to_cpus(&devstatus);
1180 bus_powered = ((devstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0);
1181 dev_dbg(&udev->dev, "device is %s-powered\n",
1182 bus_powered ? "bus" : "self");
1183
1184 best = NULL; 1174 best = NULL;
1185 c = udev->config; 1175 c = udev->config;
1186 num_configs = udev->descriptor.bNumConfigurations; 1176 num_configs = udev->descriptor.bNumConfigurations;
@@ -1197,6 +1187,19 @@ static int choose_configuration(struct usb_device *udev)
1197 * similar errors in their descriptors. If the next test 1187 * similar errors in their descriptors. If the next test
1198 * were allowed to execute, such configurations would always 1188 * were allowed to execute, such configurations would always
1199 * be rejected and the devices would not work as expected. 1189 * be rejected and the devices would not work as expected.
1190 * In the meantime, we run the risk of selecting a config
1191 * that requires external power at a time when that power
1192 * isn't available. It seems to be the lesser of two evils.
1193 *
1194 * Bugzilla #6448 reports a device that appears to crash
1195 * when it receives a GET_DEVICE_STATUS request! We don't
1196 * have any other way to tell whether a device is self-powered,
1197 * but since we don't use that information anywhere but here,
1198 * the call has been removed.
1199 *
1200 * Maybe the GET_DEVICE_STATUS call and the test below can
1201 * be reinstated when device firmwares become more reliable.
1202 * Don't hold your breath.
1200 */ 1203 */
1201#if 0 1204#if 0
1202 /* Rule out self-powered configs for a bus-powered device */ 1205 /* Rule out self-powered configs for a bus-powered device */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 544f7589912f..73f5a379d9b3 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -863,7 +863,7 @@ static int ohci_restart (struct ohci_hcd *ohci)
863 i = ohci->num_ports; 863 i = ohci->num_ports;
864 while (i--) 864 while (i--)
865 ohci_writel (ohci, RH_PS_PSS, 865 ohci_writel (ohci, RH_PS_PSS,
866 &ohci->regs->roothub.portstatus [temp]); 866 &ohci->regs->roothub.portstatus [i]);
867 ohci_dbg (ohci, "restart complete\n"); 867 ohci_dbg (ohci, "restart complete\n");
868 } 868 }
869 return 0; 869 return 0;
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index f419bd82ab7f..435273e7c85c 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -1557,6 +1557,9 @@ void hid_init_reports(struct hid_device *hid)
1557#define USB_VENDOR_ID_HP 0x03f0 1557#define USB_VENDOR_ID_HP 0x03f0
1558#define USB_DEVICE_ID_HP_USBHUB_KB 0x020c 1558#define USB_DEVICE_ID_HP_USBHUB_KB 0x020c
1559 1559
1560#define USB_VENDOR_ID_IBM 0x04b3
1561#define USB_DEVICE_ID_IBM_USBHUB_KB 0x3005
1562
1560#define USB_VENDOR_ID_CREATIVELABS 0x062a 1563#define USB_VENDOR_ID_CREATIVELABS 0x062a
1561#define USB_DEVICE_ID_CREATIVELABS_SILVERCREST 0x0201 1564#define USB_DEVICE_ID_CREATIVELABS_SILVERCREST 0x0201
1562 1565
@@ -1681,6 +1684,7 @@ static const struct hid_blacklist {
1681 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_USBHUB_KB, HID_QUIRK_NOGET}, 1684 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_USBHUB_KB, HID_QUIRK_NOGET},
1682 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVELABS_SILVERCREST, HID_QUIRK_NOGET }, 1685 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVELABS_SILVERCREST, HID_QUIRK_NOGET },
1683 { USB_VENDOR_ID_HP, USB_DEVICE_ID_HP_USBHUB_KB, HID_QUIRK_NOGET }, 1686 { USB_VENDOR_ID_HP, USB_DEVICE_ID_HP_USBHUB_KB, HID_QUIRK_NOGET },
1687 { USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_USBHUB_KB, HID_QUIRK_NOGET },
1684 { USB_VENDOR_ID_TANGTOP, USB_DEVICE_ID_TANGTOP_USBPS2, HID_QUIRK_NOGET }, 1688 { USB_VENDOR_ID_TANGTOP, USB_DEVICE_ID_TANGTOP_USBPS2, HID_QUIRK_NOGET },
1685 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 1689 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
1686 { USB_VENDOR_ID_SILVERCREST, USB_DEVICE_ID_SILVERCREST_KB, HID_QUIRK_NOGET }, 1690 { USB_VENDOR_ID_SILVERCREST, USB_DEVICE_ID_SILVERCREST_KB, HID_QUIRK_NOGET },
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 3824df33094e..1fd9cb85f4ca 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/usb.h> 17#include <linux/usb.h>
18#include <linux/delay.h>
18 19
19#define MAX_INTEL_HEX_RECORD_LENGTH 16 20#define MAX_INTEL_HEX_RECORD_LENGTH 16
20typedef struct _INTEL_HEX_RECORD 21typedef struct _INTEL_HEX_RECORD
@@ -114,6 +115,7 @@ static int emi26_load_firmware (struct usb_device *dev)
114 115
115 /* De-assert reset (let the CPU run) */ 116 /* De-assert reset (let the CPU run) */
116 err = emi26_set_reset(dev,0); 117 err = emi26_set_reset(dev,0);
118 msleep(250); /* let device settle */
117 119
118 /* 2. We upload the FPGA firmware into the EMI 120 /* 2. We upload the FPGA firmware into the EMI
119 * Note: collect up to 1023 (yes!) bytes and send them with 121 * Note: collect up to 1023 (yes!) bytes and send them with
@@ -150,6 +152,7 @@ static int emi26_load_firmware (struct usb_device *dev)
150 goto wraperr; 152 goto wraperr;
151 } 153 }
152 } 154 }
155 msleep(250); /* let device settle */
153 156
154 /* De-assert reset (let the CPU run) */ 157 /* De-assert reset (let the CPU run) */
155 err = emi26_set_reset(dev,0); 158 err = emi26_set_reset(dev,0);
@@ -192,6 +195,7 @@ static int emi26_load_firmware (struct usb_device *dev)
192 err("%s - error loading firmware: error = %d", __FUNCTION__, err); 195 err("%s - error loading firmware: error = %d", __FUNCTION__, err);
193 goto wraperr; 196 goto wraperr;
194 } 197 }
198 msleep(250); /* let device settle */
195 199
196 /* return 1 to fail the driver initialization 200
197 * and give the real driver a chance to load */ 201
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 52fea2e08db8..fe351371f274 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/usb.h> 17#include <linux/usb.h>
18#include <linux/delay.h>
18 19
19#define MAX_INTEL_HEX_RECORD_LENGTH 16 20#define MAX_INTEL_HEX_RECORD_LENGTH 16
20typedef struct _INTEL_HEX_RECORD 21typedef struct _INTEL_HEX_RECORD
@@ -123,6 +124,7 @@ static int emi62_load_firmware (struct usb_device *dev)
123 124
124 /* De-assert reset (let the CPU run) */ 125 /* De-assert reset (let the CPU run) */
125 err = emi62_set_reset(dev,0); 126 err = emi62_set_reset(dev,0);
127 msleep(250); /* let device settle */
126 128
127 /* 2. We upload the FPGA firmware into the EMI 129 /* 2. We upload the FPGA firmware into the EMI
128 * Note: collect up to 1023 (yes!) bytes and send them with 130 * Note: collect up to 1023 (yes!) bytes and send them with
@@ -166,6 +168,7 @@ static int emi62_load_firmware (struct usb_device *dev)
166 err("%s - error loading firmware: error = %d", __FUNCTION__, err); 168 err("%s - error loading firmware: error = %d", __FUNCTION__, err);
167 goto wraperr; 169 goto wraperr;
168 } 170 }
171 msleep(250); /* let device settle */
169 172
170 /* 4. We put the part of the firmware that lies in the external RAM into the EZ-USB */ 173 /* 4. We put the part of the firmware that lies in the external RAM into the EZ-USB */
171 174
@@ -228,6 +231,7 @@ static int emi62_load_firmware (struct usb_device *dev)
228 err("%s - error loading firmware: error = %d", __FUNCTION__, err); 231 err("%s - error loading firmware: error = %d", __FUNCTION__, err);
229 goto wraperr; 232 goto wraperr;
230 } 233 }
234 msleep(250); /* let device settle */
231 235
232 kfree(buf); 236 kfree(buf);
233 237
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 2deb4c01539e..7683926a1b6f 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -318,6 +318,8 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
318 set_register(pegasus, PhyCtrl, (indx | PHY_READ)); 318 set_register(pegasus, PhyCtrl, (indx | PHY_READ));
319 for (i = 0; i < REG_TIMEOUT; i++) { 319 for (i = 0; i < REG_TIMEOUT; i++) {
320 ret = get_registers(pegasus, PhyCtrl, 1, data); 320 ret = get_registers(pegasus, PhyCtrl, 1, data);
321 if (ret == -ESHUTDOWN)
322 goto fail;
321 if (data[0] & PHY_DONE) 323 if (data[0] & PHY_DONE)
322 break; 324 break;
323 } 325 }
@@ -326,6 +328,7 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
326 *regd = le16_to_cpu(regdi); 328 *regd = le16_to_cpu(regdi);
327 return ret; 329 return ret;
328 } 330 }
331fail:
329 if (netif_msg_drv(pegasus)) 332 if (netif_msg_drv(pegasus))
330 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__); 333 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__);
331 334
@@ -354,12 +357,15 @@ static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
354 set_register(pegasus, PhyCtrl, (indx | PHY_WRITE)); 357 set_register(pegasus, PhyCtrl, (indx | PHY_WRITE));
355 for (i = 0; i < REG_TIMEOUT; i++) { 358 for (i = 0; i < REG_TIMEOUT; i++) {
356 ret = get_registers(pegasus, PhyCtrl, 1, data); 359 ret = get_registers(pegasus, PhyCtrl, 1, data);
360 if (ret == -ESHUTDOWN)
361 goto fail;
357 if (data[0] & PHY_DONE) 362 if (data[0] & PHY_DONE)
358 break; 363 break;
359 } 364 }
360 if (i < REG_TIMEOUT) 365 if (i < REG_TIMEOUT)
361 return ret; 366 return ret;
362 367
368fail:
363 if (netif_msg_drv(pegasus)) 369 if (netif_msg_drv(pegasus))
364 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__); 370 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__);
365 return -ETIMEDOUT; 371 return -ETIMEDOUT;
@@ -387,6 +393,8 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
387 ret = get_registers(pegasus, EpromCtrl, 1, &tmp); 393 ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
388 if (tmp & EPROM_DONE) 394 if (tmp & EPROM_DONE)
389 break; 395 break;
396 if (ret == -ESHUTDOWN)
397 goto fail;
390 } 398 }
391 if (i < REG_TIMEOUT) { 399 if (i < REG_TIMEOUT) {
392 ret = get_registers(pegasus, EpromData, 2, &retdatai); 400 ret = get_registers(pegasus, EpromData, 2, &retdatai);
@@ -394,6 +402,7 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
394 return ret; 402 return ret;
395 } 403 }
396 404
405fail:
397 if (netif_msg_drv(pegasus)) 406 if (netif_msg_drv(pegasus))
398 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__); 407 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__);
399 return -ETIMEDOUT; 408 return -ETIMEDOUT;
@@ -433,12 +442,15 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
433 442
434 for (i = 0; i < REG_TIMEOUT; i++) { 443 for (i = 0; i < REG_TIMEOUT; i++) {
435 ret = get_registers(pegasus, EpromCtrl, 1, &tmp); 444 ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
445 if (ret == -ESHUTDOWN)
446 goto fail;
436 if (tmp & EPROM_DONE) 447 if (tmp & EPROM_DONE)
437 break; 448 break;
438 } 449 }
439 disable_eprom_write(pegasus); 450 disable_eprom_write(pegasus);
440 if (i < REG_TIMEOUT) 451 if (i < REG_TIMEOUT)
441 return ret; 452 return ret;
453fail:
442 if (netif_msg_drv(pegasus)) 454 if (netif_msg_drv(pegasus))
443 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__); 455 dev_warn(&pegasus->intf->dev, "fail %s\n", __FUNCTION__);
444 return -ETIMEDOUT; 456 return -ETIMEDOUT;
@@ -1378,9 +1390,8 @@ static int pegasus_suspend (struct usb_interface *intf, pm_message_t message)
1378 struct pegasus *pegasus = usb_get_intfdata(intf); 1390 struct pegasus *pegasus = usb_get_intfdata(intf);
1379 1391
1380 netif_device_detach (pegasus->net); 1392 netif_device_detach (pegasus->net);
1393 cancel_delayed_work(&pegasus->carrier_check);
1381 if (netif_running(pegasus->net)) { 1394 if (netif_running(pegasus->net)) {
1382 cancel_delayed_work(&pegasus->carrier_check);
1383
1384 usb_kill_urb(pegasus->rx_urb); 1395 usb_kill_urb(pegasus->rx_urb);
1385 usb_kill_urb(pegasus->intr_urb); 1396 usb_kill_urb(pegasus->intr_urb);
1386 } 1397 }
@@ -1400,10 +1411,9 @@ static int pegasus_resume (struct usb_interface *intf)
1400 pegasus->intr_urb->status = 0; 1411 pegasus->intr_urb->status = 0;
1401 pegasus->intr_urb->actual_length = 0; 1412 pegasus->intr_urb->actual_length = 0;
1402 intr_callback(pegasus->intr_urb, NULL); 1413 intr_callback(pegasus->intr_urb, NULL);
1403
1404 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
1405 CARRIER_CHECK_DELAY);
1406 } 1414 }
1415 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
1416 CARRIER_CHECK_DELAY);
1407 return 0; 1417 return 0;
1408} 1418}
1409 1419
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index f96b73f54bf1..5c60be521561 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -71,6 +71,16 @@ config USB_SERIAL_ANYDATA
71 To compile this driver as a module, choose M here: the 71 To compile this driver as a module, choose M here: the
72 module will be called anydata. 72 module will be called anydata.
73 73
74config USB_SERIAL_ARK3116
75 tristate "USB ARK Micro 3116 USB Serial Driver (EXPERIMENTAL)"
76 depends on USB_SERIAL && EXPERIMENTAL
77 help
78	  Say Y here if you want to use an ARK Micro 3116 USB to Serial
79 device.
80
81 To compile this driver as a module, choose M here: the
82	  module will be called ark3116.
83
74config USB_SERIAL_BELKIN 84config USB_SERIAL_BELKIN
75 tristate "USB Belkin and Peracom Single Port Serial Driver" 85 tristate "USB Belkin and Peracom Single Port Serial Driver"
76 depends on USB_SERIAL 86 depends on USB_SERIAL
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 93c21245b1af..5a0960fc9d3e 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -13,6 +13,7 @@ usbserial-objs := usb-serial.o generic.o bus.o $(usbserial-obj-y)
13 13
14obj-$(CONFIG_USB_SERIAL_AIRPRIME) += airprime.o 14obj-$(CONFIG_USB_SERIAL_AIRPRIME) += airprime.o
15obj-$(CONFIG_USB_SERIAL_ANYDATA) += anydata.o 15obj-$(CONFIG_USB_SERIAL_ANYDATA) += anydata.o
16obj-$(CONFIG_USB_SERIAL_ARK3116) += ark3116.o
16obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o 17obj-$(CONFIG_USB_SERIAL_BELKIN) += belkin_sa.o
17obj-$(CONFIG_USB_SERIAL_CP2101) += cp2101.o 18obj-$(CONFIG_USB_SERIAL_CP2101) += cp2101.o
18obj-$(CONFIG_USB_SERIAL_CYBERJACK) += cyberjack.o 19obj-$(CONFIG_USB_SERIAL_CYBERJACK) += cyberjack.o
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index dbf1f063098c..694b205f9b73 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -18,6 +18,7 @@
18static struct usb_device_id id_table [] = { 18static struct usb_device_id id_table [] = {
19 { USB_DEVICE(0xf3d, 0x0112) }, /* AirPrime CDMA Wireless PC Card */ 19 { USB_DEVICE(0xf3d, 0x0112) }, /* AirPrime CDMA Wireless PC Card */
20 { USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */ 20 { USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */
21 { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless Aircard 580 */
21 { }, 22 { },
22}; 23};
23MODULE_DEVICE_TABLE(usb, id_table); 24MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
new file mode 100644
index 000000000000..8dec796222a0
--- /dev/null
+++ b/drivers/usb/serial/ark3116.c
@@ -0,0 +1,465 @@
1/*
2 * ark3116
3 * - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547,
4 * productid=0x0232) (used in a datacable called KQ-U8A)
5 *
6 * - based on code by krisfx -> thanks !!
7 * (see http://www.linuxquestions.org/questions/showthread.php?p=2184457#post2184457)
8 *
9 * - based on logs created by usbsnoopy
10 *
11 * Author : Simon Schulz [ark3116_driver<AT>auctionant.de]
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/tty.h>
22#include <linux/module.h>
23#include <linux/usb.h>
24#include "usb-serial.h"
25
26
27static int debug;
28
29static struct usb_device_id id_table [] = {
30 { USB_DEVICE(0x6547, 0x0232) },
31 { },
32};
33MODULE_DEVICE_TABLE(usb, id_table);
34
35struct ark3116_private {
36 spinlock_t lock;
37 u8 termios_initialized;
38};
39
40static inline void ARK3116_SND(struct usb_serial *serial, int seq,
41 __u8 request, __u8 requesttype,
42 __u16 value, __u16 index)
43{
44 int result;
45 result = usb_control_msg(serial->dev,
46 usb_sndctrlpipe(serial->dev,0),
47 request, requesttype, value, index,
48 NULL,0x00, 1000);
49 dbg("%03d > ok",seq);
50}
51
52static inline void ARK3116_RCV(struct usb_serial *serial, int seq,
53 __u8 request, __u8 requesttype,
54 __u16 value, __u16 index, __u8 expected,
55 char *buf)
56{
57 int result;
58 result = usb_control_msg(serial->dev,
59 usb_rcvctrlpipe(serial->dev,0),
60 request, requesttype, value, index,
61 buf, 0x0000001, 1000);
62 if (result)
63 dbg("%03d < %d bytes [0x%02X]",seq, result, buf[0]);
64 else
65 dbg("%03d < 0 bytes", seq);
66}
67
68
69static inline void ARK3116_RCV_QUIET(struct usb_serial *serial,
70 __u8 request, __u8 requesttype,
71 __u16 value, __u16 index, char *buf)
72{
73 usb_control_msg(serial->dev,
74 usb_rcvctrlpipe(serial->dev,0),
75 request, requesttype, value, index,
76 buf, 0x0000001, 1000);
77}
78
79
80static int ark3116_attach(struct usb_serial *serial)
81{
82 char *buf;
83 struct ark3116_private *priv;
84 int i;
85
86 for (i = 0; i < serial->num_ports; ++i) {
87 priv = kmalloc (sizeof (struct ark3116_private), GFP_KERNEL);
88 if (!priv)
89 goto cleanup;
90 memset (priv, 0x00, sizeof (struct ark3116_private));
91 spin_lock_init(&priv->lock);
92
93 usb_set_serial_port_data(serial->port[i], priv);
94 }
95
96 buf = kmalloc(1, GFP_KERNEL);
97 if (!buf) {
98 dbg("error kmalloc -> out of mem ?");
99 goto cleanup;
100 }
101
102 /* 3 */
103 ARK3116_SND(serial, 3,0xFE,0x40,0x0008,0x0002);
104 ARK3116_SND(serial, 4,0xFE,0x40,0x0008,0x0001);
105 ARK3116_SND(serial, 5,0xFE,0x40,0x0000,0x0008);
106 ARK3116_SND(serial, 6,0xFE,0x40,0x0000,0x000B);
107
108 /* <-- seq7 */
109 ARK3116_RCV(serial, 7,0xFE,0xC0,0x0000,0x0003, 0x00, buf);
110 ARK3116_SND(serial, 8,0xFE,0x40,0x0080,0x0003);
111 ARK3116_SND(serial, 9,0xFE,0x40,0x001A,0x0000);
112 ARK3116_SND(serial,10,0xFE,0x40,0x0000,0x0001);
113 ARK3116_SND(serial,11,0xFE,0x40,0x0000,0x0003);
114
115 /* <-- seq12 */
116 ARK3116_RCV(serial,12,0xFE,0xC0,0x0000,0x0004, 0x00, buf);
117 ARK3116_SND(serial,13,0xFE,0x40,0x0000,0x0004);
118
119 /* 14 */
120 ARK3116_RCV(serial,14,0xFE,0xC0,0x0000,0x0004, 0x00, buf);
121 ARK3116_SND(serial,15,0xFE,0x40,0x0000,0x0004);
122
123 /* 16 */
124 ARK3116_RCV(serial,16,0xFE,0xC0,0x0000,0x0004, 0x00, buf);
125 /* --> seq17 */
126 ARK3116_SND(serial,17,0xFE,0x40,0x0001,0x0004);
127
128 /* <-- seq18 */
129 ARK3116_RCV(serial,18,0xFE,0xC0,0x0000,0x0004, 0x01, buf);
130
131 /* --> seq19 */
132 ARK3116_SND(serial,19,0xFE,0x40,0x0003,0x0004);
133
134
135 /* <-- seq20 */
136 /* seems like serial port status info (RTS, CTS,...) */
137 /* returns modem control line status ?! */
138 ARK3116_RCV(serial,20,0xFE,0xC0,0x0000,0x0006, 0xFF, buf);
139
140 /* set 9600 baud & do some init ?! */
141 ARK3116_SND(serial,147,0xFE,0x40,0x0083,0x0003);
142 ARK3116_SND(serial,148,0xFE,0x40,0x0038,0x0000);
143 ARK3116_SND(serial,149,0xFE,0x40,0x0001,0x0001);
144 ARK3116_SND(serial,150,0xFE,0x40,0x0003,0x0003);
145 ARK3116_RCV(serial,151,0xFE,0xC0,0x0000,0x0004,0x03, buf);
146 ARK3116_SND(serial,152,0xFE,0x40,0x0000,0x0003);
147 ARK3116_RCV(serial,153,0xFE,0xC0,0x0000,0x0003,0x00, buf);
148 ARK3116_SND(serial,154,0xFE,0x40,0x0003,0x0003);
149
150 kfree(buf);
151 return(0);
152
153cleanup:
154 for (--i; i>=0; --i)
155 usb_set_serial_port_data(serial->port[i], NULL);
156 return -ENOMEM;
157}
158
159static void ark3116_set_termios(struct usb_serial_port *port,
160 struct termios *old_termios)
161{
162 struct usb_serial *serial = port->serial;
163 struct ark3116_private *priv = usb_get_serial_port_data(port);
164 unsigned int cflag = port->tty->termios->c_cflag;
165 unsigned long flags;
166 int baud;
167 int ark3116_baud;
168 char *buf;
169 char config;
170
171 config = 0;
172
173 dbg("%s - port %d", __FUNCTION__, port->number);
174
175 if ((!port->tty) || (!port->tty->termios)) {
176 dbg("%s - no tty structures", __FUNCTION__);
177 return;
178 }
179
180 spin_lock_irqsave(&priv->lock, flags);
181 if (!priv->termios_initialized) {
182 *(port->tty->termios) = tty_std_termios;
183 port->tty->termios->c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
184 priv->termios_initialized = 1;
185 }
186 spin_unlock_irqrestore(&priv->lock, flags);
187
188 cflag = port->tty->termios->c_cflag;
189
190 /* check that they really want us to change something: */
191 if (old_termios) {
192 if ((cflag == old_termios->c_cflag) &&
193 (RELEVANT_IFLAG(port->tty->termios->c_iflag) ==
194 RELEVANT_IFLAG(old_termios->c_iflag))) {
195 dbg("%s - nothing to change...", __FUNCTION__);
196 return;
197 }
198 }
199
200 buf = kmalloc(1, GFP_KERNEL);
201 if (!buf) {
202 dbg("error kmalloc");
203 return;
204 }
205
206 /* set data bit count (8/7/6/5) */
207 if (cflag & CSIZE){
208 switch (cflag & CSIZE){
209 case CS5:
210 config |= 0x00;
211 dbg("setting CS5");
212 break;
213 case CS6:
214 config |= 0x01;
215 dbg("setting CS6");
216 break;
217 case CS7:
218 config |= 0x02;
219 dbg("setting CS7");
220 break;
221 default:
222 err ("CSIZE was set but not CS5-CS8, using CS8!");
223 case CS8:
224 config |= 0x03;
225 dbg("setting CS8");
226 break;
227 }
228 }
229
230 /* set parity (NONE,EVEN,ODD) */
231 if (cflag & PARENB){
232 if (cflag & PARODD) {
233 config |= 0x08;
234 dbg("setting parity to ODD");
235 } else {
236 config |= 0x18;
237 dbg("setting parity to EVEN");
238 }
239 } else {
240 dbg("setting parity to NONE");
241 }
242
243 /* SET STOPBIT (1/2) */
244 if (cflag & CSTOPB) {
245 config |= 0x04;
246 dbg ("setting 2 stop bits");
247 } else {
248 dbg ("setting 1 stop bit");
249 }
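	/*
	 * Worked example: a plain 8N1 line ends up with config = 0x03
	 * (CS8, no parity bits, no CSTOPB bit), which matches the
	 * 0x0003 value written at sequence 154 in ark3116_attach().
	 */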
250
251
252 /* set baudrate: */
253 baud = 0;
254 switch (cflag & CBAUD){
255 case B0:
256 err("can't set 0baud, using 9600 instead");
257 break;
258 case B75: baud = 75; break;
259 case B150: baud = 150; break;
260 case B300: baud = 300; break;
261 case B600: baud = 600; break;
262 case B1200: baud = 1200; break;
263 case B1800: baud = 1800; break;
264 case B2400: baud = 2400; break;
265 case B4800: baud = 4800; break;
266 case B9600: baud = 9600; break;
267 case B19200: baud = 19200; break;
268 case B38400: baud = 38400; break;
269 case B57600: baud = 57600; break;
270 case B115200: baud = 115200; break;
271 case B230400: baud = 230400; break;
272 case B460800: baud = 460800; break;
273 default:
274 dbg("does not support the baudrate requested (fix it)");
275 break;
276 }
277
278 /* set 9600 as default (if given baudrate is invalid for example) */
279 if (baud == 0)
280 baud = 9600;
281
282 /*
283	 * found by trial and error; be careful, there may be other options
284	 * for the multiplier, etc.
285 */
286 if (baud == 460800)
287 /* strange, for 460800 the formula is wrong
288		 * (don't use round(), or 9600 baud comes out wrong) */
289 ark3116_baud = 7;
290 else
291 ark3116_baud = 3000000 / baud;
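	/*
	 * Worked example: for the default 9600 baud this gives
	 * 3000000 / 9600 = 312 = 0x0138, so the two writes below send
	 * 0x38 to index 0x0000 and 0x01 to index 0x0001 -- the same
	 * values the 9600-baud init in ark3116_attach() uses
	 * (sequences 148/149).
	 */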
292
293 /* ? */
294 ARK3116_RCV(serial,0,0xFE,0xC0,0x0000,0x0003, 0x03, buf);
295 /* offset = buf[0]; */
296 /* offset = 0x03; */
297 /* dbg("using 0x%04X as target for 0x0003:",0x0080+offset); */
298
299
300 /* set baudrate */
301 dbg("setting baudrate to %d (->reg=%d)",baud,ark3116_baud);
302 ARK3116_SND(serial,147,0xFE,0x40,0x0083,0x0003);
303 ARK3116_SND(serial,148,0xFE,0x40,(ark3116_baud & 0x00FF) ,0x0000);
304 ARK3116_SND(serial,149,0xFE,0x40,(ark3116_baud & 0xFF00)>>8,0x0001);
305 ARK3116_SND(serial,150,0xFE,0x40,0x0003,0x0003);
306
307 /* ? */
308 ARK3116_RCV(serial,151,0xFE,0xC0,0x0000,0x0004,0x03, buf);
309 ARK3116_SND(serial,152,0xFE,0x40,0x0000,0x0003);
310
311 /* set data bit count, stop bit count & parity: */
312 dbg("updating bit count, stop bit or parity (cfg=0x%02X)", config);
313 ARK3116_RCV(serial,153,0xFE,0xC0,0x0000,0x0003,0x00, buf);
314 ARK3116_SND(serial,154,0xFE,0x40,config,0x0003);
315
316 if (cflag & CRTSCTS)
317 dbg("CRTSCTS not supported by chipset ?!");
318
319 /* TEST ARK3116_SND(154,0xFE,0x40,0xFFFF, 0x0006); */
320
321 kfree(buf);
322 return;
323}
324
325static int ark3116_open(struct usb_serial_port *port, struct file *filp)
326{
327 struct termios tmp_termios;
328 struct usb_serial *serial = port->serial;
329 char *buf;
330 int result = 0;
331
332 dbg("%s - port %d", __FUNCTION__, port->number);
333
334 buf = kmalloc(1, GFP_KERNEL);
335 if (!buf) {
336 dbg("error kmalloc -> out of mem ?");
337 return -ENOMEM;
338 }
339
340 result = usb_serial_generic_open(port, filp);
341 if (result)
342 return result;
343
344 /* open */
345 ARK3116_RCV(serial,111,0xFE,0xC0,0x0000,0x0003, 0x02, buf);
346
347 ARK3116_SND(serial,112,0xFE,0x40,0x0082,0x0003);
348 ARK3116_SND(serial,113,0xFE,0x40,0x001A,0x0000);
349 ARK3116_SND(serial,114,0xFE,0x40,0x0000,0x0001);
350 ARK3116_SND(serial,115,0xFE,0x40,0x0002,0x0003);
351
352 ARK3116_RCV(serial,116,0xFE,0xC0,0x0000,0x0004, 0x03, buf);
353 ARK3116_SND(serial,117,0xFE,0x40,0x0002,0x0004);
354
355 ARK3116_RCV(serial,118,0xFE,0xC0,0x0000,0x0004, 0x02, buf);
356 ARK3116_SND(serial,119,0xFE,0x40,0x0000,0x0004);
357
358 ARK3116_RCV(serial,120,0xFE,0xC0,0x0000,0x0004, 0x00, buf);
359
360 ARK3116_SND(serial,121,0xFE,0x40,0x0001,0x0004);
361
362 ARK3116_RCV(serial,122,0xFE,0xC0,0x0000,0x0004, 0x01, buf);
363
364 ARK3116_SND(serial,123,0xFE,0x40,0x0003,0x0004);
365
366 /* returns different values (control lines ?!) */
367 ARK3116_RCV(serial,124,0xFE,0xC0,0x0000,0x0006, 0xFF, buf);
368
369 /* initialise termios: */
370 if (port->tty)
371 ark3116_set_termios(port, &tmp_termios);
372
373out:
374	kfree(buf);
375	return result;
376
377}
378
379static int ark3116_ioctl(struct usb_serial_port *port, struct file *file,
380 unsigned int cmd, unsigned long arg)
381{
382 dbg("ioctl not supported yet...");
383 return -ENOIOCTLCMD;
384}
385
386static int ark3116_tiocmget(struct usb_serial_port *port, struct file *file)
387{
388 struct usb_serial *serial = port->serial;
389 char *buf;
390 char temp;
391
392	/* the serial port status (RTS, CTS, ...) appears to be stored
393	 * in register 0x0006:
394	 * grounding PCB connection point 11 sets bit 4 of the response,
395	 * grounding PCB connection point 7 sets bit 6 of the response
396 */
397
398 buf = kmalloc(1, GFP_KERNEL);
399 if (!buf) {
400		dbg("kmalloc failed");
401 return -ENOMEM;
402 }
403
404 /* read register: */
405 ARK3116_RCV_QUIET(serial,0xFE,0xC0,0x0000,0x0006,buf);
406 temp = buf[0];
407 kfree(buf);
408
409	/* it is not certain that bit 4 = CTS and bit 6 = DSR; this was
410	 * just a quick guess
411 */
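	/*
	 * Under that guess, a register value with bits 4 and 6 set (for
	 * example temp == 0x50) makes this function return
	 * TIOCM_CTS | TIOCM_DSR, while a value of 0x00 reports neither line.
	 */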
412 return (temp & (1<<4) ? TIOCM_CTS : 0) |
413 (temp & (1<<6) ? TIOCM_DSR : 0);
414}
415
416static struct usb_driver ark3116_driver = {
417 .name = "ark3116",
418 .probe = usb_serial_probe,
419 .disconnect = usb_serial_disconnect,
420 .id_table = id_table,
421};
422
423static struct usb_serial_driver ark3116_device = {
424 .driver = {
425 .owner = THIS_MODULE,
426 .name = "ark3116",
427 },
428 .id_table = id_table,
429 .num_interrupt_in = 1,
430 .num_bulk_in = 1,
431 .num_bulk_out = 1,
432 .num_ports = 1,
433 .attach = ark3116_attach,
434 .set_termios = ark3116_set_termios,
435 .ioctl = ark3116_ioctl,
436 .tiocmget = ark3116_tiocmget,
437 .open = ark3116_open,
438};
439
440static int __init ark3116_init(void)
441{
442 int retval;
443
444 retval = usb_serial_register(&ark3116_device);
445 if (retval)
446 return retval;
447 retval = usb_register(&ark3116_driver);
448 if (retval)
449 usb_serial_deregister(&ark3116_device);
450 return retval;
451}
452
453static void __exit ark3116_exit(void)
454{
455 usb_deregister(&ark3116_driver);
456 usb_serial_deregister(&ark3116_device);
457}
458
459module_init(ark3116_init);
460module_exit(ark3116_exit);
461MODULE_LICENSE("GPL");
462
463module_param(debug, bool, S_IRUGO | S_IWUSR);
464MODULE_PARM_DESC(debug, "Debug enabled or not");
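/*
 * Usage note (editor's addition): with permissions S_IRUGO | S_IWUSR the
 * "debug" flag can be set at load time ("modprobe ark3116 debug=1") or
 * toggled later through /sys/module/ark3116/parameters/debug.
 */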
465
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 82151207d814..986d7622273d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -307,6 +307,7 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
307 307
308 308
309static struct usb_device_id id_table_combined [] = { 309static struct usb_device_id id_table_combined [] = {
310 { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) },
310 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, 311 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
311 { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, 312 { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
312 { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) }, 313 { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) },
@@ -498,6 +499,7 @@ static struct usb_device_id id_table_combined [] = {
498 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, 499 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
499 { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) }, 500 { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
500 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) }, 501 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
502 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
501 { }, /* Optional parameter entry */ 503 { }, /* Optional parameter entry */
502 { } /* Terminating entry */ 504 { } /* Terminating entry */
503}; 505};
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 2c55a5ea9c99..d69a917e768f 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -32,6 +32,10 @@
32#define FTDI_NF_RIC_PID 0x0001 /* Product Id */ 32#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
33 33
34 34
35/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */
36#define FTDI_ACTZWAVE_PID 0xF2D0
37
38
35/* www.irtrans.de device */ 39/* www.irtrans.de device */
36#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */ 40#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
37 41
@@ -426,6 +430,11 @@
426#define PAPOUCH_VID 0x5050 /* Vendor ID */ 430#define PAPOUCH_VID 0x5050 /* Vendor ID */
427#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ 431#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
428 432
433/*
434 * ACG Identification Technologies GmbH products (http://www.acg.de/).
435 * Submitted by anton -at- goto10 -dot- org.
436 */
437#define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */
429 438
430/* Commands */ 439/* Commands */
431#define FTDI_SIO_RESET 0 /* Reset the port */ 440#define FTDI_SIO_RESET 0 /* Reset the port */
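The ftdi_sio.c and ftdi_sio.h hunks above add IDs for the ACT HomePro ZWave interface and the ACG HF Dual RFID reader, following the usual two-step pattern for supporting a new FTDI-based device: define the product ID in the header, then add a matching entry to id_table_combined ahead of the terminating entry. A minimal sketch of that pattern, with a made-up product ID purely for illustration:

/* ftdi_sio.h: give the device a named product ID (value here is hypothetical) */
#define FTDI_EXAMPLE_PID	0x1234

/* ftdi_sio.c: match it in the combined ID table so the driver binds to it */
static struct usb_device_id id_table_combined[] = {
	{ USB_DEVICE(FTDI_VID, FTDI_EXAMPLE_PID) },
	{ }			/* Terminating entry */
};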
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 476cda107f4f..c62cc2876519 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -138,6 +138,7 @@ int usb_serial_generic_open (struct usb_serial_port *port, struct file *filp)
138 138
139 return result; 139 return result;
140} 140}
141EXPORT_SYMBOL_GPL(usb_serial_generic_open);
141 142
142static void generic_cleanup (struct usb_serial_port *port) 143static void generic_cleanup (struct usb_serial_port *port)
143{ 144{
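Exporting usb_serial_generic_open() lets modular sub-drivers reuse the generic open path instead of duplicating it; the new ark3116 driver above does exactly that. A minimal sketch of how a sub-driver's open routine typically wraps it (the function name is illustrative):

static int example_open(struct usb_serial_port *port, struct file *filp)
{
	/* device-specific setup would go here, then hand off to the
	 * generic implementation */
	return usb_serial_generic_open(port, filp);
}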
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 4d40704dea2c..238033a87092 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -257,14 +257,14 @@ static int omninet_write (struct usb_serial_port *port, const unsigned char *buf
257 return (0); 257 return (0);
258 } 258 }
259 259
260 spin_lock(&port->lock); 260 spin_lock(&wport->lock);
261 if (port->write_urb_busy) { 261 if (wport->write_urb_busy) {
262 spin_unlock(&port->lock); 262 spin_unlock(&wport->lock);
263 dbg("%s - already writing", __FUNCTION__); 263 dbg("%s - already writing", __FUNCTION__);
264 return 0; 264 return 0;
265 } 265 }
266 port->write_urb_busy = 1; 266 wport->write_urb_busy = 1;
267 spin_unlock(&port->lock); 267 spin_unlock(&wport->lock);
268 268
269 count = (count > OMNINET_BULKOUTSIZE) ? OMNINET_BULKOUTSIZE : count; 269 count = (count > OMNINET_BULKOUTSIZE) ? OMNINET_BULKOUTSIZE : count;
270 270
@@ -283,7 +283,7 @@ static int omninet_write (struct usb_serial_port *port, const unsigned char *buf
283 wport->write_urb->dev = serial->dev; 283 wport->write_urb->dev = serial->dev;
284 result = usb_submit_urb(wport->write_urb, GFP_ATOMIC); 284 result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
285 if (result) { 285 if (result) {
286 port->write_urb_busy = 0; 286 wport->write_urb_busy = 0;
287 err("%s - failed submitting write urb, error %d", __FUNCTION__, result); 287 err("%s - failed submitting write urb, error %d", __FUNCTION__, result);
288 } else 288 } else
289 result = count; 289 result = count;
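The omninet hunk above fixes which port the write-busy bookkeeping is done on: omninet submits its write URB through wport, a port object distinct from the one passed into omninet_write(), so the busy flag and the lock protecting it must live on wport as well. The check-and-set idiom the driver uses, shown in isolation:

	spin_lock(&wport->lock);
	if (wport->write_urb_busy) {
		spin_unlock(&wport->lock);
		return 0;		/* a write is already in flight */
	}
	wport->write_urb_busy = 1;	/* claim the write URB */
	spin_unlock(&wport->lock);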
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 071f86a59c08..9c36f0ece20f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -189,11 +189,15 @@ static int serial_open (struct tty_struct *tty, struct file * filp)
189 189
190 portNumber = tty->index - serial->minor; 190 portNumber = tty->index - serial->minor;
191 port = serial->port[portNumber]; 191 port = serial->port[portNumber];
192 if (!port) 192 if (!port) {
193 return -ENODEV; 193 retval = -ENODEV;
194 goto bailout_kref_put;
195 }
194 196
195 if (mutex_lock_interruptible(&port->mutex)) 197 if (mutex_lock_interruptible(&port->mutex)) {
196 return -ERESTARTSYS; 198 retval = -ERESTARTSYS;
199 goto bailout_kref_put;
200 }
197 201
198 ++port->open_count; 202 ++port->open_count;
199 203
@@ -209,7 +213,7 @@ static int serial_open (struct tty_struct *tty, struct file * filp)
209 * safe because we are called with BKL held */ 213 * safe because we are called with BKL held */
210 if (!try_module_get(serial->type->driver.owner)) { 214 if (!try_module_get(serial->type->driver.owner)) {
211 retval = -ENODEV; 215 retval = -ENODEV;
212 goto bailout_kref_put; 216 goto bailout_mutex_unlock;
213 } 217 }
214 218
215 /* only call the device specific open if this 219 /* only call the device specific open if this
@@ -224,10 +228,11 @@ static int serial_open (struct tty_struct *tty, struct file * filp)
224 228
225bailout_module_put: 229bailout_module_put:
226 module_put(serial->type->driver.owner); 230 module_put(serial->type->driver.owner);
227bailout_kref_put: 231bailout_mutex_unlock:
228 kref_put(&serial->kref, destroy_serial);
229 port->open_count = 0; 232 port->open_count = 0;
230 mutex_unlock(&port->mutex); 233 mutex_unlock(&port->mutex);
234bailout_kref_put:
235 kref_put(&serial->kref, destroy_serial);
231 return retval; 236 return retval;
232} 237}
233 238
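The serial_open() hunks above tidy the error unwinding: the two early failures (missing port, interrupted mutex acquisition) now branch to bailout_kref_put instead of returning directly, so the reference presumably taken earlier in serial_open() (not visible in this hunk) is no longer leaked, and kref_put(), which may invoke destroy_serial(), is only done after the port mutex has been released and the open count reset. Condensed, the resulting unwind ladder releases resources in reverse order of acquisition:

bailout_module_put:
	module_put(serial->type->driver.owner);
bailout_mutex_unlock:
	port->open_count = 0;
	mutex_unlock(&port->mutex);
bailout_kref_put:
	kref_put(&serial->kref, destroy_serial);
	return retval;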
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 334b1db1bd7c..27597c576eff 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -29,12 +29,15 @@ static ssize_t backlight_show_power(struct class_device *cdev, char *buf)
29 29
30static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count) 30static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count)
31{ 31{
32 int rc = -ENXIO, power; 32 int rc = -ENXIO;
33 char *endp; 33 char *endp;
34 struct backlight_device *bd = to_backlight_device(cdev); 34 struct backlight_device *bd = to_backlight_device(cdev);
35 int power = simple_strtoul(buf, &endp, 0);
36 size_t size = endp - buf;
35 37
36 power = simple_strtoul(buf, &endp, 0); 38 if (*endp && isspace(*endp))
37 if (*endp && !isspace(*endp)) 39 size++;
40 if (size != count)
38 return -EINVAL; 41 return -EINVAL;
39 42
40 down(&bd->sem); 43 down(&bd->sem);
@@ -65,12 +68,15 @@ static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf)
65 68
66static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count) 69static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count)
67{ 70{
68 int rc = -ENXIO, brightness; 71 int rc = -ENXIO;
69 char *endp; 72 char *endp;
70 struct backlight_device *bd = to_backlight_device(cdev); 73 struct backlight_device *bd = to_backlight_device(cdev);
74 int brightness = simple_strtoul(buf, &endp, 0);
75 size_t size = endp - buf;
71 76
72 brightness = simple_strtoul(buf, &endp, 0); 77 if (*endp && isspace(*endp))
73 if (*endp && !isspace(*endp)) 78 size++;
79 if (size != count)
74 return -EINVAL; 80 return -EINVAL;
75 81
76 down(&bd->sem); 82 down(&bd->sem);
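Both backlight store handlers above (and the lcd.c handlers below) now accept the input only if simple_strtoul() consumed the whole buffer, with an allowance for the single trailing newline that "echo" appends; previously everything after the first whitespace character following the number was ignored, so input such as "5 garbage" was accepted. A self-contained userspace sketch of the same check (the function name is mine):

#include <ctype.h>
#include <stdlib.h>

/* Accept "buf" of length "count" only if it is a number optionally
 * followed by a single trailing whitespace character, mirroring the
 * check the hunks above add to the sysfs store handlers. */
int parses_whole_buffer(const char *buf, size_t count, unsigned long *val)
{
	char *endp;
	size_t size;

	*val = strtoul(buf, &endp, 0);
	size = endp - buf;
	if (*endp && isspace((unsigned char)*endp))
		size++;			/* allow the newline from "echo" */
	return size == count;
}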
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 86908a60c630..bc8ab005a3fb 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -31,12 +31,15 @@ static ssize_t lcd_show_power(struct class_device *cdev, char *buf)
31 31
32static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count) 32static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count)
33{ 33{
34 int rc, power; 34 int rc = -ENXIO;
35 char *endp; 35 char *endp;
36 struct lcd_device *ld = to_lcd_device(cdev); 36 struct lcd_device *ld = to_lcd_device(cdev);
37 int power = simple_strtoul(buf, &endp, 0);
38 size_t size = endp - buf;
37 39
38 power = simple_strtoul(buf, &endp, 0); 40 if (*endp && isspace(*endp))
39 if (*endp && !isspace(*endp)) 41 size++;
42 if (size != count)
40 return -EINVAL; 43 return -EINVAL;
41 44
42 down(&ld->sem); 45 down(&ld->sem);
@@ -44,8 +47,7 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_
44 pr_debug("lcd: set power to %d\n", power); 47 pr_debug("lcd: set power to %d\n", power);
45 ld->props->set_power(ld, power); 48 ld->props->set_power(ld, power);
46 rc = count; 49 rc = count;
47 } else 50 }
48 rc = -ENXIO;
49 up(&ld->sem); 51 up(&ld->sem);
50 52
51 return rc; 53 return rc;
@@ -53,14 +55,12 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_
53 55
54static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf) 56static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
55{ 57{
56 int rc; 58 int rc = -ENXIO;
57 struct lcd_device *ld = to_lcd_device(cdev); 59 struct lcd_device *ld = to_lcd_device(cdev);
58 60
59 down(&ld->sem); 61 down(&ld->sem);
60 if (likely(ld->props && ld->props->get_contrast)) 62 if (likely(ld->props && ld->props->get_contrast))
61 rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld)); 63 rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld));
62 else
63 rc = -ENXIO;
64 up(&ld->sem); 64 up(&ld->sem);
65 65
66 return rc; 66 return rc;
@@ -68,12 +68,15 @@ static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
68 68
69static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count) 69static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count)
70{ 70{
71 int rc, contrast; 71 int rc = -ENXIO;
72 char *endp; 72 char *endp;
73 struct lcd_device *ld = to_lcd_device(cdev); 73 struct lcd_device *ld = to_lcd_device(cdev);
74 int contrast = simple_strtoul(buf, &endp, 0);
75 size_t size = endp - buf;
74 76
75 contrast = simple_strtoul(buf, &endp, 0); 77 if (*endp && isspace(*endp))
76 if (*endp && !isspace(*endp)) 78 size++;
79 if (size != count)
77 return -EINVAL; 80 return -EINVAL;
78 81
79 down(&ld->sem); 82 down(&ld->sem);
@@ -81,8 +84,7 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si
81 pr_debug("lcd: set contrast to %d\n", contrast); 84 pr_debug("lcd: set contrast to %d\n", contrast);
82 ld->props->set_contrast(ld, contrast); 85 ld->props->set_contrast(ld, contrast);
83 rc = count; 86 rc = count;
84 } else 87 }
85 rc = -ENXIO;
86 up(&ld->sem); 88 up(&ld->sem);
87 89
88 return rc; 90 return rc;
@@ -90,14 +92,12 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si
90 92
91static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf) 93static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf)
92{ 94{
93 int rc; 95 int rc = -ENXIO;
94 struct lcd_device *ld = to_lcd_device(cdev); 96 struct lcd_device *ld = to_lcd_device(cdev);
95 97
96 down(&ld->sem); 98 down(&ld->sem);
97 if (likely(ld->props)) 99 if (likely(ld->props))
98 rc = sprintf(buf, "%d\n", ld->props->max_contrast); 100 rc = sprintf(buf, "%d\n", ld->props->max_contrast);
99 else
100 rc = -ENXIO;
101 up(&ld->sem); 101 up(&ld->sem);
102 102
103 return rc; 103 return rc;
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 4ef5cd19609d..b985dfad6c63 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -34,7 +34,7 @@ extra-y += $(call logo-cfiles,_clut224,ppm)
34extra-y += $(call logo-cfiles,_gray256,pgm) 34extra-y += $(call logo-cfiles,_gray256,pgm)
35 35
36# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..." 36# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
37quiet_cmd_logo = LOGO $@ 37quiet_cmd_logo = LOGO $@
38 cmd_logo = scripts/pnmtologo \ 38 cmd_logo = scripts/pnmtologo \
39 -t $(patsubst $*_%,%,$(notdir $(basename $<))) \ 39 -t $(patsubst $*_%,%,$(notdir $(basename $<))) \
40 -n $(notdir $(basename $<)) -o $@ $< 40 -n $(notdir $(basename $<)) -o $@ $<