author    Steven Whitehouse <swhiteho@redhat.com>  2006-05-12 10:48:52 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-05-12 10:48:52 -0400
commit    7d63b54a65ce902f9aaa8efe8192aa3b983264d4 (patch)
tree      250a77bebe92cbd6edac70a649866044295876db /drivers
parent    fd88de569b802c4a04aaa6ee74667775f4aed8c6 (diff)
parent    d8c3291c73b958243b33f8509d4507e76dafd055 (diff)
Merge branch 'master'
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/base/class.c | 32
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/char/genrtc.c | 8
-rw-r--r--  drivers/char/keyboard.c | 38
-rw-r--r--  drivers/char/mwave/mwavedd.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 10
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 11
-rw-r--r--  drivers/char/tipar.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 28
-rw-r--r--  drivers/edac/e752x_edac.c | 17
-rw-r--r--  drivers/infiniband/core/sysfs.c | 2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_debug.h | 15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_diag.c | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 18
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c | 36
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 21
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h | 10
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_layer.c | 6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_pe800.c | 4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_registers.h | 31
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c | 15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c | 14
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c | 6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 39
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/ipath/ips_common.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 41
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 31
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 4
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 195
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 4
-rw-r--r--  drivers/input/evdev.c | 21
-rw-r--r--  drivers/input/input.c | 11
-rw-r--r--  drivers/input/keyboard/spitzkbd.c | 4
-rw-r--r--  drivers/input/misc/wistron_btns.c | 30
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 4
-rw-r--r--  drivers/input/serio/i8042-io.h | 4
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 414
-rw-r--r--  drivers/input/touchscreen/corgi_ts.c | 2
-rw-r--r--  drivers/md/raid1.c | 29
-rw-r--r--  drivers/md/raid10.c | 46
-rw-r--r--  drivers/media/video/cx88/cx88-alsa.c | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 63
-rw-r--r--  drivers/message/fusion/mptbase.h | 10
-rw-r--r--  drivers/message/fusion/mptfc.c | 134
-rw-r--r--  drivers/message/fusion/mptsas.c | 99
-rw-r--r--  drivers/message/fusion/mptscsih.c | 50
-rw-r--r--  drivers/message/fusion/mptspi.c | 68
-rw-r--r--  drivers/mmc/at91_mci.c | 3
-rw-r--r--  drivers/mmc/au1xmmc.c | 4
-rw-r--r--  drivers/mmc/imxmmc.c | 60
-rw-r--r--  drivers/mmc/mmc.c | 62
-rw-r--r--  drivers/mmc/mmc_block.c | 6
-rw-r--r--  drivers/mmc/mmci.c | 3
-rw-r--r--  drivers/mmc/pxamci.c | 13
-rw-r--r--  drivers/mmc/sdhci.c | 4
-rw-r--r--  drivers/mmc/wbsd.c | 4
-rw-r--r--  drivers/net/au1000_eth.c | 18
-rw-r--r--  drivers/net/dl2k.c | 12
-rw-r--r--  drivers/net/forcedeth.c | 312
-rw-r--r--  drivers/net/hamradio/dmascc.c | 1
-rw-r--r--  drivers/net/hamradio/scc.c | 1
-rw-r--r--  drivers/net/hamradio/yam.c | 1
-rw-r--r--  drivers/net/irda/Makefile | 2
-rw-r--r--  drivers/net/irda/irda-usb.c | 2
-rw-r--r--  drivers/net/irda/sir-dev.h | 13
-rw-r--r--  drivers/net/irda/sir_dev.c | 315
-rw-r--r--  drivers/net/irda/sir_kthread.c | 508
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 14
-rw-r--r--  drivers/net/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/ne.c | 31
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/sky2.c | 222
-rw-r--r--  drivers/net/sky2.h | 3
-rw-r--r--  drivers/net/spider_net.c | 12
-rw-r--r--  drivers/net/spider_net.h | 2
-rw-r--r--  drivers/net/sungem_phy.c | 45
-rw-r--r--  drivers/net/sungem_phy.h | 1
-rw-r--r--  drivers/net/tg3.c | 85
-rw-r--r--  drivers/net/tg3.h | 1
-rw-r--r--  drivers/net/via-rhine.c | 6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 45
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 7
-rw-r--r--  drivers/pci/msi.c | 4
-rw-r--r--  drivers/pci/quirks.c | 6
-rw-r--r--  drivers/pcmcia/i82365.c | 7
-rw-r--r--  drivers/rtc/rtc-dev.c | 17
-rw-r--r--  drivers/rtc/rtc-sa1100.c | 6
-rw-r--r--  drivers/s390/block/dasd.c | 5
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 102
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 51
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 46
-rw-r--r--  drivers/s390/block/dasd_int.h | 12
-rw-r--r--  drivers/s390/char/tape_3590.c | 22
-rw-r--r--  drivers/s390/char/tape_std.h | 1
-rw-r--r--  drivers/s390/cio/chsc.c | 30
-rw-r--r--  drivers/s390/cio/qdio.c | 36
-rw-r--r--  drivers/s390/net/qeth_main.c | 1
-rw-r--r--  drivers/s390/s390mach.c | 34
-rw-r--r--  drivers/scsi/Kconfig | 4
-rw-r--r--  drivers/scsi/advansys.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 12
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 95
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 134
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 68
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 59
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 7
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 19
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 27
-rw-r--r--  drivers/scsi/sim710.c | 2
-rw-r--r--  drivers/serial/8250.c | 74
-rw-r--r--  drivers/serial/8250_au1x00.c | 5
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart.h | 58
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_core.c | 307
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_cpm1.c | 56
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_cpm2.c | 16
-rw-r--r--  drivers/serial/imx.c | 40
-rw-r--r--  drivers/serial/serial_core.c | 114
-rw-r--r--  drivers/sn/ioc3.c | 2
-rw-r--r--  drivers/sn/ioc4.c | 2
-rw-r--r--  drivers/usb/gadget/net2280.c | 15
-rw-r--r--  drivers/usb/host/ehci-pci.c | 2
-rw-r--r--  drivers/usb/host/ohci-pci.c | 2
-rw-r--r--  drivers/usb/host/uhci-hcd.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 13
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 1
-rw-r--r--  drivers/usb/serial/whiteheat.c | 1
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 9
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/video/au1200fb.c | 1922
-rw-r--r--  drivers/video/fbsysfs.c | 92
-rw-r--r--  drivers/video/logo/Makefile | 2
155 files changed, 3232 insertions, 3978 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 5c91d6afb117..aeb5ab2391e4 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -68,8 +68,6 @@ source "drivers/leds/Kconfig"
 
 source "drivers/infiniband/Kconfig"
 
-source "drivers/sn/Kconfig"
-
 source "drivers/edac/Kconfig"
 
 source "drivers/rtc/Kconfig"
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0e71dff327cd..b1ea4df85c7d 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -456,6 +456,35 @@ static void class_device_remove_attrs(struct class_device * cd)
 	}
 }
 
+static int class_device_add_groups(struct class_device * cd)
+{
+	int i;
+	int error = 0;
+
+	if (cd->groups) {
+		for (i = 0; cd->groups[i]; i++) {
+			error = sysfs_create_group(&cd->kobj, cd->groups[i]);
+			if (error) {
+				while (--i >= 0)
+					sysfs_remove_group(&cd->kobj, cd->groups[i]);
+				goto out;
+			}
+		}
+	}
+out:
+	return error;
+}
+
+static void class_device_remove_groups(struct class_device * cd)
+{
+	int i;
+	if (cd->groups) {
+		for (i = 0; cd->groups[i]; i++) {
+			sysfs_remove_group(&cd->kobj, cd->groups[i]);
+		}
+	}
+}
+
 static ssize_t show_dev(struct class_device *class_dev, char *buf)
 {
 	return print_dev_t(buf, class_dev->devt);
@@ -559,6 +588,8 @@ int class_device_add(struct class_device *class_dev)
 					class_name);
 	}
 
+	class_device_add_groups(class_dev);
+
 	kobject_uevent(&class_dev->kobj, KOBJ_ADD);
 
 	/* notify any interfaces this device is now here */
@@ -672,6 +703,7 @@ void class_device_del(struct class_device *class_dev)
 	if (class_dev->devt_attr)
 		class_device_remove_file(class_dev, class_dev->devt_attr);
 	class_device_remove_attrs(class_dev);
+	class_device_remove_groups(class_dev);
 
 	kobject_uevent(&class_dev->kobj, KOBJ_REMOVE);
 	kobject_del(&class_dev->kobj);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index bedb689b051f..dff1e67b1dd4 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4301,7 +4301,7 @@ static int __init floppy_init(void)
 	}
 
 	use_virtual_dma = can_use_virtual_dma & 1;
-#if defined(CONFIG_PPC64)
+#if defined(CONFIG_PPC_MERGE)
 	if (check_legacy_ioport(FDC1)) {
 		del_timer(&fd_timeout);
 		err = -ENODEV;
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index d3a2bc36129b..588fca542a98 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -200,13 +200,13 @@ static ssize_t gen_rtc_read(struct file *file, char __user *buf,
 	/* first test allows optimizer to nuke this case for 32-bit machines */
 	if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) {
 		unsigned int uidata = data;
-		retval = put_user(uidata, (unsigned long __user *)buf);
+		retval = put_user(uidata, (unsigned int __user *)buf) ?:
+			sizeof(unsigned int);
 	}
 	else {
-		retval = put_user(data, (unsigned long __user *)buf);
+		retval = put_user(data, (unsigned long __user *)buf) ?:
+			sizeof(unsigned long);
 	}
-	if (!retval)
-		retval = sizeof(unsigned long);
 out:
 	current->state = TASK_RUNNING;
 	remove_wait_queue(&gen_rtc_wait, &wait);
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 935670a3cd98..5755b7e5f187 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -860,9 +860,32 @@ static void k_slock(struct vc_data *vc, unsigned char value, char up_flag, struc
 }
 
 /* by default, 300ms interval for combination release */
-static long brl_timeout = 300;
-MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for combination on first release, < 0 for dead characters)");
-module_param(brl_timeout, long, 0644);
+static unsigned brl_timeout = 300;
+MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for commit on first key release)");
+module_param(brl_timeout, uint, 0644);
+
+static unsigned brl_nbchords = 1;
+MODULE_PARM_DESC(brl_nbchords, "Number of chords that produce a braille pattern (0 for dead chords)");
+module_param(brl_nbchords, uint, 0644);
+
+static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag, struct pt_regs *regs)
+{
+	static unsigned long chords;
+	static unsigned committed;
+
+	if (!brl_nbchords)
+		k_deadunicode(vc, BRL_UC_ROW | pattern, up_flag, regs);
+	else {
+		committed |= pattern;
+		chords++;
+		if (chords == brl_nbchords) {
+			k_unicode(vc, BRL_UC_ROW | committed, up_flag, regs);
+			chords = 0;
+			committed = 0;
+		}
+	}
+}
+
 static void k_brl(struct vc_data *vc, unsigned char value, char up_flag, struct pt_regs *regs)
 {
 	static unsigned pressed,committing;
@@ -882,11 +905,6 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag, struct
 	if (value > 8)
 		return;
 
-	if (brl_timeout < 0) {
-		k_deadunicode(vc, BRL_UC_ROW | (1 << (value - 1)), up_flag, regs);
-		return;
-	}
-
 	if (up_flag) {
 		if (brl_timeout) {
 			if (!committing ||
@@ -897,13 +915,13 @@ static void k_brl(struct vc_data *vc, unsigned char value, char up_flag, struct
 			pressed &= ~(1 << (value - 1));
 			if (!pressed) {
 				if (committing) {
-					k_unicode(vc, BRL_UC_ROW | committing, 0, regs);
+					k_brlcommit(vc, committing, 0, regs);
 					committing = 0;
 				}
 			}
 		} else {
 			if (committing) {
-				k_unicode(vc, BRL_UC_ROW | committing, 0, regs);
+				k_brlcommit(vc, committing, 0, regs);
 				committing = 0;
 			}
 			pressed &= ~(1 << (value - 1));
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 8666171e187b..d3ba2f860ef0 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -271,7 +271,7 @@ static int mwave_ioctl(struct inode *inode, struct file *file,
 				ipcnum,
 				pDrvData->IPCs[ipcnum].usIntCount);
 
-		if (ipcnum > ARRAY_SIZE(pDrvData->IPCs)) {
+		if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
 			PRINTK_ERROR(KERN_ERR_MWAVE
 					"mwavedd::mwave_ioctl:"
 					" IOCTL_MW_REGISTER_IPC:"
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 02114a0bd0d9..128b2632512d 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1981,10 +1981,6 @@ static int __init cmm_init(void)
 	if (!cmm_class)
 		return -1;
 
-	rc = pcmcia_register_driver(&cm4000_driver);
-	if (rc < 0)
-		return rc;
-
 	major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
 	if (major < 0) {
 		printk(KERN_WARNING MODULE_NAME
@@ -1992,6 +1988,12 @@ static int __init cmm_init(void)
 		return -1;
 	}
 
+	rc = pcmcia_register_driver(&cm4000_driver);
+	if (rc < 0) {
+		unregister_chrdev(major, DEVICE_NAME);
+		return rc;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 29efa64580a8..47a8465bf95b 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -724,16 +724,19 @@ static int __init cm4040_init(void)
 	if (!cmx_class)
 		return -1;
 
-	rc = pcmcia_register_driver(&reader_driver);
-	if (rc < 0)
-		return rc;
-
 	major = register_chrdev(0, DEVICE_NAME, &reader_fops);
 	if (major < 0) {
 		printk(KERN_WARNING MODULE_NAME
 		       ": could not get major number\n");
 		return -1;
 	}
+
+	rc = pcmcia_register_driver(&reader_driver);
+	if (rc < 0) {
+		unregister_chrdev(major, DEVICE_NAME);
+		return rc;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/char/tipar.c b/drivers/char/tipar.c
index eb2eb3e12d6a..079db5a935a1 100644
--- a/drivers/char/tipar.c
+++ b/drivers/char/tipar.c
@@ -515,7 +515,7 @@ tipar_init_module(void)
 		err = PTR_ERR(tipar_class);
 		goto out_chrdev;
 	}
-	if (parport_register_driver(&tipar_driver) || tp_count == 0) {
+	if (parport_register_driver(&tipar_driver)) {
 		printk(KERN_ERR "tipar: unable to register with parport\n");
 		err = -EIO;
 		goto out_class;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 956d121cb161..3e6ffcaa5af4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -74,6 +74,8 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
+static struct workqueue_struct *dbs_workq;
+
 struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
@@ -364,23 +366,29 @@ static void do_dbs_timer(void *data)
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
-	schedule_delayed_work(&dbs_work,
+	queue_delayed_work(dbs_workq, &dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
 {
 	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-	schedule_delayed_work(&dbs_work,
-		usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+	if (!dbs_workq)
+		dbs_workq = create_singlethread_workqueue("ondemand");
+	if (!dbs_workq) {
+		printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
+		return;
+	}
+	queue_delayed_work(dbs_workq, &dbs_work,
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
 }
 
 static inline void dbs_timer_exit(void)
 {
-	cancel_delayed_work(&dbs_work);
-	return;
+	if (dbs_workq)
+		cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -489,8 +497,12 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running */
-	flush_scheduled_work();
+	/* Make sure that the scheduled work is indeed not running.
+	   Assumes the timer has been cancelled first. */
+	if (dbs_workq) {
+		flush_workqueue(dbs_workq);
+		destroy_workqueue(dbs_workq);
+	}
 
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 66572c5323ad..fce31936e6d7 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -25,6 +25,8 @@
 #include <linux/slab.h>
 #include "edac_mc.h"
 
+static int force_function_unhide;
+
 #define e752x_printk(level, fmt, arg...) \
 	edac_printk(level, "e752x", fmt, ##arg)
 
@@ -782,8 +784,16 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
 	debugf0("%s(): mci\n", __func__);
 	debugf0("Starting Probe1\n");
 
-	/* enable device 0 function 1 */
+	/* check to see if device 0 function 1 is enabled; if it isn't, we
+	 * assume the BIOS has reserved it for a reason and is expecting
+	 * exclusive access, we take care not to violate that assumption and
+	 * fail the probe. */
 	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
+	if (!force_function_unhide && !(stat8 & (1 << 5))) {
+		printk(KERN_INFO "Contact your BIOS vendor to see if the "
+			"E752x error registers can be safely un-hidden\n");
+		goto fail;
+	}
 	stat8 |= (1 << 5);
 	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
 
@@ -1063,3 +1073,8 @@ module_exit(e752x_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
 MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
+
+module_param(force_function_unhide, int, 0444);
+MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
+" 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
+
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 15121cb5a1f6..21f9282c1b25 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
 	switch (width) {
 	case 4:
 		ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
-					    (offset % 4)) & 0xf);
+					    (4 - (offset % 8))) & 0xf);
 		break;
 	case 8:
 		ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 593e28969c69..46762387f5f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -60,11 +60,11 @@
 #define __IPATH_KERNEL_SEND 0x2000	/* use kernel mode send */
 #define __IPATH_EPKTDBG 0x4000	/* print ethernet packet data */
 #define __IPATH_SMADBG 0x8000	/* sma packet debug */
-#define __IPATH_IPATHDBG 0x10000	/* Ethernet (IPATH) general debug on */
-#define __IPATH_IPATHWARN 0x20000	/* Ethernet (IPATH) warnings on */
-#define __IPATH_IPATHERR 0x40000	/* Ethernet (IPATH) errors on */
-#define __IPATH_IPATHPD 0x80000	/* Ethernet (IPATH) packet dump on */
-#define __IPATH_IPATHTABLE 0x100000	/* Ethernet (IPATH) table dump on */
+#define __IPATH_IPATHDBG 0x10000	/* Ethernet (IPATH) gen debug */
+#define __IPATH_IPATHWARN 0x20000	/* Ethernet (IPATH) warnings */
+#define __IPATH_IPATHERR 0x40000	/* Ethernet (IPATH) errors */
+#define __IPATH_IPATHPD 0x80000	/* Ethernet (IPATH) packet dump */
+#define __IPATH_IPATHTABLE 0x100000	/* Ethernet (IPATH) table dump */
 
 #else				/* _IPATH_DEBUGGING */
 
@@ -79,11 +79,12 @@
 #define __IPATH_TRSAMPLE 0x0	/* generate trace buffer sample entries */
 #define __IPATH_VERBDBG 0x0	/* very verbose debug */
 #define __IPATH_PKTDBG 0x0	/* print packet data */
-#define __IPATH_PROCDBG 0x0	/* print process startup (init)/exit messages */
+#define __IPATH_PROCDBG 0x0	/* process startup (init)/exit messages */
 /* print mmap/nopage stuff, not using VDBG any more */
 #define __IPATH_MMDBG 0x0
 #define __IPATH_EPKTDBG 0x0	/* print ethernet packet data */
-#define __IPATH_SMADBG 0x0	/* print process startup (init)/exit messages */#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
+#define __IPATH_SMADBG 0x0	/* process startup (init)/exit messages */
+#define __IPATH_IPATHDBG 0x0	/* Ethernet (IPATH) table dump on */
 #define __IPATH_IPATHWARN 0x0	/* Ethernet (IPATH) warnings on */
 #define __IPATH_IPATHERR 0x0	/* Ethernet (IPATH) errors on */
 #define __IPATH_IPATHPD 0x0	/* Ethernet (IPATH) packet dump on */
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 7d3fb6996b41..28ddceb260e8 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -277,13 +277,14 @@ static int ipath_diag_open(struct inode *in, struct file *fp)
 
 bail:
 	spin_unlock_irqrestore(&ipath_devs_lock, flags);
-	mutex_unlock(&ipath_mutex);
 
 	/* Only expose a way to reset the device if we
 	   make it into diag mode. */
 	if (ret == 0)
 		ipath_expose_reset(&dd->pcidev->dev);
 
+	mutex_unlock(&ipath_mutex);
+
 	return ret;
 }
 
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index e7617c3982ea..398add4d4cb1 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -418,9 +418,19 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 
 	ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
 	if (ret) {
-		dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
-			 "fails: %d\n", dd->ipath_unit, ret);
-		goto bail_regions;
+		/*
+		 * if the 64 bit setup fails, try 32 bit.  Some systems
+		 * do not setup 64 bit maps on systems with 2GB or less
+		 * memory installed.
+		 */
+		ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (ret) {
+			dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
+				 "fails: %d\n", dd->ipath_unit, ret);
+			goto bail_regions;
+		}
+		else
+			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
 	}
 
 	pci_set_master(pdev);
@@ -1949,7 +1959,7 @@ int ipath_reset_device(int unit)
 	}
 
 	if (dd->ipath_pd)
-		for (i = 1; i < dd->ipath_portcnt; i++) {
+		for (i = 1; i < dd->ipath_cfgports; i++) {
 			if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
 				ipath_dbg("unit %u port %d is in use "
 					  "(PID %u cmd %s), can't reset\n",
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 2823ff9c0c62..16f640e1c16e 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -53,13 +53,19 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
 
 /*
  * Number of buffers reserved for driver (layered drivers and SMA
- * send).  Reserved at end of buffer list.
+ * send).  Reserved at end of buffer list.  Initialized based on
+ * number of PIO buffers if not set via module interface.
+ * The problem with this is that it's global, but we'll use different
+ * numbers for different chip types.  So the default value is not
+ * very useful.  I've redefined it for the 1.3 release so that it's
+ * zero unless set by the user to something else, in which case we
+ * try to respect it.
  */
-static ushort ipath_kpiobufs = 32;
+static ushort ipath_kpiobufs;
 
 static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
 
-module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint,
+module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
 		  &ipath_kpiobufs, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
 
@@ -531,8 +537,11 @@ static int init_housekeeping(struct ipath_devdata *dd,
 	 * Don't clear ipath_flags as 8bit mode was set before
 	 * entering this func. However, we do set the linkstate to
 	 * unknown, so we can watch for a transition.
+	 * PRESENT is set because we want register reads to work,
+	 * and the kernel infrastructure saw it in config space;
+	 * We clear it if we have failures.
 	 */
-	dd->ipath_flags |= IPATH_LINKUNK;
+	dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
 	dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
 			     IPATH_LINKDOWN | IPATH_LINKINIT);
 
@@ -560,6 +569,7 @@ static int init_housekeeping(struct ipath_devdata *dd,
 	    || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
 		ipath_dev_err(dd, "Register read failures from chip, "
 			      "giving up initialization\n");
+		dd->ipath_flags &= ~IPATH_PRESENT;
 		ret = -ENODEV;
 		goto done;
 	}
@@ -682,16 +692,14 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	 */
 	dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
 		/ (sizeof(u64) * BITS_PER_BYTE / 2);
-	if (!ipath_kpiobufs)	/* have to have at least 1, for SMA */
-		kpiobufs = ipath_kpiobufs = 1;
-	else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) <
-		 (dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) {
-		dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) "
-			 "for %u ports to have %u each!\n",
-			 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
-			 dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT);
-		kpiobufs = 1; /* reserve just the minimum for SMA/ether */
-	} else
+	if (ipath_kpiobufs == 0) {
+		/* not set by user, or set explictly to default */
+		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
+			kpiobufs = 32;
+		else
+			kpiobufs = 16;
+	}
+	else
 		kpiobufs = ipath_kpiobufs;
 
 	if (kpiobufs >
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 0bcb428041f3..3e72a1fe3d73 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -665,14 +665,14 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
 
 	ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
 	if (ret > 0)
-		goto clear;
+		goto set;
 
 	ret = __ipath_verbs_piobufavail(dd);
 	if (ret > 0)
-		goto clear;
+		goto set;
 
 	return;
-clear:
+set:
 	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 			 dd->ipath_sendctrl);
@@ -719,11 +719,24 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat)
 irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 {
 	struct ipath_devdata *dd = data;
-	u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+	u32 istat;
 	ipath_err_t estat = 0;
 	static unsigned unexpected = 0;
 	irqreturn_t ret;
 
+	if(!(dd->ipath_flags & IPATH_PRESENT)) {
+		/* this is mostly so we don't try to touch the chip while
+		 * it is being reset */
+		/*
+		 * This return value is perhaps odd, but we do not want the
+		 * interrupt core code to remove our interrupt handler
+		 * because we don't appear to be handling an interrupt
+		 * during a chip reset.
+		 */
+		return IRQ_HANDLED;
+	}
+
+	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
 	if (unlikely(!istat)) {
 		ipath_stats.sps_nullintr++;
 		ret = IRQ_NONE; /* not our interrupt, or already handled */
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 0ce5f19c9d62..e6507f8115bc 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -731,7 +731,7 @@ u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
 static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
 				    ipath_ureg regno, int port)
 {
-	if (!dd->ipath_kregbase)
+	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
 		return 0;
 
 	return readl(regno + (u64 __iomem *)
@@ -762,7 +762,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd,
 static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
 				    ipath_kreg regno)
 {
-	if (!dd->ipath_kregbase)
+	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
 		return -1;
 	return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
 }
@@ -770,7 +770,7 @@ static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
 static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
 				    ipath_kreg regno)
 {
-	if (!dd->ipath_kregbase)
+	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
 		return -1;
 
 	return readq(&dd->ipath_kregbase[regno]);
@@ -786,7 +786,7 @@ static inline void ipath_write_kreg(const struct ipath_devdata *dd,
 static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
 				  ipath_sreg regno)
 {
-	if (!dd->ipath_kregbase)
+	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
 		return 0;
 
 	return readq(regno + (u64 __iomem *)
@@ -797,7 +797,7 @@ static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
 static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
 				    ipath_sreg regno)
 {
-	if (!dd->ipath_kregbase)
+	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
 		return 0;
 	return readl(regno + (u64 __iomem *)
 		     (dd->ipath_cregbase +
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 69ed1100701a..9cb5258ffed9 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -46,13 +46,15 @@
 /* Acquire before ipath_devs_lock. */
 static DEFINE_MUTEX(ipath_layer_mutex);
 
+static int ipath_verbs_registered;
+
 u16 ipath_layer_rcv_opcode;
+
 static int (*layer_intr)(void *, u32);
 static int (*layer_rcv)(void *, void *, struct sk_buff *);
 static int (*layer_rcv_lid)(void *, void *);
 static int (*verbs_piobufavail)(void *);
 static void (*verbs_rcv)(void *, void *, void *, u32);
-static int ipath_verbs_registered;
 
 static void *(*layer_add_one)(int, struct ipath_devdata *);
 static void (*layer_remove_one)(void *);
@@ -586,6 +588,8 @@ void ipath_verbs_unregister(void)
 	verbs_rcv = NULL;
 	verbs_timer_cb = NULL;
 
+	ipath_verbs_registered = 0;
+
 	mutex_unlock(&ipath_layer_mutex);
 }
 
591 595
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_pe800.c
index e1dc4f757062..6318067ab5ec 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_pe800.c
@@ -972,6 +972,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
 	/* Use ERROR so it shows up in logs, etc. */
 	ipath_dev_err(dd, "Resetting PE-800 unit %u\n",
 		      dd->ipath_unit);
+	/* keep chip from being accessed in a few places */
+	dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
 	val = dd->ipath_control | INFINIPATH_C_RESET;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
 	mb();
@@ -997,6 +999,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
 	if ((r = pci_enable_device(dd->pcidev)))
 		ipath_dev_err(dd, "pci_enable_device failed after "
 			      "reset: %d\n", r);
+	/* whether it worked or not, mark as present, again */
+	dd->ipath_flags |= IPATH_PRESENT;
 	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
 	if (val == dd->ipath_revision) {
 		ipath_cdbg(VERBOSE, "Got matching revision "
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 1e59750c5f63..402126eb79c9 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -34,8 +34,9 @@
 #define _IPATH_REGISTERS_H
 
 /*
- * This file should only be included by kernel source, and by the diags.
- * It defines the registers, and their contents, for the InfiniPath HT-400 chip
+ * This file should only be included by kernel source, and by the diags.  It
+ * defines the registers, and their contents, for the InfiniPath HT-400
+ * chip.
  */
 
 /*
@@ -156,8 +157,10 @@
 #define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
 #define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
 #define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
-#define INFINIPATH_IBCC_LINKINITCMD_POLL 2	/* cycle through TS1/TS2 till OK */
-#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3	/* wait for TS1, then go on */
+/* cycle through TS1/TS2 till OK */
+#define INFINIPATH_IBCC_LINKINITCMD_POLL 2
+/* wait for TS1, then go on */
+#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
 #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
 #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
 #define INFINIPATH_IBCC_LINKCMD_INIT 1	/* move to 0x11 */
@@ -182,7 +185,8 @@
 #define INFINIPATH_IBCS_LINKSTATE_SHIFT 4
 #define INFINIPATH_IBCS_TXREADY 0x40000000
 #define INFINIPATH_IBCS_TXCREDITOK 0x80000000
-/* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
+/* link training states (shift by
+   INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
 #define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00
 #define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01
 #define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02
@@ -267,10 +271,12 @@
 /* kr_serdesconfig0 bits */
 #define INFINIPATH_SERDC0_RESET_MASK 0xfULL	/* overal reset bits */
 #define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL	/* pll reset */
-#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL	/* tx idle enables (per lane) */
-#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL	/* rx detect enables (per lane) */
-#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL	/* L1 Power down; use with RXDETECT,
-						   Otherwise not used on IB side */
+/* tx idle enables (per lane) */
+#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL
+/* rx detect enables (per lane) */
+#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
+/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
+#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
 
 /* kr_xgxsconfig bits */
 #define INFINIPATH_XGXS_RESET 0x7ULL
@@ -390,12 +396,13 @@ struct ipath_kregs {
 	ipath_kreg kr_txintmemsize;
 	ipath_kreg kr_xgxsconfig;
 	ipath_kreg kr_ibpllcfg;
-	/* use these two (and the following N ports) only with ipath_k*_kreg64_port();
-	 * not *kreg64() */
+	/* use these two (and the following N ports) only with
+	 * ipath_k*_kreg64_port(); not *kreg64() */
 	ipath_kreg kr_rcvhdraddr;
 	ipath_kreg kr_rcvhdrtailaddr;
 
-	/* remaining registers are not present on all types of infinipath chips */
+	/* remaining registers are not present on all types of infinipath
+	   chips */
 	ipath_kreg kr_rcvpktledcnt;
 	ipath_kreg kr_pcierbuftestreg0;
 	ipath_kreg kr_pcierbuftestreg1;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f232e77b78ee..eb81424b3c5b 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -531,19 +531,12 @@ int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	}
 	wqe->wr.num_sge = j;
 	qp->s_head = next;
-	/*
-	 * Wake up the send tasklet if the QP is not waiting
-	 * for an RNR timeout.
-	 */
-	next = qp->s_rnr_timeout;
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 
-	if (next == 0) {
-		if (qp->ibqp.qp_type == IB_QPT_UC)
-			ipath_do_uc_send((unsigned long) qp);
-		else
-			ipath_do_rc_send((unsigned long) qp);
-	}
+	if (qp->ibqp.qp_type == IB_QPT_UC)
+		ipath_do_uc_send((unsigned long) qp);
+	else
+		ipath_do_rc_send((unsigned long) qp);
 
 	ret = 0;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 32acd8048b49..f323791cc495 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -711,10 +711,22 @@ static struct attribute_group dev_attr_group = {
  * enters diag mode.  A device reset is quite likely to crash the
  * machine entirely, so we don't want to normally make it
  * available.
+ *
+ * Called with ipath_mutex held.
  */
 int ipath_expose_reset(struct device *dev)
 {
-	return device_create_file(dev, &dev_attr_reset);
+	static int exposed;
+	int ret;
+
+	if (!exposed) {
+		ret = device_create_file(dev, &dev_attr_reset);
+		exposed = 1;
+	}
+	else
+		ret = 0;
+
+	return ret;
 }
 
 int ipath_driver_create_group(struct device_driver *drv)
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 01cfb30ee160..e606daf83210 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -46,8 +46,10 @@
  * This is called from ipath_post_ud_send() to forward a WQE addressed
  * to the same HCA.
  */
-static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
-			      u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
+static void ipath_ud_loopback(struct ipath_qp *sqp,
+			      struct ipath_sge_state *ss,
+			      u32 length, struct ib_send_wr *wr,
+			      struct ib_wc *wc)
 {
 	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
 	struct ipath_qp *qp;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 8d2558a01f35..cb9e387c301f 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -449,7 +449,6 @@ static void ipath_ib_timer(void *arg)
 {
 	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ipath_qp *resend = NULL;
-	struct ipath_qp *rnr = NULL;
 	struct list_head *last;
 	struct ipath_qp *qp;
 	unsigned long flags;
@@ -465,32 +464,18 @@ static void ipath_ib_timer(void *arg)
 	last = &dev->pending[dev->pending_index];
 	while (!list_empty(last)) {
 		qp = list_entry(last->next, struct ipath_qp, timerwait);
-		if (last->next == LIST_POISON1 ||
-		    last->next != &qp->timerwait ||
-		    qp->timerwait.prev != last) {
-			INIT_LIST_HEAD(last);
-		} else {
-			list_del(&qp->timerwait);
-			qp->timerwait.prev = (struct list_head *) resend;
-			resend = qp;
-			atomic_inc(&qp->refcount);
-		}
+		list_del(&qp->timerwait);
+		qp->timer_next = resend;
+		resend = qp;
+		atomic_inc(&qp->refcount);
 	}
 	last = &dev->rnrwait;
 	if (!list_empty(last)) {
 		qp = list_entry(last->next, struct ipath_qp, timerwait);
 		if (--qp->s_rnr_timeout == 0) {
 			do {
-				if (last->next == LIST_POISON1 ||
-				    last->next != &qp->timerwait ||
-				    qp->timerwait.prev != last) {
-					INIT_LIST_HEAD(last);
-					break;
-				}
 				list_del(&qp->timerwait);
-				qp->timerwait.prev =
-					(struct list_head *) rnr;
-				rnr = qp;
+				tasklet_hi_schedule(&qp->s_task);
 				if (list_empty(last))
 					break;
 				qp = list_entry(last->next, struct ipath_qp,
@@ -530,8 +515,7 @@ static void ipath_ib_timer(void *arg)
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 
 	/* XXX What if timer fires again while this is running? */
-	for (qp = resend; qp != NULL;
-	     qp = (struct ipath_qp *) qp->timerwait.prev) {
+	for (qp = resend; qp != NULL; qp = qp->timer_next) {
 		struct ib_wc wc;
 
 		spin_lock_irqsave(&qp->s_lock, flags);
@@ -545,9 +529,6 @@ static void ipath_ib_timer(void *arg)
 		if (atomic_dec_and_test(&qp->refcount))
 			wake_up(&qp->wait);
 	}
-	for (qp = rnr; qp != NULL;
-	     qp = (struct ipath_qp *) qp->timerwait.prev)
-		tasklet_hi_schedule(&qp->s_task);
 }
 
 /**
@@ -556,9 +537,9 @@ static void ipath_ib_timer(void *arg)
  *
  * This is called from ipath_intr() at interrupt level when a PIO buffer is
  * available after ipath_verbs_send() returned an error that no buffers were
- * available. Return 0 if we consumed all the PIO buffers and we still have
+ * available. Return 1 if we consumed all the PIO buffers and we still have
  * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
- * return one).
+ * return zero).
  */
 static int ipath_ib_piobufavail(void *arg)
 {
@@ -579,7 +560,7 @@ static int ipath_ib_piobufavail(void *arg)
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 
 bail:
-	return 1;
+	return 0;
 }
 
 static int ipath_query_device(struct ib_device *ibdev,
@@ -1159,7 +1140,7 @@ static ssize_t show_stats(struct class_device *cdev, char *buf)
 
 	len = sprintf(buf,
 		      "RC resends %d\n"
-		      "RC QACKs %d\n"
+		      "RC no QACK %d\n"
 		      "RC ACKs %d\n"
 		      "RC SEQ NAKs %d\n"
 		      "RC RDMA seq %d\n"
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index fcafbc7c9e71..4f8d59300e9b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -282,7 +282,8 @@ struct ipath_srq {
  */
 struct ipath_qp {
 	struct ib_qp ibqp;
 	struct ipath_qp *next;		/* link list for QPN hash table */
+	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
 	struct list_head piowait;	/* link for wait PIO buf */
 	struct list_head timerwait;	/* link for waiting for timeouts */
 	struct ib_ah_attr remote_ah_attr;
diff --git a/drivers/infiniband/hw/ipath/ips_common.h b/drivers/infiniband/hw/ipath/ips_common.h
index 410a764dfcef..ab7cbbbfd03a 100644
--- a/drivers/infiniband/hw/ipath/ips_common.h
+++ b/drivers/infiniband/hw/ipath/ips_common.h
@@ -95,7 +95,7 @@ struct ether_header {
 	__u8 seq_num;
 	__le32 len;
 	/* MUST be of word size due to PIO write requirements */
-	__u32 csum;
+	__le32 csum;
 	__le16 csum_offset;
 	__le16 flags;
 	__u16 first_2_bytes;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 312cf90731ea..205854e9c662 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	spin_lock(&dev->cq_table.lock);
 
 	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-
 	if (cq)
-		atomic_inc(&cq->refcount);
+		++cq->refcount;
+
 	spin_unlock(&dev->cq_table.lock);
 
 	if (!cq) {
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	if (cq->ibcq.event_handler)
 		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
 
-	if (atomic_dec_and_test(&cq->refcount))
+	spin_lock(&dev->cq_table.lock);
+	if (!--cq->refcount)
 		wake_up(&cq->wait);
+	spin_unlock(&dev->cq_table.lock);
 }
 
 static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe)
 	return !(cqe->is_send & 0x80);
 }
 
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq)
 {
-	struct mthca_cq *cq;
 	struct mthca_cqe *cqe;
 	u32 prod_index;
 	int nfreed = 0;
 
-	spin_lock_irq(&dev->cq_table.lock);
-	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-	spin_unlock_irq(&dev->cq_table.lock);
-
-	if (!cq)
-		return;
-
 	spin_lock_irq(&cq->lock);
 
 	/*
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 
 	if (0)
 		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
-			  qpn, cqn, cq->cons_index, prod_index);
+			  qpn, cq->cqn, cq->cons_index, prod_index);
 
 	/*
 	 * Now sweep backwards through the CQ, removing CQ entries
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
325 } 317 }
326 318
327 spin_unlock_irq(&cq->lock); 319 spin_unlock_irq(&cq->lock);
328 if (atomic_dec_and_test(&cq->refcount))
329 wake_up(&cq->wait);
330} 320}
331 321
332void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) 322void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
821 } 811 }
822 812
823 spin_lock_init(&cq->lock); 813 spin_lock_init(&cq->lock);
824 atomic_set(&cq->refcount, 1); 814 cq->refcount = 1;
825 init_waitqueue_head(&cq->wait); 815 init_waitqueue_head(&cq->wait);
826 816
827 memset(cq_context, 0, sizeof *cq_context); 817 memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +886,17 @@ err_out:
896 return err; 886 return err;
897} 887}
898 888
889static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
890{
891 int c;
892
893 spin_lock_irq(&dev->cq_table.lock);
894 c = cq->refcount;
895 spin_unlock_irq(&dev->cq_table.lock);
896
897 return c;
898}
899
899void mthca_free_cq(struct mthca_dev *dev, 900void mthca_free_cq(struct mthca_dev *dev,
900 struct mthca_cq *cq) 901 struct mthca_cq *cq)
901{ 902{
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev,
929 spin_lock_irq(&dev->cq_table.lock); 930 spin_lock_irq(&dev->cq_table.lock);
930 mthca_array_clear(&dev->cq_table.cq, 931 mthca_array_clear(&dev->cq_table.cq,
931 cq->cqn & (dev->limits.num_cqs - 1)); 932 cq->cqn & (dev->limits.num_cqs - 1));
933 --cq->refcount;
932 spin_unlock_irq(&dev->cq_table.lock); 934 spin_unlock_irq(&dev->cq_table.lock);
933 935
934 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) 936 if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev,
936 else 938 else
937 synchronize_irq(dev->pdev->irq); 939 synchronize_irq(dev->pdev->irq);
938 940
939 atomic_dec(&cq->refcount); 941 wait_event(cq->wait, !get_cq_refcount(dev, cq));
940 wait_event(cq->wait, !atomic_read(&cq->refcount));
941 942
942 if (cq->is_kernel) { 943 if (cq->is_kernel) {
943 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); 944 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4c1dcb4c1822..f8160b8de090 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev,
496void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); 496void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
497void mthca_cq_event(struct mthca_dev *dev, u32 cqn, 497void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
498 enum ib_event_type event_type); 498 enum ib_event_type event_type);
499void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, 499void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
500 struct mthca_srq *srq); 500 struct mthca_srq *srq);
501void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); 501void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
502int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); 502int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 25e1c1db9a40..a486dec1707e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
761 761
762int __devinit mthca_init_mr_table(struct mthca_dev *dev) 762int __devinit mthca_init_mr_table(struct mthca_dev *dev)
763{ 763{
764 unsigned long addr;
764 int err, i; 765 int err, i;
765 766
766 err = mthca_alloc_init(&dev->mr_table.mpt_alloc, 767 err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
796 goto err_fmr_mpt; 797 goto err_fmr_mpt;
797 } 798 }
798 799
800 addr = pci_resource_start(dev->pdev, 4) +
801 ((pci_resource_len(dev->pdev, 4) - 1) &
802 dev->mr_table.mpt_base);
803
799 dev->mr_table.tavor_fmr.mpt_base = 804 dev->mr_table.tavor_fmr.mpt_base =
800 ioremap(dev->mr_table.mpt_base, 805 ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));
801 (1 << i) * sizeof (struct mthca_mpt_entry));
802 806
803 if (!dev->mr_table.tavor_fmr.mpt_base) { 807 if (!dev->mr_table.tavor_fmr.mpt_base) {
804 mthca_warn(dev, "MPT ioremap for FMR failed.\n"); 808 mthca_warn(dev, "MPT ioremap for FMR failed.\n");
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
806 goto err_fmr_mpt; 810 goto err_fmr_mpt;
807 } 811 }
808 812
813 addr = pci_resource_start(dev->pdev, 4) +
814 ((pci_resource_len(dev->pdev, 4) - 1) &
815 dev->mr_table.mtt_base);
816
809 dev->mr_table.tavor_fmr.mtt_base = 817 dev->mr_table.tavor_fmr.mtt_base =
810 ioremap(dev->mr_table.mtt_base, 818 ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE);
811 (1 << i) * MTHCA_MTT_SEG_SIZE);
812 if (!dev->mr_table.tavor_fmr.mtt_base) { 819 if (!dev->mr_table.tavor_fmr.mtt_base) {
813 mthca_warn(dev, "MTT ioremap for FMR failed.\n"); 820 mthca_warn(dev, "MTT ioremap for FMR failed.\n");
814 err = -ENOMEM; 821 err = -ENOMEM;
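
The two hunks above stop passing mr_table.mpt_base/mtt_base straight to ioremap() and instead map an address computed relative to PCI BAR 4: pci_resource_start() gives the BAR's bus address, and masking the raw base with (pci_resource_len() - 1) keeps only the offset inside that BAR. A small illustrative helper with the same arithmetic; the function name is mine, and it assumes, as PCI guarantees, that the BAR length is a power of two.

#include <stdint.h>

/*
 * Illustrative only: turn a raw base value into an address inside a
 * given BAR.  bar_len must be a power of two, which PCI BAR sizes are.
 */
static uint64_t fmr_table_addr(uint64_t bar_start, uint64_t bar_len,
			       uint64_t raw_base)
{
	return bar_start + (raw_base & (bar_len - 1));
}

In the patch, bar_start and bar_len come from pci_resource_start()/pci_resource_len() on resource 4 of dev->pdev, and raw_base is dev->mr_table.mpt_base or mtt_base respectively.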
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 565a24b1756f..a2eae8a30167 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -306,7 +306,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
306 goto out; 306 goto out;
307 } 307 }
308 308
309 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8); 309 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
310 310
311 out: 311 out:
312 kfree(in_mad); 312 kfree(in_mad);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 6676a786d690..179a8f610d0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -139,11 +139,12 @@ struct mthca_ah {
139 * a qp may be locked, with the send cq locked first. No other 139 * a qp may be locked, with the send cq locked first. No other
140 * nesting should be done. 140 * nesting should be done.
141 * 141 *
142 * Each struct mthca_cq/qp also has an atomic_t ref count. The 142 * Each struct mthca_cq/qp also has an ref count, protected by the
143 * pointer from the cq/qp_table to the struct counts as one reference. 143 * corresponding table lock. The pointer from the cq/qp_table to the
144 * This reference also is good for access through the consumer API, so 144 * struct counts as one reference. This reference also is good for
145 * modifying the CQ/QP etc doesn't need to take another reference. 145 * access through the consumer API, so modifying the CQ/QP etc doesn't
146 * Access because of a completion being polled does need a reference. 146 * need to take another reference. Access to a QP because of a
147 * completion being polled does not need a reference either.
147 * 148 *
148 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the 149 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
149 * destroy function to sleep on. 150 * destroy function to sleep on.
@@ -159,8 +160,9 @@ struct mthca_ah {
159 * - decrement ref count; if zero, wake up waiters 160 * - decrement ref count; if zero, wake up waiters
160 * 161 *
161 * To destroy a CQ/QP, we can do the following: 162 * To destroy a CQ/QP, we can do the following:
162 * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock 163 * - lock cq/qp_table
163 * - decrement ref count 164 * - remove pointer and decrement ref count
165 * - unlock cq/qp_table lock
164 * - wait_event until ref count is zero 166 * - wait_event until ref count is zero
165 * 167 *
 166 * It is the consumer's responsibility to make sure that no QP 168 * It is the consumer's responsibility to make sure that no QP
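
The comment block above documents the reworked lifetime rules: the reference count is now a plain int protected by the same spinlock that guards the cq/qp_table, the table's own reference is dropped while that lock is held, and the destroy path then sleeps until every event handler has dropped its reference (the get_cq_refcount()/wait_event() pair added in mthca_cq.c). A minimal userspace sketch of the same pattern, using a pthread mutex and condition variable in place of the spinlock and wait queue; all names here are illustrative, nothing below is taken from the driver.

#include <pthread.h>

struct obj {
	int refcount;			/* protected by table_lock */
	pthread_cond_t released;	/* stands in for the kernel wait queue */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Event path: take a reference while holding the table lock. */
static void obj_get(struct obj *o)
{
	pthread_mutex_lock(&table_lock);
	++o->refcount;
	pthread_mutex_unlock(&table_lock);
}

/* Drop a reference; wake a waiting destroyer when the count hits zero. */
static void obj_put(struct obj *o)
{
	pthread_mutex_lock(&table_lock);
	if (!--o->refcount)
		pthread_cond_broadcast(&o->released);
	pthread_mutex_unlock(&table_lock);
}

/*
 * Destroy path: remove the table's pointer (not shown), drop the
 * reference the table held, then wait until every other holder is gone.
 */
static void obj_destroy(struct obj *o)
{
	pthread_mutex_lock(&table_lock);
	--o->refcount;
	while (o->refcount)
		pthread_cond_wait(&o->released, &table_lock);
	pthread_mutex_unlock(&table_lock);
	/* o can be torn down and freed safely here */
}

The point of folding the decrement into the table lock is that a reader can only find the object and take a reference before the destroy path has removed the pointer, so once the waiter sees the count reach zero no new references can appear.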
@@ -197,7 +199,7 @@ struct mthca_cq_resize {
197struct mthca_cq { 199struct mthca_cq {
198 struct ib_cq ibcq; 200 struct ib_cq ibcq;
199 spinlock_t lock; 201 spinlock_t lock;
200 atomic_t refcount; 202 int refcount;
201 int cqn; 203 int cqn;
202 u32 cons_index; 204 u32 cons_index;
203 struct mthca_cq_buf buf; 205 struct mthca_cq_buf buf;
@@ -217,7 +219,7 @@ struct mthca_cq {
217struct mthca_srq { 219struct mthca_srq {
218 struct ib_srq ibsrq; 220 struct ib_srq ibsrq;
219 spinlock_t lock; 221 spinlock_t lock;
220 atomic_t refcount; 222 int refcount;
221 int srqn; 223 int srqn;
222 int max; 224 int max;
223 int max_gs; 225 int max_gs;
@@ -254,7 +256,7 @@ struct mthca_wq {
254 256
255struct mthca_qp { 257struct mthca_qp {
256 struct ib_qp ibqp; 258 struct ib_qp ibqp;
257 atomic_t refcount; 259 int refcount;
258 u32 qpn; 260 u32 qpn;
259 int is_direct; 261 int is_direct;
260 u8 port; /* for SQP and memfree use only */ 262 u8 port; /* for SQP and memfree use only */
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f37b0e367323..19765f6f8d58 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
240 spin_lock(&dev->qp_table.lock); 240 spin_lock(&dev->qp_table.lock);
241 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); 241 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
242 if (qp) 242 if (qp)
243 atomic_inc(&qp->refcount); 243 ++qp->refcount;
244 spin_unlock(&dev->qp_table.lock); 244 spin_unlock(&dev->qp_table.lock);
245 245
246 if (!qp) { 246 if (!qp) {
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
257 if (qp->ibqp.event_handler) 257 if (qp->ibqp.event_handler)
258 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); 258 qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
259 259
260 if (atomic_dec_and_test(&qp->refcount)) 260 spin_lock(&dev->qp_table.lock);
261 if (!--qp->refcount)
261 wake_up(&qp->wait); 262 wake_up(&qp->wait);
263 spin_unlock(&dev->qp_table.lock);
262} 264}
263 265
264static int to_mthca_state(enum ib_qp_state ib_state) 266static int to_mthca_state(enum ib_qp_state ib_state)
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
833 * entries and reinitialize the QP. 835 * entries and reinitialize the QP.
834 */ 836 */
835 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { 837 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
836 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, 838 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
837 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 839 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
838 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 840 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
839 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, 841 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
840 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 842 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
841 843
842 mthca_wq_init(&qp->sq); 844 mthca_wq_init(&qp->sq);
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1096 int ret; 1098 int ret;
1097 int i; 1099 int i;
1098 1100
1099 atomic_set(&qp->refcount, 1); 1101 qp->refcount = 1;
1100 init_waitqueue_head(&qp->wait); 1102 init_waitqueue_head(&qp->wait);
1101 qp->state = IB_QPS_RESET; 1103 qp->state = IB_QPS_RESET;
1102 qp->atomic_rd_en = 0; 1104 qp->atomic_rd_en = 0;
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
1318 return err; 1320 return err;
1319} 1321}
1320 1322
1323static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
1324{
1325 int c;
1326
1327 spin_lock_irq(&dev->qp_table.lock);
1328 c = qp->refcount;
1329 spin_unlock_irq(&dev->qp_table.lock);
1330
1331 return c;
1332}
1333
1321void mthca_free_qp(struct mthca_dev *dev, 1334void mthca_free_qp(struct mthca_dev *dev,
1322 struct mthca_qp *qp) 1335 struct mthca_qp *qp)
1323{ 1336{
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev,
1339 spin_lock(&dev->qp_table.lock); 1352 spin_lock(&dev->qp_table.lock);
1340 mthca_array_clear(&dev->qp_table.qp, 1353 mthca_array_clear(&dev->qp_table.qp,
1341 qp->qpn & (dev->limits.num_qps - 1)); 1354 qp->qpn & (dev->limits.num_qps - 1));
1355 --qp->refcount;
1342 spin_unlock(&dev->qp_table.lock); 1356 spin_unlock(&dev->qp_table.lock);
1343 1357
1344 if (send_cq != recv_cq) 1358 if (send_cq != recv_cq)
1345 spin_unlock(&recv_cq->lock); 1359 spin_unlock(&recv_cq->lock);
1346 spin_unlock_irq(&send_cq->lock); 1360 spin_unlock_irq(&send_cq->lock);
1347 1361
1348 atomic_dec(&qp->refcount); 1362 wait_event(qp->wait, !get_qp_refcount(dev, qp));
1349 wait_event(qp->wait, !atomic_read(&qp->refcount));
1350 1363
1351 if (qp->state != IB_QPS_RESET) 1364 if (qp->state != IB_QPS_RESET)
1352 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, 1365 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev,
1358 * unref the mem-free tables and free the QPN in our table. 1371 * unref the mem-free tables and free the QPN in our table.
1359 */ 1372 */
1360 if (!qp->ibqp.uobject) { 1373 if (!qp->ibqp.uobject) {
1361 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, 1374 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
1362 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1375 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1363 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 1376 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
1364 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, 1377 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
1365 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1378 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1366 1379
1367 mthca_free_memfree(dev, qp); 1380 mthca_free_memfree(dev, qp);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index adcaf85355ae..1ea433291fa7 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
241 goto err_out_mailbox; 241 goto err_out_mailbox;
242 242
243 spin_lock_init(&srq->lock); 243 spin_lock_init(&srq->lock);
244 atomic_set(&srq->refcount, 1); 244 srq->refcount = 1;
245 init_waitqueue_head(&srq->wait); 245 init_waitqueue_head(&srq->wait);
246 246
247 if (mthca_is_memfree(dev)) 247 if (mthca_is_memfree(dev))
@@ -308,6 +308,17 @@ err_out:
308 return err; 308 return err;
309} 309}
310 310
311static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
312{
313 int c;
314
315 spin_lock_irq(&dev->srq_table.lock);
316 c = srq->refcount;
317 spin_unlock_irq(&dev->srq_table.lock);
318
319 return c;
320}
321
311void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) 322void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
312{ 323{
313 struct mthca_mailbox *mailbox; 324 struct mthca_mailbox *mailbox;
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
329 spin_lock_irq(&dev->srq_table.lock); 340 spin_lock_irq(&dev->srq_table.lock);
330 mthca_array_clear(&dev->srq_table.srq, 341 mthca_array_clear(&dev->srq_table.srq,
331 srq->srqn & (dev->limits.num_srqs - 1)); 342 srq->srqn & (dev->limits.num_srqs - 1));
343 --srq->refcount;
332 spin_unlock_irq(&dev->srq_table.lock); 344 spin_unlock_irq(&dev->srq_table.lock);
333 345
334 atomic_dec(&srq->refcount); 346 wait_event(srq->wait, !get_srq_refcount(dev, srq));
335 wait_event(srq->wait, !atomic_read(&srq->refcount));
336 347
337 if (!srq->ibsrq.uobject) { 348 if (!srq->ibsrq.uobject) {
338 mthca_free_srq_buf(dev, srq); 349 mthca_free_srq_buf(dev, srq);
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
414 spin_lock(&dev->srq_table.lock); 425 spin_lock(&dev->srq_table.lock);
415 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); 426 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
416 if (srq) 427 if (srq)
417 atomic_inc(&srq->refcount); 428 ++srq->refcount;
418 spin_unlock(&dev->srq_table.lock); 429 spin_unlock(&dev->srq_table.lock);
419 430
420 if (!srq) { 431 if (!srq) {
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
431 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); 442 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
432 443
433out: 444out:
434 if (atomic_dec_and_test(&srq->refcount)) 445 spin_lock(&dev->srq_table.lock);
446 if (!--srq->refcount)
435 wake_up(&srq->wait); 447 wake_up(&srq->wait);
448 spin_unlock(&dev->srq_table.lock);
436} 449}
437 450
438/* 451/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4ca175553f9f..f887780e8093 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
158 if (priv->pkey == pkey) { 158 if (priv->pkey == pkey) {
159 unregister_netdev(priv->dev); 159 unregister_netdev(priv->dev);
160 ipoib_dev_cleanup(priv->dev); 160 ipoib_dev_cleanup(priv->dev);
161
162 list_del(&priv->list); 161 list_del(&priv->list);
163 162 free_netdev(priv->dev);
164 kfree(priv);
165 163
166 ret = 0; 164 ret = 0;
167 break; 165 break;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 5bb55742ada6..c32ce4348e1b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target)
409 } 409 }
410} 410}
411 411
412static void srp_unmap_data(struct scsi_cmnd *scmnd,
413 struct srp_target_port *target,
414 struct srp_request *req)
415{
416 struct scatterlist *scat;
417 int nents;
418
419 if (!scmnd->request_buffer ||
420 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
421 scmnd->sc_data_direction != DMA_FROM_DEVICE))
422 return;
423
424 /*
425 * This handling of non-SG commands can be killed when the
426 * SCSI midlayer no longer generates non-SG commands.
427 */
428 if (likely(scmnd->use_sg)) {
429 nents = scmnd->use_sg;
430 scat = scmnd->request_buffer;
431 } else {
432 nents = 1;
433 scat = &req->fake_sg;
434 }
435
436 dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
437 scmnd->sc_data_direction);
438}
439
412static int srp_reconnect_target(struct srp_target_port *target) 440static int srp_reconnect_target(struct srp_target_port *target)
413{ 441{
414 struct ib_cm_id *new_cm_id; 442 struct ib_cm_id *new_cm_id;
@@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
455 list_for_each_entry(req, &target->req_queue, list) { 483 list_for_each_entry(req, &target->req_queue, list) {
456 req->scmnd->result = DID_RESET << 16; 484 req->scmnd->result = DID_RESET << 16;
457 req->scmnd->scsi_done(req->scmnd); 485 req->scmnd->scsi_done(req->scmnd);
486 srp_unmap_data(req->scmnd, target, req);
458 } 487 }
459 488
460 target->rx_head = 0; 489 target->rx_head = 0;
461 target->tx_head = 0; 490 target->tx_head = 0;
462 target->tx_tail = 0; 491 target->tx_tail = 0;
463 target->req_head = 0; 492 INIT_LIST_HEAD(&target->free_reqs);
464 for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
465 target->req_ring[i].next = i + 1;
466 target->req_ring[SRP_SQ_SIZE - 1].next = -1;
467 INIT_LIST_HEAD(&target->req_queue); 493 INIT_LIST_HEAD(&target->req_queue);
494 for (i = 0; i < SRP_SQ_SIZE; ++i)
495 list_add_tail(&target->req_ring[i].list, &target->free_reqs);
468 496
469 ret = srp_connect_target(target); 497 ret = srp_connect_target(target);
470 if (ret) 498 if (ret)
@@ -589,40 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
589 return len; 617 return len;
590} 618}
591 619
592static void srp_unmap_data(struct scsi_cmnd *scmnd, 620static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
593 struct srp_target_port *target,
594 struct srp_request *req)
595{
596 struct scatterlist *scat;
597 int nents;
598
599 if (!scmnd->request_buffer ||
600 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
601 scmnd->sc_data_direction != DMA_FROM_DEVICE))
602 return;
603
604 /*
605 * This handling of non-SG commands can be killed when the
606 * SCSI midlayer no longer generates non-SG commands.
607 */
608 if (likely(scmnd->use_sg)) {
609 nents = scmnd->use_sg;
610 scat = scmnd->request_buffer;
611 } else {
612 nents = 1;
613 scat = &req->fake_sg;
614 }
615
616 dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
617 scmnd->sc_data_direction);
618}
619
620static void srp_remove_req(struct srp_target_port *target, struct srp_request *req,
621 int index)
622{ 621{
623 list_del(&req->list); 622 srp_unmap_data(req->scmnd, target, req);
624 req->next = target->req_head; 623 list_move_tail(&req->list, &target->free_reqs);
625 target->req_head = index;
626} 624}
627 625
628static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) 626static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -647,7 +645,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
647 req->tsk_status = rsp->data[3]; 645 req->tsk_status = rsp->data[3];
648 complete(&req->done); 646 complete(&req->done);
649 } else { 647 } else {
650 scmnd = req->scmnd; 648 scmnd = req->scmnd;
651 if (!scmnd) 649 if (!scmnd)
652 printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", 650 printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
653 (unsigned long long) rsp->tag); 651 (unsigned long long) rsp->tag);
@@ -665,14 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
665 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) 663 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
666 scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); 664 scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
667 665
668 srp_unmap_data(scmnd, target, req);
669
670 if (!req->tsk_mgmt) { 666 if (!req->tsk_mgmt) {
671 req->scmnd = NULL;
672 scmnd->host_scribble = (void *) -1L; 667 scmnd->host_scribble = (void *) -1L;
673 scmnd->scsi_done(scmnd); 668 scmnd->scsi_done(scmnd);
674 669
675 srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT); 670 srp_remove_req(target, req);
676 } else 671 } else
677 req->cmd_done = 1; 672 req->cmd_done = 1;
678 } 673 }
@@ -859,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
859 struct srp_request *req; 854 struct srp_request *req;
860 struct srp_iu *iu; 855 struct srp_iu *iu;
861 struct srp_cmd *cmd; 856 struct srp_cmd *cmd;
862 long req_index;
863 int len; 857 int len;
864 858
865 if (target->state == SRP_TARGET_CONNECTING) 859 if (target->state == SRP_TARGET_CONNECTING)
@@ -879,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
879 dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, 873 dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
880 SRP_MAX_IU_LEN, DMA_TO_DEVICE); 874 SRP_MAX_IU_LEN, DMA_TO_DEVICE);
881 875
882 req_index = target->req_head; 876 req = list_entry(target->free_reqs.next, struct srp_request, list);
883 877
884 scmnd->scsi_done = done; 878 scmnd->scsi_done = done;
885 scmnd->result = 0; 879 scmnd->result = 0;
886 scmnd->host_scribble = (void *) req_index; 880 scmnd->host_scribble = (void *) (long) req->index;
887 881
888 cmd = iu->buf; 882 cmd = iu->buf;
889 memset(cmd, 0, sizeof *cmd); 883 memset(cmd, 0, sizeof *cmd);
890 884
891 cmd->opcode = SRP_CMD; 885 cmd->opcode = SRP_CMD;
892 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); 886 cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48);
893 cmd->tag = req_index; 887 cmd->tag = req->index;
894 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); 888 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
895 889
896 req = &target->req_ring[req_index];
897
898 req->scmnd = scmnd; 890 req->scmnd = scmnd;
899 req->cmd = iu; 891 req->cmd = iu;
900 req->cmd_done = 0; 892 req->cmd_done = 0;
@@ -919,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
919 goto err_unmap; 911 goto err_unmap;
920 } 912 }
921 913
922 target->req_head = req->next; 914 list_move_tail(&req->list, &target->req_queue);
923 list_add_tail(&req->list, &target->req_queue);
924 915
925 return 0; 916 return 0;
926 917
@@ -1143,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1143 return 0; 1134 return 0;
1144} 1135}
1145 1136
1146static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) 1137static int srp_send_tsk_mgmt(struct srp_target_port *target,
1138 struct srp_request *req, u8 func)
1147{ 1139{
1148 struct srp_target_port *target = host_to_target(scmnd->device->host);
1149 struct srp_request *req;
1150 struct srp_iu *iu; 1140 struct srp_iu *iu;
1151 struct srp_tsk_mgmt *tsk_mgmt; 1141 struct srp_tsk_mgmt *tsk_mgmt;
1152 int req_index;
1153 int ret = FAILED;
1154 1142
1155 spin_lock_irq(target->scsi_host->host_lock); 1143 spin_lock_irq(target->scsi_host->host_lock);
1156 1144
1157 if (target->state == SRP_TARGET_DEAD || 1145 if (target->state == SRP_TARGET_DEAD ||
1158 target->state == SRP_TARGET_REMOVED) { 1146 target->state == SRP_TARGET_REMOVED) {
1159 scmnd->result = DID_BAD_TARGET << 16; 1147 req->scmnd->result = DID_BAD_TARGET << 16;
1160 goto out; 1148 goto out;
1161 } 1149 }
1162 1150
1163 if (scmnd->host_scribble == (void *) -1L)
1164 goto out;
1165
1166 req_index = (long) scmnd->host_scribble;
1167 printk(KERN_ERR "Abort for req_index %d\n", req_index);
1168
1169 req = &target->req_ring[req_index];
1170 init_completion(&req->done); 1151 init_completion(&req->done);
1171 1152
1172 iu = __srp_get_tx_iu(target); 1153 iu = __srp_get_tx_iu(target);
@@ -1177,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
1177 memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 1158 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1178 1159
1179 tsk_mgmt->opcode = SRP_TSK_MGMT; 1160 tsk_mgmt->opcode = SRP_TSK_MGMT;
1180 tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48); 1161 tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
1181 tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT; 1162 tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
1182 tsk_mgmt->tsk_mgmt_func = func; 1163 tsk_mgmt->tsk_mgmt_func = func;
1183 tsk_mgmt->task_tag = req_index; 1164 tsk_mgmt->task_tag = req->index;
1184 1165
1185 if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) 1166 if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
1186 goto out; 1167 goto out;
@@ -1188,37 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
1188 req->tsk_mgmt = iu; 1169 req->tsk_mgmt = iu;
1189 1170
1190 spin_unlock_irq(target->scsi_host->host_lock); 1171 spin_unlock_irq(target->scsi_host->host_lock);
1172
1191 if (!wait_for_completion_timeout(&req->done, 1173 if (!wait_for_completion_timeout(&req->done,
1192 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) 1174 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1193 return FAILED; 1175 return -1;
1194 spin_lock_irq(target->scsi_host->host_lock);
1195 1176
1196 if (req->cmd_done) { 1177 return 0;
1197 srp_remove_req(target, req, req_index);
1198 scmnd->scsi_done(scmnd);
1199 } else if (!req->tsk_status) {
1200 srp_remove_req(target, req, req_index);
1201 scmnd->result = DID_ABORT << 16;
1202 ret = SUCCESS;
1203 }
1204 1178
1205out: 1179out:
1206 spin_unlock_irq(target->scsi_host->host_lock); 1180 spin_unlock_irq(target->scsi_host->host_lock);
1207 return ret; 1181 return -1;
1182}
1183
1184static int srp_find_req(struct srp_target_port *target,
1185 struct scsi_cmnd *scmnd,
1186 struct srp_request **req)
1187{
1188 if (scmnd->host_scribble == (void *) -1L)
1189 return -1;
1190
1191 *req = &target->req_ring[(long) scmnd->host_scribble];
1192
1193 return 0;
1208} 1194}
1209 1195
1210static int srp_abort(struct scsi_cmnd *scmnd) 1196static int srp_abort(struct scsi_cmnd *scmnd)
1211{ 1197{
1198 struct srp_target_port *target = host_to_target(scmnd->device->host);
1199 struct srp_request *req;
1200 int ret = SUCCESS;
1201
1212 printk(KERN_ERR "SRP abort called\n"); 1202 printk(KERN_ERR "SRP abort called\n");
1213 1203
1214 return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK); 1204 if (srp_find_req(target, scmnd, &req))
1205 return FAILED;
1206 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
1207 return FAILED;
1208
1209 spin_lock_irq(target->scsi_host->host_lock);
1210
1211 if (req->cmd_done) {
1212 srp_remove_req(target, req);
1213 scmnd->scsi_done(scmnd);
1214 } else if (!req->tsk_status) {
1215 srp_remove_req(target, req);
1216 scmnd->result = DID_ABORT << 16;
1217 } else
1218 ret = FAILED;
1219
1220 spin_unlock_irq(target->scsi_host->host_lock);
1221
1222 return ret;
1215} 1223}
1216 1224
1217static int srp_reset_device(struct scsi_cmnd *scmnd) 1225static int srp_reset_device(struct scsi_cmnd *scmnd)
1218{ 1226{
1227 struct srp_target_port *target = host_to_target(scmnd->device->host);
1228 struct srp_request *req, *tmp;
1229
1219 printk(KERN_ERR "SRP reset_device called\n"); 1230 printk(KERN_ERR "SRP reset_device called\n");
1220 1231
1221 return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET); 1232 if (srp_find_req(target, scmnd, &req))
1233 return FAILED;
1234 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
1235 return FAILED;
1236 if (req->tsk_status)
1237 return FAILED;
1238
1239 spin_lock_irq(target->scsi_host->host_lock);
1240
1241 list_for_each_entry_safe(req, tmp, &target->req_queue, list)
1242 if (req->scmnd->device == scmnd->device) {
1243 req->scmnd->result = DID_RESET << 16;
1244 scmnd->scsi_done(scmnd);
1245 srp_remove_req(target, req);
1246 }
1247
1248 spin_unlock_irq(target->scsi_host->host_lock);
1249
1250 return SUCCESS;
1222} 1251}
1223 1252
1224static int srp_reset_host(struct scsi_cmnd *scmnd) 1253static int srp_reset_host(struct scsi_cmnd *scmnd)
@@ -1518,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev,
1518 1547
1519 INIT_WORK(&target->work, srp_reconnect_work, target); 1548 INIT_WORK(&target->work, srp_reconnect_work, target);
1520 1549
1521 for (i = 0; i < SRP_SQ_SIZE - 1; ++i) 1550 INIT_LIST_HEAD(&target->free_reqs);
1522 target->req_ring[i].next = i + 1;
1523 target->req_ring[SRP_SQ_SIZE - 1].next = -1;
1524 INIT_LIST_HEAD(&target->req_queue); 1551 INIT_LIST_HEAD(&target->req_queue);
1552 for (i = 0; i < SRP_SQ_SIZE; ++i) {
1553 target->req_ring[i].index = i;
1554 list_add_tail(&target->req_ring[i].list, &target->free_reqs);
1555 }
1525 1556
1526 ret = srp_parse_options(buf, target); 1557 ret = srp_parse_options(buf, target);
1527 if (ret) 1558 if (ret)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index bd7f7c3115de..c5cd43aae860 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -101,7 +101,7 @@ struct srp_request {
101 */ 101 */
102 struct scatterlist fake_sg; 102 struct scatterlist fake_sg;
103 struct completion done; 103 struct completion done;
104 short next; 104 short index;
105 u8 cmd_done; 105 u8 cmd_done;
106 u8 tsk_status; 106 u8 tsk_status;
107}; 107};
@@ -133,7 +133,7 @@ struct srp_target_port {
133 unsigned tx_tail; 133 unsigned tx_tail;
134 struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; 134 struct srp_iu *tx_ring[SRP_SQ_SIZE + 1];
135 135
136 int req_head; 136 struct list_head free_reqs;
137 struct list_head req_queue; 137 struct list_head req_queue;
138 struct srp_request req_ring[SRP_SQ_SIZE]; 138 struct srp_request req_ring[SRP_SQ_SIZE];
139 139
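
The srp changes above retire the hand-rolled free list (req_head plus a short next index in each request) in favour of two list heads: idle requests sit on free_reqs, in-flight ones on req_queue, and list_move_tail() shuttles a request between them while its index field keeps a stable tag. A self-contained sketch of that bookkeeping with a tiny doubly linked list; the helpers below only imitate <linux/list.h> and every name is illustrative.

#include <stddef.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_del_node(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail_node(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Unlink the node from whatever list it is on and append it to another. */
static void list_move_tail_node(struct list_node *n, struct list_node *h)
{
	list_del_node(n);
	list_add_tail_node(n, h);
}

#define RING_SIZE 64	/* stands in for SRP_SQ_SIZE */

struct request { struct list_node list; int index; };	/* list must stay first */

static struct request ring[RING_SIZE];
static struct list_node free_reqs, req_queue;

static void ring_init(void)
{
	int i;

	list_init(&free_reqs);
	list_init(&req_queue);
	for (i = 0; i < RING_SIZE; i++) {
		ring[i].index = i;
		list_add_tail_node(&ring[i].list, &free_reqs);
	}
}

/* queuecommand path: grab the first free request and mark it in flight. */
static struct request *request_get(void)
{
	struct request *req;

	if (free_reqs.next == &free_reqs)
		return NULL;	/* the real driver relies on the SCSI queue depth */
	req = (struct request *)free_reqs.next;	/* valid: list is the first member */
	list_move_tail_node(&req->list, &req_queue);
	return req;
}

/* completion / error path: put the request back on the free list. */
static void request_put(struct request *req)
{
	list_move_tail_node(&req->list, &free_reqs);
}

request_get() corresponds to the queuecommand path picking free_reqs.next, and request_put() to srp_remove_req() returning a completed or aborted request to the free list.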
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a34e3d91d9ed..ba325f16d077 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -403,6 +403,27 @@ static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
403 case EVIOCGID: 403 case EVIOCGID:
404 if (copy_to_user(p, &dev->id, sizeof(struct input_id))) 404 if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
405 return -EFAULT; 405 return -EFAULT;
406 return 0;
407
408 case EVIOCGREP:
409 if (!test_bit(EV_REP, dev->evbit))
410 return -ENOSYS;
411 if (put_user(dev->rep[REP_DELAY], ip))
412 return -EFAULT;
413 if (put_user(dev->rep[REP_PERIOD], ip + 1))
414 return -EFAULT;
415 return 0;
416
417 case EVIOCSREP:
418 if (!test_bit(EV_REP, dev->evbit))
419 return -ENOSYS;
420 if (get_user(u, ip))
421 return -EFAULT;
422 if (get_user(v, ip + 1))
423 return -EFAULT;
424
425 input_event(dev, EV_REP, REP_DELAY, u);
426 input_event(dev, EV_REP, REP_PERIOD, v);
406 427
407 return 0; 428 return 0;
408 429
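
The new EVIOCGREP and EVIOCSREP cases let userspace read and change a device's autorepeat delay and period (both in milliseconds) when the device advertises EV_REP; devices without repeat support get -ENOSYS. A hedged usage sketch; the event node path is an assumption, any keyboard-like evdev node will do.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int rep[2];	/* [0] = REP_DELAY, [1] = REP_PERIOD, in ms */
	int fd = open("/dev/input/event0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, EVIOCGREP, rep) == 0)
		printf("delay %d ms, period %d ms\n", rep[0], rep[1]);

	rep[0] = 250;	/* start repeating after 250 ms */
	rep[1] = 33;	/* then roughly 30 repeats per second */
	if (ioctl(fd, EVIOCSREP, rep))
		perror("EVIOCSREP");

	close(fd);
	return 0;
}

Setting the values goes through input_event(EV_REP, ...), so either the driver's own repeat handling or the input core's software autorepeat picks up the new timing.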
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a935abeffffc..3038c268917d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -155,6 +155,9 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
155 if (code > SND_MAX || !test_bit(code, dev->sndbit)) 155 if (code > SND_MAX || !test_bit(code, dev->sndbit))
156 return; 156 return;
157 157
158 if (!!test_bit(code, dev->snd) != !!value)
159 change_bit(code, dev->snd);
160
158 if (dev->event) dev->event(dev, type, code, value); 161 if (dev->event) dev->event(dev, type, code, value);
159 162
160 break; 163 break;
@@ -286,19 +289,19 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
286 for (; id->flags || id->driver_info; id++) { 289 for (; id->flags || id->driver_info; id++) {
287 290
288 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) 291 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
289 if (id->id.bustype != dev->id.bustype) 292 if (id->bustype != dev->id.bustype)
290 continue; 293 continue;
291 294
292 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) 295 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
293 if (id->id.vendor != dev->id.vendor) 296 if (id->vendor != dev->id.vendor)
294 continue; 297 continue;
295 298
296 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) 299 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
297 if (id->id.product != dev->id.product) 300 if (id->product != dev->id.product)
298 continue; 301 continue;
299 302
300 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) 303 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
301 if (id->id.version != dev->id.version) 304 if (id->version != dev->id.version)
302 continue; 305 continue;
303 306
304 MATCH_BIT(evbit, EV_MAX); 307 MATCH_BIT(evbit, EV_MAX);
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
index bc61cf8cfc65..1d238a9d52d6 100644
--- a/drivers/input/keyboard/spitzkbd.c
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -53,8 +53,8 @@ static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
53 KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */ 53 KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
54 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */ 54 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
55 KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */ 55 KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
56 SPITZ_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */ 56 SPITZ_KEY_ADDRESS, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
57 SPITZ_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */ 57 SPITZ_KEY_CALENDER, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
58 SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */ 58 SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
59 KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */ 59 KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
60}; 60};
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 4b415d9b0123..36cd2e07fce8 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -273,6 +273,18 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] = {
273 { KE_END, 0 } 273 { KE_END, 0 }
274}; 274};
275 275
276static struct key_entry keymap_fujitsu_n3510[] = {
277 { KE_KEY, 0x11, KEY_PROG1 },
278 { KE_KEY, 0x12, KEY_PROG2 },
279 { KE_KEY, 0x36, KEY_WWW },
280 { KE_KEY, 0x31, KEY_MAIL },
281 { KE_KEY, 0x71, KEY_STOPCD },
282 { KE_KEY, 0x72, KEY_PLAYPAUSE },
283 { KE_KEY, 0x74, KEY_REWIND },
284 { KE_KEY, 0x78, KEY_FORWARD },
285 { KE_END, 0 }
286};
287
276static struct key_entry keymap_wistron_ms2141[] = { 288static struct key_entry keymap_wistron_ms2141[] = {
277 { KE_KEY, 0x11, KEY_PROG1 }, 289 { KE_KEY, 0x11, KEY_PROG1 },
278 { KE_KEY, 0x12, KEY_PROG2 }, 290 { KE_KEY, 0x12, KEY_PROG2 },
@@ -323,6 +335,24 @@ static struct dmi_system_id dmi_ids[] = {
323 }, 335 },
324 { 336 {
325 .callback = dmi_matched, 337 .callback = dmi_matched,
338 .ident = "Fujitsu-Siemens Amilo M7400",
339 .matches = {
340 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
341 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO M "),
342 },
343 .driver_data = keymap_fs_amilo_pro_v2000
344 },
345 {
346 .callback = dmi_matched,
347 .ident = "Fujitsu N3510",
348 .matches = {
349 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
350 DMI_MATCH(DMI_PRODUCT_NAME, "N3510"),
351 },
352 .driver_data = keymap_fujitsu_n3510
353 },
354 {
355 .callback = dmi_matched,
326 .ident = "Acer Aspire 1500", 356 .ident = "Acer Aspire 1500",
327 .matches = { 357 .matches = {
328 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 358 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 32d70ed8f41d..136321a2cfdb 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -302,8 +302,10 @@ static irqreturn_t psmouse_interrupt(struct serio *serio,
302 * Check if this is a new device announcement (0xAA 0x00) 302 * Check if this is a new device announcement (0xAA 0x00)
303 */ 303 */
304 if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) { 304 if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) {
305 if (psmouse->pktcnt == 1) 305 if (psmouse->pktcnt == 1) {
306 psmouse->last = jiffies;
306 goto out; 307 goto out;
308 }
307 309
308 if (psmouse->packet[1] == PSMOUSE_RET_ID) { 310 if (psmouse->packet[1] == PSMOUSE_RET_ID) {
309 __psmouse_set_state(psmouse, PSMOUSE_IGNORE); 311 __psmouse_set_state(psmouse, PSMOUSE_IGNORE);
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index 9a9221644250..cc21914fbc72 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -67,14 +67,14 @@ static inline int i8042_platform_init(void)
67 * On some platforms touching the i8042 data register region can do really 67 * On some platforms touching the i8042 data register region can do really
68 * bad things. Because of this the region is always reserved on such boxes. 68 * bad things. Because of this the region is always reserved on such boxes.
69 */ 69 */
70#if !defined(__sh__) && !defined(__alpha__) && !defined(__mips__) && !defined(CONFIG_PPC64) 70#if !defined(__sh__) && !defined(__alpha__) && !defined(__mips__) && !defined(CONFIG_PPC_MERGE)
71 if (!request_region(I8042_DATA_REG, 16, "i8042")) 71 if (!request_region(I8042_DATA_REG, 16, "i8042"))
72 return -EBUSY; 72 return -EBUSY;
73#endif 73#endif
74 74
75 i8042_reset = 1; 75 i8042_reset = 1;
76 76
77#if defined(CONFIG_PPC64) 77#if defined(CONFIG_PPC_MERGE)
78 if (check_legacy_ioport(I8042_DATA_REG)) 78 if (check_legacy_ioport(I8042_DATA_REG))
79 return -EBUSY; 79 return -EBUSY;
80 if (!request_region(I8042_DATA_REG, 16, "i8042")) 80 if (!request_region(I8042_DATA_REG, 16, "i8042"))
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 46d1fec2cfd8..1494175ac6fe 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -2,6 +2,8 @@
2 * ADS7846 based touchscreen and sensor driver 2 * ADS7846 based touchscreen and sensor driver
3 * 3 *
4 * Copyright (c) 2005 David Brownell 4 * Copyright (c) 2005 David Brownell
5 * Copyright (c) 2006 Nokia Corporation
6 * Various changes: Imre Deak <imre.deak@nokia.com>
5 * 7 *
6 * Using code from: 8 * Using code from:
7 * - corgi_ts.c 9 * - corgi_ts.c
@@ -34,17 +36,25 @@
34 36
35 37
36/* 38/*
37 * This code has been lightly tested on an ads7846. 39 * This code has been tested on an ads7846 / N770 device.
38 * Support for ads7843 and ads7845 has only been stubbed in. 40 * Support for ads7843 and ads7845 has only been stubbed in.
39 * 41 *
40 * Not yet done: investigate the values reported. Are x/y/pressure 42 * Not yet done: How accurate are the temperature and voltage
41 * event values sane enough for X11? How accurate are the temperature 43 * readings? (System-specific calibration should support
42 * and voltage readings? (System-specific calibration should support
43 * accuracy of 0.3 degrees C; otherwise it's 2.0 degrees.) 44 * accuracy of 0.3 degrees C; otherwise it's 2.0 degrees.)
44 * 45 *
46 * IRQ handling needs a workaround because of a shortcoming in handling
47 * edge triggered IRQs on some platforms like the OMAP1/2. These
48 * platforms don't handle the ARM lazy IRQ disabling properly, thus we
49 * have to maintain our own SW IRQ disabled status. This should be
50 * removed as soon as the affected platform's IRQ handling is fixed.
51 *
45 * app note sbaa036 talks in more detail about accurate sampling... 52 * app note sbaa036 talks in more detail about accurate sampling...
46 * that ought to help in situations like LCDs inducing noise (which 53 * that ought to help in situations like LCDs inducing noise (which
47 * can also be helped by using synch signals) and more generally. 54 * can also be helped by using synch signals) and more generally.
55 * This driver tries to utilize the measures described in the app
56 * note. The strength of filtering can be set in the board-* specific
57 * files.
48 */ 58 */
49 59
50#define TS_POLL_PERIOD msecs_to_jiffies(10) 60#define TS_POLL_PERIOD msecs_to_jiffies(10)
@@ -61,6 +71,7 @@ struct ts_event {
61 __be16 x; 71 __be16 x;
62 __be16 y; 72 __be16 y;
63 __be16 z1, z2; 73 __be16 z1, z2;
74 int ignore;
64}; 75};
65 76
66struct ads7846 { 77struct ads7846 {
@@ -71,12 +82,23 @@ struct ads7846 {
71 u16 model; 82 u16 model;
72 u16 vref_delay_usecs; 83 u16 vref_delay_usecs;
73 u16 x_plate_ohms; 84 u16 x_plate_ohms;
85 u16 pressure_max;
74 86
75 u8 read_x, read_y, read_z1, read_z2; 87 u8 read_x, read_y, read_z1, read_z2, pwrdown;
88 u16 dummy; /* for the pwrdown read */
76 struct ts_event tc; 89 struct ts_event tc;
77 90
78 struct spi_transfer xfer[8]; 91 struct spi_transfer xfer[10];
79 struct spi_message msg; 92 struct spi_message msg[5];
93 struct spi_message *last_msg;
94 int msg_idx;
95 int read_cnt;
96 int read_rep;
97 int last_read;
98
99 u16 debounce_max;
100 u16 debounce_tol;
101 u16 debounce_rep;
80 102
81 spinlock_t lock; 103 spinlock_t lock;
82 struct timer_list timer; /* P: lock */ 104 struct timer_list timer; /* P: lock */
@@ -84,6 +106,9 @@ struct ads7846 {
84 unsigned pending:1; /* P: lock */ 106 unsigned pending:1; /* P: lock */
85// FIXME remove "irq_disabled" 107// FIXME remove "irq_disabled"
86 unsigned irq_disabled:1; /* P: lock */ 108 unsigned irq_disabled:1; /* P: lock */
109 unsigned disabled:1;
110
111 int (*get_pendown_state)(void);
87}; 112};
88 113
89/* leave chip selected when we're done, for quicker re-select? */ 114/* leave chip selected when we're done, for quicker re-select? */
@@ -125,7 +150,9 @@ struct ads7846 {
125#define READ_Y (READ_12BIT_DFR(y) | ADS_PD10_ADC_ON) 150#define READ_Y (READ_12BIT_DFR(y) | ADS_PD10_ADC_ON)
126#define READ_Z1 (READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON) 151#define READ_Z1 (READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON)
127#define READ_Z2 (READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON) 152#define READ_Z2 (READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON)
128#define READ_X (READ_12BIT_DFR(x) | ADS_PD10_PDOWN) /* LAST */ 153
154#define READ_X (READ_12BIT_DFR(x) | ADS_PD10_ADC_ON)
155#define PWRDOWN (READ_12BIT_DFR(y) | ADS_PD10_PDOWN) /* LAST */
129 156
130/* single-ended samples need to first power up reference voltage; 157/* single-ended samples need to first power up reference voltage;
131 * we leave both ADC and VREF powered 158 * we leave both ADC and VREF powered
@@ -152,6 +179,15 @@ struct ser_req {
152 struct spi_transfer xfer[6]; 179 struct spi_transfer xfer[6];
153}; 180};
154 181
182static void ads7846_enable(struct ads7846 *ts);
183static void ads7846_disable(struct ads7846 *ts);
184
185static int device_suspended(struct device *dev)
186{
187 struct ads7846 *ts = dev_get_drvdata(dev);
188 return dev->power.power_state.event != PM_EVENT_ON || ts->disabled;
189}
190
155static int ads7846_read12_ser(struct device *dev, unsigned command) 191static int ads7846_read12_ser(struct device *dev, unsigned command)
156{ 192{
157 struct spi_device *spi = to_spi_device(dev); 193 struct spi_device *spi = to_spi_device(dev);
@@ -164,7 +200,7 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
164 if (!req) 200 if (!req)
165 return -ENOMEM; 201 return -ENOMEM;
166 202
167 INIT_LIST_HEAD(&req->msg.transfers); 203 spi_message_init(&req->msg);
168 204
169 /* activate reference, so it has time to settle; */ 205 /* activate reference, so it has time to settle; */
170 req->ref_on = REF_ON; 206 req->ref_on = REF_ON;
@@ -204,8 +240,10 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
204 for (i = 0; i < 6; i++) 240 for (i = 0; i < 6; i++)
205 spi_message_add_tail(&req->xfer[i], &req->msg); 241 spi_message_add_tail(&req->xfer[i], &req->msg);
206 242
243 ts->irq_disabled = 1;
207 disable_irq(spi->irq); 244 disable_irq(spi->irq);
208 status = spi_sync(spi, &req->msg); 245 status = spi_sync(spi, &req->msg);
246 ts->irq_disabled = 0;
209 enable_irq(spi->irq); 247 enable_irq(spi->irq);
210 248
211 if (req->msg.status) 249 if (req->msg.status)
@@ -233,6 +271,52 @@ SHOW(temp1)
233SHOW(vaux) 271SHOW(vaux)
234SHOW(vbatt) 272SHOW(vbatt)
235 273
274static int is_pen_down(struct device *dev)
275{
276 struct ads7846 *ts = dev_get_drvdata(dev);
277
278 return ts->pendown;
279}
280
281static ssize_t ads7846_pen_down_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
283{
284 return sprintf(buf, "%u\n", is_pen_down(dev));
285}
286
287static DEVICE_ATTR(pen_down, S_IRUGO, ads7846_pen_down_show, NULL);
288
289static ssize_t ads7846_disable_show(struct device *dev,
290 struct device_attribute *attr, char *buf)
291{
292 struct ads7846 *ts = dev_get_drvdata(dev);
293
294 return sprintf(buf, "%u\n", ts->disabled);
295}
296
297static ssize_t ads7846_disable_store(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
300{
301 struct ads7846 *ts = dev_get_drvdata(dev);
302 char *endp;
303 int i;
304
305 i = simple_strtoul(buf, &endp, 10);
306 spin_lock_irq(&ts->lock);
307
308 if (i)
309 ads7846_disable(ts);
310 else
311 ads7846_enable(ts);
312
313 spin_unlock_irq(&ts->lock);
314
315 return count;
316}
317
318static DEVICE_ATTR(disable, 0664, ads7846_disable_show, ads7846_disable_store);
319
236/*--------------------------------------------------------------------------*/ 320/*--------------------------------------------------------------------------*/
237 321
238/* 322/*
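
The pen_down and disable DEVICE_ATTRs give the touchscreen a small sysfs interface: pen_down reads back ts->pendown, and writing a non-zero value to disable parks the chip via ads7846_disable() until 0 is written again. A hedged userspace sketch; the sysfs directory depends on which SPI bus and chip select the board wires the ADS7846 to, so the path below is only a placeholder, and it assumes the attributes are registered on the SPI device elsewhere in the patch.

#include <stdio.h>

/* Placeholder path; adjust to the real SPI device directory on the board. */
#define ADS7846_SYSFS "/sys/bus/spi/devices/spi2.0"

static int read_pen_down(void)
{
	FILE *f = fopen(ADS7846_SYSFS "/pen_down", "r");
	int v = -1;

	if (f) {
		if (fscanf(f, "%d", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;	/* 1 = pen down, 0 = pen up, -1 = error */
}

static int set_disabled(int disable)
{
	FILE *f = fopen(ADS7846_SYSFS "/disable", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", disable ? 1 : 0);
	fclose(f);
	return 0;
}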
@@ -264,7 +348,7 @@ static void ads7846_rx(void *ads)
264 if (x == MAX_12BIT) 348 if (x == MAX_12BIT)
265 x = 0; 349 x = 0;
266 350
267 if (x && z1 && ts->spi->dev.power.power_state.event == PM_EVENT_ON) { 351 if (likely(x && z1 && !device_suspended(&ts->spi->dev))) {
268 /* compute touch pressure resistance using equation #2 */ 352 /* compute touch pressure resistance using equation #2 */
269 Rt = z2; 353 Rt = z2;
270 Rt -= z1; 354 Rt -= z1;
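
"Equation #2" here is the data-sheet formula for estimating touch resistance from the X position and the two cross-panel conversions, Rt = x_plate_ohms * (x / 4096) * (z2 / z1 - 1). A hedged integer version of that formula, written to mirror the order of operations visible above; the guard clauses and the 64-bit intermediate are my own additions.

#include <stdint.h>

/*
 * Rt = x_plate_ohms * (x / 4096) * (z2 / z1 - 1), kept in integers.
 * Inputs are raw 12-bit conversions; returns 0 for an obviously
 * invalid sample (no touch, or z2 not above z1).
 */
static uint32_t touch_resistance(uint32_t x, uint32_t z1, uint32_t z2,
				 uint32_t x_plate_ohms)
{
	uint64_t rt;

	if (!z1 || z2 <= z1)
		return 0;

	rt = z2 - z1;
	rt *= x;
	rt *= x_plate_ohms;
	rt /= z1;

	return (uint32_t)(rt >> 12);	/* divide by the 12-bit full scale */
}

The ignore/pressure_max check added in the next hunk then discards samples whose Rt comes out implausibly high.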
@@ -275,6 +359,14 @@ static void ads7846_rx(void *ads)
275 } else 359 } else
276 Rt = 0; 360 Rt = 0;
277 361
 362 /* Sample found inconsistent by debouncing, or pressure is beyond
 363 * the maximum. Don't report it to user space; repeat the
 364 * measurement at least once more. */
365 if (ts->tc.ignore || Rt > ts->pressure_max) {
366 mod_timer(&ts->timer, jiffies + TS_POLL_PERIOD);
367 return;
368 }
369
278 /* NOTE: "pendown" is inferred from pressure; we don't rely on 370 /* NOTE: "pendown" is inferred from pressure; we don't rely on
279 * being able to check nPENIRQ status, or "friendly" trigger modes 371 * being able to check nPENIRQ status, or "friendly" trigger modes
280 * (both-edges is much better than just-falling or low-level). 372 * (both-edges is much better than just-falling or low-level).
@@ -296,11 +388,13 @@ static void ads7846_rx(void *ads)
296 if (Rt) { 388 if (Rt) {
297 input_report_abs(input_dev, ABS_X, x); 389 input_report_abs(input_dev, ABS_X, x);
298 input_report_abs(input_dev, ABS_Y, y); 390 input_report_abs(input_dev, ABS_Y, y);
299 input_report_abs(input_dev, ABS_PRESSURE, Rt);
300 sync = 1; 391 sync = 1;
301 } 392 }
302 if (sync) 393
394 if (sync) {
395 input_report_abs(input_dev, ABS_PRESSURE, Rt);
303 input_sync(input_dev); 396 input_sync(input_dev);
397 }
304 398
305#ifdef VERBOSE 399#ifdef VERBOSE
306 if (Rt || ts->pendown) 400 if (Rt || ts->pendown)
@@ -308,80 +402,138 @@ static void ads7846_rx(void *ads)
308 x, y, Rt, Rt ? "" : " UP"); 402 x, y, Rt, Rt ? "" : " UP");
309#endif 403#endif
310 404
311 /* don't retrigger while we're suspended */
312 spin_lock_irqsave(&ts->lock, flags); 405 spin_lock_irqsave(&ts->lock, flags);
313 406
314 ts->pendown = (Rt != 0); 407 ts->pendown = (Rt != 0);
315 ts->pending = 0; 408 mod_timer(&ts->timer, jiffies + TS_POLL_PERIOD);
316 409
317 if (ts->spi->dev.power.power_state.event == PM_EVENT_ON) { 410 spin_unlock_irqrestore(&ts->lock, flags);
318 if (ts->pendown) 411}
319 mod_timer(&ts->timer, jiffies + TS_POLL_PERIOD); 412
320 else if (ts->irq_disabled) { 413static void ads7846_debounce(void *ads)
321 ts->irq_disabled = 0; 414{
322 enable_irq(ts->spi->irq); 415 struct ads7846 *ts = ads;
416 struct spi_message *m;
417 struct spi_transfer *t;
418 int val;
419 int status;
420
421 m = &ts->msg[ts->msg_idx];
422 t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
423 val = (*(u16 *)t->rx_buf) >> 3;
424 if (!ts->read_cnt || (abs(ts->last_read - val) > ts->debounce_tol)) {
425 /* Repeat it, if this was the first read or the read
426 * wasn't consistent enough. */
427 if (ts->read_cnt < ts->debounce_max) {
428 ts->last_read = val;
429 ts->read_cnt++;
430 } else {
 431 /* Maximum number of debounce passes reached and still
 432 * not enough consistent readings. Abort
433 * the whole sample, repeat it in the next sampling
434 * period.
435 */
436 ts->tc.ignore = 1;
437 ts->read_cnt = 0;
438 /* Last message will contain ads7846_rx() as the
439 * completion function.
440 */
441 m = ts->last_msg;
323 } 442 }
443 /* Start over collecting consistent readings. */
444 ts->read_rep = 0;
445 } else {
446 if (++ts->read_rep > ts->debounce_rep) {
447 /* Got a good reading for this coordinate,
448 * go for the next one. */
449 ts->tc.ignore = 0;
450 ts->msg_idx++;
451 ts->read_cnt = 0;
452 ts->read_rep = 0;
453 m++;
454 } else
455 /* Read more values that are consistent. */
456 ts->read_cnt++;
324 } 457 }
325 458 status = spi_async(ts->spi, m);
326 spin_unlock_irqrestore(&ts->lock, flags); 459 if (status)
460 dev_err(&ts->spi->dev, "spi_async --> %d\n",
461 status);
327} 462}
328 463
329static void ads7846_timer(unsigned long handle) 464static void ads7846_timer(unsigned long handle)
330{ 465{
331 struct ads7846 *ts = (void *)handle; 466 struct ads7846 *ts = (void *)handle;
332 int status = 0; 467 int status = 0;
333 unsigned long flags; 468
469 spin_lock_irq(&ts->lock);
470
471 if (unlikely(ts->msg_idx && !ts->pendown)) {
 472 /* measurement cycle ended */
473 if (!device_suspended(&ts->spi->dev)) {
474 ts->irq_disabled = 0;
475 enable_irq(ts->spi->irq);
476 }
477 ts->pending = 0;
478 ts->msg_idx = 0;
479 } else {
480 /* pen is still down, continue with the measurement */
481 ts->msg_idx = 0;
482 status = spi_async(ts->spi, &ts->msg[0]);
483 if (status)
484 dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
485 }
486
487 spin_unlock_irq(&ts->lock);
488}
489
490static irqreturn_t ads7846_irq(int irq, void *handle, struct pt_regs *regs)
491{
492 struct ads7846 *ts = handle;
493 unsigned long flags;
334 494
335 spin_lock_irqsave(&ts->lock, flags); 495 spin_lock_irqsave(&ts->lock, flags);
336 if (!ts->pending) { 496 if (likely(ts->get_pendown_state())) {
337 ts->pending = 1;
338 if (!ts->irq_disabled) { 497 if (!ts->irq_disabled) {
498 /* REVISIT irq logic for many ARM chips has cloned a
499 * bug wherein disabling an irq in its handler won't
500 * work;(it's disabled lazily, and too late to work.
501 * until all their irq logic is fixed, we must shadow
502 * that state here.
503 */
339 ts->irq_disabled = 1; 504 ts->irq_disabled = 1;
340 disable_irq(ts->spi->irq); 505 disable_irq(ts->spi->irq);
506 ts->pending = 1;
507 mod_timer(&ts->timer, jiffies);
341 } 508 }
342 status = spi_async(ts->spi, &ts->msg);
343 if (status)
344 dev_err(&ts->spi->dev, "spi_async --> %d\n",
345 status);
346 } 509 }
347 spin_unlock_irqrestore(&ts->lock, flags); 510 spin_unlock_irqrestore(&ts->lock, flags);
348}
349 511
350static irqreturn_t ads7846_irq(int irq, void *handle, struct pt_regs *regs)
351{
352 ads7846_timer((unsigned long) handle);
353 return IRQ_HANDLED; 512 return IRQ_HANDLED;
354} 513}
355 514
356/*--------------------------------------------------------------------------*/ 515/*--------------------------------------------------------------------------*/
357 516
358static int 517/* Must be called with ts->lock held */
359ads7846_suspend(struct spi_device *spi, pm_message_t message) 518static void ads7846_disable(struct ads7846 *ts)
360{ 519{
361 struct ads7846 *ts = dev_get_drvdata(&spi->dev); 520 if (ts->disabled)
362 unsigned long flags; 521 return;
363 522
364 spin_lock_irqsave(&ts->lock, flags); 523 ts->disabled = 1;
365
366 spi->dev.power.power_state = message;
367 524
368 /* are we waiting for IRQ, or polling? */ 525 /* are we waiting for IRQ, or polling? */
369 if (!ts->pendown) { 526 if (!ts->pending) {
370 if (!ts->irq_disabled) { 527 ts->irq_disabled = 1;
371 ts->irq_disabled = 1; 528 disable_irq(ts->spi->irq);
372 disable_irq(ts->spi->irq);
373 }
374 } else { 529 } else {
375 /* polling; force a final SPI completion; 530 /* the timer will run at least once more, and
376 * that will clean things up neatly 531 * leave everything in a clean state, IRQ disabled
377 */ 532 */
378 if (!ts->pending) 533 while (ts->pending) {
379 mod_timer(&ts->timer, jiffies); 534 spin_unlock_irq(&ts->lock);
380 535 msleep(1);
381 while (ts->pendown || ts->pending) { 536 spin_lock_irq(&ts->lock);
382 spin_unlock_irqrestore(&ts->lock, flags);
383 udelay(10);
384 spin_lock_irqsave(&ts->lock, flags);
385 } 537 }
386 } 538 }
387 539
@@ -389,17 +541,45 @@ ads7846_suspend(struct spi_device *spi, pm_message_t message)
389 * leave it that way after every request 541 * leave it that way after every request
390 */ 542 */
391 543
392 spin_unlock_irqrestore(&ts->lock, flags); 544}
545
546/* Must be called with ts->lock held */
547static void ads7846_enable(struct ads7846 *ts)
548{
549 if (!ts->disabled)
550 return;
551
552 ts->disabled = 0;
553 ts->irq_disabled = 0;
554 enable_irq(ts->spi->irq);
555}
556
557static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
558{
559 struct ads7846 *ts = dev_get_drvdata(&spi->dev);
560
561 spin_lock_irq(&ts->lock);
562
563 spi->dev.power.power_state = message;
564 ads7846_disable(ts);
565
566 spin_unlock_irq(&ts->lock);
567
393 return 0; 568 return 0;
569
394} 570}
395 571
396static int ads7846_resume(struct spi_device *spi) 572static int ads7846_resume(struct spi_device *spi)
397{ 573{
398 struct ads7846 *ts = dev_get_drvdata(&spi->dev); 574 struct ads7846 *ts = dev_get_drvdata(&spi->dev);
399 575
400 ts->irq_disabled = 0; 576 spin_lock_irq(&ts->lock);
401 enable_irq(ts->spi->irq); 577
402 spi->dev.power.power_state = PMSG_ON; 578 spi->dev.power.power_state = PMSG_ON;
579 ads7846_enable(ts);
580
581 spin_unlock_irq(&ts->lock);
582
403 return 0; 583 return 0;
404} 584}
405 585
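The suspend path above is built on two small helpers, ads7846_disable() and ads7846_enable(), both documented as "must be called with ts->lock held": disable marks the device stopped, masks the IRQ if nothing is in flight, and otherwise polls the pending flag with the lock dropped around a short sleep so the timer/SPI completion path can finish with the IRQ masked. A minimal sketch of that quiesce pattern, using an invented struct my_dev rather than the driver's own state (assumption: the completion path clears pending and masks the IRQ itself before it stops rescheduling):

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

struct my_dev {
    spinlock_t  lock;
    int         irq;
    unsigned    disabled:1;     /* suspend/sysfs asked us to stop */
    unsigned    irq_disabled:1; /* shadows disable_irq() state */
    unsigned    pending:1;      /* timer/SPI work still in flight */
};

/* Must be called with dev->lock held; may temporarily release it. */
static void my_dev_quiesce(struct my_dev *dev)
{
    if (dev->disabled)
        return;
    dev->disabled = 1;

    if (!dev->pending) {
        /* idle: just mask the interrupt ourselves */
        dev->irq_disabled = 1;
        disable_irq(dev->irq);
    } else {
        /* busy: the in-flight work will run at least once more and
         * finish with the IRQ masked; poll for that with the lock
         * dropped so the completion path can make progress
         */
        while (dev->pending) {
            spin_unlock_irq(&dev->lock);
            msleep(1);
            spin_lock_irq(&dev->lock);
        }
    }
}

/* Must be called with dev->lock held. */
static void my_dev_wakeup(struct my_dev *dev)
{
    if (!dev->disabled)
        return;
    dev->disabled = 0;
    dev->irq_disabled = 0;
    enable_irq(dev->irq);
}

Dropping the lock around msleep() is the whole point: the completion path takes the same lock to clear pending, so sleeping with it held would deadlock.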
@@ -408,6 +588,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
408 struct ads7846 *ts; 588 struct ads7846 *ts;
409 struct input_dev *input_dev; 589 struct input_dev *input_dev;
410 struct ads7846_platform_data *pdata = spi->dev.platform_data; 590 struct ads7846_platform_data *pdata = spi->dev.platform_data;
591 struct spi_message *m;
411 struct spi_transfer *x; 592 struct spi_transfer *x;
412 int err; 593 int err;
413 594
@@ -428,6 +609,11 @@ static int __devinit ads7846_probe(struct spi_device *spi)
428 return -EINVAL; 609 return -EINVAL;
429 } 610 }
430 611
612 if (pdata->get_pendown_state == NULL) {
613 dev_dbg(&spi->dev, "no get_pendown_state function?\n");
614 return -EINVAL;
615 }
616
431 /* We'd set the wordsize to 12 bits ... except that some controllers 617 /* We'd set the wordsize to 12 bits ... except that some controllers
432 * will then treat the 8 bit command words as 12 bits (and drop the 618 * will then treat the 8 bit command words as 12 bits (and drop the
433 * four MSBs of the 12 bit result). Result: inputs must be shifted 619 * four MSBs of the 12 bit result). Result: inputs must be shifted
@@ -451,9 +637,21 @@ static int __devinit ads7846_probe(struct spi_device *spi)
451 ts->timer.data = (unsigned long) ts; 637 ts->timer.data = (unsigned long) ts;
452 ts->timer.function = ads7846_timer; 638 ts->timer.function = ads7846_timer;
453 639
640 spin_lock_init(&ts->lock);
641
454 ts->model = pdata->model ? : 7846; 642 ts->model = pdata->model ? : 7846;
455 ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100; 643 ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
456 ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; 644 ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
645 ts->pressure_max = pdata->pressure_max ? : ~0;
646 if (pdata->debounce_max) {
647 ts->debounce_max = pdata->debounce_max;
648 ts->debounce_tol = pdata->debounce_tol;
649 ts->debounce_rep = pdata->debounce_rep;
650 if (ts->debounce_rep > ts->debounce_max + 1)
651 ts->debounce_rep = ts->debounce_max - 1;
652 } else
653 ts->debounce_tol = ~0;
654 ts->get_pendown_state = pdata->get_pendown_state;
457 655
458 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", spi->dev.bus_id); 656 snprintf(ts->phys, sizeof(ts->phys), "%s/input0", spi->dev.bus_id);
459 657
@@ -477,60 +675,100 @@ static int __devinit ads7846_probe(struct spi_device *spi)
477 /* set up the transfers to read touchscreen state; this assumes we 675 /* set up the transfers to read touchscreen state; this assumes we
478 * use formula #2 for pressure, not #3. 676 * use formula #2 for pressure, not #3.
479 */ 677 */
480 INIT_LIST_HEAD(&ts->msg.transfers); 678 m = &ts->msg[0];
481 x = ts->xfer; 679 x = ts->xfer;
482 680
681 spi_message_init(m);
682
483 /* y- still on; turn on only y+ (and ADC) */ 683 /* y- still on; turn on only y+ (and ADC) */
484 ts->read_y = READ_Y; 684 ts->read_y = READ_Y;
485 x->tx_buf = &ts->read_y; 685 x->tx_buf = &ts->read_y;
486 x->len = 1; 686 x->len = 1;
487 spi_message_add_tail(x, &ts->msg); 687 spi_message_add_tail(x, m);
488 688
489 x++; 689 x++;
490 x->rx_buf = &ts->tc.y; 690 x->rx_buf = &ts->tc.y;
491 x->len = 2; 691 x->len = 2;
492 spi_message_add_tail(x, &ts->msg); 692 spi_message_add_tail(x, m);
693
694 m->complete = ads7846_debounce;
695 m->context = ts;
696
697 m++;
698 spi_message_init(m);
699
700 /* turn y- off, x+ on, then leave in lowpower */
701 x++;
702 ts->read_x = READ_X;
703 x->tx_buf = &ts->read_x;
704 x->len = 1;
705 spi_message_add_tail(x, m);
706
707 x++;
708 x->rx_buf = &ts->tc.x;
709 x->len = 2;
710 spi_message_add_tail(x, m);
711
712 m->complete = ads7846_debounce;
713 m->context = ts;
493 714
494 /* turn y+ off, x- on; we'll use formula #2 */ 715 /* turn y+ off, x- on; we'll use formula #2 */
495 if (ts->model == 7846) { 716 if (ts->model == 7846) {
717 m++;
718 spi_message_init(m);
719
496 x++; 720 x++;
497 ts->read_z1 = READ_Z1; 721 ts->read_z1 = READ_Z1;
498 x->tx_buf = &ts->read_z1; 722 x->tx_buf = &ts->read_z1;
499 x->len = 1; 723 x->len = 1;
500 spi_message_add_tail(x, &ts->msg); 724 spi_message_add_tail(x, m);
501 725
502 x++; 726 x++;
503 x->rx_buf = &ts->tc.z1; 727 x->rx_buf = &ts->tc.z1;
504 x->len = 2; 728 x->len = 2;
505 spi_message_add_tail(x, &ts->msg); 729 spi_message_add_tail(x, m);
730
731 m->complete = ads7846_debounce;
732 m->context = ts;
733
734 m++;
735 spi_message_init(m);
506 736
507 x++; 737 x++;
508 ts->read_z2 = READ_Z2; 738 ts->read_z2 = READ_Z2;
509 x->tx_buf = &ts->read_z2; 739 x->tx_buf = &ts->read_z2;
510 x->len = 1; 740 x->len = 1;
511 spi_message_add_tail(x, &ts->msg); 741 spi_message_add_tail(x, m);
512 742
513 x++; 743 x++;
514 x->rx_buf = &ts->tc.z2; 744 x->rx_buf = &ts->tc.z2;
515 x->len = 2; 745 x->len = 2;
516 spi_message_add_tail(x, &ts->msg); 746 spi_message_add_tail(x, m);
747
748 m->complete = ads7846_debounce;
749 m->context = ts;
517 } 750 }
518 751
519 /* turn y- off, x+ on, then leave in lowpower */ 752 /* power down */
753 m++;
754 spi_message_init(m);
755
520 x++; 756 x++;
521 ts->read_x = READ_X; 757 ts->pwrdown = PWRDOWN;
522 x->tx_buf = &ts->read_x; 758 x->tx_buf = &ts->pwrdown;
523 x->len = 1; 759 x->len = 1;
524 spi_message_add_tail(x, &ts->msg); 760 spi_message_add_tail(x, m);
525 761
526 x++; 762 x++;
527 x->rx_buf = &ts->tc.x; 763 x->rx_buf = &ts->dummy;
528 x->len = 2; 764 x->len = 2;
529 CS_CHANGE(*x); 765 CS_CHANGE(*x);
530 spi_message_add_tail(x, &ts->msg); 766 spi_message_add_tail(x, m);
531 767
532 ts->msg.complete = ads7846_rx; 768 m->complete = ads7846_rx;
533 ts->msg.context = ts; 769 m->context = ts;
770
771 ts->last_msg = m;
534 772
535 if (request_irq(spi->irq, ads7846_irq, 773 if (request_irq(spi->irq, ads7846_irq,
536 SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING, 774 SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING,
@@ -559,13 +797,27 @@ static int __devinit ads7846_probe(struct spi_device *spi)
559 device_create_file(&spi->dev, &dev_attr_vbatt); 797 device_create_file(&spi->dev, &dev_attr_vbatt);
560 device_create_file(&spi->dev, &dev_attr_vaux); 798 device_create_file(&spi->dev, &dev_attr_vaux);
561 799
800 device_create_file(&spi->dev, &dev_attr_pen_down);
801
802 device_create_file(&spi->dev, &dev_attr_disable);
803
562 err = input_register_device(input_dev); 804 err = input_register_device(input_dev);
563 if (err) 805 if (err)
564 goto err_free_irq; 806 goto err_remove_attr;
565 807
566 return 0; 808 return 0;
567 809
568 err_free_irq: 810 err_remove_attr:
811 device_remove_file(&spi->dev, &dev_attr_disable);
812 device_remove_file(&spi->dev, &dev_attr_pen_down);
813 if (ts->model == 7846) {
814 device_remove_file(&spi->dev, &dev_attr_temp1);
815 device_remove_file(&spi->dev, &dev_attr_temp0);
816 }
817 if (ts->model != 7845)
818 device_remove_file(&spi->dev, &dev_attr_vbatt);
819 device_remove_file(&spi->dev, &dev_attr_vaux);
820
569 free_irq(spi->irq, ts); 821 free_irq(spi->irq, ts);
570 err_free_mem: 822 err_free_mem:
571 input_free_device(input_dev); 823 input_free_device(input_dev);

@@ -577,20 +829,24 @@ static int __devexit ads7846_remove(struct spi_device *spi)
577{ 829{
578 struct ads7846 *ts = dev_get_drvdata(&spi->dev); 830 struct ads7846 *ts = dev_get_drvdata(&spi->dev);
579 831
832 input_unregister_device(ts->input);
833
580 ads7846_suspend(spi, PMSG_SUSPEND); 834 ads7846_suspend(spi, PMSG_SUSPEND);
581 free_irq(ts->spi->irq, ts);
582 if (ts->irq_disabled)
583 enable_irq(ts->spi->irq);
584 835
836 device_remove_file(&spi->dev, &dev_attr_disable);
837 device_remove_file(&spi->dev, &dev_attr_pen_down);
585 if (ts->model == 7846) { 838 if (ts->model == 7846) {
586 device_remove_file(&spi->dev, &dev_attr_temp0);
587 device_remove_file(&spi->dev, &dev_attr_temp1); 839 device_remove_file(&spi->dev, &dev_attr_temp1);
840 device_remove_file(&spi->dev, &dev_attr_temp0);
588 } 841 }
589 if (ts->model != 7845) 842 if (ts->model != 7845)
590 device_remove_file(&spi->dev, &dev_attr_vbatt); 843 device_remove_file(&spi->dev, &dev_attr_vbatt);
591 device_remove_file(&spi->dev, &dev_attr_vaux); 844 device_remove_file(&spi->dev, &dev_attr_vaux);
592 845
593 input_unregister_device(ts->input); 846 free_irq(ts->spi->irq, ts);
847 /* suspend left the IRQ disabled */
848 enable_irq(ts->spi->irq);
849
594 kfree(ts); 850 kfree(ts);
595 851
596 dev_dbg(&spi->dev, "unregistered touchscreen\n"); 852 dev_dbg(&spi->dev, "unregistered touchscreen\n");
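Earlier in this file the probe hunk replaces the single ts->msg with an array of spi_messages (Y read, X read, optional Z1/Z2 pressure reads, then a power-down), each carrying its own completion callback so a sample can be debounced before the next message is fired. A cut-down sketch of building such a two-message chain; struct my_ts, the command values, and my_ts_sample_done() are invented for illustration, while spi_message_init() and spi_message_add_tail() are the real SPI core helpers the patch uses:

#include <linux/spi/spi.h>
#include <linux/types.h>

#define MY_READ_Y   0x91    /* hypothetical command bytes, not the */
#define MY_READ_X   0xd1    /* driver's actual register encoding   */

struct my_ts {
    struct spi_message  msg[2];
    struct spi_transfer xfer[4];
    u8                  cmd_y, cmd_x;
    u16                 val_y, val_x;
};

static void my_ts_sample_done(void *context)
{
    /* per-message completion: look at the sample just read and, if it
     * is stable enough, submit the next message with spi_async();
     * otherwise resubmit this one for another reading
     */
}

static void my_ts_build_messages(struct my_ts *ts)
{
    struct spi_message *m = &ts->msg[0];
    struct spi_transfer *x = ts->xfer;

    spi_message_init(m);

    ts->cmd_y = MY_READ_Y;          /* 1-byte command ... */
    x->tx_buf = &ts->cmd_y;
    x->len = 1;
    spi_message_add_tail(x, m);

    x++;                            /* ... followed by a 2-byte result */
    x->rx_buf = &ts->val_y;
    x->len = 2;
    spi_message_add_tail(x, m);

    m->complete = my_ts_sample_done;
    m->context = ts;

    m++;
    spi_message_init(m);

    x++;
    ts->cmd_x = MY_READ_X;
    x->tx_buf = &ts->cmd_x;
    x->len = 1;
    spi_message_add_tail(x, m);

    x++;
    x->rx_buf = &ts->val_x;
    x->len = 2;
    spi_message_add_tail(x, m);

    m->complete = my_ts_sample_done;
    m->context = ts;
}

In the patched driver it is the per-message completions that advance the chain (ads7846_debounce for the samples, ads7846_rx at the end); the sketch leaves that decision as a comment.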
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
index 1042987856f7..5013703db0e6 100644
--- a/drivers/input/touchscreen/corgi_ts.c
+++ b/drivers/input/touchscreen/corgi_ts.c
@@ -17,7 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <asm/irq.h> 20//#include <asm/irq.h>
21 21
22#include <asm/arch/sharpsl.h> 22#include <asm/arch/sharpsl.h>
23#include <asm/arch/hardware.h> 23#include <asm/arch/hardware.h>
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6081941de1b3..4070eff6f0f8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -315,10 +315,11 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
315 if (r1_bio->bios[mirror] == bio) 315 if (r1_bio->bios[mirror] == bio)
316 break; 316 break;
317 317
318 if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { 318 if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
319 set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); 319 set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
320 set_bit(R1BIO_BarrierRetry, &r1_bio->state); 320 set_bit(R1BIO_BarrierRetry, &r1_bio->state);
321 r1_bio->mddev->barriers_work = 0; 321 r1_bio->mddev->barriers_work = 0;
322 /* Don't rdev_dec_pending in this branch - keep it for the retry */
322 } else { 323 } else {
323 /* 324 /*
324 * this branch is our 'one mirror IO has finished' event handler: 325 * this branch is our 'one mirror IO has finished' event handler:
@@ -365,6 +366,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
365 } 366 }
366 } 367 }
367 } 368 }
369 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
368 } 370 }
369 /* 371 /*
370 * 372 *
@@ -374,11 +376,9 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
374 if (atomic_dec_and_test(&r1_bio->remaining)) { 376 if (atomic_dec_and_test(&r1_bio->remaining)) {
375 if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { 377 if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
376 reschedule_retry(r1_bio); 378 reschedule_retry(r1_bio);
377 /* Don't dec_pending yet, we want to hold
378 * the reference over the retry
379 */
380 goto out; 379 goto out;
381 } 380 }
381 /* it really is the end of this request */
382 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 382 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
383 /* free extra copy of the data pages */ 383 /* free extra copy of the data pages */
384 int i = bio->bi_vcnt; 384 int i = bio->bi_vcnt;
@@ -393,8 +393,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
393 md_write_end(r1_bio->mddev); 393 md_write_end(r1_bio->mddev);
394 raid_end_bio_io(r1_bio); 394 raid_end_bio_io(r1_bio);
395 } 395 }
396
397 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
398 out: 396 out:
399 if (to_put) 397 if (to_put)
400 bio_put(to_put); 398 bio_put(to_put);
@@ -753,18 +751,24 @@ static int make_request(request_queue_t *q, struct bio * bio)
753 const int rw = bio_data_dir(bio); 751 const int rw = bio_data_dir(bio);
754 int do_barriers; 752 int do_barriers;
755 753
756 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
757 bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
758 return 0;
759 }
760
761 /* 754 /*
762 * Register the new request and wait if the reconstruction 755 * Register the new request and wait if the reconstruction
763 * thread has put up a bar for new requests. 756 * thread has put up a bar for new requests.
764 * Continue immediately if no resync is active currently. 757 * Continue immediately if no resync is active currently.
758 * We test barriers_work *after* md_write_start as md_write_start
 759 * may cause the first superblock write, and that will discover
 760 * whether barriers work.
765 */ 761 */
762
766 md_write_start(mddev, bio); /* wait on superblock update early */ 763 md_write_start(mddev, bio); /* wait on superblock update early */
767 764
765 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
766 if (rw == WRITE)
767 md_write_end(mddev);
768 bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
769 return 0;
770 }
771
768 wait_barrier(conf); 772 wait_barrier(conf);
769 773
770 disk_stat_inc(mddev->gendisk, ios[rw]); 774 disk_stat_inc(mddev->gendisk, ios[rw]);
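The reordered check in make_request() carries a pairing rule worth spelling out: md_write_start() runs before barrier support is known (the first superblock write is what probes it), so when the barrier bio is then rejected the WRITE case must call md_write_end() before ending the bio, or the write accounting leaks. A generic sketch of that balance with invented names (my_write_start/my_write_end are stand-ins, not md's API):

#include <linux/errno.h>

struct my_array { int writes_started; };
struct my_req   { int is_write; int is_barrier; };

static void my_write_start(struct my_array *a) { a->writes_started++; }
static void my_write_end(struct my_array *a)   { a->writes_started--; }

/* returns 0 on success; on early failure the caller completes the
 * request with the returned error
 */
static int my_make_request(struct my_array *a, struct my_req *r,
                           int barriers_work)
{
    if (r->is_write)
        my_write_start(a);      /* may trigger the probing write */

    /* barrier support is only known after that first write, so the
     * check has to come after my_write_start()
     */
    if (!barriers_work && r->is_barrier) {
        if (r->is_write)
            my_write_end(a);    /* keep start/end balanced */
        return -EOPNOTSUPP;
    }

    /* ... normal submission path ... */
    return 0;
}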
@@ -1404,10 +1408,11 @@ static void raid1d(mddev_t *mddev)
1404 unplug = 1; 1408 unplug = 1;
1405 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { 1409 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
1406 /* some requests in the r1bio were BIO_RW_BARRIER 1410 /* some requests in the r1bio were BIO_RW_BARRIER
1407 * requests which failed with -ENOTSUPP. Hohumm.. 1411 * requests which failed with -EOPNOTSUPP. Hohumm..
1408 * Better resubmit without the barrier. 1412 * Better resubmit without the barrier.
1409 * We know which devices to resubmit for, because 1413 * We know which devices to resubmit for, because
1410 * all others have had their bios[] entry cleared. 1414 * all others have had their bios[] entry cleared.
1415 * We already have a nr_pending reference on these rdevs.
1411 */ 1416 */
1412 int i; 1417 int i;
1413 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1418 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
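The raid1_end_write_request() and raid1d() hunks above move rdev_dec_pending() so the per-device reference taken at submit time is kept across a barrier retry and dropped only when the request really completes; the added comments state this explicitly. A generic sketch of holding a refcount across a deferred retry (the struct names and retry flag are invented; only the -EOPNOTSUPP trigger mirrors the patch):

#include <linux/errno.h>
#include <linux/bitops.h>
#include <asm/atomic.h>

struct my_disk {
    atomic_t nr_pending;        /* in-flight requests against this disk */
};

struct my_request {
    struct my_disk *disk;
    unsigned long   flags;
};
#define MY_REQ_RETRY 0          /* resubmit later without the barrier */

/* completion path: either finish now or park the request for retry */
static void my_request_done(struct my_request *req, int error)
{
    if (error == -EOPNOTSUPP) {
        /* park for retry and keep the nr_pending reference, so the
         * disk cannot be torn down before the retry runs
         */
        set_bit(MY_REQ_RETRY, &req->flags);
        return;
    }

    /* real end of request: the only place the submit-time reference
     * is dropped
     */
    atomic_dec(&req->disk->nr_pending);
    /* ... complete the request to the upper layer here ... */
}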
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 617012bc107a..1440935414e6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1407,43 +1407,54 @@ static void raid10d(mddev_t *mddev)
1407 if (s > (PAGE_SIZE>>9)) 1407 if (s > (PAGE_SIZE>>9))
1408 s = PAGE_SIZE >> 9; 1408 s = PAGE_SIZE >> 9;
1409 1409
1410 rcu_read_lock();
1410 do { 1411 do {
1411 int d = r10_bio->devs[sl].devnum; 1412 int d = r10_bio->devs[sl].devnum;
1412 rdev = conf->mirrors[d].rdev; 1413 rdev = rcu_dereference(conf->mirrors[d].rdev);
1413 if (rdev && 1414 if (rdev &&
1414 test_bit(In_sync, &rdev->flags) && 1415 test_bit(In_sync, &rdev->flags)) {
1415 sync_page_io(rdev->bdev, 1416 atomic_inc(&rdev->nr_pending);
1416 r10_bio->devs[sl].addr + 1417 rcu_read_unlock();
1417 sect + rdev->data_offset, 1418 success = sync_page_io(rdev->bdev,
1418 s<<9, 1419 r10_bio->devs[sl].addr +
1419 conf->tmppage, READ)) 1420 sect + rdev->data_offset,
1420 success = 1; 1421 s<<9,
1421 else { 1422 conf->tmppage, READ);
1422 sl++; 1423 rdev_dec_pending(rdev, mddev);
1423 if (sl == conf->copies) 1424 rcu_read_lock();
1424 sl = 0; 1425 if (success)
1426 break;
1425 } 1427 }
1428 sl++;
1429 if (sl == conf->copies)
1430 sl = 0;
1426 } while (!success && sl != r10_bio->read_slot); 1431 } while (!success && sl != r10_bio->read_slot);
1432 rcu_read_unlock();
1427 1433
1428 if (success) { 1434 if (success) {
1429 int start = sl; 1435 int start = sl;
1430 /* write it back and re-read */ 1436 /* write it back and re-read */
1437 rcu_read_lock();
1431 while (sl != r10_bio->read_slot) { 1438 while (sl != r10_bio->read_slot) {
1432 int d; 1439 int d;
1433 if (sl==0) 1440 if (sl==0)
1434 sl = conf->copies; 1441 sl = conf->copies;
1435 sl--; 1442 sl--;
1436 d = r10_bio->devs[sl].devnum; 1443 d = r10_bio->devs[sl].devnum;
1437 rdev = conf->mirrors[d].rdev; 1444 rdev = rcu_dereference(conf->mirrors[d].rdev);
1438 atomic_add(s, &rdev->corrected_errors);
1439 if (rdev && 1445 if (rdev &&
1440 test_bit(In_sync, &rdev->flags)) { 1446 test_bit(In_sync, &rdev->flags)) {
1447 atomic_inc(&rdev->nr_pending);
1448 rcu_read_unlock();
1449 atomic_add(s, &rdev->corrected_errors);
1441 if (sync_page_io(rdev->bdev, 1450 if (sync_page_io(rdev->bdev,
1442 r10_bio->devs[sl].addr + 1451 r10_bio->devs[sl].addr +
1443 sect + rdev->data_offset, 1452 sect + rdev->data_offset,
1444 s<<9, conf->tmppage, WRITE) == 0) 1453 s<<9, conf->tmppage, WRITE) == 0)
1445 /* Well, this device is dead */ 1454 /* Well, this device is dead */
1446 md_error(mddev, rdev); 1455 md_error(mddev, rdev);
1456 rdev_dec_pending(rdev, mddev);
1457 rcu_read_lock();
1447 } 1458 }
1448 } 1459 }
1449 sl = start; 1460 sl = start;
@@ -1453,17 +1464,22 @@ static void raid10d(mddev_t *mddev)
1453 sl = conf->copies; 1464 sl = conf->copies;
1454 sl--; 1465 sl--;
1455 d = r10_bio->devs[sl].devnum; 1466 d = r10_bio->devs[sl].devnum;
1456 rdev = conf->mirrors[d].rdev; 1467 rdev = rcu_dereference(conf->mirrors[d].rdev);
1457 if (rdev && 1468 if (rdev &&
1458 test_bit(In_sync, &rdev->flags)) { 1469 test_bit(In_sync, &rdev->flags)) {
1470 atomic_inc(&rdev->nr_pending);
1471 rcu_read_unlock();
1459 if (sync_page_io(rdev->bdev, 1472 if (sync_page_io(rdev->bdev,
1460 r10_bio->devs[sl].addr + 1473 r10_bio->devs[sl].addr +
1461 sect + rdev->data_offset, 1474 sect + rdev->data_offset,
1462 s<<9, conf->tmppage, READ) == 0) 1475 s<<9, conf->tmppage, READ) == 0)
1463 /* Well, this device is dead */ 1476 /* Well, this device is dead */
1464 md_error(mddev, rdev); 1477 md_error(mddev, rdev);
1478 rdev_dec_pending(rdev, mddev);
1479 rcu_read_lock();
1465 } 1480 }
1466 } 1481 }
1482 rcu_read_unlock();
1467 } else { 1483 } else {
1468 /* Cannot read from anywhere -- bye bye array */ 1484 /* Cannot read from anywhere -- bye bye array */
1469 md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev); 1485 md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);
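The raid10d() fix-read-error loop now follows the usual recipe for using an RCU-protected rdev pointer from a context that sleeps: dereference under rcu_read_lock(), pin the device with its nr_pending count, drop the read lock around the blocking sync_page_io(), then unpin and re-enter the read section. A condensed sketch of that pattern with invented names (my_mirror, my_do_io); rcu_read_lock() and rcu_dereference() are the real primitives the patch uses:

#include <linux/rcupdate.h>
#include <asm/atomic.h>

struct my_rdev {
    atomic_t nr_pending;
    /* ... device state ... */
};

struct my_mirror {
    struct my_rdev *rdev;   /* published/cleared with rcu_assign_pointer() */
};

/* perform blocking I/O against mirror 'm' without holding the RCU
 * read lock across the sleep
 */
static int my_do_io(struct my_mirror *m)
{
    struct my_rdev *rdev;
    int ok = 0;

    rcu_read_lock();
    rdev = rcu_dereference(m->rdev);
    if (rdev) {
        /* pin the device so it survives once we drop RCU */
        atomic_inc(&rdev->nr_pending);
        rcu_read_unlock();

        /* ... blocking I/O (sync_page_io() in the driver) ... */
        ok = 1;

        /* drop the pin, hot-removal may now complete */
        atomic_dec(&rdev->nr_pending);
        rcu_read_lock();
    }
    rcu_read_unlock();
    return ok;
}

The important property is that the blocking call runs with only the nr_pending pin held, never inside the RCU read-side critical section.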
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index f9d87b86492c..320b3d9384ba 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -616,7 +616,7 @@ static struct snd_kcontrol_new snd_cx88_capture_volume = {
616 * Only boards with eeprom and byte 1 at eeprom=1 have it 616 * Only boards with eeprom and byte 1 at eeprom=1 have it
617 */ 617 */
618 618
619static struct pci_device_id cx88_audio_pci_tbl[] = { 619static struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
620 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, 620 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
621 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0}, 621 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
622 {0, } 622 {0, }
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 266414ca2814..9080853fe283 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1189,7 +1189,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1189 ioc->diagPending = 0; 1189 ioc->diagPending = 0;
1190 spin_lock_init(&ioc->diagLock); 1190 spin_lock_init(&ioc->diagLock);
1191 spin_lock_init(&ioc->fc_rescan_work_lock); 1191 spin_lock_init(&ioc->fc_rescan_work_lock);
1192 spin_lock_init(&ioc->fc_rport_lock);
1193 spin_lock_init(&ioc->initializing_hba_lock); 1192 spin_lock_init(&ioc->initializing_hba_lock);
1194 1193
1195 /* Initialize the event logging. 1194 /* Initialize the event logging.
@@ -5736,11 +5735,13 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
5736 return rc; 5735 return rc;
5737} 5736}
5738 5737
5738# define EVENT_DESCR_STR_SZ 100
5739
5739/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5740/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5740static void 5741static void
5741EventDescriptionStr(u8 event, u32 evData0, char *evStr) 5742EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5742{ 5743{
5743 char *ds; 5744 char *ds = NULL;
5744 5745
5745 switch(event) { 5746 switch(event) {
5746 case MPI_EVENT_NONE: 5747 case MPI_EVENT_NONE:
@@ -5777,9 +5778,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5777 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) 5778 if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
5778 ds = "Loop State(LIP) Change"; 5779 ds = "Loop State(LIP) Change";
5779 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) 5780 else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
5780 ds = "Loop State(LPE) Change"; /* ??? */ 5781 ds = "Loop State(LPE) Change"; /* ??? */
5781 else 5782 else
5782 ds = "Loop State(LPB) Change"; /* ??? */ 5783 ds = "Loop State(LPB) Change"; /* ??? */
5783 break; 5784 break;
5784 case MPI_EVENT_LOGOUT: 5785 case MPI_EVENT_LOGOUT:
5785 ds = "Logout"; 5786 ds = "Logout";
@@ -5841,27 +5842,32 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5841 break; 5842 break;
5842 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 5843 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
5843 { 5844 {
5844 char buf[50];
5845 u8 id = (u8)(evData0); 5845 u8 id = (u8)(evData0);
5846 u8 ReasonCode = (u8)(evData0 >> 16); 5846 u8 ReasonCode = (u8)(evData0 >> 16);
5847 switch (ReasonCode) { 5847 switch (ReasonCode) {
5848 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 5848 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
5849 sprintf(buf,"SAS Device Status Change: Added: id=%d", id); 5849 snprintf(evStr, EVENT_DESCR_STR_SZ,
5850 "SAS Device Status Change: Added: id=%d", id);
5850 break; 5851 break;
5851 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 5852 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
5852 sprintf(buf,"SAS Device Status Change: Deleted: id=%d", id); 5853 snprintf(evStr, EVENT_DESCR_STR_SZ,
5854 "SAS Device Status Change: Deleted: id=%d", id);
5853 break; 5855 break;
5854 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 5856 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
5855 sprintf(buf,"SAS Device Status Change: SMART Data: id=%d", id); 5857 snprintf(evStr, EVENT_DESCR_STR_SZ,
5858 "SAS Device Status Change: SMART Data: id=%d",
5859 id);
5856 break; 5860 break;
5857 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: 5861 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
5858 sprintf(buf,"SAS Device Status Change: No Persistancy Added: id=%d", id); 5862 snprintf(evStr, EVENT_DESCR_STR_SZ,
5863 "SAS Device Status Change: No Persistancy "
5864 "Added: id=%d", id);
5859 break; 5865 break;
5860 default: 5866 default:
5861 sprintf(buf,"SAS Device Status Change: Unknown: id=%d", id); 5867 snprintf(evStr, EVENT_DESCR_STR_SZ,
5862 break; 5868 "SAS Device Status Change: Unknown: id=%d", id);
5869 break;
5863 } 5870 }
5864 ds = buf;
5865 break; 5871 break;
5866 } 5872 }
5867 case MPI_EVENT_ON_BUS_TIMER_EXPIRED: 5873 case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
@@ -5878,41 +5884,46 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5878 break; 5884 break;
5879 case MPI_EVENT_SAS_PHY_LINK_STATUS: 5885 case MPI_EVENT_SAS_PHY_LINK_STATUS:
5880 { 5886 {
5881 char buf[50];
5882 u8 LinkRates = (u8)(evData0 >> 8); 5887 u8 LinkRates = (u8)(evData0 >> 8);
5883 u8 PhyNumber = (u8)(evData0); 5888 u8 PhyNumber = (u8)(evData0);
5884 LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >> 5889 LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >>
5885 MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT; 5890 MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT;
5886 switch (LinkRates) { 5891 switch (LinkRates) {
5887 case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN: 5892 case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN:
5888 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5893 snprintf(evStr, EVENT_DESCR_STR_SZ,
5894 "SAS PHY Link Status: Phy=%d:"
5889 " Rate Unknown",PhyNumber); 5895 " Rate Unknown",PhyNumber);
5890 break; 5896 break;
5891 case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED: 5897 case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED:
5892 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5898 snprintf(evStr, EVENT_DESCR_STR_SZ,
5899 "SAS PHY Link Status: Phy=%d:"
5893 " Phy Disabled",PhyNumber); 5900 " Phy Disabled",PhyNumber);
5894 break; 5901 break;
5895 case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION: 5902 case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION:
5896 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5903 snprintf(evStr, EVENT_DESCR_STR_SZ,
5904 "SAS PHY Link Status: Phy=%d:"
5897 " Failed Speed Nego",PhyNumber); 5905 " Failed Speed Nego",PhyNumber);
5898 break; 5906 break;
5899 case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE: 5907 case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE:
5900 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5908 snprintf(evStr, EVENT_DESCR_STR_SZ,
5909 "SAS PHY Link Status: Phy=%d:"
5901 " Sata OOB Completed",PhyNumber); 5910 " Sata OOB Completed",PhyNumber);
5902 break; 5911 break;
5903 case MPI_EVENT_SAS_PLS_LR_RATE_1_5: 5912 case MPI_EVENT_SAS_PLS_LR_RATE_1_5:
5904 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5913 snprintf(evStr, EVENT_DESCR_STR_SZ,
5914 "SAS PHY Link Status: Phy=%d:"
5905 " Rate 1.5 Gbps",PhyNumber); 5915 " Rate 1.5 Gbps",PhyNumber);
5906 break; 5916 break;
5907 case MPI_EVENT_SAS_PLS_LR_RATE_3_0: 5917 case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
5908 sprintf(buf,"SAS PHY Link Status: Phy=%d:" 5918 snprintf(evStr, EVENT_DESCR_STR_SZ,
5919 "SAS PHY Link Status: Phy=%d:"
5909 " Rate 3.0 Gpbs",PhyNumber); 5920 " Rate 3.0 Gpbs",PhyNumber);
5910 break; 5921 break;
5911 default: 5922 default:
5912 sprintf(buf,"SAS PHY Link Status: Phy=%d", PhyNumber); 5923 snprintf(evStr, EVENT_DESCR_STR_SZ,
5924 "SAS PHY Link Status: Phy=%d", PhyNumber);
5913 break; 5925 break;
5914 } 5926 }
5915 ds = buf;
5916 break; 5927 break;
5917 } 5928 }
5918 case MPI_EVENT_SAS_DISCOVERY_ERROR: 5929 case MPI_EVENT_SAS_DISCOVERY_ERROR:
@@ -5921,9 +5932,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5921 case MPI_EVENT_IR_RESYNC_UPDATE: 5932 case MPI_EVENT_IR_RESYNC_UPDATE:
5922 { 5933 {
5923 u8 resync_complete = (u8)(evData0 >> 16); 5934 u8 resync_complete = (u8)(evData0 >> 16);
5924 char buf[40]; 5935 snprintf(evStr, EVENT_DESCR_STR_SZ,
5925 sprintf(buf,"IR Resync Update: Complete = %d:",resync_complete); 5936 "IR Resync Update: Complete = %d:",resync_complete);
5926 ds = buf;
5927 break; 5937 break;
5928 } 5938 }
5929 case MPI_EVENT_IR2: 5939 case MPI_EVENT_IR2:
@@ -5976,7 +5986,8 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5976 ds = "Unknown"; 5986 ds = "Unknown";
5977 break; 5987 break;
5978 } 5988 }
5979 strcpy(evStr,ds); 5989 if (ds)
5990 strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
5980} 5991}
5981 5992
5982/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5993/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5998,7 +6009,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5998 int ii; 6009 int ii;
5999 int r = 0; 6010 int r = 0;
6000 int handlers = 0; 6011 int handlers = 0;
6001 char evStr[100]; 6012 char evStr[EVENT_DESCR_STR_SZ];
6002 u8 event; 6013 u8 event;
6003 6014
6004 /* 6015 /*
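The EventDescriptionStr() rework replaces sprintf() into case-local stack buffers (with ds left pointing at storage whose scope has already ended by the time the final copy runs) with snprintf() straight into the caller-supplied buffer, bounded by the new EVENT_DESCR_STR_SZ. A minimal sketch of the same bounded-formatting idea; the event codes and EV_DESCR_SZ are made up, only the snprintf/strncpy shape follows the patch:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

#define EV_DESCR_SZ 100

enum { EV_ADDED = 1, EV_REMOVED = 2 };

static void ev_describe(u8 event, u32 data, char *str)
{
    const char *ds = NULL;

    switch (event) {
    case EV_ADDED:
        /* format directly into the destination, never into a stack
         * buffer that goes out of scope before it is copied
         */
        snprintf(str, EV_DESCR_SZ, "Device added: id=%d", (u8)data);
        break;
    case EV_REMOVED:
        snprintf(str, EV_DESCR_SZ, "Device removed: id=%d", (u8)data);
        break;
    default:
        ds = "Unknown";
        break;
    }

    /* fixed strings still go through a length-bounded copy */
    if (ds)
        strncpy(str, ds, EV_DESCR_SZ);
}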
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index be7e8501b53c..f673cca507e1 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
77#endif 77#endif
78 78
79#define MPT_LINUX_VERSION_COMMON "3.03.08" 79#define MPT_LINUX_VERSION_COMMON "3.03.09"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.08" 80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.09"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
@@ -489,7 +489,6 @@ typedef struct _RaidCfgData {
489 489
490#define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */ 490#define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */
491#define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */ 491#define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */
492#define MPT_RPORT_INFO_FLAGS_MAPPED_VDEV 0x04 /* target mapped in vdev */
493 492
494/* 493/*
495 * data allocated for each fc rport device 494 * data allocated for each fc rport device
@@ -501,7 +500,6 @@ struct mptfc_rport_info
501 struct scsi_target *starget; 500 struct scsi_target *starget;
502 FCDevicePage0_t pg0; 501 FCDevicePage0_t pg0;
503 u8 flags; 502 u8 flags;
504 u8 remap_needed;
505}; 503};
506 504
507/* 505/*
@@ -628,11 +626,11 @@ typedef struct _MPT_ADAPTER
628 struct work_struct mptscsih_persistTask; 626 struct work_struct mptscsih_persistTask;
629 627
630 struct list_head fc_rports; 628 struct list_head fc_rports;
631 spinlock_t fc_rport_lock; /* list and ri flags */
632 spinlock_t fc_rescan_work_lock; 629 spinlock_t fc_rescan_work_lock;
633 int fc_rescan_work_count; 630 int fc_rescan_work_count;
634 struct work_struct fc_rescan_work; 631 struct work_struct fc_rescan_work;
635 632 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
633 struct workqueue_struct *fc_rescan_work_q;
636} MPT_ADAPTER; 634} MPT_ADAPTER;
637 635
638/* 636/*
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b343f2a68b1c..856487741ef4 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -341,9 +341,6 @@ mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
341 rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low; 341 rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low;
342 rid->port_id = pg0->PortIdentifier; 342 rid->port_id = pg0->PortIdentifier;
343 rid->roles = FC_RPORT_ROLE_UNKNOWN; 343 rid->roles = FC_RPORT_ROLE_UNKNOWN;
344 rid->roles |= FC_RPORT_ROLE_FCP_TARGET;
345 if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
346 rid->roles |= FC_RPORT_ROLE_FCP_INITIATOR;
347 344
348 return 0; 345 return 0;
349} 346}
@@ -355,15 +352,18 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
355 struct fc_rport *rport; 352 struct fc_rport *rport;
356 struct mptfc_rport_info *ri; 353 struct mptfc_rport_info *ri;
357 int new_ri = 1; 354 int new_ri = 1;
358 u64 pn; 355 u64 pn, nn;
359 unsigned long flags;
360 VirtTarget *vtarget; 356 VirtTarget *vtarget;
357 u32 roles = FC_RPORT_ROLE_UNKNOWN;
361 358
362 if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0) 359 if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
363 return; 360 return;
364 361
362 roles |= FC_RPORT_ROLE_FCP_TARGET;
363 if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
364 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
365
365 /* scan list looking for a match */ 366 /* scan list looking for a match */
366 spin_lock_irqsave(&ioc->fc_rport_lock, flags);
367 list_for_each_entry(ri, &ioc->fc_rports, list) { 367 list_for_each_entry(ri, &ioc->fc_rports, list) {
368 pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; 368 pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
369 if (pn == rport_ids.port_name) { /* match */ 369 if (pn == rport_ids.port_name) { /* match */
@@ -373,11 +373,9 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
373 } 373 }
374 } 374 }
375 if (new_ri) { /* allocate one */ 375 if (new_ri) { /* allocate one */
376 spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
377 ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL); 376 ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
378 if (!ri) 377 if (!ri)
379 return; 378 return;
380 spin_lock_irqsave(&ioc->fc_rport_lock, flags);
381 list_add_tail(&ri->list, &ioc->fc_rports); 379 list_add_tail(&ri->list, &ioc->fc_rports);
382 } 380 }
383 381
@@ -387,14 +385,11 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
387 /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */ 385 /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */
388 if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) { 386 if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
389 ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED; 387 ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
390 spin_unlock_irqrestore(&ioc->fc_rport_lock, flags);
391 rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); 388 rport = fc_remote_port_add(ioc->sh, channel, &rport_ids);
392 spin_lock_irqsave(&ioc->fc_rport_lock, flags);
393 if (rport) { 389 if (rport) {
394 ri->rport = rport; 390 ri->rport = rport;
395 if (new_ri) /* may have been reset by user */ 391 if (new_ri) /* may have been reset by user */
396 rport->dev_loss_tmo = mptfc_dev_loss_tmo; 392 rport->dev_loss_tmo = mptfc_dev_loss_tmo;
397 *((struct mptfc_rport_info **)rport->dd_data) = ri;
398 /* 393 /*
399 * if already mapped, remap here. If not mapped, 394 * if already mapped, remap here. If not mapped,
400 * target_alloc will allocate vtarget and map, 395 * target_alloc will allocate vtarget and map,
@@ -406,16 +401,21 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
406 vtarget->target_id = pg0->CurrentTargetID; 401 vtarget->target_id = pg0->CurrentTargetID;
407 vtarget->bus_id = pg0->CurrentBus; 402 vtarget->bus_id = pg0->CurrentBus;
408 } 403 }
409 ri->remap_needed = 0;
410 } 404 }
405 *((struct mptfc_rport_info **)rport->dd_data) = ri;
406 /* scan will be scheduled once rport becomes a target */
407 fc_remote_port_rolechg(rport,roles);
408
409 pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
410 nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
411 dfcprintk ((MYIOC_s_INFO_FMT 411 dfcprintk ((MYIOC_s_INFO_FMT
412 "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, " 412 "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
413 "rport tid %d, tmo %d\n", 413 "rport tid %d, tmo %d\n",
414 ioc->name, 414 ioc->name,
415 ioc->sh->host_no, 415 ioc->sh->host_no,
416 pg0->PortIdentifier, 416 pg0->PortIdentifier,
417 pg0->WWNN, 417 (unsigned long long)nn,
418 pg0->WWPN, 418 (unsigned long long)pn,
419 pg0->CurrentTargetID, 419 pg0->CurrentTargetID,
420 ri->rport->scsi_target_id, 420 ri->rport->scsi_target_id,
421 ri->rport->dev_loss_tmo)); 421 ri->rport->dev_loss_tmo));
@@ -425,8 +425,6 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
425 ri = NULL; 425 ri = NULL;
426 } 426 }
427 } 427 }
428 spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
429
430} 428}
431 429
432/* 430/*
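mptfc_register_dev() above (and the later slave_alloc and rescan hunks) now assemble the 64-bit WWPN/WWNN from the firmware's High/Low halves before printing, and cast to unsigned long long for %llx so the format is portable across 32- and 64-bit builds. A tiny sketch of that idiom; struct my_wwn mirrors the High/Low layout but is not the driver's type:

#include <linux/kernel.h>
#include <linux/types.h>

struct my_wwn { u32 High; u32 Low; };  /* split halves as the firmware reports them */

static void my_print_wwn(const char *tag, const struct my_wwn *w)
{
    u64 wwn = (u64)w->High << 32 | (u64)w->Low;

    /* cast for %llx so the format string is correct regardless of
     * whether u64 is long or long long on this architecture
     */
    printk(KERN_INFO "%s: %llx\n", tag, (unsigned long long)wwn);
}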
@@ -476,7 +474,6 @@ mptfc_target_alloc(struct scsi_target *starget)
476 vtarget->target_id = ri->pg0.CurrentTargetID; 474 vtarget->target_id = ri->pg0.CurrentTargetID;
477 vtarget->bus_id = ri->pg0.CurrentBus; 475 vtarget->bus_id = ri->pg0.CurrentBus;
478 ri->starget = starget; 476 ri->starget = starget;
479 ri->remap_needed = 0;
480 rc = 0; 477 rc = 0;
481 } 478 }
482 } 479 }
@@ -502,10 +499,10 @@ mptfc_slave_alloc(struct scsi_device *sdev)
502 VirtDevice *vdev; 499 VirtDevice *vdev;
503 struct scsi_target *starget; 500 struct scsi_target *starget;
504 struct fc_rport *rport; 501 struct fc_rport *rport;
505 unsigned long flags;
506 502
507 503
508 rport = starget_to_rport(scsi_target(sdev)); 504 starget = scsi_target(sdev);
505 rport = starget_to_rport(starget);
509 506
510 if (!rport || fc_remote_port_chkready(rport)) 507 if (!rport || fc_remote_port_chkready(rport))
511 return -ENXIO; 508 return -ENXIO;
@@ -519,10 +516,8 @@ mptfc_slave_alloc(struct scsi_device *sdev)
519 return -ENOMEM; 516 return -ENOMEM;
520 } 517 }
521 518
522 spin_lock_irqsave(&hd->ioc->fc_rport_lock,flags);
523 519
524 sdev->hostdata = vdev; 520 sdev->hostdata = vdev;
525 starget = scsi_target(sdev);
526 vtarget = starget->hostdata; 521 vtarget = starget->hostdata;
527 522
528 if (vtarget->num_luns == 0) { 523 if (vtarget->num_luns == 0) {
@@ -535,14 +530,16 @@ mptfc_slave_alloc(struct scsi_device *sdev)
535 vdev->vtarget = vtarget; 530 vdev->vtarget = vtarget;
536 vdev->lun = sdev->lun; 531 vdev->lun = sdev->lun;
537 532
538 spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags);
539
540 vtarget->num_luns++; 533 vtarget->num_luns++;
541 534
535
542#ifdef DMPT_DEBUG_FC 536#ifdef DMPT_DEBUG_FC
543 { 537 {
538 u64 nn, pn;
544 struct mptfc_rport_info *ri; 539 struct mptfc_rport_info *ri;
545 ri = *((struct mptfc_rport_info **)rport->dd_data); 540 ri = *((struct mptfc_rport_info **)rport->dd_data);
541 pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
542 nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
546 dfcprintk ((MYIOC_s_INFO_FMT 543 dfcprintk ((MYIOC_s_INFO_FMT
547 "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, " 544 "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
548 "CurrentTargetID %d, %x %llx %llx\n", 545 "CurrentTargetID %d, %x %llx %llx\n",
@@ -550,7 +547,9 @@ mptfc_slave_alloc(struct scsi_device *sdev)
550 sdev->host->host_no, 547 sdev->host->host_no,
551 vtarget->num_luns, 548 vtarget->num_luns,
552 sdev->id, ri->pg0.CurrentTargetID, 549 sdev->id, ri->pg0.CurrentTargetID,
553 ri->pg0.PortIdentifier, ri->pg0.WWPN, ri->pg0.WWNN)); 550 ri->pg0.PortIdentifier,
551 (unsigned long long)pn,
552 (unsigned long long)nn));
554 } 553 }
555#endif 554#endif
556 555
@@ -570,11 +569,31 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
570 done(SCpnt); 569 done(SCpnt);
571 return 0; 570 return 0;
572 } 571 }
572
573 /* dd_data is null until finished adding target */
573 ri = *((struct mptfc_rport_info **)rport->dd_data); 574 ri = *((struct mptfc_rport_info **)rport->dd_data);
574 if (unlikely(ri->remap_needed)) 575 if (unlikely(!ri)) {
575 return SCSI_MLQUEUE_HOST_BUSY; 576 dfcprintk ((MYIOC_s_INFO_FMT
577 "mptfc_qcmd.%d: %d:%d, dd_data is null.\n",
578 ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name,
579 ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no,
580 SCpnt->device->id,SCpnt->device->lun));
581 SCpnt->result = DID_IMM_RETRY << 16;
582 done(SCpnt);
583 return 0;
584 }
576 585
577 return mptscsih_qcmd(SCpnt,done); 586 err = mptscsih_qcmd(SCpnt,done);
587#ifdef DMPT_DEBUG_FC
588 if (unlikely(err)) {
589 dfcprintk ((MYIOC_s_INFO_FMT
590 "mptfc_qcmd.%d: %d:%d, mptscsih_qcmd returns non-zero.\n",
591 ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name,
592 ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no,
593 SCpnt->device->id,SCpnt->device->lun));
594 }
595#endif
596 return err;
578} 597}
579 598
580static void 599static void
@@ -615,18 +634,17 @@ mptfc_rescan_devices(void *arg)
615 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 634 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
616 int ii; 635 int ii;
617 int work_to_do; 636 int work_to_do;
637 u64 pn;
618 unsigned long flags; 638 unsigned long flags;
619 struct mptfc_rport_info *ri; 639 struct mptfc_rport_info *ri;
620 640
621 do { 641 do {
622 /* start by tagging all ports as missing */ 642 /* start by tagging all ports as missing */
623 spin_lock_irqsave(&ioc->fc_rport_lock,flags);
624 list_for_each_entry(ri, &ioc->fc_rports, list) { 643 list_for_each_entry(ri, &ioc->fc_rports, list) {
625 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { 644 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
626 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; 645 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
627 } 646 }
628 } 647 }
629 spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
630 648
631 /* 649 /*
632 * now rescan devices known to adapter, 650 * now rescan devices known to adapter,
@@ -639,33 +657,24 @@ mptfc_rescan_devices(void *arg)
639 } 657 }
640 658
641 /* delete devices still missing */ 659 /* delete devices still missing */
642 spin_lock_irqsave(&ioc->fc_rport_lock, flags);
643 list_for_each_entry(ri, &ioc->fc_rports, list) { 660 list_for_each_entry(ri, &ioc->fc_rports, list) {
644 /* if newly missing, delete it */ 661 /* if newly missing, delete it */
645 if ((ri->flags & (MPT_RPORT_INFO_FLAGS_REGISTERED | 662 if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
646 MPT_RPORT_INFO_FLAGS_MISSING))
647 == (MPT_RPORT_INFO_FLAGS_REGISTERED |
648 MPT_RPORT_INFO_FLAGS_MISSING)) {
649 663
650 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| 664 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
651 MPT_RPORT_INFO_FLAGS_MISSING); 665 MPT_RPORT_INFO_FLAGS_MISSING);
652 ri->remap_needed = 1; 666 fc_remote_port_delete(ri->rport); /* won't sleep */
653 fc_remote_port_delete(ri->rport);
654 /*
655 * remote port not really deleted 'cause
656 * binding is by WWPN and driver only
657 * registers FCP_TARGETs but cannot trust
658 * data structures.
659 */
660 ri->rport = NULL; 667 ri->rport = NULL;
668
669 pn = (u64)ri->pg0.WWPN.High << 32 |
670 (u64)ri->pg0.WWPN.Low;
661 dfcprintk ((MYIOC_s_INFO_FMT 671 dfcprintk ((MYIOC_s_INFO_FMT
662 "mptfc_rescan.%d: %llx deleted\n", 672 "mptfc_rescan.%d: %llx deleted\n",
663 ioc->name, 673 ioc->name,
664 ioc->sh->host_no, 674 ioc->sh->host_no,
665 ri->pg0.WWPN)); 675 (unsigned long long)pn));
666 } 676 }
667 } 677 }
668 spin_unlock_irqrestore(&ioc->fc_rport_lock,flags);
669 678
670 /* 679 /*
671 * allow multiple passes as target state 680 * allow multiple passes as target state
@@ -870,10 +879,23 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
870 goto out_mptfc_probe; 879 goto out_mptfc_probe;
871 } 880 }
872 881
873 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { 882 /* initialize workqueue */
874 mptfc_init_host_attr(ioc,ii); 883
875 mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); 884 snprintf(ioc->fc_rescan_work_q_name, KOBJ_NAME_LEN, "mptfc_wq_%d",
876 } 885 sh->host_no);
886 ioc->fc_rescan_work_q =
887 create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
888 if (!ioc->fc_rescan_work_q)
889 goto out_mptfc_probe;
890
891 /*
892 * scan for rports -
893 * by doing it via the workqueue, some locking is eliminated
894 */
895
896 ioc->fc_rescan_work_count = 1;
897 queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
898 flush_workqueue(ioc->fc_rescan_work_q);
877 899
878 return 0; 900 return 0;
879 901
@@ -949,8 +971,18 @@ mptfc_init(void)
949static void __devexit 971static void __devexit
950mptfc_remove(struct pci_dev *pdev) 972mptfc_remove(struct pci_dev *pdev)
951{ 973{
952 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 974 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
953 struct mptfc_rport_info *p, *n; 975 struct mptfc_rport_info *p, *n;
976 struct workqueue_struct *work_q;
977 unsigned long flags;
978
979 /* destroy workqueue */
980 if ((work_q=ioc->fc_rescan_work_q)) {
981 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
982 ioc->fc_rescan_work_q = NULL;
983 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
984 destroy_workqueue(work_q);
985 }
954 986
955 fc_remove_host(ioc->sh); 987 fc_remove_host(ioc->sh);
956 988
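Taken together, the mptbase.h and mptfc.c hunks trade the old fc_rport_lock for serialization on a per-adapter single-threaded workqueue: probe creates and names the queue, the initial scan is queued and flushed through it, and remove NULLs the pointer under fc_rescan_work_lock before destroying the queue so late events cannot queue work onto freed memory. A sketch of that lifecycle with an invented struct my_adapter; create_singlethread_workqueue(), queue_work(), flush_workqueue() and destroy_workqueue() are the real APIs, and INIT_WORK() is shown in the three-argument form used throughout this patch:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* cut-down adapter structure; only the rescan plumbing is shown */
struct my_adapter {
    spinlock_t              rescan_lock;
    int                     rescan_count;
    struct work_struct      rescan_work;
    struct workqueue_struct *rescan_q;
    char                    rescan_q_name[20];
};

static void my_rescan(void *arg)
{
    /* struct my_adapter *a = arg; walk ports, add/delete rports ...
     * everything here runs single-threaded, so no extra locking
     */
}

static int my_probe(struct my_adapter *a, int host_no)
{
    spin_lock_init(&a->rescan_lock);
    INIT_WORK(&a->rescan_work, my_rescan, a);   /* old 3-arg form */

    snprintf(a->rescan_q_name, sizeof(a->rescan_q_name),
             "my_wq_%d", host_no);
    a->rescan_q = create_singlethread_workqueue(a->rescan_q_name);
    if (!a->rescan_q)
        return -ENOMEM;

    /* initial scan, pushed through the queue so it is serialized
     * with every later rescan
     */
    a->rescan_count = 1;
    queue_work(a->rescan_q, &a->rescan_work);
    flush_workqueue(a->rescan_q);
    return 0;
}

static void my_remove(struct my_adapter *a)
{
    struct workqueue_struct *q;
    unsigned long flags;

    /* clear the pointer under the lock so event handlers stop
     * queueing, then tear the queue down outside the lock
     */
    if ((q = a->rescan_q)) {
        spin_lock_irqsave(&a->rescan_lock, flags);
        a->rescan_q = NULL;
        spin_unlock_irqrestore(&a->rescan_lock, flags);
        destroy_workqueue(q);
    }
}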
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index e9716b10acea..af6ec553ff7c 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -91,6 +91,7 @@ enum mptsas_hotplug_action {
91 MPTSAS_DEL_DEVICE, 91 MPTSAS_DEL_DEVICE,
92 MPTSAS_ADD_RAID, 92 MPTSAS_ADD_RAID,
93 MPTSAS_DEL_RAID, 93 MPTSAS_DEL_RAID,
94 MPTSAS_IGNORE_EVENT,
94}; 95};
95 96
96struct mptsas_hotplug_event { 97struct mptsas_hotplug_event {
@@ -298,6 +299,26 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
298 return rc; 299 return rc;
299} 300}
300 301
302/*
303 * Returns true if there is a scsi end device
304 */
305static inline int
306mptsas_is_end_device(struct mptsas_devinfo * attached)
307{
308 if ((attached->handle) &&
309 (attached->device_info &
310 MPI_SAS_DEVICE_INFO_END_DEVICE) &&
311 ((attached->device_info &
312 MPI_SAS_DEVICE_INFO_SSP_TARGET) |
313 (attached->device_info &
314 MPI_SAS_DEVICE_INFO_STP_TARGET) |
315 (attached->device_info &
316 MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
317 return 1;
318 else
319 return 0;
320}
321
301static int 322static int
302mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, 323mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
303 u32 form, u32 form_specific) 324 u32 form, u32 form_specific)
@@ -872,7 +893,11 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
872 SasDevicePage0_t *buffer; 893 SasDevicePage0_t *buffer;
873 dma_addr_t dma_handle; 894 dma_addr_t dma_handle;
874 __le64 sas_address; 895 __le64 sas_address;
875 int error; 896 int error=0;
897
898 if (ioc->sas_discovery_runtime &&
899 mptsas_is_end_device(device_info))
900 goto out;
876 901
877 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; 902 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
878 hdr.ExtPageLength = 0; 903 hdr.ExtPageLength = 0;
@@ -1009,7 +1034,11 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1009 CONFIGPARMS cfg; 1034 CONFIGPARMS cfg;
1010 SasExpanderPage1_t *buffer; 1035 SasExpanderPage1_t *buffer;
1011 dma_addr_t dma_handle; 1036 dma_addr_t dma_handle;
1012 int error; 1037 int error=0;
1038
1039 if (ioc->sas_discovery_runtime &&
1040 mptsas_is_end_device(&phy_info->attached))
1041 goto out;
1013 1042
1014 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; 1043 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
1015 hdr.ExtPageLength = 0; 1044 hdr.ExtPageLength = 0;
@@ -1068,26 +1097,6 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
1068 return error; 1097 return error;
1069} 1098}
1070 1099
1071/*
1072 * Returns true if there is a scsi end device
1073 */
1074static inline int
1075mptsas_is_end_device(struct mptsas_devinfo * attached)
1076{
1077 if ((attached->handle) &&
1078 (attached->device_info &
1079 MPI_SAS_DEVICE_INFO_END_DEVICE) &&
1080 ((attached->device_info &
1081 MPI_SAS_DEVICE_INFO_SSP_TARGET) |
1082 (attached->device_info &
1083 MPI_SAS_DEVICE_INFO_STP_TARGET) |
1084 (attached->device_info &
1085 MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
1086 return 1;
1087 else
1088 return 0;
1089}
1090
1091static void 1100static void
1092mptsas_parse_device_info(struct sas_identify *identify, 1101mptsas_parse_device_info(struct sas_identify *identify,
1093 struct mptsas_devinfo *device_info) 1102 struct mptsas_devinfo *device_info)
@@ -1737,6 +1746,9 @@ mptsas_hotplug_work(void *arg)
1737 break; 1746 break;
1738 case MPTSAS_ADD_DEVICE: 1747 case MPTSAS_ADD_DEVICE:
1739 1748
1749 if (ev->phys_disk_num_valid)
1750 mpt_findImVolumes(ioc);
1751
1740 /* 1752 /*
1741 * Refresh sas device pg0 data 1753 * Refresh sas device pg0 data
1742 */ 1754 */
@@ -1868,6 +1880,9 @@ mptsas_hotplug_work(void *arg)
1868 scsi_device_put(sdev); 1880 scsi_device_put(sdev);
1869 mpt_findImVolumes(ioc); 1881 mpt_findImVolumes(ioc);
1870 break; 1882 break;
1883 case MPTSAS_IGNORE_EVENT:
1884 default:
1885 break;
1871 } 1886 }
1872 1887
1873 kfree(ev); 1888 kfree(ev);
@@ -1940,7 +1955,8 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1940 EVENT_DATA_RAID *raid_event_data) 1955 EVENT_DATA_RAID *raid_event_data)
1941{ 1956{
1942 struct mptsas_hotplug_event *ev; 1957 struct mptsas_hotplug_event *ev;
1943 RAID_VOL0_STATUS * volumeStatus; 1958 int status = le32_to_cpu(raid_event_data->SettingsStatus);
1959 int state = (status >> 8) & 0xff;
1944 1960
1945 if (ioc->bus_type != SAS) 1961 if (ioc->bus_type != SAS)
1946 return; 1962 return;
@@ -1955,6 +1971,7 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1955 INIT_WORK(&ev->work, mptsas_hotplug_work, ev); 1971 INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
1956 ev->ioc = ioc; 1972 ev->ioc = ioc;
1957 ev->id = raid_event_data->VolumeID; 1973 ev->id = raid_event_data->VolumeID;
1974 ev->event_type = MPTSAS_IGNORE_EVENT;
1958 1975
1959 switch (raid_event_data->ReasonCode) { 1976 switch (raid_event_data->ReasonCode) {
1960 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: 1977 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
@@ -1966,6 +1983,25 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1966 ev->phys_disk_num = raid_event_data->PhysDiskNum; 1983 ev->phys_disk_num = raid_event_data->PhysDiskNum;
1967 ev->event_type = MPTSAS_DEL_DEVICE; 1984 ev->event_type = MPTSAS_DEL_DEVICE;
1968 break; 1985 break;
1986 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
1987 switch (state) {
1988 case MPI_PD_STATE_ONLINE:
1989 ioc->raid_data.isRaid = 1;
1990 ev->phys_disk_num_valid = 1;
1991 ev->phys_disk_num = raid_event_data->PhysDiskNum;
1992 ev->event_type = MPTSAS_ADD_DEVICE;
1993 break;
1994 case MPI_PD_STATE_MISSING:
1995 case MPI_PD_STATE_NOT_COMPATIBLE:
1996 case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
1997 case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
1998 case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
1999 ev->event_type = MPTSAS_DEL_DEVICE;
2000 break;
2001 default:
2002 break;
2003 }
2004 break;
1969 case MPI_EVENT_RAID_RC_VOLUME_DELETED: 2005 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
1970 ev->event_type = MPTSAS_DEL_RAID; 2006 ev->event_type = MPTSAS_DEL_RAID;
1971 break; 2007 break;
@@ -1973,11 +2009,18 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc,
1973 ev->event_type = MPTSAS_ADD_RAID; 2009 ev->event_type = MPTSAS_ADD_RAID;
1974 break; 2010 break;
1975 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: 2011 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
1976 volumeStatus = (RAID_VOL0_STATUS *) & 2012 switch (state) {
1977 raid_event_data->SettingsStatus; 2013 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
1978 ev->event_type = (volumeStatus->State == 2014 case MPI_RAIDVOL0_STATUS_STATE_MISSING:
1979 MPI_RAIDVOL0_STATUS_STATE_FAILED) ? 2015 ev->event_type = MPTSAS_DEL_RAID;
1980 MPTSAS_DEL_RAID : MPTSAS_ADD_RAID; 2016 break;
2017 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
2018 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
2019 ev->event_type = MPTSAS_ADD_RAID;
2020 break;
2021 default:
2022 break;
2023 }
1981 break; 2024 break;
1982 default: 2025 default:
1983 break; 2026 break;
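mptscsih_send_raid_event() now decodes SettingsStatus arithmetically, pulling the state byte out of bits 8-15 after le32_to_cpu(), and defaults every event to the new MPTSAS_IGNORE_EVENT so unhandled reason codes fall through to a no-op. A small sketch of that decode step; the enum names and state values are invented, only the (status >> 8) & 0xff extraction and the ignore-by-default policy follow the patch:

#include <linux/types.h>
#include <asm/byteorder.h>

enum my_hotplug_action {
    MY_IGNORE_EVENT,    /* default: nothing to do */
    MY_ADD_DEVICE,
    MY_DEL_DEVICE,
};

/* hypothetical physical-disk states carried in bits 8..15 */
enum { MY_PD_ONLINE = 0, MY_PD_MISSING = 1, MY_PD_FAILED = 2 };

static enum my_hotplug_action my_decode_pd_event(__le32 settings_status)
{
    u32 status = le32_to_cpu(settings_status);
    u8 state = (status >> 8) & 0xff;

    switch (state) {
    case MY_PD_ONLINE:
        return MY_ADD_DEVICE;
    case MY_PD_MISSING:
    case MY_PD_FAILED:
        return MY_DEL_DEVICE;
    default:
        /* unknown states are ignored rather than guessed at */
        return MY_IGNORE_EVENT;
    }
}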
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 3729062db317..84fa271eb8f4 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -632,7 +632,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
632 632
633 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ 633 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
634 /* Spoof to SCSI Selection Timeout! */ 634 /* Spoof to SCSI Selection Timeout! */
635 sc->result = DID_NO_CONNECT << 16; 635 if (ioc->bus_type != FC)
636 sc->result = DID_NO_CONNECT << 16;
637 /* else fibre, just stall until rescan event */
638 else
639 sc->result = DID_REQUEUE << 16;
636 640
637 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) 641 if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
638 hd->sel_timeout[pScsiReq->TargetID]++; 642 hd->sel_timeout[pScsiReq->TargetID]++;
@@ -877,7 +881,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
877 struct scsi_cmnd *sc; 881 struct scsi_cmnd *sc;
878 882
879 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n", 883 dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
880 vdevice->target_id, vdevice->lun, max)); 884 vdevice->vtarget->target_id, vdevice->lun, max));
881 885
882 for (ii=0; ii < max; ii++) { 886 for (ii=0; ii < max; ii++) {
883 if ((sc = hd->ScsiLookup[ii]) != NULL) { 887 if ((sc = hd->ScsiLookup[ii]) != NULL) {
@@ -1645,7 +1649,6 @@ int
1645mptscsih_abort(struct scsi_cmnd * SCpnt) 1649mptscsih_abort(struct scsi_cmnd * SCpnt)
1646{ 1650{
1647 MPT_SCSI_HOST *hd; 1651 MPT_SCSI_HOST *hd;
1648 MPT_ADAPTER *ioc;
1649 MPT_FRAME_HDR *mf; 1652 MPT_FRAME_HDR *mf;
1650 u32 ctx2abort; 1653 u32 ctx2abort;
1651 int scpnt_idx; 1654 int scpnt_idx;
@@ -1663,14 +1666,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1663 return FAILED; 1666 return FAILED;
1664 } 1667 }
1665 1668
1666 ioc = hd->ioc;
1667 if (hd->resetPending) {
1668 return FAILED;
1669 }
1670
1671 if (hd->timeouts < -1)
1672 hd->timeouts++;
1673
1674 /* Find this command 1669 /* Find this command
1675 */ 1670 */
1676 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { 1671 if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
@@ -1684,6 +1679,13 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1684 return SUCCESS; 1679 return SUCCESS;
1685 } 1680 }
1686 1681
1682 if (hd->resetPending) {
1683 return FAILED;
1684 }
1685
1686 if (hd->timeouts < -1)
1687 hd->timeouts++;
1688
1687 printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n", 1689 printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
1688 hd->ioc->name, SCpnt); 1690 hd->ioc->name, SCpnt);
1689 scsi_print_command(SCpnt); 1691 scsi_print_command(SCpnt);
@@ -1703,7 +1705,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1703 vdev = SCpnt->device->hostdata; 1705 vdev = SCpnt->device->hostdata;
1704 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 1706 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
1705 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, 1707 vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun,
1706 ctx2abort, mptscsih_get_tm_timeout(ioc)); 1708 ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
1707 1709
1708 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", 1710 printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
1709 hd->ioc->name, 1711 hd->ioc->name,
@@ -2521,15 +2523,15 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2521 2523
2522 /* 7. FC: Rescan for blocked rports which might have returned. 2524 /* 7. FC: Rescan for blocked rports which might have returned.
2523 */ 2525 */
2524 else if (ioc->bus_type == FC) { 2526 if (ioc->bus_type == FC) {
2525 int work_count;
2526 unsigned long flags;
2527
2528 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 2527 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
2529 work_count = ++ioc->fc_rescan_work_count; 2528 if (ioc->fc_rescan_work_q) {
2529 if (ioc->fc_rescan_work_count++ == 0) {
2530 queue_work(ioc->fc_rescan_work_q,
2531 &ioc->fc_rescan_work);
2532 }
2533 }
2530 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 2534 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
2531 if (work_count == 1)
2532 schedule_work(&ioc->fc_rescan_work);
2533 } 2535 }
2534 dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name)); 2536 dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name));
2535 2537
@@ -2544,7 +2546,6 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2544{ 2546{
2545 MPT_SCSI_HOST *hd; 2547 MPT_SCSI_HOST *hd;
2546 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; 2548 u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
2547 int work_count;
2548 unsigned long flags; 2549 unsigned long flags;
2549 2550
2550 devtverboseprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", 2551 devtverboseprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
@@ -2569,10 +2570,13 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2569 2570
2570 case MPI_EVENT_RESCAN: /* 06 */ 2571 case MPI_EVENT_RESCAN: /* 06 */
2571 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 2572 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
2572 work_count = ++ioc->fc_rescan_work_count; 2573 if (ioc->fc_rescan_work_q) {
2574 if (ioc->fc_rescan_work_count++ == 0) {
2575 queue_work(ioc->fc_rescan_work_q,
2576 &ioc->fc_rescan_work);
2577 }
2578 }
2573 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 2579 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
2574 if (work_count == 1)
2575 schedule_work(&ioc->fc_rescan_work);
2576 break; 2580 break;
2577 2581
2578 /* 2582 /*
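Both the post-reset and MPI_EVENT_RESCAN paths above now share one guarded trigger: take fc_rescan_work_lock, check that the workqueue pointer is still non-NULL (remove clears it), and queue the work only on the 0-to-1 transition of the counter so a burst of events collapses into a single rescan. A sketch of that trigger with an invented my_adapter layout (assumption: the rescan worker resets the counter when it runs; that part is not shown in the hunks above):

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct my_adapter {
    spinlock_t              rescan_lock;
    int                     rescan_count;
    struct work_struct      rescan_work;
    struct workqueue_struct *rescan_q;  /* NULL once the adapter is going away */
};

/* called from reset/event context whenever a rescan is wanted */
static void my_request_rescan(struct my_adapter *a)
{
    unsigned long flags;

    spin_lock_irqsave(&a->rescan_lock, flags);
    if (a->rescan_q) {
        /* only the first trigger queues work; later triggers are
         * absorbed by the rescan that is already queued
         */
        if (a->rescan_count++ == 0)
            queue_work(a->rescan_q, &a->rescan_work);
    }
    spin_unlock_irqrestore(&a->rescan_lock, flags);
}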
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 09c745b19cc8..f2a4d382ea19 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -783,6 +783,70 @@ static struct pci_device_id mptspi_pci_table[] = {
783}; 783};
784MODULE_DEVICE_TABLE(pci, mptspi_pci_table); 784MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
785 785
786
787/*
788 * renegotiate for a given target
789 */
790static void
791mptspi_dv_renegotiate_work(void *data)
792{
793 struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
794 struct _MPT_SCSI_HOST *hd = wqw->hd;
795 struct scsi_device *sdev;
796
797 kfree(wqw);
798
799 shost_for_each_device(sdev, hd->ioc->sh)
800 mptspi_dv_device(hd, sdev);
801}
802
803static void
804mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
805{
806 struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
807
808 if (!wqw)
809 return;
810
811 INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
812 wqw->hd = hd;
813
814 schedule_work(&wqw->work);
815}
816
817/*
818 * spi module reset handler
819 */
820static int
821mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
822{
823 struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
824 int rc;
825
826 rc = mptscsih_ioc_reset(ioc, reset_phase);
827
828 if (reset_phase == MPT_IOC_POST_RESET)
829 mptspi_dv_renegotiate(hd);
830
831 return rc;
832}
833
834/*
835 * spi module resume handler
836 */
837static int
838mptspi_resume(struct pci_dev *pdev)
839{
840 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
841 struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata;
842 int rc;
843
844 rc = mptscsih_resume(pdev);
845 mptspi_dv_renegotiate(hd);
846
847 return rc;
848}
849
786/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 850/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
787/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 851/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
788/* 852/*
@@ -1032,7 +1096,7 @@ static struct pci_driver mptspi_driver = {
1032 .shutdown = mptscsih_shutdown, 1096 .shutdown = mptscsih_shutdown,
1033#ifdef CONFIG_PM 1097#ifdef CONFIG_PM
1034 .suspend = mptscsih_suspend, 1098 .suspend = mptscsih_suspend,
1035 .resume = mptscsih_resume, 1099 .resume = mptspi_resume,
1036#endif 1100#endif
1037}; 1101};
1038 1102
@@ -1061,7 +1125,7 @@ mptspi_init(void)
1061 ": Registered for IOC event notifications\n")); 1125 ": Registered for IOC event notifications\n"));
1062 } 1126 }
1063 1127
1064 if (mpt_reset_register(mptspiDoneCtx, mptscsih_ioc_reset) == 0) { 1128 if (mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset) == 0) {
1065 dprintk((KERN_INFO MYNAM 1129 dprintk((KERN_INFO MYNAM
1066 ": Registered for IOC reset notifications\n")); 1130 ": Registered for IOC reset notifications\n"));
1067 } 1131 }
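
The new mptspi helpers above use a common fire-and-forget shape: allocate a small work_queue_wrapper with GFP_ATOMIC (the reset and resume paths cannot sleep for long), record the host pointer in it, schedule the work, and let the handler kfree() the wrapper once every device has been renegotiated. A hedged user-space sketch of that ownership hand-off, with illustrative names and submit_deferred() standing in for schedule_work():

    #include <stdlib.h>

    struct renegotiate_ctx {
        void *host;                            /* driver-private host pointer */
    };

    static void renegotiate_handler(void *data)
    {
        struct renegotiate_ctx *ctx = data;
        /* ... renegotiate each attached device using ctx->host ... */
        free(ctx);                             /* the handler owns and releases ctx */
    }

    /* Placeholder for the real deferral mechanism (a workqueue in the driver). */
    static void submit_deferred(void (*fn)(void *), void *data) { fn(data); }

    static void request_renegotiate(void *host)
    {
        struct renegotiate_ctx *ctx = malloc(sizeof(*ctx));

        if (!ctx)
            return;                            /* best effort, as in the driver */
        ctx->host = host;
        submit_deferred(renegotiate_handler, ctx);
    }
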
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 6061c2d101a0..88f0eef9cf33 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -621,9 +621,6 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
621 struct at91mci_host *host = mmc_priv(mmc); 621 struct at91mci_host *host = mmc_priv(mmc);
622 unsigned long at91_master_clock = clk_get_rate(mci_clk); 622 unsigned long at91_master_clock = clk_get_rate(mci_clk);
623 623
624 DBG("Clock %uHz, busmode %u, powermode %u, Vdd %u\n",
625 ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
626
627 if (host) 624 if (host)
628 host->bus_mode = ios->bus_mode; 625 host->bus_mode = ios->bus_mode;
629 else 626 else
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index c0326bbc5f28..914d62b24064 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -720,10 +720,6 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
720{ 720{
721 struct au1xmmc_host *host = mmc_priv(mmc); 721 struct au1xmmc_host *host = mmc_priv(mmc);
722 722
723 DBG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
724 host->id, ios->power_mode, ios->clock, ios->vdd,
725 ios->bus_mode);
726
727 if (ios->power_mode == MMC_POWER_OFF) 723 if (ios->power_mode == MMC_POWER_OFF)
728 au1xmmc_set_power(host, 0); 724 au1xmmc_set_power(host, 0);
729 else if (ios->power_mode == MMC_POWER_ON) { 725 else if (ios->power_mode == MMC_POWER_ON) {
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index ffb7f55d3467..79358e223f57 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -102,6 +102,7 @@ struct imxmci_host {
102#define IMXMCI_PEND_CPU_DATA_b 5 102#define IMXMCI_PEND_CPU_DATA_b 5
103#define IMXMCI_PEND_CARD_XCHG_b 6 103#define IMXMCI_PEND_CARD_XCHG_b 6
104#define IMXMCI_PEND_SET_INIT_b 7 104#define IMXMCI_PEND_SET_INIT_b 7
105#define IMXMCI_PEND_STARTED_b 8
105 106
106#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b) 107#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
107#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b) 108#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
@@ -111,6 +112,7 @@ struct imxmci_host {
111#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b) 112#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
112#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b) 113#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
113#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b) 114#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
115#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
114 116
115static void imxmci_stop_clock(struct imxmci_host *host) 117static void imxmci_stop_clock(struct imxmci_host *host)
116{ 118{
@@ -131,23 +133,52 @@ static void imxmci_stop_clock(struct imxmci_host *host)
131 dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n"); 133 dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
132} 134}
133 135
134static void imxmci_start_clock(struct imxmci_host *host) 136static int imxmci_start_clock(struct imxmci_host *host)
135{ 137{
136 int i = 0; 138 unsigned int trials = 0;
139 unsigned int delay_limit = 128;
140 unsigned long flags;
141
137 MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK; 142 MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK;
138 while(i < 0x1000) {
139 if(!(i & 0x7f))
140 MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
141 143
142 if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) { 144 clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
143 /* Check twice before cut */ 145
146 /*
 147 * Command the clock to start; this usually succeeds in fewer
 148 * than 6 delay loops, but during card detection (low clock rate)
 149 * it takes up to 5000 delay loops and sometimes fails on the first attempt
150 */
151 MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
152
153 do {
154 unsigned int delay = delay_limit;
155
156 while(delay--){
144 if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) 157 if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
145 return; 158 /* Check twice before cut */
159 if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
160 return 0;
161
162 if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
163 return 0;
146 } 164 }
147 165
148 i++; 166 local_irq_save(flags);
149 } 167 /*
 150 dev_dbg(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n"); 168 * Ensure that the request is not doubled under any circumstances.
 169 * It is possible that the clock-running state is missed, because some other
 170 * IRQ or scheduling delay postpones this function's execution and the clock has
 171 * already been stopped by other means (response processing, SDHC HW)
172 */
173 if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
174 MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
175 local_irq_restore(flags);
176
177 } while(++trials<256);
178
179 dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
180
181 return -1;
151} 182}
152 183
153static void imxmci_softreset(void) 184static void imxmci_softreset(void)
@@ -498,7 +529,7 @@ static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
498 529
499 data_error = imxmci_finish_data(host, stat); 530 data_error = imxmci_finish_data(host, stat);
500 531
501 if (host->req->stop && (data_error == MMC_ERR_NONE)) { 532 if (host->req->stop) {
502 imxmci_stop_clock(host); 533 imxmci_stop_clock(host);
503 imxmci_start_cmd(host, host->req->stop, 0); 534 imxmci_start_cmd(host, host->req->stop, 0);
504 } else { 535 } else {
@@ -622,6 +653,7 @@ static irqreturn_t imxmci_irq(int irq, void *devid, struct pt_regs *regs)
622 atomic_set(&host->stuck_timeout, 0); 653 atomic_set(&host->stuck_timeout, 0);
623 host->status_reg = stat; 654 host->status_reg = stat;
624 set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); 655 set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
656 set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
625 tasklet_schedule(&host->tasklet); 657 tasklet_schedule(&host->tasklet);
626 658
627 return IRQ_RETVAL(handled);; 659 return IRQ_RETVAL(handled);;
@@ -775,10 +807,6 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
775 struct imxmci_host *host = mmc_priv(mmc); 807 struct imxmci_host *host = mmc_priv(mmc);
776 int prescaler; 808 int prescaler;
777 809
778 dev_dbg(mmc_dev(host->mmc), "clock %u power %u vdd %u width %u\n",
779 ios->clock, ios->power_mode, ios->vdd,
780 (ios->bus_width==MMC_BUS_WIDTH_4)?4:1);
781
782 if( ios->bus_width==MMC_BUS_WIDTH_4 ) { 810 if( ios->bus_width==MMC_BUS_WIDTH_4 ) {
783 host->actual_bus_width = MMC_BUS_WIDTH_4; 811 host->actual_bus_width = MMC_BUS_WIDTH_4;
784 imx_gpio_mode(PB11_PF_SD_DAT3); 812 imx_gpio_mode(PB11_PF_SD_DAT3);
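
imxmci_start_clock() above becomes a two-level bounded loop: poll the clock-run status up to delay_limit (128) times, bail out early once the interrupt handler has set the new IMXMCI_PEND_STARTED bit, and re-issue the start command, with local interrupts disabled so the request cannot be doubled, for at most 256 trials before reporting failure. A small sketch of that bounded poll-with-event-flag shape, using stand-in callbacks:

    #include <stdbool.h>

    #define POLL_LIMIT  128            /* like delay_limit */
    #define TRIAL_LIMIT 256            /* like the ++trials < 256 bound */

    static int start_clock(bool (*hw_running)(void), volatile int *started_flag,
                           void (*issue_start)(void))
    {
        for (unsigned int trial = 0; trial < TRIAL_LIMIT; trial++) {
            for (unsigned int i = 0; i < POLL_LIMIT; i++) {
                if (hw_running() && hw_running())   /* check twice before trusting it */
                    return 0;
                if (*started_flag)                  /* IRQ handler already saw activity */
                    return 0;
            }
            if (!*started_flag)
                issue_start();         /* re-issue only if nothing has happened yet */
        }
        return -1;                     /* give up, as the driver now does */
    }
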
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index da6ddd910fc5..1ca2c8b9c9b5 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -59,21 +59,23 @@ static const unsigned int tacc_mant[] = {
59 59
60 60
61/** 61/**
62 * mmc_request_done - finish processing an MMC command 62 * mmc_request_done - finish processing an MMC request
63 * @host: MMC host which completed command 63 * @host: MMC host which completed request
 64 * @mrq: MMC request which completed 64 * @mrq: MMC request which completed
65 * 65 *
66 * MMC drivers should call this function when they have completed 66 * MMC drivers should call this function when they have completed
67 * their processing of a command. This should be called before the 67 * their processing of a request.
68 * data part of the command has completed.
69 */ 68 */
70void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) 69void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
71{ 70{
72 struct mmc_command *cmd = mrq->cmd; 71 struct mmc_command *cmd = mrq->cmd;
73 int err = mrq->cmd->error; 72 int err = cmd->error;
74 pr_debug("MMC: req done (%02x): %d: %08x %08x %08x %08x\n", 73
75 cmd->opcode, err, cmd->resp[0], cmd->resp[1], 74 pr_debug("%s: req done (CMD%u): %d/%d/%d: %08x %08x %08x %08x\n",
76 cmd->resp[2], cmd->resp[3]); 75 mmc_hostname(host), cmd->opcode, err,
76 mrq->data ? mrq->data->error : 0,
77 mrq->stop ? mrq->stop->error : 0,
78 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
77 79
78 if (err && cmd->retries) { 80 if (err && cmd->retries) {
79 cmd->retries--; 81 cmd->retries--;
@@ -97,8 +99,9 @@ EXPORT_SYMBOL(mmc_request_done);
97void 99void
98mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) 100mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
99{ 101{
100 pr_debug("MMC: starting cmd %02x arg %08x flags %08x\n", 102 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
101 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); 103 mmc_hostname(host), mrq->cmd->opcode,
104 mrq->cmd->arg, mrq->cmd->flags);
102 105
103 WARN_ON(host->card_busy == NULL); 106 WARN_ON(host->card_busy == NULL);
104 107
@@ -312,6 +315,18 @@ void mmc_release_host(struct mmc_host *host)
312 315
313EXPORT_SYMBOL(mmc_release_host); 316EXPORT_SYMBOL(mmc_release_host);
314 317
318static inline void mmc_set_ios(struct mmc_host *host)
319{
320 struct mmc_ios *ios = &host->ios;
321
322 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
323 mmc_hostname(host), ios->clock, ios->bus_mode,
324 ios->power_mode, ios->chip_select, ios->vdd,
325 ios->bus_width);
326
327 host->ops->set_ios(host, ios);
328}
329
315static int mmc_select_card(struct mmc_host *host, struct mmc_card *card) 330static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
316{ 331{
317 int err; 332 int err;
@@ -364,7 +379,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
364 } 379 }
365 } 380 }
366 381
367 host->ops->set_ios(host, &host->ios); 382 mmc_set_ios(host);
368 383
369 return MMC_ERR_NONE; 384 return MMC_ERR_NONE;
370} 385}
@@ -415,7 +430,7 @@ static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
415 ocr = 3 << bit; 430 ocr = 3 << bit;
416 431
417 host->ios.vdd = bit; 432 host->ios.vdd = bit;
418 host->ops->set_ios(host, &host->ios); 433 mmc_set_ios(host);
419 } else { 434 } else {
420 ocr = 0; 435 ocr = 0;
421 } 436 }
@@ -549,6 +564,7 @@ static void mmc_decode_csd(struct mmc_card *card)
549 csd->read_partial = UNSTUFF_BITS(resp, 79, 1); 564 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
550 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); 565 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
551 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); 566 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
567 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
552 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); 568 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
553 csd->write_partial = UNSTUFF_BITS(resp, 21, 1); 569 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
554 } else { 570 } else {
@@ -583,6 +599,7 @@ static void mmc_decode_csd(struct mmc_card *card)
583 csd->read_partial = UNSTUFF_BITS(resp, 79, 1); 599 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
584 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); 600 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
585 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); 601 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
602 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
586 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); 603 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
587 csd->write_partial = UNSTUFF_BITS(resp, 21, 1); 604 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
588 } 605 }
@@ -666,7 +683,7 @@ static void mmc_idle_cards(struct mmc_host *host)
666 struct mmc_command cmd; 683 struct mmc_command cmd;
667 684
668 host->ios.chip_select = MMC_CS_HIGH; 685 host->ios.chip_select = MMC_CS_HIGH;
669 host->ops->set_ios(host, &host->ios); 686 mmc_set_ios(host);
670 687
671 mmc_delay(1); 688 mmc_delay(1);
672 689
@@ -679,7 +696,7 @@ static void mmc_idle_cards(struct mmc_host *host)
679 mmc_delay(1); 696 mmc_delay(1);
680 697
681 host->ios.chip_select = MMC_CS_DONTCARE; 698 host->ios.chip_select = MMC_CS_DONTCARE;
682 host->ops->set_ios(host, &host->ios); 699 mmc_set_ios(host);
683 700
684 mmc_delay(1); 701 mmc_delay(1);
685} 702}
@@ -704,13 +721,13 @@ static void mmc_power_up(struct mmc_host *host)
704 host->ios.chip_select = MMC_CS_DONTCARE; 721 host->ios.chip_select = MMC_CS_DONTCARE;
705 host->ios.power_mode = MMC_POWER_UP; 722 host->ios.power_mode = MMC_POWER_UP;
706 host->ios.bus_width = MMC_BUS_WIDTH_1; 723 host->ios.bus_width = MMC_BUS_WIDTH_1;
707 host->ops->set_ios(host, &host->ios); 724 mmc_set_ios(host);
708 725
709 mmc_delay(1); 726 mmc_delay(1);
710 727
711 host->ios.clock = host->f_min; 728 host->ios.clock = host->f_min;
712 host->ios.power_mode = MMC_POWER_ON; 729 host->ios.power_mode = MMC_POWER_ON;
713 host->ops->set_ios(host, &host->ios); 730 mmc_set_ios(host);
714 731
715 mmc_delay(2); 732 mmc_delay(2);
716} 733}
@@ -723,7 +740,7 @@ static void mmc_power_off(struct mmc_host *host)
723 host->ios.chip_select = MMC_CS_DONTCARE; 740 host->ios.chip_select = MMC_CS_DONTCARE;
724 host->ios.power_mode = MMC_POWER_OFF; 741 host->ios.power_mode = MMC_POWER_OFF;
725 host->ios.bus_width = MMC_BUS_WIDTH_1; 742 host->ios.bus_width = MMC_BUS_WIDTH_1;
726 host->ops->set_ios(host, &host->ios); 743 mmc_set_ios(host);
727} 744}
728 745
729static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) 746static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
@@ -971,7 +988,8 @@ static unsigned int mmc_calculate_clock(struct mmc_host *host)
971 if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr) 988 if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
972 max_dtr = card->csd.max_dtr; 989 max_dtr = card->csd.max_dtr;
973 990
974 pr_debug("MMC: selected %d.%03dMHz transfer rate\n", 991 pr_debug("%s: selected %d.%03dMHz transfer rate\n",
992 mmc_hostname(host),
975 max_dtr / 1000000, (max_dtr / 1000) % 1000); 993 max_dtr / 1000000, (max_dtr / 1000) % 1000);
976 994
977 return max_dtr; 995 return max_dtr;
@@ -1046,7 +1064,7 @@ static void mmc_setup(struct mmc_host *host)
1046 } else { 1064 } else {
1047 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 1065 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1048 host->ios.clock = host->f_min; 1066 host->ios.clock = host->f_min;
1049 host->ops->set_ios(host, &host->ios); 1067 mmc_set_ios(host);
1050 1068
1051 /* 1069 /*
1052 * We should remember the OCR mask from the existing 1070 * We should remember the OCR mask from the existing
@@ -1082,7 +1100,7 @@ static void mmc_setup(struct mmc_host *host)
1082 * Ok, now switch to push-pull mode. 1100 * Ok, now switch to push-pull mode.
1083 */ 1101 */
1084 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; 1102 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1085 host->ops->set_ios(host, &host->ios); 1103 mmc_set_ios(host);
1086 1104
1087 mmc_read_csds(host); 1105 mmc_read_csds(host);
1088 1106
@@ -1128,7 +1146,7 @@ static void mmc_rescan(void *data)
1128 * attached cards and the host support. 1146 * attached cards and the host support.
1129 */ 1147 */
1130 host->ios.clock = mmc_calculate_clock(host); 1148 host->ios.clock = mmc_calculate_clock(host);
1131 host->ops->set_ios(host, &host->ios); 1149 mmc_set_ios(host);
1132 } 1150 }
1133 1151
1134 mmc_release_host(host); 1152 mmc_release_host(host);
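
The DBG/DBGF prints dropped from the individual host drivers elsewhere in this diff (at91_mci, au1xmmc, imxmmc, mmci, pxamci, sdhci, wbsd) are replaced by the single mmc_set_ios() helper introduced above, which logs the whole ios state once at the core's choke point and then delegates to the host driver's set_ios operation. A sketch of that wrapper shape with illustrative names, not the MMC core's own types:

    #include <stdio.h>

    struct bus_ios { unsigned clock, bus_mode, power_mode, vdd; };

    struct host {
        const char *name;
        struct bus_ios ios;
        void (*set_ios)(struct host *h, struct bus_ios *ios);  /* driver callback */
    };

    static void host_set_ios(struct host *h)
    {
        printf("%s: clock %uHz busmode %u powermode %u Vdd %u\n",
               h->name, h->ios.clock, h->ios.bus_mode,
               h->ios.power_mode, h->ios.vdd);
        h->set_ios(h, &h->ios);        /* delegate to the hardware driver */
    }
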
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 8eb2a2ede64b..06bd1f4cb9b1 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -187,6 +187,12 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
187 brq.cmd.opcode = MMC_WRITE_BLOCK; 187 brq.cmd.opcode = MMC_WRITE_BLOCK;
188 brq.data.flags |= MMC_DATA_WRITE; 188 brq.data.flags |= MMC_DATA_WRITE;
189 brq.data.blocks = 1; 189 brq.data.blocks = 1;
190
191 /*
192 * Scale up the timeout by the r2w factor
193 */
194 brq.data.timeout_ns <<= card->csd.r2w_factor;
195 brq.data.timeout_clks <<= card->csd.r2w_factor;
190 } 196 }
191 197
192 if (brq.data.blocks > 1) { 198 if (brq.data.blocks > 1) {
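
The shifts above rely on R2W_FACTOR being a power-of-two ratio of write time to read time in the CSD, so shifting the read-derived timeout left by r2w_factor scales it up to a write timeout. A worked example of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long read_timeout_ns = 100ULL * 1000 * 1000;  /* 100 ms example */
        unsigned int r2w_factor = 2;                                /* from the CSD */

        unsigned long long write_timeout_ns = read_timeout_ns << r2w_factor;

        printf("write timeout = %llu ns\n", write_timeout_ns);      /* 400 ms */
        return 0;
    }
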
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index df7e861e2fc7..da8e4d7339cc 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -402,9 +402,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
402 struct mmci_host *host = mmc_priv(mmc); 402 struct mmci_host *host = mmc_priv(mmc);
403 u32 clk = 0, pwr = 0; 403 u32 clk = 0, pwr = 0;
404 404
405 DBG(host, "clock %uHz busmode %u powermode %u Vdd %u\n",
406 ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
407
408 if (ios->clock) { 405 if (ios->clock) {
409 if (ios->clock >= host->mclk) { 406 if (ios->clock >= host->mclk) {
410 clk = MCI_CLK_BYPASS; 407 clk = MCI_CLK_BYPASS;
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index eb42cb349420..f97b472085cb 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -198,7 +198,6 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd,
198 198
199static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq) 199static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
200{ 200{
201 pr_debug("PXAMCI: request done\n");
202 host->mrq = NULL; 201 host->mrq = NULL;
203 host->cmd = NULL; 202 host->cmd = NULL;
204 host->data = NULL; 203 host->data = NULL;
@@ -291,7 +290,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
291 pxamci_disable_irq(host, DATA_TRAN_DONE); 290 pxamci_disable_irq(host, DATA_TRAN_DONE);
292 291
293 host->data = NULL; 292 host->data = NULL;
294 if (host->mrq->stop && data->error == MMC_ERR_NONE) { 293 if (host->mrq->stop) {
295 pxamci_stop_clock(host); 294 pxamci_stop_clock(host);
296 pxamci_start_cmd(host, host->mrq->stop, 0); 295 pxamci_start_cmd(host, host->mrq->stop, 0);
297 } else { 296 } else {
@@ -309,12 +308,10 @@ static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
309 308
310 ireg = readl(host->base + MMC_I_REG); 309 ireg = readl(host->base + MMC_I_REG);
311 310
312 pr_debug("PXAMCI: irq %08x\n", ireg);
313
314 if (ireg) { 311 if (ireg) {
315 unsigned stat = readl(host->base + MMC_STAT); 312 unsigned stat = readl(host->base + MMC_STAT);
316 313
317 pr_debug("PXAMCI: stat %08x\n", stat); 314 pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
318 315
319 if (ireg & END_CMD_RES) 316 if (ireg & END_CMD_RES)
320 handled |= pxamci_cmd_done(host, stat); 317 handled |= pxamci_cmd_done(host, stat);
@@ -368,10 +365,6 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
368{ 365{
369 struct pxamci_host *host = mmc_priv(mmc); 366 struct pxamci_host *host = mmc_priv(mmc);
370 367
371 pr_debug("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
372 ios->clock, ios->power_mode, ios->vdd / 100,
373 ios->vdd % 100);
374
375 if (ios->clock) { 368 if (ios->clock) {
376 unsigned int clk = CLOCKRATE / ios->clock; 369 unsigned int clk = CLOCKRATE / ios->clock;
377 if (CLOCKRATE / clk > ios->clock) 370 if (CLOCKRATE / clk > ios->clock)
@@ -397,7 +390,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
397 host->cmdat |= CMDAT_INIT; 390 host->cmdat |= CMDAT_INIT;
398 } 391 }
399 392
400 pr_debug("pxamci_set_ios: clkrt = %x cmdat = %x\n", 393 pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
401 host->clkrt, host->cmdat); 394 host->clkrt, host->cmdat);
402} 395}
403 396
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index bdbfca050029..b0053280ff2d 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -570,10 +570,6 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
570 570
571 spin_lock_irqsave(&host->lock, flags); 571 spin_lock_irqsave(&host->lock, flags);
572 572
573 DBG("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
574 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
575 ios->vdd, ios->bus_width);
576
577 /* 573 /*
578 * Reset the chip on each power off. 574 * Reset the chip on each power off.
579 * Should clear out any weird states. 575 * Should clear out any weird states.
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 511f7b0b31d2..39b3d97f891e 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -931,10 +931,6 @@ static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
931 struct wbsd_host *host = mmc_priv(mmc); 931 struct wbsd_host *host = mmc_priv(mmc);
932 u8 clk, setup, pwr; 932 u8 clk, setup, pwr;
933 933
934 DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
935 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
936 ios->vdd, ios->bus_width);
937
938 spin_lock_bh(&host->lock); 934 spin_lock_bh(&host->lock);
939 935
940 /* 936 /*
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 1363083b4d83..14dbad14afb6 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -52,6 +52,7 @@
52#include <linux/mii.h> 52#include <linux/mii.h>
53#include <linux/skbuff.h> 53#include <linux/skbuff.h>
54#include <linux/delay.h> 54#include <linux/delay.h>
55#include <linux/crc32.h>
55#include <asm/mipsregs.h> 56#include <asm/mipsregs.h>
56#include <asm/irq.h> 57#include <asm/irq.h>
57#include <asm/io.h> 58#include <asm/io.h>
@@ -2070,23 +2071,6 @@ static void au1000_tx_timeout(struct net_device *dev)
2070 netif_wake_queue(dev); 2071 netif_wake_queue(dev);
2071} 2072}
2072 2073
2073
2074static unsigned const ethernet_polynomial = 0x04c11db7U;
2075static inline u32 ether_crc(int length, unsigned char *data)
2076{
2077 int crc = -1;
2078
2079 while(--length >= 0) {
2080 unsigned char current_octet = *data++;
2081 int bit;
2082 for (bit = 0; bit < 8; bit++, current_octet >>= 1)
2083 crc = (crc << 1) ^
2084 ((crc < 0) ^ (current_octet & 1) ?
2085 ethernet_polynomial : 0);
2086 }
2087 return crc;
2088}
2089
2090static void set_rx_mode(struct net_device *dev) 2074static void set_rx_mode(struct net_device *dev)
2091{ 2075{
2092 struct au1000_private *aup = (struct au1000_private *) dev->priv; 2076 struct au1000_private *aup = (struct au1000_private *) dev->priv;
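
The open-coded ether_crc() deleted above is the standard bit-serial CRC-32 (polynomial 0x04c11db7) over the MAC address used for the multicast hash; the new <linux/crc32.h> include supplies the same helper, so the local copy is redundant. For reference, a standalone version that mirrors the deleted function:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ether_crc_ref(int length, const unsigned char *data)
    {
        static const uint32_t poly = 0x04c11db7u;
        uint32_t crc = 0xffffffffu;

        while (--length >= 0) {
            unsigned char octet = *data++;
            for (int bit = 0; bit < 8; bit++, octet >>= 1)
                crc = (crc << 1) ^
                      (((crc >> 31) ^ (octet & 1)) ? poly : 0);
        }
        return crc;
    }

    int main(void)
    {
        const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("crc32 = 0x%08x\n", ether_crc_ref(sizeof(mac), mac));
        return 0;
    }
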
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 1f3627470c95..1ddefd281213 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq)
765 break; 765 break;
766 skb = np->tx_skbuff[entry]; 766 skb = np->tx_skbuff[entry];
767 pci_unmap_single (np->pdev, 767 pci_unmap_single (np->pdev,
768 np->tx_ring[entry].fraginfo & 0xffffffffffff, 768 np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
769 skb->len, PCI_DMA_TODEVICE); 769 skb->len, PCI_DMA_TODEVICE);
770 if (irq) 770 if (irq)
771 dev_kfree_skb_irq (skb); 771 dev_kfree_skb_irq (skb);
@@ -893,7 +893,7 @@ receive_packet (struct net_device *dev)
893 /* Small skbuffs for short packets */ 893 /* Small skbuffs for short packets */
894 if (pkt_len > copy_thresh) { 894 if (pkt_len > copy_thresh) {
895 pci_unmap_single (np->pdev, 895 pci_unmap_single (np->pdev,
896 desc->fraginfo & 0xffffffffffff, 896 desc->fraginfo & DMA_48BIT_MASK,
897 np->rx_buf_sz, 897 np->rx_buf_sz,
898 PCI_DMA_FROMDEVICE); 898 PCI_DMA_FROMDEVICE);
899 skb_put (skb = np->rx_skbuff[entry], pkt_len); 899 skb_put (skb = np->rx_skbuff[entry], pkt_len);
@@ -901,7 +901,7 @@ receive_packet (struct net_device *dev)
901 } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { 901 } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
902 pci_dma_sync_single_for_cpu(np->pdev, 902 pci_dma_sync_single_for_cpu(np->pdev,
903 desc->fraginfo & 903 desc->fraginfo &
904 0xffffffffffff, 904 DMA_48BIT_MASK,
905 np->rx_buf_sz, 905 np->rx_buf_sz,
906 PCI_DMA_FROMDEVICE); 906 PCI_DMA_FROMDEVICE);
907 skb->dev = dev; 907 skb->dev = dev;
@@ -913,7 +913,7 @@ receive_packet (struct net_device *dev)
913 skb_put (skb, pkt_len); 913 skb_put (skb, pkt_len);
914 pci_dma_sync_single_for_device(np->pdev, 914 pci_dma_sync_single_for_device(np->pdev,
915 desc->fraginfo & 915 desc->fraginfo &
916 0xffffffffffff, 916 DMA_48BIT_MASK,
917 np->rx_buf_sz, 917 np->rx_buf_sz,
918 PCI_DMA_FROMDEVICE); 918 PCI_DMA_FROMDEVICE);
919 } 919 }
@@ -1800,7 +1800,7 @@ rio_close (struct net_device *dev)
1800 skb = np->rx_skbuff[i]; 1800 skb = np->rx_skbuff[i];
1801 if (skb) { 1801 if (skb) {
1802 pci_unmap_single(np->pdev, 1802 pci_unmap_single(np->pdev,
1803 np->rx_ring[i].fraginfo & 0xffffffffffff, 1803 np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
1804 skb->len, PCI_DMA_FROMDEVICE); 1804 skb->len, PCI_DMA_FROMDEVICE);
1805 dev_kfree_skb (skb); 1805 dev_kfree_skb (skb);
1806 np->rx_skbuff[i] = NULL; 1806 np->rx_skbuff[i] = NULL;
@@ -1810,7 +1810,7 @@ rio_close (struct net_device *dev)
1810 skb = np->tx_skbuff[i]; 1810 skb = np->tx_skbuff[i];
1811 if (skb) { 1811 if (skb) {
1812 pci_unmap_single(np->pdev, 1812 pci_unmap_single(np->pdev,
1813 np->tx_ring[i].fraginfo & 0xffffffffffff, 1813 np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
1814 skb->len, PCI_DMA_TODEVICE); 1814 skb->len, PCI_DMA_TODEVICE);
1815 dev_kfree_skb (skb); 1815 dev_kfree_skb (skb);
1816 np->tx_skbuff[i] = NULL; 1816 np->tx_skbuff[i] = NULL;
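
DMA_48BIT_MASK only gives a name to the 0xffffffffffff literal it replaces above, so the dl2k changes are purely cosmetic. A trivial check of that identity (the macro value shown here is the assumed kernel definition of this era):

    #include <assert.h>

    /* Assumed definition: the kernel macro names the 48-bit all-ones value. */
    #define DMA_48BIT_MASK 0x0000ffffffffffffULL

    int main(void)
    {
        assert(DMA_48BIT_MASK == 0xffffffffffffULL);     /* same literal as before */
        assert(DMA_48BIT_MASK == ((1ULL << 48) - 1));
        return 0;
    }
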
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 9788b1ef2e7d..f7235c9bc421 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,6 +106,7 @@
106 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. 106 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
107 * 0.52: 20 Jan 2006: Add MSI/MSIX support. 107 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
108 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. 108 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
109 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
109 * 110 *
110 * Known bugs: 111 * Known bugs:
111 * We suspect that on some hardware no TX done interrupts are generated. 112 * We suspect that on some hardware no TX done interrupts are generated.
@@ -117,7 +118,7 @@
117 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 118 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
118 * superfluous timer interrupts from the nic. 119 * superfluous timer interrupts from the nic.
119 */ 120 */
120#define FORCEDETH_VERSION "0.53" 121#define FORCEDETH_VERSION "0.54"
121#define DRV_NAME "forcedeth" 122#define DRV_NAME "forcedeth"
122 123
123#include <linux/module.h> 124#include <linux/module.h>
@@ -710,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
710 } 711 }
711} 712}
712 713
714static int using_multi_irqs(struct net_device *dev)
715{
716 struct fe_priv *np = get_nvpriv(dev);
717
718 if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
719 ((np->msi_flags & NV_MSI_X_ENABLED) &&
720 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
721 return 0;
722 else
723 return 1;
724}
725
726static void nv_enable_irq(struct net_device *dev)
727{
728 struct fe_priv *np = get_nvpriv(dev);
729
730 if (!using_multi_irqs(dev)) {
731 if (np->msi_flags & NV_MSI_X_ENABLED)
732 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
733 else
734 enable_irq(dev->irq);
735 } else {
736 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
737 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
738 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
739 }
740}
741
742static void nv_disable_irq(struct net_device *dev)
743{
744 struct fe_priv *np = get_nvpriv(dev);
745
746 if (!using_multi_irqs(dev)) {
747 if (np->msi_flags & NV_MSI_X_ENABLED)
748 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
749 else
750 disable_irq(dev->irq);
751 } else {
752 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
753 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
754 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
755 }
756}
757
758/* In MSIX mode, a write to irqmask behaves as XOR */
759static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
760{
761 u8 __iomem *base = get_hwbase(dev);
762
763 writel(mask, base + NvRegIrqMask);
764}
765
766static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
767{
768 struct fe_priv *np = get_nvpriv(dev);
769 u8 __iomem *base = get_hwbase(dev);
770
771 if (np->msi_flags & NV_MSI_X_ENABLED) {
772 writel(mask, base + NvRegIrqMask);
773 } else {
774 if (np->msi_flags & NV_MSI_ENABLED)
775 writel(0, base + NvRegMSIIrqMask);
776 writel(0, base + NvRegIrqMask);
777 }
778}
779
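
using_multi_irqs() above folds the thrice-repeated MSI-X test into one predicate: the device is in multi-vector mode only when MSI-X is enabled and more than one vector was granted; every enable/disable site then branches on that single answer. A tiny stand-alone version of the same predicate with hypothetical flag values, not the driver's real NV_MSI_* constants:

    #include <stdbool.h>
    #include <stdio.h>

    #define MSI_X_ENABLED      0x10
    #define MSI_X_VECTORS_MASK 0x0f

    static bool using_multi_irqs(unsigned int msi_flags)
    {
        if (!(msi_flags & MSI_X_ENABLED))
            return false;                                  /* legacy INTx or plain MSI */
        return (msi_flags & MSI_X_VECTORS_MASK) != 0x1;    /* more than one vector granted */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               using_multi_irqs(0x01),                     /* INTx: 0 */
               using_multi_irqs(MSI_X_ENABLED | 1),        /* MSI-X, one vector: 0 */
               using_multi_irqs(MSI_X_ENABLED | 3));       /* MSI-X, three vectors: 1 */
        return 0;
    }
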
713#define MII_READ (-1) 780#define MII_READ (-1)
714/* mii_rw: read/write a register on the PHY. 781/* mii_rw: read/write a register on the PHY.
715 * 782 *
@@ -1019,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data)
1019 struct net_device *dev = (struct net_device *) data; 1086 struct net_device *dev = (struct net_device *) data;
1020 struct fe_priv *np = netdev_priv(dev); 1087 struct fe_priv *np = netdev_priv(dev);
1021 1088
1022 1089 if (!using_multi_irqs(dev)) {
1023 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 1090 if (np->msi_flags & NV_MSI_X_ENABLED)
1024 ((np->msi_flags & NV_MSI_X_ENABLED) && 1091 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1025 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { 1092 else
1026 disable_irq(dev->irq); 1093 disable_irq(dev->irq);
1027 } else { 1094 } else {
1028 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1095 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1029 } 1096 }
1030 if (nv_alloc_rx(dev)) { 1097 if (nv_alloc_rx(dev)) {
1031 spin_lock(&np->lock); 1098 spin_lock_irq(&np->lock);
1032 if (!np->in_shutdown) 1099 if (!np->in_shutdown)
1033 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1100 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1034 spin_unlock(&np->lock); 1101 spin_unlock_irq(&np->lock);
1035 } 1102 }
1036 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 1103 if (!using_multi_irqs(dev)) {
1037 ((np->msi_flags & NV_MSI_X_ENABLED) && 1104 if (np->msi_flags & NV_MSI_X_ENABLED)
1038 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { 1105 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1039 enable_irq(dev->irq); 1106 else
1107 enable_irq(dev->irq);
1040 } else { 1108 } else {
1041 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1109 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1042 } 1110 }
@@ -1668,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1668 * guessed, there is probably a simpler approach. 1736 * guessed, there is probably a simpler approach.
1669 * Changing the MTU is a rare event, it shouldn't matter. 1737 * Changing the MTU is a rare event, it shouldn't matter.
1670 */ 1738 */
1671 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 1739 nv_disable_irq(dev);
1672 ((np->msi_flags & NV_MSI_X_ENABLED) &&
1673 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
1674 disable_irq(dev->irq);
1675 } else {
1676 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1677 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1678 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1679 }
1680 spin_lock_bh(&dev->xmit_lock); 1740 spin_lock_bh(&dev->xmit_lock);
1681 spin_lock(&np->lock); 1741 spin_lock(&np->lock);
1682 /* stop engines */ 1742 /* stop engines */
@@ -1709,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1709 nv_start_tx(dev); 1769 nv_start_tx(dev);
1710 spin_unlock(&np->lock); 1770 spin_unlock(&np->lock);
1711 spin_unlock_bh(&dev->xmit_lock); 1771 spin_unlock_bh(&dev->xmit_lock);
1712 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 1772 nv_enable_irq(dev);
1713 ((np->msi_flags & NV_MSI_X_ENABLED) &&
1714 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
1715 enable_irq(dev->irq);
1716 } else {
1717 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1718 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1719 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1720 }
1721 } 1773 }
1722 return 0; 1774 return 0;
1723} 1775}
@@ -2108,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
2108 if (!(events & np->irqmask)) 2160 if (!(events & np->irqmask))
2109 break; 2161 break;
2110 2162
2111 spin_lock(&np->lock); 2163 spin_lock_irq(&np->lock);
2112 nv_tx_done(dev); 2164 nv_tx_done(dev);
2113 spin_unlock(&np->lock); 2165 spin_unlock_irq(&np->lock);
2114 2166
2115 if (events & (NVREG_IRQ_TX_ERR)) { 2167 if (events & (NVREG_IRQ_TX_ERR)) {
2116 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2168 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2117 dev->name, events); 2169 dev->name, events);
2118 } 2170 }
2119 if (i > max_interrupt_work) { 2171 if (i > max_interrupt_work) {
2120 spin_lock(&np->lock); 2172 spin_lock_irq(&np->lock);
2121 /* disable interrupts on the nic */ 2173 /* disable interrupts on the nic */
2122 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 2174 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
2123 pci_push(base); 2175 pci_push(base);
@@ -2127,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
2127 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2179 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2128 } 2180 }
2129 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 2181 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
2130 spin_unlock(&np->lock); 2182 spin_unlock_irq(&np->lock);
2131 break; 2183 break;
2132 } 2184 }
2133 2185
@@ -2157,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
2157 2209
2158 nv_rx_process(dev); 2210 nv_rx_process(dev);
2159 if (nv_alloc_rx(dev)) { 2211 if (nv_alloc_rx(dev)) {
2160 spin_lock(&np->lock); 2212 spin_lock_irq(&np->lock);
2161 if (!np->in_shutdown) 2213 if (!np->in_shutdown)
2162 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 2214 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2163 spin_unlock(&np->lock); 2215 spin_unlock_irq(&np->lock);
2164 } 2216 }
2165 2217
2166 if (i > max_interrupt_work) { 2218 if (i > max_interrupt_work) {
2167 spin_lock(&np->lock); 2219 spin_lock_irq(&np->lock);
2168 /* disable interrupts on the nic */ 2220 /* disable interrupts on the nic */
2169 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 2221 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2170 pci_push(base); 2222 pci_push(base);
@@ -2174,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
2174 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2226 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2175 } 2227 }
2176 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 2228 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
2177 spin_unlock(&np->lock); 2229 spin_unlock_irq(&np->lock);
2178 break; 2230 break;
2179 } 2231 }
2180 2232
@@ -2203,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
2203 break; 2255 break;
2204 2256
2205 if (events & NVREG_IRQ_LINK) { 2257 if (events & NVREG_IRQ_LINK) {
2206 spin_lock(&np->lock); 2258 spin_lock_irq(&np->lock);
2207 nv_link_irq(dev); 2259 nv_link_irq(dev);
2208 spin_unlock(&np->lock); 2260 spin_unlock_irq(&np->lock);
2209 } 2261 }
2210 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2262 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
2211 spin_lock(&np->lock); 2263 spin_lock_irq(&np->lock);
2212 nv_linkchange(dev); 2264 nv_linkchange(dev);
2213 spin_unlock(&np->lock); 2265 spin_unlock_irq(&np->lock);
2214 np->link_timeout = jiffies + LINK_TIMEOUT; 2266 np->link_timeout = jiffies + LINK_TIMEOUT;
2215 } 2267 }
2216 if (events & (NVREG_IRQ_UNKNOWN)) { 2268 if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2218,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
2218 dev->name, events); 2270 dev->name, events);
2219 } 2271 }
2220 if (i > max_interrupt_work) { 2272 if (i > max_interrupt_work) {
2221 spin_lock(&np->lock); 2273 spin_lock_irq(&np->lock);
2222 /* disable interrupts on the nic */ 2274 /* disable interrupts on the nic */
2223 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 2275 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
2224 pci_push(base); 2276 pci_push(base);
@@ -2228,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
2228 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 2280 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2229 } 2281 }
2230 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 2282 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
2231 spin_unlock(&np->lock); 2283 spin_unlock_irq(&np->lock);
2232 break; 2284 break;
2233 } 2285 }
2234 2286
@@ -2251,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data)
2251 * nv_nic_irq because that may decide to do otherwise 2303 * nv_nic_irq because that may decide to do otherwise
2252 */ 2304 */
2253 2305
2254 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 2306 if (!using_multi_irqs(dev)) {
2255 ((np->msi_flags & NV_MSI_X_ENABLED) && 2307 if (np->msi_flags & NV_MSI_X_ENABLED)
2256 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { 2308 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
2257 disable_irq(dev->irq); 2309 else
2310 disable_irq(dev->irq);
2258 mask = np->irqmask; 2311 mask = np->irqmask;
2259 } else { 2312 } else {
2260 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2313 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2277,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data)
2277 writel(mask, base + NvRegIrqMask); 2330 writel(mask, base + NvRegIrqMask);
2278 pci_push(base); 2331 pci_push(base);
2279 2332
2280 if (!(np->msi_flags & NV_MSI_X_ENABLED) || 2333 if (!using_multi_irqs(dev)) {
2281 ((np->msi_flags & NV_MSI_X_ENABLED) &&
2282 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
2283 nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); 2334 nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
2284 enable_irq(dev->irq); 2335 if (np->msi_flags & NV_MSI_X_ENABLED)
2336 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
2337 else
2338 enable_irq(dev->irq);
2285 } else { 2339 } else {
2286 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { 2340 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
2287 nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL); 2341 nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2628,6 +2682,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
2628 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 2682 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
2629} 2683}
2630 2684
2685static int nv_request_irq(struct net_device *dev)
2686{
2687 struct fe_priv *np = get_nvpriv(dev);
2688 u8 __iomem *base = get_hwbase(dev);
2689 int ret = 1;
2690 int i;
2691
2692 if (np->msi_flags & NV_MSI_X_CAPABLE) {
2693 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2694 np->msi_x_entry[i].entry = i;
2695 }
2696 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
2697 np->msi_flags |= NV_MSI_X_ENABLED;
2698 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
2699 /* Request irq for rx handling */
2700 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
2701 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
2702 pci_disable_msix(np->pci_dev);
2703 np->msi_flags &= ~NV_MSI_X_ENABLED;
2704 goto out_err;
2705 }
2706 /* Request irq for tx handling */
2707 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
2708 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
2709 pci_disable_msix(np->pci_dev);
2710 np->msi_flags &= ~NV_MSI_X_ENABLED;
2711 goto out_free_rx;
2712 }
2713 /* Request irq for link and timer handling */
2714 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
2715 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
2716 pci_disable_msix(np->pci_dev);
2717 np->msi_flags &= ~NV_MSI_X_ENABLED;
2718 goto out_free_tx;
2719 }
2720 /* map interrupts to their respective vector */
2721 writel(0, base + NvRegMSIXMap0);
2722 writel(0, base + NvRegMSIXMap1);
2723 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
2724 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
2725 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
2726 } else {
2727 /* Request irq for all interrupts */
2728 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
2729 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2730 pci_disable_msix(np->pci_dev);
2731 np->msi_flags &= ~NV_MSI_X_ENABLED;
2732 goto out_err;
2733 }
2734
2735 /* map interrupts to vector 0 */
2736 writel(0, base + NvRegMSIXMap0);
2737 writel(0, base + NvRegMSIXMap1);
2738 }
2739 }
2740 }
2741 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
2742 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
2743 np->msi_flags |= NV_MSI_ENABLED;
2744 if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
2745 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2746 pci_disable_msi(np->pci_dev);
2747 np->msi_flags &= ~NV_MSI_ENABLED;
2748 goto out_err;
2749 }
2750
2751 /* map interrupts to vector 0 */
2752 writel(0, base + NvRegMSIMap0);
2753 writel(0, base + NvRegMSIMap1);
2754 /* enable msi vector 0 */
2755 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
2756 }
2757 }
2758 if (ret != 0) {
2759 if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
2760 goto out_err;
2761 }
2762
2763 return 0;
2764out_free_tx:
2765 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
2766out_free_rx:
2767 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
2768out_err:
2769 return 1;
2770}
2771
2772static void nv_free_irq(struct net_device *dev)
2773{
2774 struct fe_priv *np = get_nvpriv(dev);
2775 int i;
2776
2777 if (np->msi_flags & NV_MSI_X_ENABLED) {
2778 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2779 free_irq(np->msi_x_entry[i].vector, dev);
2780 }
2781 pci_disable_msix(np->pci_dev);
2782 np->msi_flags &= ~NV_MSI_X_ENABLED;
2783 } else {
2784 free_irq(np->pci_dev->irq, dev);
2785 if (np->msi_flags & NV_MSI_ENABLED) {
2786 pci_disable_msi(np->pci_dev);
2787 np->msi_flags &= ~NV_MSI_ENABLED;
2788 }
2789 }
2790}
2791
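
nv_request_irq() above tries the interrupt modes in decreasing order of capability: MSI-X (either per-event vectors or a single all-events vector), then MSI, then the legacy PCI line, unwinding each partially completed setup before falling back. A compact sketch of that try-in-order control flow, with placeholder setup functions in place of the real pci_enable_msix()/pci_enable_msi()/request_irq() calls:

    #include <stdio.h>

    static int setup_msix(void)   { return -1; }   /* pretend MSI-X is unavailable */
    static int setup_msi(void)    { return -1; }   /* pretend MSI is unavailable   */
    static int setup_legacy(void) { return 0;  }   /* legacy INTx succeeds here    */

    static int request_irqs(void)
    {
        if (setup_msix() == 0)
            return 0;                              /* best case: per-event vectors */
        if (setup_msi() == 0)
            return 0;                              /* next best: a single MSI      */
        return setup_legacy() == 0 ? 0 : 1;        /* last resort: legacy PCI line */
    }

    int main(void)
    {
        printf("irq setup %s\n", request_irqs() ? "failed" : "ok");
        return 0;
    }
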
2631static int nv_open(struct net_device *dev) 2792static int nv_open(struct net_device *dev)
2632{ 2793{
2633 struct fe_priv *np = netdev_priv(dev); 2794 struct fe_priv *np = netdev_priv(dev);
@@ -2720,12 +2881,16 @@ static int nv_open(struct net_device *dev)
2720 udelay(10); 2881 udelay(10);
2721 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); 2882 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
2722 2883
2723 writel(0, base + NvRegIrqMask); 2884 nv_disable_hw_interrupts(dev, np->irqmask);
2724 pci_push(base); 2885 pci_push(base);
2725 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 2886 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
2726 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 2887 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2727 pci_push(base); 2888 pci_push(base);
2728 2889
2890 if (nv_request_irq(dev)) {
2891 goto out_drain;
2892 }
2893
2729 if (np->msi_flags & NV_MSI_X_CAPABLE) { 2894 if (np->msi_flags & NV_MSI_X_CAPABLE) {
2730 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 2895 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2731 np->msi_x_entry[i].entry = i; 2896 np->msi_x_entry[i].entry = i;
@@ -2799,7 +2964,7 @@ static int nv_open(struct net_device *dev)
2799 } 2964 }
2800 2965
2801 /* ask for interrupts */ 2966 /* ask for interrupts */
2802 writel(np->irqmask, base + NvRegIrqMask); 2967 nv_enable_hw_interrupts(dev, np->irqmask);
2803 2968
2804 spin_lock_irq(&np->lock); 2969 spin_lock_irq(&np->lock);
2805 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 2970 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -2843,7 +3008,6 @@ static int nv_close(struct net_device *dev)
2843{ 3008{
2844 struct fe_priv *np = netdev_priv(dev); 3009 struct fe_priv *np = netdev_priv(dev);
2845 u8 __iomem *base; 3010 u8 __iomem *base;
2846 int i;
2847 3011
2848 spin_lock_irq(&np->lock); 3012 spin_lock_irq(&np->lock);
2849 np->in_shutdown = 1; 3013 np->in_shutdown = 1;
@@ -2861,31 +3025,13 @@ static int nv_close(struct net_device *dev)
2861 3025
2862 /* disable interrupts on the nic or we will lock up */ 3026 /* disable interrupts on the nic or we will lock up */
2863 base = get_hwbase(dev); 3027 base = get_hwbase(dev);
2864 if (np->msi_flags & NV_MSI_X_ENABLED) { 3028 nv_disable_hw_interrupts(dev, np->irqmask);
2865 writel(np->irqmask, base + NvRegIrqMask);
2866 } else {
2867 if (np->msi_flags & NV_MSI_ENABLED)
2868 writel(0, base + NvRegMSIIrqMask);
2869 writel(0, base + NvRegIrqMask);
2870 }
2871 pci_push(base); 3029 pci_push(base);
2872 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); 3030 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
2873 3031
2874 spin_unlock_irq(&np->lock); 3032 spin_unlock_irq(&np->lock);
2875 3033
2876 if (np->msi_flags & NV_MSI_X_ENABLED) { 3034 nv_free_irq(dev);
2877 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
2878 free_irq(np->msi_x_entry[i].vector, dev);
2879 }
2880 pci_disable_msix(np->pci_dev);
2881 np->msi_flags &= ~NV_MSI_X_ENABLED;
2882 } else {
2883 free_irq(np->pci_dev->irq, dev);
2884 if (np->msi_flags & NV_MSI_ENABLED) {
2885 pci_disable_msi(np->pci_dev);
2886 np->msi_flags &= ~NV_MSI_ENABLED;
2887 }
2888 }
2889 3035
2890 drain_ring(dev); 3036 drain_ring(dev);
2891 3037
@@ -2974,20 +3120,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2974 if (id->driver_data & DEV_HAS_HIGH_DMA) { 3120 if (id->driver_data & DEV_HAS_HIGH_DMA) {
2975 /* packet format 3: supports 40-bit addressing */ 3121 /* packet format 3: supports 40-bit addressing */
2976 np->desc_ver = DESC_VER_3; 3122 np->desc_ver = DESC_VER_3;
3123 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2977 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { 3124 if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
2978 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 3125 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2979 pci_name(pci_dev)); 3126 pci_name(pci_dev));
2980 } else { 3127 } else {
2981 if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) { 3128 dev->features |= NETIF_F_HIGHDMA;
2982 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n", 3129 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
2983 pci_name(pci_dev)); 3130 }
2984 goto out_relreg; 3131 if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2985 } else { 3132 printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
2986 dev->features |= NETIF_F_HIGHDMA; 3133 pci_name(pci_dev));
2987 printk(KERN_INFO "forcedeth: using HIGHDMA\n");
2988 }
2989 } 3134 }
2990 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2991 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 3135 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
2992 /* packet format 2: supports jumbo frames */ 3136 /* packet format 2: supports jumbo frames */
2993 np->desc_ver = DESC_VER_2; 3137 np->desc_ver = DESC_VER_2;
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 79a8fbcf5f93..0d5fccc984bb 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -582,7 +582,6 @@ static int __init setup_adapter(int card_base, int type, int n)
582 INIT_WORK(&priv->rx_work, rx_bh, priv); 582 INIT_WORK(&priv->rx_work, rx_bh, priv);
583 dev->priv = priv; 583 dev->priv = priv;
584 sprintf(dev->name, "dmascc%i", 2 * n + i); 584 sprintf(dev->name, "dmascc%i", 2 * n + i);
585 SET_MODULE_OWNER(dev);
586 dev->base_addr = card_base; 585 dev->base_addr = card_base;
587 dev->irq = irq; 586 dev->irq = irq;
588 dev->open = scc_open; 587 dev->open = scc_open;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6ace0e914fd1..5927784df3f9 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1550,7 +1550,6 @@ static unsigned char ax25_nocall[AX25_ADDR_LEN] =
1550 1550
1551static void scc_net_setup(struct net_device *dev) 1551static void scc_net_setup(struct net_device *dev)
1552{ 1552{
1553 SET_MODULE_OWNER(dev);
1554 dev->tx_queue_len = 16; /* should be enough... */ 1553 dev->tx_queue_len = 16; /* should be enough... */
1555 1554
1556 dev->open = scc_net_open; 1555 dev->open = scc_net_open;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index fe22479eb202..b49884048caa 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1098,7 +1098,6 @@ static void yam_setup(struct net_device *dev)
1098 1098
1099 dev->base_addr = yp->iobase; 1099 dev->base_addr = yp->iobase;
1100 dev->irq = yp->irq; 1100 dev->irq = yp->irq;
1101 SET_MODULE_OWNER(dev);
1102 1101
1103 dev->open = yam_open; 1102 dev->open = yam_open;
1104 dev->stop = yam_close; 1103 dev->stop = yam_close;
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 27ab75f20799..c1ce2398efea 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -46,4 +46,4 @@ obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
46obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o 46obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
47 47
48# The SIR helper module 48# The SIR helper module
49sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o 49sir-dev-objs := sir_dev.o sir_dongle.o
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 96bdb73c2283..cd87593e4e8a 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1778,7 +1778,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1778 1778
1779 if (self->needspatch) { 1779 if (self->needspatch) {
1780 ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0), 1780 ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
1781 0x02, 0x40, 0, 0, 0, 0, msecs_to_jiffies(500)); 1781 0x02, 0x40, 0, 0, NULL, 0, 500);
1782 if (ret < 0) { 1782 if (ret < 0) {
1783 IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret); 1783 IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret);
1784 goto err_out_3; 1784 goto err_out_3;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f69fb4cec76f..9fa294a546d6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -15,23 +15,14 @@
15#define IRDA_SIR_H 15#define IRDA_SIR_H
16 16
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/workqueue.h>
18 19
19#include <net/irda/irda.h> 20#include <net/irda/irda.h>
20#include <net/irda/irda_device.h> // iobuff_t 21#include <net/irda/irda_device.h> // iobuff_t
21 22
22/* FIXME: unify irda_request with sir_fsm! */
23
24struct irda_request {
25 struct list_head lh_request;
26 unsigned long pending;
27 void (*func)(void *);
28 void *data;
29 struct timer_list timer;
30};
31
32struct sir_fsm { 23struct sir_fsm {
33 struct semaphore sem; 24 struct semaphore sem;
34 struct irda_request rq; 25 struct work_struct work;
35 unsigned state, substate; 26 unsigned state, substate;
36 int param; 27 int param;
37 int result; 28 int result;
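
The header change above drops the hand-rolled struct irda_request (its own list head, pending flag, callback pointer and timer) and embeds a plain work_struct in sir_fsm instead; deferral and re-arming are handled by the generic workqueue code in sir_dev.c below. A minimal sketch of that simplification, with illustrative types rather than the kernel's own:

    /* Instead of carrying a private request object, the state machine embeds
     * one generic deferred-work handle. */
    struct deferred_work {
        void (*fn)(void *data);     /* what to run later */
        void *data;                 /* context handed to fn */
    };

    struct sir_fsm_sketch {
        struct deferred_work work;  /* replaces the old struct irda_request */
        unsigned state, substate;
        int param;
        int result;
    };
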
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ea7c9464d46a..3b5854d10c17 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -23,6 +23,298 @@
23 23
24#include "sir-dev.h" 24#include "sir-dev.h"
25 25
26
27static struct workqueue_struct *irda_sir_wq;
28
29/* STATE MACHINE */
30
31/* substate handler of the config-fsm to handle the cases where we want
32 * to wait for transmit completion before changing the port configuration
33 */
34
35static int sirdev_tx_complete_fsm(struct sir_dev *dev)
36{
37 struct sir_fsm *fsm = &dev->fsm;
38 unsigned next_state, delay;
39 unsigned bytes_left;
40
41 do {
42 next_state = fsm->substate; /* default: stay in current substate */
43 delay = 0;
44
45 switch(fsm->substate) {
46
47 case SIRDEV_STATE_WAIT_XMIT:
48 if (dev->drv->chars_in_buffer)
49 bytes_left = dev->drv->chars_in_buffer(dev);
50 else
51 bytes_left = 0;
52 if (!bytes_left) {
53 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
54 break;
55 }
56
57 if (dev->speed > 115200)
58 delay = (bytes_left*8*10000) / (dev->speed/100);
59 else if (dev->speed > 0)
60 delay = (bytes_left*10*10000) / (dev->speed/100);
61 else
62 delay = 0;
63 /* expected delay (usec) until remaining bytes are sent */
64 if (delay < 100) {
65 udelay(delay);
66 delay = 0;
67 break;
68 }
69 /* sleep some longer delay (msec) */
70 delay = (delay+999) / 1000;
71 break;
72
73 case SIRDEV_STATE_WAIT_UNTIL_SENT:
 74 /* block until the underlying hardware buffers are empty */
75 if (dev->drv->wait_until_sent)
76 dev->drv->wait_until_sent(dev);
77 next_state = SIRDEV_STATE_TX_DONE;
78 break;
79
80 case SIRDEV_STATE_TX_DONE:
81 return 0;
82
83 default:
84 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
85 return -EINVAL;
86 }
87 fsm->substate = next_state;
88 } while (delay == 0);
89 return delay;
90}
91
92/*
93 * Function sirdev_config_fsm
94 *
95 * State machine to handle the configuration of the device (and attached dongle, if any).
96 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
 97 * However, kIrDAd is shared by all sir_dev devices, so we had better not sleep
 98 * there too long. Instead, for longer delays we reschedule ourselves to run later.
99 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
100 * Both must be unlocked/restarted on completion - but only on final exit.
101 */
102
103static void sirdev_config_fsm(void *data)
104{
105 struct sir_dev *dev = data;
106 struct sir_fsm *fsm = &dev->fsm;
107 int next_state;
108 int ret = -1;
109 unsigned delay;
110
111 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
112
113 do {
114 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
115 __FUNCTION__, fsm->state, fsm->substate);
116
117 next_state = fsm->state;
118 delay = 0;
119
120 switch(fsm->state) {
121
122 case SIRDEV_STATE_DONGLE_OPEN:
123 if (dev->dongle_drv != NULL) {
124 ret = sirdev_put_dongle(dev);
125 if (ret) {
126 fsm->result = -EINVAL;
127 next_state = SIRDEV_STATE_ERROR;
128 break;
129 }
130 }
131
132 /* Initialize dongle */
133 ret = sirdev_get_dongle(dev, fsm->param);
134 if (ret) {
135 fsm->result = ret;
136 next_state = SIRDEV_STATE_ERROR;
137 break;
138 }
139
140 /* Dongles are powered through the modem control lines which
141 * were just set during open. Before resetting, let's wait for
142 * the power to stabilize. This is what some dongle drivers did
143 * in open before, while others didn't - should be safe anyway.
144 */
145
146 delay = 50;
147 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
148 next_state = SIRDEV_STATE_DONGLE_RESET;
149
150 fsm->param = 9600;
151
152 break;
153
154 case SIRDEV_STATE_DONGLE_CLOSE:
 155 /* shouldn't we just treat this as success? */
156 if (dev->dongle_drv == NULL) {
157 fsm->result = -EINVAL;
158 next_state = SIRDEV_STATE_ERROR;
159 break;
160 }
161
162 ret = sirdev_put_dongle(dev);
163 if (ret) {
164 fsm->result = ret;
165 next_state = SIRDEV_STATE_ERROR;
166 break;
167 }
168 next_state = SIRDEV_STATE_DONE;
169 break;
170
171 case SIRDEV_STATE_SET_DTR_RTS:
172 ret = sirdev_set_dtr_rts(dev,
173 (fsm->param&0x02) ? TRUE : FALSE,
174 (fsm->param&0x01) ? TRUE : FALSE);
175 next_state = SIRDEV_STATE_DONE;
176 break;
177
178 case SIRDEV_STATE_SET_SPEED:
179 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
180 next_state = SIRDEV_STATE_DONGLE_CHECK;
181 break;
182
183 case SIRDEV_STATE_DONGLE_CHECK:
184 ret = sirdev_tx_complete_fsm(dev);
185 if (ret < 0) {
186 fsm->result = ret;
187 next_state = SIRDEV_STATE_ERROR;
188 break;
189 }
190 if ((delay=ret) != 0)
191 break;
192
193 if (dev->dongle_drv) {
194 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
195 next_state = SIRDEV_STATE_DONGLE_RESET;
196 }
197 else {
198 dev->speed = fsm->param;
199 next_state = SIRDEV_STATE_PORT_SPEED;
200 }
201 break;
202
203 case SIRDEV_STATE_DONGLE_RESET:
204 if (dev->dongle_drv->reset) {
205 ret = dev->dongle_drv->reset(dev);
206 if (ret < 0) {
207 fsm->result = ret;
208 next_state = SIRDEV_STATE_ERROR;
209 break;
210 }
211 }
212 else
213 ret = 0;
214 if ((delay=ret) == 0) {
215 /* set serial port according to dongle default speed */
216 if (dev->drv->set_speed)
217 dev->drv->set_speed(dev, dev->speed);
218 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
219 next_state = SIRDEV_STATE_DONGLE_SPEED;
220 }
221 break;
222
223 case SIRDEV_STATE_DONGLE_SPEED:
224			if (dev->dongle_drv->set_speed) {
225 ret = dev->dongle_drv->set_speed(dev, fsm->param);
226 if (ret < 0) {
227 fsm->result = ret;
228 next_state = SIRDEV_STATE_ERROR;
229 break;
230 }
231 }
232 else
233 ret = 0;
234 if ((delay=ret) == 0)
235 next_state = SIRDEV_STATE_PORT_SPEED;
236 break;
237
238 case SIRDEV_STATE_PORT_SPEED:
239 /* Finally we are ready to change the serial port speed */
240 if (dev->drv->set_speed)
241 dev->drv->set_speed(dev, dev->speed);
242 dev->new_speed = 0;
243 next_state = SIRDEV_STATE_DONE;
244 break;
245
246 case SIRDEV_STATE_DONE:
247 /* Signal network layer so it can send more frames */
248 netif_wake_queue(dev->netdev);
249 next_state = SIRDEV_STATE_COMPLETE;
250 break;
251
252 default:
253 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
254 fsm->result = -EINVAL;
255 /* fall thru */
256
257 case SIRDEV_STATE_ERROR:
258 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
259
260#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
261 netif_stop_queue(dev->netdev);
262#else
263 netif_wake_queue(dev->netdev);
264#endif
265 /* fall thru */
266
267 case SIRDEV_STATE_COMPLETE:
268 /* config change finished, so we are not busy any longer */
269 sirdev_enable_rx(dev);
270 up(&fsm->sem);
271 return;
272 }
273 fsm->state = next_state;
274 } while(!delay);
275
276 queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
277}
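
For a speed change the state machine above typically walks SET_SPEED -> DONGLE_CHECK -> (DONGLE_RESET -> DONGLE_SPEED ->) PORT_SPEED -> DONE -> COMPLETE, looping synchronously through states that finish immediately and returning to the workqueue whenever a state asks for a delay. The self-contained model below shows only that "loop until a state requests a delay" pattern; the states and the 50 ms delay are invented for illustration and are not the driver's:

#include <stdio.h>

enum state { ST_CHECK, ST_RESET, ST_SPEED, ST_DONE };

struct fsm { enum state state; };

/* Returns 0 when configuration is finished, or a delay in msec after which
 * the caller should invoke fsm_step() again. */
static unsigned fsm_step(struct fsm *f)
{
	unsigned delay;

	do {
		enum state next = f->state;	/* default: stay where we are */

		delay = 0;

		switch (f->state) {
		case ST_CHECK:
			next = ST_RESET;
			break;
		case ST_RESET:
			delay = 50;		/* pretend the hardware needs 50 ms */
			next = ST_SPEED;
			break;
		case ST_SPEED:
			next = ST_DONE;
			break;
		case ST_DONE:
			return 0;
		}
		f->state = next;
	} while (!delay);

	return delay;
}

int main(void)
{
	struct fsm f = { ST_CHECK };
	unsigned d;

	while ((d = fsm_step(&f)) != 0)
		printf("requeue after %u ms\n", d);	/* the driver uses queue_delayed_work() here */
	printf("done\n");
	return 0;
}
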
278
279/* schedule some device configuration task for execution on the irda_sir_wq workqueue
280 * on behalf of the above state machine.
281 * Can be called from process or interrupt/tasklet context.
282 */
283
284int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
285{
286 struct sir_fsm *fsm = &dev->fsm;
287
288 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
289
290 if (down_trylock(&fsm->sem)) {
291 if (in_interrupt() || in_atomic() || irqs_disabled()) {
292 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
293 return -EWOULDBLOCK;
294 } else
295 down(&fsm->sem);
296 }
297
298 if (fsm->state == SIRDEV_STATE_DEAD) {
299 /* race with sirdev_close should never happen */
300		IRDA_ERROR("%s(), instance is stale!\n", __FUNCTION__);
301 up(&fsm->sem);
302 return -ESTALE; /* or better EPIPE? */
303 }
304
305 netif_stop_queue(dev->netdev);
306 atomic_set(&dev->enable_rx, 0);
307
308 fsm->state = initial_state;
309 fsm->param = param;
310 fsm->result = 0;
311
312 INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
313 queue_work(irda_sir_wq, &fsm->work);
314 return 0;
315}
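
Callers do not drive the states directly; they hand an initial state and a parameter to sirdev_schedule_request() and let the FSM do the rest. A hedged usage sketch (the call sites and values are illustrative, and it assumes the declarations from "sir-dev.h"):

/* Illustrative only -- assumes the declarations from "sir-dev.h". */

/* Ask the config FSM to switch the line to 115200 baud; returns -EWOULDBLOCK
 * if the FSM is busy and we are in atomic context. */
static int example_set_speed(struct sir_dev *dev)
{
	return sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 115200);
}

/* Raise DTR and RTS (param bit 1 = DTR, bit 0 = RTS, as decoded in
 * SIRDEV_STATE_SET_DTR_RTS above). */
static int example_set_lines(struct sir_dev *dev)
{
	return sirdev_schedule_request(dev, SIRDEV_STATE_SET_DTR_RTS, 0x03);
}
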
316
317
26/***************************************************************************/ 318/***************************************************************************/
27 319
28void sirdev_enable_rx(struct sir_dev *dev) 320void sirdev_enable_rx(struct sir_dev *dev)
@@ -619,10 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
619 spin_lock_init(&dev->tx_lock); 911 spin_lock_init(&dev->tx_lock);
620 init_MUTEX(&dev->fsm.sem); 912 init_MUTEX(&dev->fsm.sem);
621 913
622 INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
623 dev->fsm.rq.pending = 0;
624 init_timer(&dev->fsm.rq.timer);
625
626 dev->drv = drv; 914 dev->drv = drv;
627 dev->netdev = ndev; 915 dev->netdev = ndev;
628 916
@@ -682,3 +970,22 @@ int sirdev_put_instance(struct sir_dev *dev)
682} 970}
683EXPORT_SYMBOL(sirdev_put_instance); 971EXPORT_SYMBOL(sirdev_put_instance);
684 972
973static int __init sir_wq_init(void)
974{
975 irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
976 if (!irda_sir_wq)
977 return -ENOMEM;
978 return 0;
979}
980
981static void __exit sir_wq_exit(void)
982{
983 destroy_workqueue(irda_sir_wq);
984}
985
986module_init(sir_wq_init);
987module_exit(sir_wq_exit);
988
989MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
990MODULE_DESCRIPTION("IrDA SIR core");
991MODULE_LICENSE("GPL");
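
This init/exit pair is what allows the patch to delete sir_kthread.c below: the hand-rolled kIrDAd thread, request list and timer are replaced by a private single-threaded workqueue plus delayed work. A minimal sketch of that pattern in isolation, using the 3-argument INIT_WORK() of this kernel generation; all names here are placeholders, not the driver's:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_handler(void *data)
{
	/* Do one step of work; requeue ourselves if more is needed, e.g.
	 * queue_delayed_work(example_wq, &example_work, msecs_to_jiffies(50)); */
}

static int example_start(void *data)
{
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_handler, data);	/* old 3-arg form, as in this tree */
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_stop(void)
{
	destroy_workqueue(example_wq);	/* flushes pending work before tearing down */
}
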
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
deleted file mode 100644
index e3904d6bfecd..000000000000
--- a/drivers/net/irda/sir_kthread.c
+++ /dev/null
@@ -1,508 +0,0 @@
1/*********************************************************************
2 *
3 * sir_kthread.c: dedicated thread to process scheduled
4 * sir device setup requests
5 *
6 * Copyright (c) 2002 Martin Diehl
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 ********************************************************************/
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/version.h>
18#include <linux/init.h>
19#include <linux/smp_lock.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
22
23#include <net/irda/irda.h>
24
25#include "sir-dev.h"
26
27/**************************************************************************
28 *
29 * kIrDAd kernel thread and config state machine
30 *
31 */
32
33struct irda_request_queue {
34 struct list_head request_list;
35 spinlock_t lock;
36 task_t *thread;
37 struct completion exit;
38 wait_queue_head_t kick, done;
39 atomic_t num_pending;
40};
41
42static struct irda_request_queue irda_rq_queue;
43
44static int irda_queue_request(struct irda_request *rq)
45{
46 int ret = 0;
47 unsigned long flags;
48
49 if (!test_and_set_bit(0, &rq->pending)) {
50 spin_lock_irqsave(&irda_rq_queue.lock, flags);
51 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
52 wake_up(&irda_rq_queue.kick);
53 atomic_inc(&irda_rq_queue.num_pending);
54 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
55 ret = 1;
56 }
57 return ret;
58}
59
60static void irda_request_timer(unsigned long data)
61{
62 struct irda_request *rq = (struct irda_request *)data;
63 unsigned long flags;
64
65 spin_lock_irqsave(&irda_rq_queue.lock, flags);
66 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
67 wake_up(&irda_rq_queue.kick);
68 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
69}
70
71static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
72{
73 int ret = 0;
74 struct timer_list *timer = &rq->timer;
75
76 if (!test_and_set_bit(0, &rq->pending)) {
77 timer->expires = jiffies + delay;
78 timer->function = irda_request_timer;
79 timer->data = (unsigned long)rq;
80 atomic_inc(&irda_rq_queue.num_pending);
81 add_timer(timer);
82 ret = 1;
83 }
84 return ret;
85}
86
87static void run_irda_queue(void)
88{
89 unsigned long flags;
90 struct list_head *entry, *tmp;
91 struct irda_request *rq;
92
93 spin_lock_irqsave(&irda_rq_queue.lock, flags);
94 list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
95 rq = list_entry(entry, struct irda_request, lh_request);
96 list_del_init(entry);
97 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
98
99 clear_bit(0, &rq->pending);
100 rq->func(rq->data);
101
102 if (atomic_dec_and_test(&irda_rq_queue.num_pending))
103 wake_up(&irda_rq_queue.done);
104
105 spin_lock_irqsave(&irda_rq_queue.lock, flags);
106 }
107 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
108}
109
110static int irda_thread(void *startup)
111{
112 DECLARE_WAITQUEUE(wait, current);
113
114 daemonize("kIrDAd");
115
116 irda_rq_queue.thread = current;
117
118 complete((struct completion *)startup);
119
120 while (irda_rq_queue.thread != NULL) {
121
122 /* We use TASK_INTERRUPTIBLE, rather than
123 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
124 * change ; he told me that it is safe, because "signal
125 * blocking is now handled in daemonize()", he added
126 * that the problem is that "uninterruptible sleep
127 * contributes to load average", making user worry.
128 * Jean II */
129 set_task_state(current, TASK_INTERRUPTIBLE);
130 add_wait_queue(&irda_rq_queue.kick, &wait);
131 if (list_empty(&irda_rq_queue.request_list))
132 schedule();
133 else
134 __set_task_state(current, TASK_RUNNING);
135 remove_wait_queue(&irda_rq_queue.kick, &wait);
136
137 /* make swsusp happy with our thread */
138 try_to_freeze();
139
140 run_irda_queue();
141 }
142
143#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
144 reparent_to_init();
145#endif
146 complete_and_exit(&irda_rq_queue.exit, 0);
147 /* never reached */
148 return 0;
149}
150
151
152static void flush_irda_queue(void)
153{
154 if (atomic_read(&irda_rq_queue.num_pending)) {
155
156 DECLARE_WAITQUEUE(wait, current);
157
158 if (!list_empty(&irda_rq_queue.request_list))
159 run_irda_queue();
160
161 set_task_state(current, TASK_UNINTERRUPTIBLE);
162 add_wait_queue(&irda_rq_queue.done, &wait);
163 if (atomic_read(&irda_rq_queue.num_pending))
164 schedule();
165 else
166 __set_task_state(current, TASK_RUNNING);
167 remove_wait_queue(&irda_rq_queue.done, &wait);
168 }
169}
170
171/* substate handler of the config-fsm to handle the cases where we want
172 * to wait for transmit completion before changing the port configuration
173 */
174
175static int irda_tx_complete_fsm(struct sir_dev *dev)
176{
177 struct sir_fsm *fsm = &dev->fsm;
178 unsigned next_state, delay;
179 unsigned bytes_left;
180
181 do {
182 next_state = fsm->substate; /* default: stay in current substate */
183 delay = 0;
184
185 switch(fsm->substate) {
186
187 case SIRDEV_STATE_WAIT_XMIT:
188 if (dev->drv->chars_in_buffer)
189 bytes_left = dev->drv->chars_in_buffer(dev);
190 else
191 bytes_left = 0;
192 if (!bytes_left) {
193 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
194 break;
195 }
196
197 if (dev->speed > 115200)
198 delay = (bytes_left*8*10000) / (dev->speed/100);
199 else if (dev->speed > 0)
200 delay = (bytes_left*10*10000) / (dev->speed/100);
201 else
202 delay = 0;
203 /* expected delay (usec) until remaining bytes are sent */
204 if (delay < 100) {
205 udelay(delay);
206 delay = 0;
207 break;
208 }
209 /* sleep some longer delay (msec) */
210 delay = (delay+999) / 1000;
211 break;
212
213 case SIRDEV_STATE_WAIT_UNTIL_SENT:
214 /* block until underlaying hardware buffer are empty */
215 if (dev->drv->wait_until_sent)
216 dev->drv->wait_until_sent(dev);
217 next_state = SIRDEV_STATE_TX_DONE;
218 break;
219
220 case SIRDEV_STATE_TX_DONE:
221 return 0;
222
223 default:
224 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
225 return -EINVAL;
226 }
227 fsm->substate = next_state;
228 } while (delay == 0);
229 return delay;
230}
231
232/*
233 * Function irda_config_fsm
234 *
235 * State machine to handle the configuration of the device (and attached dongle, if any).
236 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
237 * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
238 * long. Instead, for longer delays we start a timer to reschedule us later.
239 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
240 * Both must be unlocked/restarted on completion - but only on final exit.
241 */
242
243static void irda_config_fsm(void *data)
244{
245 struct sir_dev *dev = data;
246 struct sir_fsm *fsm = &dev->fsm;
247 int next_state;
248 int ret = -1;
249 unsigned delay;
250
251 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
252
253 do {
254 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
255 __FUNCTION__, fsm->state, fsm->substate);
256
257 next_state = fsm->state;
258 delay = 0;
259
260 switch(fsm->state) {
261
262 case SIRDEV_STATE_DONGLE_OPEN:
263 if (dev->dongle_drv != NULL) {
264 ret = sirdev_put_dongle(dev);
265 if (ret) {
266 fsm->result = -EINVAL;
267 next_state = SIRDEV_STATE_ERROR;
268 break;
269 }
270 }
271
272 /* Initialize dongle */
273 ret = sirdev_get_dongle(dev, fsm->param);
274 if (ret) {
275 fsm->result = ret;
276 next_state = SIRDEV_STATE_ERROR;
277 break;
278 }
279
280 /* Dongles are powered through the modem control lines which
281 * were just set during open. Before resetting, let's wait for
282 * the power to stabilize. This is what some dongle drivers did
283 * in open before, while others didn't - should be safe anyway.
284 */
285
286 delay = 50;
287 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
288 next_state = SIRDEV_STATE_DONGLE_RESET;
289
290 fsm->param = 9600;
291
292 break;
293
294 case SIRDEV_STATE_DONGLE_CLOSE:
295 /* shouldn't we just treat this as success=? */
296 if (dev->dongle_drv == NULL) {
297 fsm->result = -EINVAL;
298 next_state = SIRDEV_STATE_ERROR;
299 break;
300 }
301
302 ret = sirdev_put_dongle(dev);
303 if (ret) {
304 fsm->result = ret;
305 next_state = SIRDEV_STATE_ERROR;
306 break;
307 }
308 next_state = SIRDEV_STATE_DONE;
309 break;
310
311 case SIRDEV_STATE_SET_DTR_RTS:
312 ret = sirdev_set_dtr_rts(dev,
313 (fsm->param&0x02) ? TRUE : FALSE,
314 (fsm->param&0x01) ? TRUE : FALSE);
315 next_state = SIRDEV_STATE_DONE;
316 break;
317
318 case SIRDEV_STATE_SET_SPEED:
319 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
320 next_state = SIRDEV_STATE_DONGLE_CHECK;
321 break;
322
323 case SIRDEV_STATE_DONGLE_CHECK:
324 ret = irda_tx_complete_fsm(dev);
325 if (ret < 0) {
326 fsm->result = ret;
327 next_state = SIRDEV_STATE_ERROR;
328 break;
329 }
330 if ((delay=ret) != 0)
331 break;
332
333 if (dev->dongle_drv) {
334 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
335 next_state = SIRDEV_STATE_DONGLE_RESET;
336 }
337 else {
338 dev->speed = fsm->param;
339 next_state = SIRDEV_STATE_PORT_SPEED;
340 }
341 break;
342
343 case SIRDEV_STATE_DONGLE_RESET:
344 if (dev->dongle_drv->reset) {
345 ret = dev->dongle_drv->reset(dev);
346 if (ret < 0) {
347 fsm->result = ret;
348 next_state = SIRDEV_STATE_ERROR;
349 break;
350 }
351 }
352 else
353 ret = 0;
354 if ((delay=ret) == 0) {
355 /* set serial port according to dongle default speed */
356 if (dev->drv->set_speed)
357 dev->drv->set_speed(dev, dev->speed);
358 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
359 next_state = SIRDEV_STATE_DONGLE_SPEED;
360 }
361 break;
362
363 case SIRDEV_STATE_DONGLE_SPEED:
364 if (dev->dongle_drv->reset) {
365 ret = dev->dongle_drv->set_speed(dev, fsm->param);
366 if (ret < 0) {
367 fsm->result = ret;
368 next_state = SIRDEV_STATE_ERROR;
369 break;
370 }
371 }
372 else
373 ret = 0;
374 if ((delay=ret) == 0)
375 next_state = SIRDEV_STATE_PORT_SPEED;
376 break;
377
378 case SIRDEV_STATE_PORT_SPEED:
379 /* Finally we are ready to change the serial port speed */
380 if (dev->drv->set_speed)
381 dev->drv->set_speed(dev, dev->speed);
382 dev->new_speed = 0;
383 next_state = SIRDEV_STATE_DONE;
384 break;
385
386 case SIRDEV_STATE_DONE:
387 /* Signal network layer so it can send more frames */
388 netif_wake_queue(dev->netdev);
389 next_state = SIRDEV_STATE_COMPLETE;
390 break;
391
392 default:
393 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
394 fsm->result = -EINVAL;
395 /* fall thru */
396
397 case SIRDEV_STATE_ERROR:
398 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
399
400#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
401 netif_stop_queue(dev->netdev);
402#else
403 netif_wake_queue(dev->netdev);
404#endif
405 /* fall thru */
406
407 case SIRDEV_STATE_COMPLETE:
408 /* config change finished, so we are not busy any longer */
409 sirdev_enable_rx(dev);
410 up(&fsm->sem);
411 return;
412 }
413 fsm->state = next_state;
414 } while(!delay);
415
416 irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
417}
418
419/* schedule some device configuration task for execution by kIrDAd
420 * on behalf of the above state machine.
421 * can be called from process or interrupt/tasklet context.
422 */
423
424int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
425{
426 struct sir_fsm *fsm = &dev->fsm;
427 int xmit_was_down;
428
429 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
430
431 if (down_trylock(&fsm->sem)) {
432 if (in_interrupt() || in_atomic() || irqs_disabled()) {
433 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
434 return -EWOULDBLOCK;
435 } else
436 down(&fsm->sem);
437 }
438
439 if (fsm->state == SIRDEV_STATE_DEAD) {
440 /* race with sirdev_close should never happen */
441 IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
442 up(&fsm->sem);
443 return -ESTALE; /* or better EPIPE? */
444 }
445
446 xmit_was_down = netif_queue_stopped(dev->netdev);
447 netif_stop_queue(dev->netdev);
448 atomic_set(&dev->enable_rx, 0);
449
450 fsm->state = initial_state;
451 fsm->param = param;
452 fsm->result = 0;
453
454 INIT_LIST_HEAD(&fsm->rq.lh_request);
455 fsm->rq.pending = 0;
456 fsm->rq.func = irda_config_fsm;
457 fsm->rq.data = dev;
458
459 if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */
460 atomic_set(&dev->enable_rx, 1);
461 if (!xmit_was_down)
462 netif_wake_queue(dev->netdev);
463 up(&fsm->sem);
464 return -EAGAIN;
465 }
466 return 0;
467}
468
469static int __init irda_thread_create(void)
470{
471 struct completion startup;
472 int pid;
473
474 spin_lock_init(&irda_rq_queue.lock);
475 irda_rq_queue.thread = NULL;
476 INIT_LIST_HEAD(&irda_rq_queue.request_list);
477 init_waitqueue_head(&irda_rq_queue.kick);
478 init_waitqueue_head(&irda_rq_queue.done);
479 atomic_set(&irda_rq_queue.num_pending, 0);
480
481 init_completion(&startup);
482 pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
483 if (pid <= 0)
484 return -EAGAIN;
485 else
486 wait_for_completion(&startup);
487
488 return 0;
489}
490
491static void __exit irda_thread_join(void)
492{
493 if (irda_rq_queue.thread) {
494 flush_irda_queue();
495 init_completion(&irda_rq_queue.exit);
496 irda_rq_queue.thread = NULL;
497 wake_up(&irda_rq_queue.kick);
498 wait_for_completion(&irda_rq_queue.exit);
499 }
500}
501
502module_init(irda_thread_create);
503module_exit(irda_thread_join);
504
505MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
506MODULE_DESCRIPTION("IrDA SIR core");
507MODULE_LICENSE("GPL");
508
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 58f76cefbc83..a4674044bd6f 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -54,6 +54,7 @@
54#include <linux/rtnetlink.h> 54#include <linux/rtnetlink.h>
55#include <linux/serial_reg.h> 55#include <linux/serial_reg.h>
56#include <linux/dma-mapping.h> 56#include <linux/dma-mapping.h>
57#include <linux/pnp.h>
57#include <linux/platform_device.h> 58#include <linux/platform_device.h>
58 59
59#include <asm/io.h> 60#include <asm/io.h>
@@ -358,6 +359,16 @@ static inline void register_bank(int iobase, int bank)
358 iobase + IRCC_MASTER); 359 iobase + IRCC_MASTER);
359} 360}
360 361
362#ifdef CONFIG_PNP
363/* PNP hotplug support */
364static const struct pnp_device_id smsc_ircc_pnp_table[] = {
365 { .id = "SMCf010", .driver_data = 0 },
366 /* and presumably others */
367 { }
368};
369MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
370#endif
371
361 372
362/******************************************************************************* 373/*******************************************************************************
363 * 374 *
@@ -2072,7 +2083,8 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
2072 2083
2073/* PROBING 2084/* PROBING
2074 * 2085 *
2075 * 2086 * REVISIT we can be told about the device by PNP, and should use that info
2087 * instead of probing hardware and creating a platform_device ...
2076 */ 2088 */
2077 2089
2078static int __init smsc_ircc_look_for_chips(void) 2090static int __init smsc_ircc_look_for_chips(void)
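
The id table added above only gives the module a PNP modalias so hotplug can load it; as the REVISIT comment says, the driver still probes the hardware itself. If the probe path were ever converted, a binding might look roughly like the sketch below; none of this is part of the patch, and the probe body is a stub:

#ifdef CONFIG_PNP
/* Hypothetical binding, relying on the <linux/pnp.h> include added above. */
static int smsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
	/* A converted driver would pull resources from PNP instead of probing:
	 * unsigned int iobase = pnp_port_start(dev, 0);
	 * unsigned int irq    = pnp_irq(dev, 0);
	 */
	return -ENODEV;	/* stub: the real driver still probes on its own */
}

static struct pnp_driver smsc_ircc_pnp_driver = {
	.name		= "smsc-ircc2",
	.id_table	= smsc_ircc_pnp_table,
	.probe		= smsc_ircc_pnp_probe,
};
/* Registration via pnp_register_driver(&smsc_ircc_pnp_driver) would replace
 * part of smsc_ircc_look_for_chips(); this patch does not do that. */
#endif
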
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index ea62a3e7d586..411f4d809c47 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1419,6 +1419,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1419 mv643xx_eth_update_pscr(dev, &cmd); 1419 mv643xx_eth_update_pscr(dev, &cmd);
1420 mv643xx_set_settings(dev, &cmd); 1420 mv643xx_set_settings(dev, &cmd);
1421 1421
1422 SET_MODULE_OWNER(dev);
1423 SET_NETDEV_DEV(dev, &pdev->dev);
1422 err = register_netdev(dev); 1424 err = register_netdev(dev);
1423 if (err) 1425 if (err)
1424 goto out; 1426 goto out;
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 93c494bcd18d..b32765215f75 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -139,8 +139,9 @@ bad_clone_list[] __initdata = {
139 139
140#if defined(CONFIG_PLAT_MAPPI) 140#if defined(CONFIG_PLAT_MAPPI)
141# define DCR_VAL 0x4b 141# define DCR_VAL 0x4b
142#elif defined(CONFIG_PLAT_OAKS32R) 142#elif defined(CONFIG_PLAT_OAKS32R) || \
143# define DCR_VAL 0x48 143 defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
144# define DCR_VAL 0x48 /* 8-bit mode */
144#else 145#else
145# define DCR_VAL 0x49 146# define DCR_VAL 0x49
146#endif 147#endif
@@ -396,10 +397,22 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
396 /* We must set the 8390 for word mode. */ 397 /* We must set the 8390 for word mode. */
397 outb_p(DCR_VAL, ioaddr + EN0_DCFG); 398 outb_p(DCR_VAL, ioaddr + EN0_DCFG);
398 start_page = NESM_START_PG; 399 start_page = NESM_START_PG;
399 stop_page = NESM_STOP_PG; 400
401 /*
402 * Realtek RTL8019AS datasheet says that the PSTOP register
403 * shouldn't exceed 0x60 in 8-bit mode.
404 * This chip can be identified by reading the signature from
405 * the remote byte count registers (otherwise write-only)...
406 */
407 if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */
408 inb(ioaddr + EN0_RCNTLO) == 0x50 &&
409 inb(ioaddr + EN0_RCNTHI) == 0x70)
410 stop_page = 0x60;
411 else
412 stop_page = NESM_STOP_PG;
400 } else { 413 } else {
401 start_page = NE1SM_START_PG; 414 start_page = NE1SM_START_PG;
402 stop_page = NE1SM_STOP_PG; 415 stop_page = NE1SM_STOP_PG;
403 } 416 }
404 417
405#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) 418#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R)
@@ -509,15 +522,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
509 ei_status.name = name; 522 ei_status.name = name;
510 ei_status.tx_start_page = start_page; 523 ei_status.tx_start_page = start_page;
511 ei_status.stop_page = stop_page; 524 ei_status.stop_page = stop_page;
512#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
513 wordlength = 1;
514#endif
515 525
516#ifdef CONFIG_PLAT_OAKS32R 526 /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */
517 ei_status.word16 = 0; 527 ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01));
518#else
519 ei_status.word16 = (wordlength == 2);
520#endif
521 528
522 ei_status.rx_start_page = start_page + TX_PAGES; 529 ei_status.rx_start_page = start_page + TX_PAGES;
523#ifdef PACKETBUF_MEMSIZE 530#ifdef PACKETBUF_MEMSIZE
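
The practical effect of the 0x60 clamp is a smaller on-chip packet buffer: with 256-byte 8390 pages and the usual 16-bit start page of 0x40 (an assumed value, not shown in this hunk), the RTL8019AS in 8-bit mode gets an 8 KB window instead of the 16 KB implied by a default stop page of 0x80. A tiny runnable check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned page_size = 256;		/* one 8390 buffer page */
	const unsigned start = 0x40;		/* assumed NESM_START_PG */
	const unsigned stop_default = 0x80;	/* assumed NESM_STOP_PG */
	const unsigned stop_8bit = 0x60;	/* RTL8019AS limit from the comment above */

	printf("16-bit window: %u KB\n", (stop_default - start) * page_size / 1024);
	printf("RTL8019AS 8-bit window: %u KB\n", (stop_8bit - start) * page_size / 1024);
	return 0;
}
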
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 459443b572ce..1b236bdf6b92 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -60,8 +60,10 @@ int mdiobus_register(struct mii_bus *bus)
60 for (i = 0; i < PHY_MAX_ADDR; i++) { 60 for (i = 0; i < PHY_MAX_ADDR; i++) {
61 struct phy_device *phydev; 61 struct phy_device *phydev;
62 62
63 if (bus->phy_mask & (1 << i)) 63 if (bus->phy_mask & (1 << i)) {
64 bus->phy_map[i] = NULL;
64 continue; 65 continue;
66 }
65 67
66 phydev = get_phy_device(bus, i); 68 phydev = get_phy_device(bus, i);
67 69
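
The NULL initialization above matters because code that walks bus->phy_map[] expects unused slots to be empty. A MAC driver opts addresses out of scanning through phy_mask, for example (a sketch that assumes the rest of the mii_bus has been set up elsewhere):

#include <linux/phy.h>

/* Sketch: probe only PHY address 1 on this MDIO bus; every masked slot is now
 * guaranteed to be initialized to NULL by mdiobus_register(). */
static int example_register_bus(struct mii_bus *bus)
{
	bus->phy_mask = ~(1u << 1);	/* a set bit means "skip this address" */
	return mdiobus_register(bus);
}
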
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index b82191d2bee1..f5a3bf4d959a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -127,6 +127,7 @@ static const struct mii_chip_info {
127} mii_chip_table[] = { 127} mii_chip_table[] = {
128 { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, 128 { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
129 { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, 129 { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
130 { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN },
130 { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, 131 { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN },
131 { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, 132 { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN },
132 { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, 133 { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 227df9876a2c..ffd267fab21d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
51#include "sky2.h" 51#include "sky2.h"
52 52
53#define DRV_NAME "sky2" 53#define DRV_NAME "sky2"
54#define DRV_VERSION "1.2" 54#define DRV_VERSION "1.3"
55#define PFX DRV_NAME " " 55#define PFX DRV_NAME " "
56 56
57/* 57/*
@@ -79,6 +79,8 @@
79#define NAPI_WEIGHT 64 79#define NAPI_WEIGHT 64
80#define PHY_RETRIES 1000 80#define PHY_RETRIES 1000
81 81
82#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
83
82static const u32 default_msg = 84static const u32 default_msg =
83 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 85 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
84 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR 86 | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
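
RING_NEXT() trades the earlier '% TX_RING_SIZE' / '% RX_LE_SIZE' modulo for a mask, which is only equivalent when the ring size is a power of two (the sky2 rings are; 512 is assumed below purely for illustration). A standalone check of the equivalence and of the matching tx_dist()-style subtraction:

#include <assert.h>
#include <stdio.h>

#define RING_NEXT(x, s)	(((x) + 1) & ((s) - 1))

int main(void)
{
	const unsigned size = 512;	/* must be a power of two for the mask trick */
	unsigned i;

	for (i = 0; i < 2 * size; i++)
		assert(RING_NEXT(i % size, size) == (i + 1) % size);

	/* modular distance, as in the reworked tx_dist(): (head - tail) & (size - 1) */
	printf("dist(tail=510, head=3) = %u\n", (3u - 510u) & (size - 1));	/* prints 5 */
	return 0;
}
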
@@ -96,6 +98,10 @@ static int disable_msi = 0;
96module_param(disable_msi, int, 0); 98module_param(disable_msi, int, 0);
97MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); 99MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
98 100
101static int idle_timeout = 100;
102module_param(idle_timeout, int, 0);
103MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)");
104
99static const struct pci_device_id sky2_id_table[] = { 105static const struct pci_device_id sky2_id_table[] = {
100 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, 106 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
101 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, 107 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -122,6 +128,7 @@ MODULE_DEVICE_TABLE(pci, sky2_id_table);
122/* Avoid conditionals by using array */ 128/* Avoid conditionals by using array */
123static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; 129static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
124static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; 130static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
131static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
125 132
126/* This driver supports yukon2 chipset only */ 133/* This driver supports yukon2 chipset only */
127static const char *yukon2_name[] = { 134static const char *yukon2_name[] = {
@@ -298,7 +305,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
298 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 305 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
299 u16 ctrl, ct1000, adv, pg, ledctrl, ledover; 306 u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
300 307
301 if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) { 308 if (sky2->autoneg == AUTONEG_ENABLE &&
309 (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
302 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 310 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
303 311
304 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 312 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -326,7 +334,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
326 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); 334 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
327 335
328 if (sky2->autoneg == AUTONEG_ENABLE && 336 if (sky2->autoneg == AUTONEG_ENABLE &&
329 hw->chip_id == CHIP_ID_YUKON_XL) { 337 (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
330 ctrl &= ~PHY_M_PC_DSC_MSK; 338 ctrl &= ~PHY_M_PC_DSC_MSK;
331 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; 339 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
332 } 340 }
@@ -442,10 +450,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
442 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 450 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
443 451
444 /* set LED Function Control register */ 452 /* set LED Function Control register */
445 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ 453 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
446 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ 454 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
447 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ 455 PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
448 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ 456 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
457 PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
449 458
450 /* set Polarity Control register */ 459 /* set Polarity Control register */
451 gm_phy_write(hw, port, PHY_MARV_PHY_STAT, 460 gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
@@ -459,6 +468,25 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
459 /* restore page register */ 468 /* restore page register */
460 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 469 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
461 break; 470 break;
471 case CHIP_ID_YUKON_EC_U:
472 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
473
474 /* select page 3 to access LED control register */
475 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
476
477 /* set LED Function Control register */
478 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
479 (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
480 PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
481 PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
482 PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
483
484 /* set Blink Rate in LED Timer Control Register */
485 gm_phy_write(hw, port, PHY_MARV_INT_MASK,
486 ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
487 /* restore page register */
488 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
489 break;
462 490
463 default: 491 default:
464 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ 492 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
@@ -467,19 +495,21 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
467 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); 495 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
468 } 496 }
469 497
470 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { 498 if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
471 /* apply fixes in PHY AFE */ 499 /* apply fixes in PHY AFE */
472 gm_phy_write(hw, port, 22, 255); 500 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
501 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
502
473 /* increase differential signal amplitude in 10BASE-T */ 503 /* increase differential signal amplitude in 10BASE-T */
474 gm_phy_write(hw, port, 24, 0xaa99); 504 gm_phy_write(hw, port, 0x18, 0xaa99);
475 gm_phy_write(hw, port, 23, 0x2011); 505 gm_phy_write(hw, port, 0x17, 0x2011);
476 506
477 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ 507 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
478 gm_phy_write(hw, port, 24, 0xa204); 508 gm_phy_write(hw, port, 0x18, 0xa204);
479 gm_phy_write(hw, port, 23, 0x2002); 509 gm_phy_write(hw, port, 0x17, 0x2002);
480 510
481 /* set page register to 0 */ 511 /* set page register to 0 */
482 gm_phy_write(hw, port, 22, 0); 512 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
483 } else { 513 } else {
484 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 514 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
485 515
@@ -553,6 +583,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
553 583
554 if (sky2->duplex == DUPLEX_FULL) 584 if (sky2->duplex == DUPLEX_FULL)
555 reg |= GM_GPCR_DUP_FULL; 585 reg |= GM_GPCR_DUP_FULL;
586
587 /* turn off pause in 10/100mbps half duplex */
588 else if (sky2->speed != SPEED_1000 &&
589 hw->chip_id != CHIP_ID_YUKON_EC_U)
590 sky2->tx_pause = sky2->rx_pause = 0;
556 } else 591 } else
557 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 592 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
558 593
@@ -719,7 +754,7 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
719{ 754{
720 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod; 755 struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
721 756
722 sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE; 757 sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
723 return le; 758 return le;
724} 759}
725 760
@@ -735,7 +770,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
735static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) 770static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
736{ 771{
737 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; 772 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
738 sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE; 773 sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
739 return le; 774 return le;
740} 775}
741 776
@@ -1050,7 +1085,7 @@ static int sky2_up(struct net_device *dev)
1050 1085
1051 /* Enable interrupts from phy/mac for port */ 1086 /* Enable interrupts from phy/mac for port */
1052 imask = sky2_read32(hw, B0_IMSK); 1087 imask = sky2_read32(hw, B0_IMSK);
1053 imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1088 imask |= portirq_msk[port];
1054 sky2_write32(hw, B0_IMSK, imask); 1089 sky2_write32(hw, B0_IMSK, imask);
1055 1090
1056 return 0; 1091 return 0;
@@ -1078,7 +1113,7 @@ err_out:
1078/* Modular subtraction in ring */ 1113/* Modular subtraction in ring */
1079static inline int tx_dist(unsigned tail, unsigned head) 1114static inline int tx_dist(unsigned tail, unsigned head)
1080{ 1115{
1081 return (head - tail) % TX_RING_SIZE; 1116 return (head - tail) & (TX_RING_SIZE - 1);
1082} 1117}
1083 1118
1084/* Number of list elements available for next tx */ 1119/* Number of list elements available for next tx */
@@ -1255,7 +1290,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1255 le->opcode = OP_BUFFER | HW_OWNER; 1290 le->opcode = OP_BUFFER | HW_OWNER;
1256 1291
1257 fre = sky2->tx_ring 1292 fre = sky2->tx_ring
1258 + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE; 1293 + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
1259 pci_unmap_addr_set(fre, mapaddr, mapping); 1294 pci_unmap_addr_set(fre, mapaddr, mapping);
1260 } 1295 }
1261 1296
@@ -1315,7 +1350,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1315 1350
1316 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1351 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1317 struct tx_ring_info *fre; 1352 struct tx_ring_info *fre;
1318 fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; 1353 fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
1319 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), 1354 pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
1320 skb_shinfo(skb)->frags[i].size, 1355 skb_shinfo(skb)->frags[i].size,
1321 PCI_DMA_TODEVICE); 1356 PCI_DMA_TODEVICE);
@@ -1401,7 +1436,7 @@ static int sky2_down(struct net_device *dev)
1401 1436
1402 /* Disable port IRQ */ 1437 /* Disable port IRQ */
1403 imask = sky2_read32(hw, B0_IMSK); 1438 imask = sky2_read32(hw, B0_IMSK);
1404 imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; 1439 imask &= ~portirq_msk[port];
1405 sky2_write32(hw, B0_IMSK, imask); 1440 sky2_write32(hw, B0_IMSK, imask);
1406 1441
1407 /* turn off LED's */ 1442 /* turn off LED's */
@@ -1498,17 +1533,26 @@ static void sky2_link_up(struct sky2_port *sky2)
1498 sky2_write8(hw, SK_REG(port, LNK_LED_REG), 1533 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1499 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); 1534 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1500 1535
1501 if (hw->chip_id == CHIP_ID_YUKON_XL) { 1536 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
1502 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 1537 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1538 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
1539
1540 switch(sky2->speed) {
1541 case SPEED_10:
1542 led |= PHY_M_LEDC_INIT_CTRL(7);
1543 break;
1544
1545 case SPEED_100:
1546 led |= PHY_M_LEDC_STA1_CTRL(7);
1547 break;
1548
1549 case SPEED_1000:
1550 led |= PHY_M_LEDC_STA0_CTRL(7);
1551 break;
1552 }
1503 1553
1504 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 1554 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
1505 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ 1555 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led);
1506 PHY_M_LEDC_INIT_CTRL(sky2->speed ==
1507 SPEED_10 ? 7 : 0) |
1508 PHY_M_LEDC_STA1_CTRL(sky2->speed ==
1509 SPEED_100 ? 7 : 0) |
1510 PHY_M_LEDC_STA0_CTRL(sky2->speed ==
1511 SPEED_1000 ? 7 : 0));
1512 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 1556 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
1513 } 1557 }
1514 1558
@@ -1583,7 +1627,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1583 sky2->speed = sky2_phy_speed(hw, aux); 1627 sky2->speed = sky2_phy_speed(hw, aux);
1584 1628
1585 /* Pause bits are offset (9..8) */ 1629 /* Pause bits are offset (9..8) */
1586 if (hw->chip_id == CHIP_ID_YUKON_XL) 1630 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
1587 aux >>= 6; 1631 aux >>= 6;
1588 1632
1589 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0; 1633 sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
@@ -1859,35 +1903,28 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
1859static int sky2_status_intr(struct sky2_hw *hw, int to_do) 1903static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1860{ 1904{
1861 int work_done = 0; 1905 int work_done = 0;
1906 u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
1862 1907
1863 rmb(); 1908 rmb();
1864 1909
1865 for(;;) { 1910 while (hw->st_idx != hwidx) {
1866 struct sky2_status_le *le = hw->st_le + hw->st_idx; 1911 struct sky2_status_le *le = hw->st_le + hw->st_idx;
1867 struct net_device *dev; 1912 struct net_device *dev;
1868 struct sky2_port *sky2; 1913 struct sky2_port *sky2;
1869 struct sk_buff *skb; 1914 struct sk_buff *skb;
1870 u32 status; 1915 u32 status;
1871 u16 length; 1916 u16 length;
1872 u8 link, opcode;
1873 1917
1874 opcode = le->opcode; 1918 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
1875 if (!opcode)
1876 break;
1877 opcode &= ~HW_OWNER;
1878
1879 hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
1880 le->opcode = 0;
1881 1919
1882 link = le->link; 1920 BUG_ON(le->link >= 2);
1883 BUG_ON(link >= 2); 1921 dev = hw->dev[le->link];
1884 dev = hw->dev[link];
1885 1922
1886 sky2 = netdev_priv(dev); 1923 sky2 = netdev_priv(dev);
1887 length = le->length; 1924 length = le->length;
1888 status = le->status; 1925 status = le->status;
1889 1926
1890 switch (opcode) { 1927 switch (le->opcode & ~HW_OWNER) {
1891 case OP_RXSTAT: 1928 case OP_RXSTAT:
1892 skb = sky2_receive(sky2, length, status); 1929 skb = sky2_receive(sky2, length, status);
1893 if (!skb) 1930 if (!skb)
@@ -1927,7 +1964,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1927 1964
1928 case OP_TXINDEXLE: 1965 case OP_TXINDEXLE:
1929 /* TX index reports status for both ports */ 1966 /* TX index reports status for both ports */
1930 sky2_tx_done(hw->dev[0], status & 0xffff); 1967 BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
1968 sky2_tx_done(hw->dev[0], status & 0xfff);
1931 if (hw->dev[1]) 1969 if (hw->dev[1])
1932 sky2_tx_done(hw->dev[1], 1970 sky2_tx_done(hw->dev[1],
1933 ((status >> 24) & 0xff) 1971 ((status >> 24) & 0xff)
@@ -1937,8 +1975,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1937 default: 1975 default:
1938 if (net_ratelimit()) 1976 if (net_ratelimit())
1939 printk(KERN_WARNING PFX 1977 printk(KERN_WARNING PFX
1940 "unknown status opcode 0x%x\n", opcode); 1978 "unknown status opcode 0x%x\n", le->opcode);
1941 break; 1979 goto exit_loop;
1942 } 1980 }
1943 } 1981 }
1944 1982
@@ -2089,12 +2127,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2089 */ 2127 */
2090static void sky2_idle(unsigned long arg) 2128static void sky2_idle(unsigned long arg)
2091{ 2129{
2092 struct net_device *dev = (struct net_device *) arg; 2130 struct sky2_hw *hw = (struct sky2_hw *) arg;
2131 struct net_device *dev = hw->dev[0];
2093 2132
2094 local_irq_disable();
2095 if (__netif_rx_schedule_prep(dev)) 2133 if (__netif_rx_schedule_prep(dev))
2096 __netif_rx_schedule(dev); 2134 __netif_rx_schedule(dev);
2097 local_irq_enable(); 2135
2136 mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
2098} 2137}
2099 2138
2100 2139
@@ -2105,65 +2144,46 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2105 int work_done = 0; 2144 int work_done = 0;
2106 u32 status = sky2_read32(hw, B0_Y2_SP_EISR); 2145 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2107 2146
2108 restart_poll: 2147 if (status & Y2_IS_HW_ERR)
2109 if (unlikely(status & ~Y2_IS_STAT_BMU)) { 2148 sky2_hw_intr(hw);
2110 if (status & Y2_IS_HW_ERR)
2111 sky2_hw_intr(hw);
2112
2113 if (status & Y2_IS_IRQ_PHY1)
2114 sky2_phy_intr(hw, 0);
2115
2116 if (status & Y2_IS_IRQ_PHY2)
2117 sky2_phy_intr(hw, 1);
2118 2149
2119 if (status & Y2_IS_IRQ_MAC1) 2150 if (status & Y2_IS_IRQ_PHY1)
2120 sky2_mac_intr(hw, 0); 2151 sky2_phy_intr(hw, 0);
2121 2152
2122 if (status & Y2_IS_IRQ_MAC2) 2153 if (status & Y2_IS_IRQ_PHY2)
2123 sky2_mac_intr(hw, 1); 2154 sky2_phy_intr(hw, 1);
2124 2155
2125 if (status & Y2_IS_CHK_RX1) 2156 if (status & Y2_IS_IRQ_MAC1)
2126 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); 2157 sky2_mac_intr(hw, 0);
2127 2158
2128 if (status & Y2_IS_CHK_RX2) 2159 if (status & Y2_IS_IRQ_MAC2)
2129 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); 2160 sky2_mac_intr(hw, 1);
2130 2161
2131 if (status & Y2_IS_CHK_TXA1) 2162 if (status & Y2_IS_CHK_RX1)
2132 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); 2163 sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
2133 2164
2134 if (status & Y2_IS_CHK_TXA2) 2165 if (status & Y2_IS_CHK_RX2)
2135 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); 2166 sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
2136 }
2137 2167
2138 if (status & Y2_IS_STAT_BMU) { 2168 if (status & Y2_IS_CHK_TXA1)
2139 work_done += sky2_status_intr(hw, work_limit - work_done); 2169 sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
2140 *budget -= work_done;
2141 dev0->quota -= work_done;
2142 2170
2143 if (work_done >= work_limit) 2171 if (status & Y2_IS_CHK_TXA2)
2144 return 1; 2172 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
2145 2173
2174 if (status & Y2_IS_STAT_BMU)
2146 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 2175 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2147 }
2148
2149 mod_timer(&hw->idle_timer, jiffies + HZ);
2150 2176
2151 local_irq_disable(); 2177 work_done = sky2_status_intr(hw, work_limit);
2152 __netif_rx_complete(dev0); 2178 *budget -= work_done;
2179 dev0->quota -= work_done;
2153 2180
2154 status = sky2_read32(hw, B0_Y2_SP_LISR); 2181 if (work_done >= work_limit)
2182 return 1;
2155 2183
2156 if (unlikely(status)) { 2184 netif_rx_complete(dev0);
2157 /* More work pending, try and keep going */
2158 if (__netif_rx_schedule_prep(dev0)) {
2159 __netif_rx_reschedule(dev0, work_done);
2160 status = sky2_read32(hw, B0_Y2_SP_EISR);
2161 local_irq_enable();
2162 goto restart_poll;
2163 }
2164 }
2165 2185
2166 local_irq_enable(); 2186 status = sky2_read32(hw, B0_Y2_SP_LISR);
2167 return 0; 2187 return 0;
2168} 2188}
2169 2189
@@ -2244,13 +2264,6 @@ static int __devinit sky2_reset(struct sky2_hw *hw)
2244 return -EOPNOTSUPP; 2264 return -EOPNOTSUPP;
2245 } 2265 }
2246 2266
2247 /* This chip is new and not tested yet */
2248 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
2249 pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n",
2250 pci_name(hw->pdev));
2251 pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
2252 }
2253
2254 /* disable ASF */ 2267 /* disable ASF */
2255 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2268 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2256 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2269 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -3302,7 +3315,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3302 3315
3303 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3316 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3304 3317
3305 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) dev); 3318 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
3319 if (idle_timeout > 0)
3320 mod_timer(&hw->idle_timer,
3321 jiffies + msecs_to_jiffies(idle_timeout));
3306 3322
3307 pci_set_drvdata(pdev, hw); 3323 pci_set_drvdata(pdev, hw);
3308 3324
@@ -3342,6 +3358,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3342 del_timer_sync(&hw->idle_timer); 3358 del_timer_sync(&hw->idle_timer);
3343 3359
3344 sky2_write32(hw, B0_IMSK, 0); 3360 sky2_write32(hw, B0_IMSK, 0);
3361 synchronize_irq(hw->pdev->irq);
3362
3345 dev0 = hw->dev[0]; 3363 dev0 = hw->dev[0];
3346 dev1 = hw->dev[1]; 3364 dev1 = hw->dev[1];
3347 if (dev1) 3365 if (dev1)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b026f5653f04..8012994c9b93 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -378,6 +378,9 @@ enum {
378 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ 378 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
379 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ 379 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
380 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */ 380 CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
381
382 CHIP_REV_YU_EC_U_A0 = 0,
383 CHIP_REV_YU_EC_U_A1 = 1,
381}; 384};
382 385
383/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ 386/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 43f5e86fc559..394339d5e87c 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1652,6 +1652,8 @@ spider_net_enable_card(struct spider_net_card *card)
1652 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, 1652 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1653 1653
1654 { SPIDER_NET_GMRWOLCTRL, 0 }, 1654 { SPIDER_NET_GMRWOLCTRL, 0 },
1655 { SPIDER_NET_GTESTMD, 0x10000000 },
1656 { SPIDER_NET_GTTQMSK, 0x00400040 },
1655 { SPIDER_NET_GTESTMD, 0 }, 1657 { SPIDER_NET_GTESTMD, 0 },
1656 1658
1657 { SPIDER_NET_GMACINTEN, 0 }, 1659 { SPIDER_NET_GMACINTEN, 0 },
@@ -1792,15 +1794,7 @@ spider_net_setup_phy(struct spider_net_card *card)
1792 if (phy->def->ops->setup_forced) 1794 if (phy->def->ops->setup_forced)
1793 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL); 1795 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
1794 1796
1795 /* the following two writes could be moved to sungem_phy.c */ 1797 phy->def->ops->enable_fiber(phy);
1796 /* enable fiber mode */
1797 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020);
1798 /* LEDs active in both modes, autosense prio = fiber */
1799 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
1800
1801 /* switch off fibre autoneg */
1802 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
1803 spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
1804 1798
1805 phy->def->ops->read_link(phy); 1799 phy->def->ops->read_link(phy);
1806 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name, 1800 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 5922b529a048..3b8d951cf73c 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -120,6 +120,8 @@ extern char spider_net_driver_name[];
120#define SPIDER_NET_GMRUAFILnR 0x00000500 120#define SPIDER_NET_GMRUAFILnR 0x00000500
121#define SPIDER_NET_GMRUA0FIL15R 0x00000578 121#define SPIDER_NET_GMRUA0FIL15R 0x00000578
122 122
123#define SPIDER_NET_GTTQMSK 0x00000934
124
123/* RX DMA controller registers, all 0x00000a.. are for DMA controller A, 125/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
124 * 0x00000b.. for DMA controller B, etc. */ 126 * 0x00000b.. for DMA controller B, etc. */
125#define SPIDER_NET_GDADCHA 0x00000a00 127#define SPIDER_NET_GDADCHA 0x00000a00
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 046371ee5bbe..b2ddd5e79303 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -329,6 +329,30 @@ static int bcm5421_init(struct mii_phy* phy)
329 return 0; 329 return 0;
330} 330}
331 331
332static int bcm5421_enable_fiber(struct mii_phy* phy)
333{
334 /* enable fiber mode */
335 phy_write(phy, MII_NCONFIG, 0x9020);
336 /* LEDs active in both modes, autosense prio = fiber */
337 phy_write(phy, MII_NCONFIG, 0x945f);
338
339 /* switch off fibre autoneg */
340 phy_write(phy, MII_NCONFIG, 0xfc01);
341 phy_write(phy, 0x0b, 0x0004);
342
343 return 0;
344}
345
346static int bcm5461_enable_fiber(struct mii_phy* phy)
347{
348 phy_write(phy, MII_NCONFIG, 0xfc0c);
349 phy_write(phy, MII_BMCR, 0x4140);
350 phy_write(phy, MII_NCONFIG, 0xfc0b);
351 phy_write(phy, MII_BMCR, 0x0140);
352
353 return 0;
354}
355
332static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) 356static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
333{ 357{
334 u16 ctl, adv; 358 u16 ctl, adv;
@@ -762,6 +786,7 @@ static struct mii_phy_ops bcm5421_phy_ops = {
762 .setup_forced = bcm54xx_setup_forced, 786 .setup_forced = bcm54xx_setup_forced,
763 .poll_link = genmii_poll_link, 787 .poll_link = genmii_poll_link,
764 .read_link = bcm54xx_read_link, 788 .read_link = bcm54xx_read_link,
789 .enable_fiber = bcm5421_enable_fiber,
765}; 790};
766 791
767static struct mii_phy_def bcm5421_phy_def = { 792static struct mii_phy_def bcm5421_phy_def = {
@@ -792,6 +817,25 @@ static struct mii_phy_def bcm5421k2_phy_def = {
792 .ops = &bcm5421k2_phy_ops 817 .ops = &bcm5421k2_phy_ops
793}; 818};
794 819
820static struct mii_phy_ops bcm5461_phy_ops = {
821 .init = bcm5421_init,
822 .suspend = generic_suspend,
823 .setup_aneg = bcm54xx_setup_aneg,
824 .setup_forced = bcm54xx_setup_forced,
825 .poll_link = genmii_poll_link,
826 .read_link = bcm54xx_read_link,
827 .enable_fiber = bcm5461_enable_fiber,
828};
829
830static struct mii_phy_def bcm5461_phy_def = {
831 .phy_id = 0x002060c0,
832 .phy_id_mask = 0xfffffff0,
833 .name = "BCM5461",
834 .features = MII_GBIT_FEATURES,
835 .magic_aneg = 1,
836 .ops = &bcm5461_phy_ops
837};
838
795/* Broadcom BCM 5462 built-in Vesta */ 839/* Broadcom BCM 5462 built-in Vesta */
796static struct mii_phy_ops bcm5462V_phy_ops = { 840static struct mii_phy_ops bcm5462V_phy_ops = {
797 .init = bcm5421_init, 841 .init = bcm5421_init,
@@ -857,6 +901,7 @@ static struct mii_phy_def* mii_phy_table[] = {
857 &bcm5411_phy_def, 901 &bcm5411_phy_def,
858 &bcm5421_phy_def, 902 &bcm5421_phy_def,
859 &bcm5421k2_phy_def, 903 &bcm5421k2_phy_def,
904 &bcm5461_phy_def,
860 &bcm5462V_phy_def, 905 &bcm5462V_phy_def,
861 &marvell_phy_def, 906 &marvell_phy_def,
862 &genmii_phy_def, 907 &genmii_phy_def,
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
index 430544496c52..69e125197fcf 100644
--- a/drivers/net/sungem_phy.h
+++ b/drivers/net/sungem_phy.h
@@ -12,6 +12,7 @@ struct mii_phy_ops
12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd); 12 int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
13 int (*poll_link)(struct mii_phy *phy); 13 int (*poll_link)(struct mii_phy *phy);
14 int (*read_link)(struct mii_phy *phy); 14 int (*read_link)(struct mii_phy *phy);
15 int (*enable_fiber)(struct mii_phy *phy);
15}; 16};
16 17
17/* Structure used to statically define an mii/gii based PHY */ 18/* Structure used to statically define an mii/gii based PHY */
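
With enable_fiber in the ops table, spider_net's fiber bring-up earlier in this diff collapses to one indirect call. A caller that might also meet PHYs without the new op would presumably guard it, as in this sketch (not part of the patch, assuming the structures from sungem_phy.h):

/* Sketch: only the BCM5421/BCM5461 defs provide .enable_fiber here, so a
 * generic caller should check the pointer before dispatching. */
static void example_enable_fiber(struct mii_phy *phy)
{
	if (phy->def->ops->enable_fiber)
		phy->def->ops->enable_fiber(phy);
}
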
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 73e271e59c6a..2bd9592b75cd 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define PFX DRV_MODULE_NAME ": " 71#define PFX DRV_MODULE_NAME ": "
72#define DRV_MODULE_VERSION "3.56" 72#define DRV_MODULE_VERSION "3.57"
73#define DRV_MODULE_RELDATE "Apr 1, 2006" 73#define DRV_MODULE_RELDATE "Apr 28, 2006"
74 74
75#define TG3_DEF_MAC_MODE 0 75#define TG3_DEF_MAC_MODE 0
76#define TG3_DEF_RX_MODE 0 76#define TG3_DEF_RX_MODE 0
@@ -974,6 +974,8 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
974 return err; 974 return err;
975} 975}
976 976
977static void tg3_link_report(struct tg3 *);
978
977/* This will reset the tigon3 PHY if there is no valid 979/* This will reset the tigon3 PHY if there is no valid
978 * link unless the FORCE argument is non-zero. 980 * link unless the FORCE argument is non-zero.
979 */ 981 */
@@ -987,6 +989,11 @@ static int tg3_phy_reset(struct tg3 *tp)
987 if (err != 0) 989 if (err != 0)
988 return -EBUSY; 990 return -EBUSY;
989 991
992 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
993 netif_carrier_off(tp->dev);
994 tg3_link_report(tp);
995 }
996
990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { 999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
@@ -1023,6 +1030,12 @@ out:
1023 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); 1030 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1024 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1031 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1025 } 1032 }
1033 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1034 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1035 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1036 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1037 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1038 }
1026 /* Set Extended packet length bit (bit 14) on all chips that */ 1039 /* Set Extended packet length bit (bit 14) on all chips that */
1027 /* support jumbo frames */ 1040 /* support jumbo frames */
1028 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { 1041 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
@@ -3531,7 +3544,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3531 return IRQ_RETVAL(0); 3544 return IRQ_RETVAL(0);
3532} 3545}
3533 3546
3534static int tg3_init_hw(struct tg3 *); 3547static int tg3_init_hw(struct tg3 *, int);
3535static int tg3_halt(struct tg3 *, int, int); 3548static int tg3_halt(struct tg3 *, int, int);
3536 3549
3537#ifdef CONFIG_NET_POLL_CONTROLLER 3550#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3567,7 +3580,7 @@ static void tg3_reset_task(void *_data)
3567 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; 3580 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3568 3581
3569 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3582 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3570 tg3_init_hw(tp); 3583 tg3_init_hw(tp, 1);
3571 3584
3572 tg3_netif_start(tp); 3585 tg3_netif_start(tp);
3573 3586
@@ -4042,7 +4055,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4042 4055
4043 tg3_set_mtu(dev, tp, new_mtu); 4056 tg3_set_mtu(dev, tp, new_mtu);
4044 4057
4045 tg3_init_hw(tp); 4058 tg3_init_hw(tp, 0);
4046 4059
4047 tg3_netif_start(tp); 4060 tg3_netif_start(tp);
4048 4061
@@ -5719,9 +5732,23 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5719 if (!netif_running(dev)) 5732 if (!netif_running(dev))
5720 return 0; 5733 return 0;
5721 5734
5722 spin_lock_bh(&tp->lock); 5735 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5723 __tg3_set_mac_addr(tp); 5736 /* Reset chip so that ASF can re-init any MAC addresses it
5724 spin_unlock_bh(&tp->lock); 5737 * needs.
5738 */
5739 tg3_netif_stop(tp);
5740 tg3_full_lock(tp, 1);
5741
5742 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5743 tg3_init_hw(tp, 0);
5744
5745 tg3_netif_start(tp);
5746 tg3_full_unlock(tp);
5747 } else {
5748 spin_lock_bh(&tp->lock);
5749 __tg3_set_mac_addr(tp);
5750 spin_unlock_bh(&tp->lock);
5751 }
5725 5752
5726 return 0; 5753 return 0;
5727} 5754}
@@ -5771,7 +5798,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5771} 5798}
5772 5799
5773/* tp->lock is held. */ 5800/* tp->lock is held. */
5774static int tg3_reset_hw(struct tg3 *tp) 5801static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5775{ 5802{
5776 u32 val, rdmac_mode; 5803 u32 val, rdmac_mode;
5777 int i, err, limit; 5804 int i, err, limit;
@@ -5786,7 +5813,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5786 tg3_abort_hw(tp, 1); 5813 tg3_abort_hw(tp, 1);
5787 } 5814 }
5788 5815
5789 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) 5816 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5790 tg3_phy_reset(tp); 5817 tg3_phy_reset(tp);
5791 5818
5792 err = tg3_chip_reset(tp); 5819 err = tg3_chip_reset(tp);
@@ -6327,7 +6354,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6327 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 6354 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6328 } 6355 }
6329 6356
6330 err = tg3_setup_phy(tp, 1); 6357 err = tg3_setup_phy(tp, reset_phy);
6331 if (err) 6358 if (err)
6332 return err; 6359 return err;
6333 6360
@@ -6400,7 +6427,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6400/* Called at device open time to get the chip ready for 6427/* Called at device open time to get the chip ready for
6401 * packet processing. Invoked with tp->lock held. 6428 * packet processing. Invoked with tp->lock held.
6402 */ 6429 */
6403static int tg3_init_hw(struct tg3 *tp) 6430static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6404{ 6431{
6405 int err; 6432 int err;
6406 6433
@@ -6413,7 +6440,7 @@ static int tg3_init_hw(struct tg3 *tp)
6413 6440
6414 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); 6441 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6415 6442
6416 err = tg3_reset_hw(tp); 6443 err = tg3_reset_hw(tp, reset_phy);
6417 6444
6418out: 6445out:
6419 return err; 6446 return err;
@@ -6683,7 +6710,7 @@ static int tg3_test_msi(struct tg3 *tp)
6683 tg3_full_lock(tp, 1); 6710 tg3_full_lock(tp, 1);
6684 6711
6685 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6712 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6686 err = tg3_init_hw(tp); 6713 err = tg3_init_hw(tp, 1);
6687 6714
6688 tg3_full_unlock(tp); 6715 tg3_full_unlock(tp);
6689 6716
@@ -6748,7 +6775,7 @@ static int tg3_open(struct net_device *dev)
6748 6775
6749 tg3_full_lock(tp, 0); 6776 tg3_full_lock(tp, 0);
6750 6777
6751 err = tg3_init_hw(tp); 6778 err = tg3_init_hw(tp, 1);
6752 if (err) { 6779 if (err) {
6753 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 6780 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6754 tg3_free_rings(tp); 6781 tg3_free_rings(tp);
@@ -7839,7 +7866,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
7839 7866
7840 if (netif_running(dev)) { 7867 if (netif_running(dev)) {
7841 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7868 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7842 tg3_init_hw(tp); 7869 tg3_init_hw(tp, 1);
7843 tg3_netif_start(tp); 7870 tg3_netif_start(tp);
7844 } 7871 }
7845 7872
@@ -7884,7 +7911,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
7884 7911
7885 if (netif_running(dev)) { 7912 if (netif_running(dev)) {
7886 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 7913 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7887 tg3_init_hw(tp); 7914 tg3_init_hw(tp, 1);
7888 tg3_netif_start(tp); 7915 tg3_netif_start(tp);
7889 } 7916 }
7890 7917
@@ -8427,6 +8454,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8427 8454
8428 tx_len = 1514; 8455 tx_len = 1514;
8429 skb = dev_alloc_skb(tx_len); 8456 skb = dev_alloc_skb(tx_len);
8457 if (!skb)
8458 return -ENOMEM;
8459
8430 tx_data = skb_put(skb, tx_len); 8460 tx_data = skb_put(skb, tx_len);
8431 memcpy(tx_data, tp->dev->dev_addr, 6); 8461 memcpy(tx_data, tp->dev->dev_addr, 6);
8432 memset(tx_data + 6, 0x0, 8); 8462 memset(tx_data + 6, 0x0, 8);
@@ -8522,7 +8552,7 @@ static int tg3_test_loopback(struct tg3 *tp)
8522 if (!netif_running(tp->dev)) 8552 if (!netif_running(tp->dev))
8523 return TG3_LOOPBACK_FAILED; 8553 return TG3_LOOPBACK_FAILED;
8524 8554
8525 tg3_reset_hw(tp); 8555 tg3_reset_hw(tp, 1);
8526 8556
8527 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8557 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8528 err |= TG3_MAC_LOOPBACK_FAILED; 8558 err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8596,7 +8626,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8596 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8626 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8597 if (netif_running(dev)) { 8627 if (netif_running(dev)) {
8598 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8628 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8599 tg3_init_hw(tp); 8629 tg3_init_hw(tp, 1);
8600 tg3_netif_start(tp); 8630 tg3_netif_start(tp);
8601 } 8631 }
8602 8632
@@ -9377,7 +9407,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9377 9407
9378 if ((page_off == 0) || (i == 0)) 9408 if ((page_off == 0) || (i == 0))
9379 nvram_cmd |= NVRAM_CMD_FIRST; 9409 nvram_cmd |= NVRAM_CMD_FIRST;
9380 else if (page_off == (tp->nvram_pagesize - 4)) 9410 if (page_off == (tp->nvram_pagesize - 4))
9381 nvram_cmd |= NVRAM_CMD_LAST; 9411 nvram_cmd |= NVRAM_CMD_LAST;
9382 9412
9383 if (i == (len - 4)) 9413 if (i == (len - 4))
@@ -10353,10 +10383,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10353 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 10383 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10354 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 10384 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10355 10385
10356 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 10386 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10357 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && 10387 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10358 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)) 10388 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10359 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 10389 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10390 else
10391 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10392 }
10360 10393
10361 tp->coalesce_mode = 0; 10394 tp->coalesce_mode = 0;
10362 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && 10395 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
@@ -11569,7 +11602,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11569 tg3_full_lock(tp, 0); 11602 tg3_full_lock(tp, 0);
11570 11603
11571 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11604 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11572 tg3_init_hw(tp); 11605 tg3_init_hw(tp, 1);
11573 11606
11574 tp->timer.expires = jiffies + tp->timer_offset; 11607 tp->timer.expires = jiffies + tp->timer_offset;
11575 add_timer(&tp->timer); 11608 add_timer(&tp->timer);
@@ -11603,7 +11636,7 @@ static int tg3_resume(struct pci_dev *pdev)
11603 tg3_full_lock(tp, 0); 11636 tg3_full_lock(tp, 0);
11604 11637
11605 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11638 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11606 tg3_init_hw(tp); 11639 tg3_init_hw(tp, 1);
11607 11640
11608 tp->timer.expires = jiffies + tp->timer_offset; 11641 tp->timer.expires = jiffies + tp->timer_offset;
11609 add_timer(&tp->timer); 11642 add_timer(&tp->timer);
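The tg3.c change above threads a new reset_phy argument from the callers of tg3_init_hw() down into tg3_reset_hw() and tg3_setup_phy(), so a path such as the MTU change can skip the PHY reset while open, resume, the reset task and the self test still force it. A minimal sketch of that pattern, with invented names rather than the driver's real ones:

/* Hypothetical sketch of threading a reset flag through an init path. */
struct nic {
	int phy_was_reset;
};

static void phy_reset(struct nic *n)
{
	n->phy_was_reset = 1;	/* expensive, disturbs the link */
}

static int hw_reset(struct nic *n, int reset_phy)
{
	if (reset_phy)		/* only callers that asked for it */
		phy_reset(n);
	return 0;		/* chip reset, ring setup, ... */
}

static int hw_init(struct nic *n, int reset_phy)
{
	return hw_reset(n, reset_phy);
}

/* open/resume/self-test call hw_init(n, 1); an MTU change calls hw_init(n, 0). */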
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8c8b987d1250..0e29b885d449 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2215,6 +2215,7 @@ struct tg3 {
2215#define TG3_FLG2_HW_TSO_2 0x08000000 2215#define TG3_FLG2_HW_TSO_2 0x08000000
2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2216#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
2217#define TG3_FLG2_1SHOT_MSI 0x10000000 2217#define TG3_FLG2_1SHOT_MSI 0x10000000
2218#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2218 2219
2219 u32 split_mode_max_reqs; 2220 u32 split_mode_max_reqs;
2220#define SPLIT_MODE_5704_MAX_REQ 3 2221#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 6a23964c1317..a6dc53b4250d 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -129,6 +129,7 @@
129 - Massive clean-up 129 - Massive clean-up
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff) 130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good 131 - Fix Tx engine race for good
132 - Craig Brind: Zero padded aligned buffers for short packets.
132 133
133*/ 134*/
134 135
@@ -1326,7 +1327,12 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1326 rp->stats.tx_dropped++; 1327 rp->stats.tx_dropped++;
1327 return 0; 1328 return 0;
1328 } 1329 }
1330
1331 /* Padding is not copied and so must be redone. */
1329 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); 1332 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1333 if (skb->len < ETH_ZLEN)
1334 memset(rp->tx_buf[entry] + skb->len, 0,
1335 ETH_ZLEN - skb->len);
1330 rp->tx_skbuff_dma[entry] = 0; 1336 rp->tx_skbuff_dma[entry] = 0;
1331 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + 1337 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1332 (rp->tx_buf[entry] - 1338 (rp->tx_buf[entry] -
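The via-rhine fix above zero-pads short frames after skb_copy_and_csum_dev(), because the bounce buffer is reused and any bytes between skb->len and ETH_ZLEN would otherwise go out on the wire as stale data. A small stand-alone sketch of the same rule (60-byte minimum assumed, as ETH_ZLEN):

#include <string.h>

#define MIN_FRAME_LEN 60	/* ETH_ZLEN: minimum Ethernet frame length */

/* Copy a frame into a reused transmit buffer and clear the padding tail. */
static void copy_and_pad(unsigned char *tx_buf,
			 const unsigned char *frame, size_t len)
{
	memcpy(tx_buf, frame, len);
	if (len < MIN_FRAME_LEN)
		memset(tx_buf + len, 0, MIN_FRAME_LEN - len);
}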
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 9a06e61df0a2..e2982a83ae42 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -939,9 +939,9 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
939 return 0; 939 return 0;
940} 940}
941 941
942static void bcm43xx_geo_init(struct bcm43xx_private *bcm) 942static int bcm43xx_geo_init(struct bcm43xx_private *bcm)
943{ 943{
944 struct ieee80211_geo geo; 944 struct ieee80211_geo *geo;
945 struct ieee80211_channel *chan; 945 struct ieee80211_channel *chan;
946 int have_a = 0, have_bg = 0; 946 int have_a = 0, have_bg = 0;
947 int i; 947 int i;
@@ -949,7 +949,10 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
949 struct bcm43xx_phyinfo *phy; 949 struct bcm43xx_phyinfo *phy;
950 const char *iso_country; 950 const char *iso_country;
951 951
952 memset(&geo, 0, sizeof(geo)); 952 geo = kzalloc(sizeof(*geo), GFP_KERNEL);
953 if (!geo)
954 return -ENOMEM;
955
953 for (i = 0; i < bcm->nr_80211_available; i++) { 956 for (i = 0; i < bcm->nr_80211_available; i++) {
954 phy = &(bcm->core_80211_ext[i].phy); 957 phy = &(bcm->core_80211_ext[i].phy);
955 switch (phy->type) { 958 switch (phy->type) {
@@ -967,31 +970,36 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
967 iso_country = bcm43xx_locale_iso(bcm->sprom.locale); 970 iso_country = bcm43xx_locale_iso(bcm->sprom.locale);
968 971
969 if (have_a) { 972 if (have_a) {
970 for (i = 0, channel = 0; channel < 201; channel++) { 973 for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL;
971 chan = &geo.a[i++]; 974 channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) {
975 chan = &geo->a[i++];
972 chan->freq = bcm43xx_channel_to_freq_a(channel); 976 chan->freq = bcm43xx_channel_to_freq_a(channel);
973 chan->channel = channel; 977 chan->channel = channel;
974 } 978 }
975 geo.a_channels = i; 979 geo->a_channels = i;
976 } 980 }
977 if (have_bg) { 981 if (have_bg) {
978 for (i = 0, channel = 1; channel < 15; channel++) { 982 for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL;
979 chan = &geo.bg[i++]; 983 channel <= IEEE80211_24GHZ_MAX_CHANNEL; channel++) {
984 chan = &geo->bg[i++];
980 chan->freq = bcm43xx_channel_to_freq_bg(channel); 985 chan->freq = bcm43xx_channel_to_freq_bg(channel);
981 chan->channel = channel; 986 chan->channel = channel;
982 } 987 }
983 geo.bg_channels = i; 988 geo->bg_channels = i;
984 } 989 }
985 memcpy(geo.name, iso_country, 2); 990 memcpy(geo->name, iso_country, 2);
986 if (0 /*TODO: Outdoor use only */) 991 if (0 /*TODO: Outdoor use only */)
987 geo.name[2] = 'O'; 992 geo->name[2] = 'O';
988 else if (0 /*TODO: Indoor use only */) 993 else if (0 /*TODO: Indoor use only */)
989 geo.name[2] = 'I'; 994 geo->name[2] = 'I';
990 else 995 else
991 geo.name[2] = ' '; 996 geo->name[2] = ' ';
992 geo.name[3] = '\0'; 997 geo->name[3] = '\0';
998
999 ieee80211_set_geo(bcm->ieee, geo);
1000 kfree(geo);
993 1001
994 ieee80211_set_geo(bcm->ieee, &geo); 1002 return 0;
995} 1003}
996 1004
997/* DummyTransmission function, as documented on 1005/* DummyTransmission function, as documented on
@@ -3479,16 +3487,17 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
3479 goto err_80211_unwind; 3487 goto err_80211_unwind;
3480 bcm43xx_wireless_core_disable(bcm); 3488 bcm43xx_wireless_core_disable(bcm);
3481 } 3489 }
3490 err = bcm43xx_geo_init(bcm);
3491 if (err)
3492 goto err_80211_unwind;
3482 bcm43xx_pctl_set_crystal(bcm, 0); 3493 bcm43xx_pctl_set_crystal(bcm, 0);
3483 3494
3484 /* Set the MAC address in the networking subsystem */ 3495 /* Set the MAC address in the networking subsystem */
3485 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) 3496 if (is_valid_ether_addr(bcm->sprom.et1macaddr))
3486 memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6); 3497 memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6);
3487 else 3498 else
3488 memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6); 3499 memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6);
3489 3500
3490 bcm43xx_geo_init(bcm);
3491
3492 snprintf(bcm->nick, IW_ESSID_MAX_SIZE, 3501 snprintf(bcm->nick, IW_ESSID_MAX_SIZE,
3493 "Broadcom %04X", bcm->chip_id); 3502 "Broadcom %04X", bcm->chip_id);
3494 3503
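bcm43xx_geo_init() above stops keeping a struct ieee80211_geo on the kernel stack and allocates it instead, which turns the function into one that can fail with -ENOMEM; the caller in bcm43xx_attach_board() now checks that result. A hedged stand-alone sketch of moving a large object off the stack and propagating the failure:

#include <stdlib.h>
#include <string.h>

struct geo_info {		/* stand-in for the large geo structure */
	char name[4];
	int channels[256];
};

static int fill_geo(void (*commit)(const struct geo_info *))
{
	struct geo_info *geo = calloc(1, sizeof(*geo));

	if (!geo)
		return -1;	/* -ENOMEM in the driver */
	memcpy(geo->name, "XX ", 4);
	commit(geo);		/* callee copies what it needs */
	free(geo);
	return 0;
}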
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index eca79a38594a..30a202b258b5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -118,12 +118,14 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm,
118static inline 118static inline
119int bcm43xx_is_valid_channel_a(u8 channel) 119int bcm43xx_is_valid_channel_a(u8 channel)
120{ 120{
121 return (channel <= 200); 121 return (channel >= IEEE80211_52GHZ_MIN_CHANNEL
122 && channel <= IEEE80211_52GHZ_MAX_CHANNEL);
122} 123}
123static inline 124static inline
124int bcm43xx_is_valid_channel_bg(u8 channel) 125int bcm43xx_is_valid_channel_bg(u8 channel)
125{ 126{
126 return (channel >= 1 && channel <= 14); 127 return (channel >= IEEE80211_24GHZ_MIN_CHANNEL
128 && channel <= IEEE80211_24GHZ_MAX_CHANNEL);
127} 129}
128static inline 130static inline
129int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm, 131int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index 33137165727f..b0abac515530 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1287,7 +1287,7 @@ static void bcm43xx_phy_initg(struct bcm43xx_private *bcm)
1287 if (radio->revision == 8) 1287 if (radio->revision == 8)
1288 bcm43xx_phy_write(bcm, 0x0805, 0x3230); 1288 bcm43xx_phy_write(bcm, 0x0805, 0x3230);
1289 bcm43xx_phy_init_pctl(bcm); 1289 bcm43xx_phy_init_pctl(bcm);
1290 if (bcm->chip_id == 0x4306 && bcm->chip_package != 2) { 1290 if (bcm->chip_id == 0x4306 && bcm->chip_package == 2) {
1291 bcm43xx_phy_write(bcm, 0x0429, 1291 bcm43xx_phy_write(bcm, 0x0429,
1292 bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF); 1292 bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF);
1293 bcm43xx_phy_write(bcm, 0x04C3, 1293 bcm43xx_phy_write(bcm, 0x04C3,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 3edbb481a0a0..b45063974ae9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -182,8 +182,11 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
182 mode = BCM43xx_INITIAL_IWMODE; 182 mode = BCM43xx_INITIAL_IWMODE;
183 183
184 bcm43xx_lock_mmio(bcm, flags); 184 bcm43xx_lock_mmio(bcm, flags);
185 if (bcm->ieee->iw_mode != mode) 185 if (bcm->initialized) {
186 bcm43xx_set_iwmode(bcm, mode); 186 if (bcm->ieee->iw_mode != mode)
187 bcm43xx_set_iwmode(bcm, mode);
188 } else
189 bcm->ieee->iw_mode = mode;
187 bcm43xx_unlock_mmio(bcm, flags); 190 bcm43xx_unlock_mmio(bcm, flags);
188 191
189 return 0; 192 return 0;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2087a397ef16..9855c4c920b8 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -793,8 +793,10 @@ static int msix_capability_init(struct pci_dev *dev,
793 if (!entry) 793 if (!entry)
794 break; 794 break;
795 vector = get_msi_vector(dev); 795 vector = get_msi_vector(dev);
796 if (vector < 0) 796 if (vector < 0) {
797 kmem_cache_free(msi_cachep, entry);
797 break; 798 break;
799 }
798 800
799 j = entries[i].entry; 801 j = entries[i].entry;
800 entries[i].vector = vector; 802 entries[i].vector = vector;
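The msi.c hunk frees the just-allocated entry when get_msi_vector() fails instead of breaking out and leaking it. The general rule is that anything acquired inside the loop body must be released on every early exit from that body; a small sketch under that assumption:

#include <stdlib.h>

static int acquire_resource(void)
{
	return -1;		/* assume failure for the demonstration */
}

static int setup_entries(int n)
{
	for (int i = 0; i < n; i++) {
		void *entry = malloc(64);

		if (!entry)
			break;
		if (acquire_resource() < 0) {
			free(entry);	/* the fix: no leak on this exit */
			break;
		}
		/* ... ownership of entry is handed off here ... */
	}
	return 0;
}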
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c42ae2cf8d64..19e2b174d33c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -642,13 +642,15 @@ static void quirk_via_irq(struct pci_dev *dev)
642 new_irq = dev->irq & 0xf; 642 new_irq = dev->irq & 0xf;
643 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); 643 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
644 if (new_irq != irq) { 644 if (new_irq != irq) {
645 printk(KERN_INFO "PCI: Via IRQ fixup for %s, from %d to %d\n", 645 printk(KERN_INFO "PCI: VIA IRQ fixup for %s, from %d to %d\n",
646 pci_name(dev), irq, new_irq); 646 pci_name(dev), irq, new_irq);
647 udelay(15); /* unknown if delay really needed */ 647 udelay(15); /* unknown if delay really needed */
648 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); 648 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
649 } 649 }
650} 650}
651DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irq); 651DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq);
652DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq);
653DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq);
652 654
653/* 655/*
654 * VIA VT82C598 has its device ID settable and many BIOSes 656 * VIA VT82C598 has its device ID settable and many BIOSes
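The quirks.c change narrows quirk_via_irq from PCI_ANY_ID to the specific 82C686 function IDs, so unrelated VIA devices no longer have their interrupt line rewritten. A sketch of matching against an explicit allow-list (the IDs below are placeholders, not the real constants):

struct pci_id {
	unsigned short vendor, device;
};

static const struct pci_id via_irq_fixup_ids[] = {
	{ 0x1106, 0x0686 },	/* placeholder 82C686 function IDs */
	{ 0x1106, 0x3057 },
};

static int needs_via_irq_fixup(unsigned short vendor, unsigned short device)
{
	for (unsigned int i = 0;
	     i < sizeof(via_irq_fixup_ids) / sizeof(via_irq_fixup_ids[0]); i++)
		if (via_irq_fixup_ids[i].vendor == vendor &&
		    via_irq_fixup_ids[i].device == device)
			return 1;
	return 0;
}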
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index bd0308e89815..a2f05f485156 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -509,7 +509,8 @@ static irqreturn_t i365_count_irq(int irq, void *dev, struct pt_regs *regs)
509static u_int __init test_irq(u_short sock, int irq) 509static u_int __init test_irq(u_short sock, int irq)
510{ 510{
511 debug(2, " testing ISA irq %d\n", irq); 511 debug(2, " testing ISA irq %d\n", irq);
512 if (request_irq(irq, i365_count_irq, 0, "scan", i365_count_irq) != 0) 512 if (request_irq(irq, i365_count_irq, SA_PROBEIRQ, "scan",
513 i365_count_irq) != 0)
513 return 1; 514 return 1;
514 irq_hits = 0; irq_sock = sock; 515 irq_hits = 0; irq_sock = sock;
515 msleep(10); 516 msleep(10);
@@ -561,7 +562,7 @@ static u_int __init isa_scan(u_short sock, u_int mask0)
561 } else { 562 } else {
562 /* Fallback: just find interrupts that aren't in use */ 563 /* Fallback: just find interrupts that aren't in use */
563 for (i = 0; i < 16; i++) 564 for (i = 0; i < 16; i++)
564 if ((mask0 & (1 << i)) && (_check_irq(i, 0) == 0)) 565 if ((mask0 & (1 << i)) && (_check_irq(i, SA_PROBEIRQ) == 0))
565 mask1 |= (1 << i); 566 mask1 |= (1 << i);
566 printk("default"); 567 printk("default");
567 /* If scan failed, default to polled status */ 568 /* If scan failed, default to polled status */
@@ -725,7 +726,7 @@ static void __init add_pcic(int ns, int type)
725 u_int cs_mask = mask & ((cs_irq) ? (1<<cs_irq) : ~(1<<12)); 726 u_int cs_mask = mask & ((cs_irq) ? (1<<cs_irq) : ~(1<<12));
726 for (cs_irq = 15; cs_irq > 0; cs_irq--) 727 for (cs_irq = 15; cs_irq > 0; cs_irq--)
727 if ((cs_mask & (1 << cs_irq)) && 728 if ((cs_mask & (1 << cs_irq)) &&
728 (_check_irq(cs_irq, 0) == 0)) 729 (_check_irq(cs_irq, SA_PROBEIRQ) == 0))
729 break; 730 break;
730 if (cs_irq) { 731 if (cs_irq) {
731 grab_irq = 1; 732 grab_irq = 1;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index b1e3e6179e56..6c9ad92747fd 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -58,7 +58,7 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
58 unsigned long data; 58 unsigned long data;
59 ssize_t ret; 59 ssize_t ret;
60 60
61 if (count < sizeof(unsigned long)) 61 if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
62 return -EINVAL; 62 return -EINVAL;
63 63
64 add_wait_queue(&rtc->irq_queue, &wait); 64 add_wait_queue(&rtc->irq_queue, &wait);
@@ -90,11 +90,16 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
90 if (ret == 0) { 90 if (ret == 0) {
91 /* Check for any data updates */ 91 /* Check for any data updates */
92 if (rtc->ops->read_callback) 92 if (rtc->ops->read_callback)
93 data = rtc->ops->read_callback(rtc->class_dev.dev, data); 93 data = rtc->ops->read_callback(rtc->class_dev.dev,
94 94 data);
95 ret = put_user(data, (unsigned long __user *)buf); 95
96 if (ret == 0) 96 if (sizeof(int) != sizeof(long) &&
97 ret = sizeof(unsigned long); 97 count == sizeof(unsigned int))
98 ret = put_user(data, (unsigned int __user *)buf) ?:
99 sizeof(unsigned int);
100 else
101 ret = put_user(data, (unsigned long __user *)buf) ?:
102 sizeof(unsigned long);
98 } 103 }
99 return ret; 104 return ret;
100} 105}
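The rtc-dev.c read path above now honours a 4-byte read on 64-bit kernels so 32-bit userspace can still consume the interrupt data, and falls back to the full unsigned long otherwise. A stand-alone sketch of returning the value in whichever width the caller requested (sizes assumed):

#include <string.h>

static long format_irq_data(unsigned long data, void *buf, size_t count)
{
	if (sizeof(int) != sizeof(long) && count == sizeof(unsigned int)) {
		unsigned int v = (unsigned int)data;

		memcpy(buf, &v, sizeof(v));	/* 32-bit reader on a 64-bit kernel */
		return sizeof(v);
	}
	if (count < sizeof(unsigned long))
		return -1;			/* -EINVAL in the driver */
	memcpy(buf, &data, sizeof(data));
	return sizeof(data);
}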
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index a23ec54989f6..2bc8aad47219 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -178,9 +178,9 @@ static int sa1100_rtc_open(struct device *dev)
178 return 0; 178 return 0;
179 179
180 fail_pi: 180 fail_pi:
181 free_irq(IRQ_RTCAlrm, NULL); 181 free_irq(IRQ_RTCAlrm, dev);
182 fail_ai: 182 fail_ai:
183 free_irq(IRQ_RTC1Hz, NULL); 183 free_irq(IRQ_RTC1Hz, dev);
184 fail_ui: 184 fail_ui:
185 return ret; 185 return ret;
186} 186}
@@ -295,7 +295,7 @@ static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
295 295
296static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) 296static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
297{ 297{
298 seq_printf(seq, "trim/divider\t: 0x%08x\n", RTTR); 298 seq_printf(seq, "trim/divider\t: 0x%08lx\n", RTTR);
299 seq_printf(seq, "alarm_IRQ\t: %s\n", 299 seq_printf(seq, "alarm_IRQ\t: %s\n",
300 (RTSR & RTSR_ALE) ? "yes" : "no" ); 300 (RTSR & RTSR_ALE) ? "yes" : "no" );
301 seq_printf(seq, "update_IRQ\t: %s\n", 301 seq_printf(seq, "update_IRQ\t: %s\n",
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a3bfebcf31ef..cfb1fff3787c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -315,6 +315,11 @@ dasd_increase_state(struct dasd_device *device)
315 rc = dasd_state_basic_to_ready(device); 315 rc = dasd_state_basic_to_ready(device);
316 316
317 if (!rc && 317 if (!rc &&
318 device->state == DASD_STATE_UNFMT &&
319 device->target > DASD_STATE_UNFMT)
320 rc = -EPERM;
321
322 if (!rc &&
318 device->state == DASD_STATE_READY && 323 device->state == DASD_STATE_READY &&
319 device->target >= DASD_STATE_ONLINE) 324 device->target >= DASD_STATE_ONLINE)
320 rc = dasd_state_ready_to_online(device); 325 rc = dasd_state_ready_to_online(device);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index c1c6f1381150..216bc4fba199 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -45,6 +45,7 @@ struct dasd_devmap {
45 unsigned int devindex; 45 unsigned int devindex;
46 unsigned short features; 46 unsigned short features;
47 struct dasd_device *device; 47 struct dasd_device *device;
48 struct dasd_uid uid;
48}; 49};
49 50
50/* 51/*
@@ -716,6 +717,68 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, char *bu
716 717
717static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); 718static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
718 719
720static ssize_t
721dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
722{
723 struct dasd_devmap *devmap;
724 int alias;
725
726 devmap = dasd_find_busid(dev->bus_id);
727 spin_lock(&dasd_devmap_lock);
728 if (!IS_ERR(devmap))
729 alias = devmap->uid.alias;
730 else
731 alias = 0;
732 spin_unlock(&dasd_devmap_lock);
733
734 return sprintf(buf, alias ? "1\n" : "0\n");
735}
736
737static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
738
739static ssize_t
740dasd_vendor_show(struct device *dev, struct device_attribute *attr, char *buf)
741{
742 struct dasd_devmap *devmap;
743 char *vendor;
744
745 devmap = dasd_find_busid(dev->bus_id);
746 spin_lock(&dasd_devmap_lock);
747 if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
748 vendor = devmap->uid.vendor;
749 else
750 vendor = "";
751 spin_unlock(&dasd_devmap_lock);
752
753 return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
754}
755
756static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
757
758#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 +\
759 /* SSID */ 4 + 1 + /* unit addr */ 2 + 1)
760
761static ssize_t
762dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
763{
764 struct dasd_devmap *devmap;
765 char uid[UID_STRLEN];
766
767 devmap = dasd_find_busid(dev->bus_id);
768 spin_lock(&dasd_devmap_lock);
769 if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
770 snprintf(uid, sizeof(uid), "%s.%s.%04x.%02x",
771 devmap->uid.vendor, devmap->uid.serial,
772 devmap->uid.ssid, devmap->uid.unit_addr);
773 else
774 uid[0] = 0;
775 spin_unlock(&dasd_devmap_lock);
776
777 return snprintf(buf, PAGE_SIZE, "%s\n", uid);
778}
779
780static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
781
719/* 782/*
720 * extended error-reporting 783 * extended error-reporting
721 */ 784 */
@@ -759,6 +822,9 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
759static struct attribute * dasd_attrs[] = { 822static struct attribute * dasd_attrs[] = {
760 &dev_attr_readonly.attr, 823 &dev_attr_readonly.attr,
761 &dev_attr_discipline.attr, 824 &dev_attr_discipline.attr,
825 &dev_attr_alias.attr,
826 &dev_attr_vendor.attr,
827 &dev_attr_uid.attr,
762 &dev_attr_use_diag.attr, 828 &dev_attr_use_diag.attr,
763 &dev_attr_eer_enabled.attr, 829 &dev_attr_eer_enabled.attr,
764 NULL, 830 NULL,
@@ -768,6 +834,42 @@ static struct attribute_group dasd_attr_group = {
768 .attrs = dasd_attrs, 834 .attrs = dasd_attrs,
769}; 835};
770 836
837
838/*
839 * Return copy of the device unique identifier.
840 */
841int
842dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
843{
844 struct dasd_devmap *devmap;
845
846 devmap = dasd_find_busid(cdev->dev.bus_id);
847 if (IS_ERR(devmap))
848 return PTR_ERR(devmap);
849 spin_lock(&dasd_devmap_lock);
850 *uid = devmap->uid;
851 spin_unlock(&dasd_devmap_lock);
852 return 0;
853}
854
855/*
856 * Register the given device unique identifier into devmap struct.
857 */
858int
859dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
860{
861 struct dasd_devmap *devmap;
862
863 devmap = dasd_find_busid(cdev->dev.bus_id);
864 if (IS_ERR(devmap))
865 return PTR_ERR(devmap);
866 spin_lock(&dasd_devmap_lock);
867 devmap->uid = *uid;
868 spin_unlock(&dasd_devmap_lock);
869 return 0;
870}
871EXPORT_SYMBOL(dasd_set_uid);
872
771/* 873/*
772 * Return value of the specified feature. 874 * Return value of the specified feature.
773 */ 875 */
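The new dasd sysfs attributes above expose the device unique identifier as vendor.serial.ssid.unit_addr, copied out of the devmap under dasd_devmap_lock. A small sketch of just the formatting step on a copied uid (field widths taken from the UID_STRLEN comment, structure assumed):

#include <stdio.h>

struct dasd_uid_copy {
	char vendor[4];
	char serial[15];
	unsigned short ssid;
	unsigned char unit_addr;
};

static void format_uid(const struct dasd_uid_copy *uid, char *buf, size_t len)
{
	if (uid->vendor[0])
		snprintf(buf, len, "%s.%s.%04x.%02x",
			 uid->vendor, uid->serial, uid->ssid, uid->unit_addr);
	else
		buf[0] = '\0';	/* no configuration data read yet */
}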
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ee09ef33d08d..7d5a6cee4bd8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -446,6 +446,39 @@ dasd_eckd_cdl_reclen(int recid)
446 return LABEL_SIZE; 446 return LABEL_SIZE;
447} 447}
448 448
449/*
450 * Generate device unique id that specifies the physical device.
451 */
452static int
453dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
454{
455 struct dasd_eckd_private *private;
456 struct dasd_eckd_confdata *confdata;
457
458 private = (struct dasd_eckd_private *) device->private;
459 if (!private)
460 return -ENODEV;
461 confdata = &private->conf_data;
462 if (!confdata)
463 return -ENODEV;
464
465 memset(uid, 0, sizeof(struct dasd_uid));
466 strncpy(uid->vendor, confdata->ned1.HDA_manufacturer,
467 sizeof(uid->vendor) - 1);
468 EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
469 strncpy(uid->serial, confdata->ned1.HDA_location,
470 sizeof(uid->serial) - 1);
471 EBCASC(uid->serial, sizeof(uid->serial) - 1);
472 uid->ssid = confdata->neq.subsystemID;
473 if (confdata->ned2.sneq.flags == 0x40) {
474 uid->alias = 1;
475 uid->unit_addr = confdata->ned2.sneq.base_unit_addr;
476 } else
477 uid->unit_addr = confdata->ned1.unit_addr;
478
479 return 0;
480}
481
449static int 482static int
450dasd_eckd_read_conf(struct dasd_device *device) 483dasd_eckd_read_conf(struct dasd_device *device)
451{ 484{
@@ -507,11 +540,15 @@ dasd_eckd_read_conf(struct dasd_device *device)
507 return 0; 540 return 0;
508} 541}
509 542
510 543/*
544 * Check device characteristics.
545 * If the device is accessible using ECKD discipline, the device is enabled.
546 */
511static int 547static int
512dasd_eckd_check_characteristics(struct dasd_device *device) 548dasd_eckd_check_characteristics(struct dasd_device *device)
513{ 549{
514 struct dasd_eckd_private *private; 550 struct dasd_eckd_private *private;
551 struct dasd_uid uid;
515 void *rdc_data; 552 void *rdc_data;
516 int rc; 553 int rc;
517 554
@@ -536,6 +573,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
536 573
537 /* Read Device Characteristics */ 574 /* Read Device Characteristics */
538 rdc_data = (void *) &(private->rdc_data); 575 rdc_data = (void *) &(private->rdc_data);
576 memset(rdc_data, 0, sizeof(rdc_data));
539 rc = read_dev_chars(device->cdev, &rdc_data, 64); 577 rc = read_dev_chars(device->cdev, &rdc_data, 64);
540 if (rc) { 578 if (rc) {
541 DEV_MESSAGE(KERN_WARNING, device, 579 DEV_MESSAGE(KERN_WARNING, device,
@@ -556,8 +594,17 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
556 594
557 /* Read Configuration Data */ 595 /* Read Configuration Data */
558 rc = dasd_eckd_read_conf (device); 596 rc = dasd_eckd_read_conf (device);
559 return rc; 597 if (rc)
598 return rc;
599
600 /* Generate device unique id and register in devmap */
601 rc = dasd_eckd_generate_uid(device, &uid);
602 if (rc)
603 return rc;
560 604
605 rc = dasd_set_uid(device->cdev, &uid);
606
607 return rc;
561} 608}
562 609
563static struct dasd_ccw_req * 610static struct dasd_ccw_req *
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index ad8524bb7bb3..d5734e976e1c 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -228,26 +228,36 @@ struct dasd_eckd_confdata {
228 unsigned char HDA_manufacturer[3]; 228 unsigned char HDA_manufacturer[3];
229 unsigned char HDA_location[2]; 229 unsigned char HDA_location[2];
230 unsigned char HDA_seqno[12]; 230 unsigned char HDA_seqno[12];
231 __u16 ID; 231 __u8 ID;
232 __u8 unit_addr;
232 } __attribute__ ((packed)) ned1; 233 } __attribute__ ((packed)) ned1;
233 struct { 234 union {
234 struct { 235 struct {
235 unsigned char identifier:2; 236 struct {
236 unsigned char token_id:1; 237 unsigned char identifier:2;
237 unsigned char sno_valid:1; 238 unsigned char token_id:1;
238 unsigned char subst_sno:1; 239 unsigned char sno_valid:1;
239 unsigned char recNED:1; 240 unsigned char subst_sno:1;
240 unsigned char emuNED:1; 241 unsigned char recNED:1;
241 unsigned char reserved:1; 242 unsigned char emuNED:1;
242 } __attribute__ ((packed)) flags; 243 unsigned char reserved:1;
243 __u8 descriptor; 244 } __attribute__ ((packed)) flags;
244 __u8 reserved[2]; 245 __u8 descriptor;
245 unsigned char dev_type[6]; 246 __u8 reserved[2];
246 unsigned char dev_model[3]; 247 unsigned char dev_type[6];
247 unsigned char DASD_manufacturer[3]; 248 unsigned char dev_model[3];
248 unsigned char DASD_location[2]; 249 unsigned char DASD_manufacturer[3];
249 unsigned char DASD_seqno[12]; 250 unsigned char DASD_location[2];
250 __u16 ID; 251 unsigned char DASD_seqno[12];
252 __u16 ID;
253 } __attribute__ ((packed)) ned;
254 struct {
255 unsigned char flags; /* byte 0 */
256 unsigned char res2[7]; /* byte 1- 7 */
257 unsigned char sua_flags; /* byte 8 */
258 __u8 base_unit_addr; /* byte 9 */
259 unsigned char res3[22]; /* byte 10-31 */
260 } __attribute__ ((packed)) sneq;
251 } __attribute__ ((packed)) ned2; 261 } __attribute__ ((packed)) ned2;
252 struct { 262 struct {
253 struct { 263 struct {
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 4293ba827523..d4b13e300a76 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -268,6 +268,16 @@ struct dasd_discipline {
268 268
269extern struct dasd_discipline *dasd_diag_discipline_pointer; 269extern struct dasd_discipline *dasd_diag_discipline_pointer;
270 270
271/*
272 * Unique identifier for dasd device.
273 */
274struct dasd_uid {
275 __u8 alias;
276 char vendor[4];
277 char serial[15];
278 __u16 ssid;
279 __u8 unit_addr;
280};
271 281
272/* 282/*
273 * Notification numbers for extended error reporting notifications: 283 * Notification numbers for extended error reporting notifications:
@@ -516,6 +526,8 @@ void dasd_devmap_exit(void);
516struct dasd_device *dasd_create_device(struct ccw_device *); 526struct dasd_device *dasd_create_device(struct ccw_device *);
517void dasd_delete_device(struct dasd_device *); 527void dasd_delete_device(struct dasd_device *);
518 528
529int dasd_get_uid(struct ccw_device *, struct dasd_uid *);
530int dasd_set_uid(struct ccw_device *, struct dasd_uid *);
519int dasd_get_feature(struct ccw_device *, int); 531int dasd_get_feature(struct ccw_device *, int);
520int dasd_set_feature(struct ccw_device *, int, int); 532int dasd_set_feature(struct ccw_device *, int, int);
521 533
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index c3915f60a3aa..d71ef1adea59 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -230,14 +230,16 @@ tape_3590_read_attmsg(struct tape_device *device)
230 * These functions are used to schedule follow-up actions from within an 230 * These functions are used to schedule follow-up actions from within an
231 * interrupt context (like unsolicited interrupts). 231 * interrupt context (like unsolicited interrupts).
232 */ 232 */
233struct work_handler_data {
234 struct tape_device *device;
235 enum tape_op op;
236 struct work_struct work;
237};
238
233static void 239static void
234tape_3590_work_handler(void *data) 240tape_3590_work_handler(void *data)
235{ 241{
236 struct { 242 struct work_handler_data *p = data;
237 struct tape_device *device;
238 enum tape_op op;
239 struct work_struct work;
240 } *p = data;
241 243
242 switch (p->op) { 244 switch (p->op) {
243 case TO_MSEN: 245 case TO_MSEN:
@@ -257,11 +259,7 @@ tape_3590_work_handler(void *data)
257static int 259static int
258tape_3590_schedule_work(struct tape_device *device, enum tape_op op) 260tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
259{ 261{
260 struct { 262 struct work_handler_data *p;
261 struct tape_device *device;
262 enum tape_op op;
263 struct work_struct work;
264 } *p;
265 263
266 if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) 264 if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
267 return -ENOMEM; 265 return -ENOMEM;
@@ -316,7 +314,7 @@ tape_3590_bread(struct tape_device *device, struct request *req)
316 314
317 rq_for_each_bio(bio, req) { 315 rq_for_each_bio(bio, req) {
318 bio_for_each_segment(bv, bio, i) { 316 bio_for_each_segment(bv, bio, i) {
319 dst = kmap(bv->bv_page) + bv->bv_offset; 317 dst = page_address(bv->bv_page) + bv->bv_offset;
320 for (off = 0; off < bv->bv_len; 318 for (off = 0; off < bv->bv_len;
321 off += TAPEBLOCK_HSEC_SIZE) { 319 off += TAPEBLOCK_HSEC_SIZE) {
322 ccw->flags = CCW_FLAG_CC; 320 ccw->flags = CCW_FLAG_CC;
@@ -1168,6 +1166,7 @@ tape_3590_setup_device(struct tape_device *device)
1168static void 1166static void
1169tape_3590_cleanup_device(struct tape_device *device) 1167tape_3590_cleanup_device(struct tape_device *device)
1170{ 1168{
1169 flush_scheduled_work();
1171 tape_std_unassign(device); 1170 tape_std_unassign(device);
1172 1171
1173 kfree(device->discdata); 1172 kfree(device->discdata);
@@ -1234,6 +1233,7 @@ static struct tape_discipline tape_discipline_3590 = {
1234 1233
1235static struct ccw_device_id tape_3590_ids[] = { 1234static struct ccw_device_id tape_3590_ids[] = {
1236 {CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590}, 1235 {CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590},
1236 {CCW_DEVICE_DEVTYPE(0x3592, 0, 0x3592, 0), .driver_info = tape_3592},
1237 { /* end of list */ } 1237 { /* end of list */ }
1238}; 1238};
1239 1239
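tape_3590.c above replaces two identical anonymous structs with a single named work_handler_data, so the allocation in tape_3590_schedule_work() and the cast in the handler cannot drift apart. A stand-alone sketch of that shared-type pattern:

#include <stdlib.h>

struct work_data {
	int op;				/* operation to run later */
};

static void work_handler(void *data)
{
	struct work_data *p = data;	/* same type the scheduler allocated */

	/* ... dispatch on p->op ... */
	free(p);
}

static int schedule_work_op(int op)
{
	struct work_data *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;		/* -ENOMEM in the driver */
	p->op = op;
	work_handler(p);		/* the driver defers this to a workqueue */
	return 0;
}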
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index 2d311798edf4..1fc952359341 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -153,6 +153,7 @@ enum s390_tape_type {
153 tape_3480, 153 tape_3480,
154 tape_3490, 154 tape_3490,
155 tape_3590, 155 tape_3590,
156 tape_3592,
156}; 157};
157 158
158#endif // _TAPE_STD_H 159#endif // _TAPE_STD_H
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6412b2c3edd3..72187e54dcac 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -242,28 +242,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
242 if (sch->vpm == mask) 242 if (sch->vpm == mask)
243 goto out_unreg; 243 goto out_unreg;
244 244
245 if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND | 245 if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
246 SCSW_ACTL_HALT_PEND | 246 (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
247 SCSW_ACTL_START_PEND | 247 (sch->schib.pmcw.lpum == mask) &&
248 SCSW_ACTL_RESUME_PEND)) && 248 (sch->vpm == 0)) {
249 (sch->schib.pmcw.lpum == mask)) {
250 int cc = cio_cancel(sch);
251
252 if (cc == -ENODEV)
253 goto out_unreg;
254
255 if (cc == -EINVAL) {
256 cc = cio_clear(sch);
257 if (cc == -ENODEV)
258 goto out_unreg;
259 /* Call handler. */
260 if (sch->driver && sch->driver->termination)
261 sch->driver->termination(&sch->dev);
262 goto out_unlock;
263 }
264 } else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
265 (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
266 (sch->schib.pmcw.lpum == mask)) {
267 int cc; 249 int cc;
268 250
269 cc = cio_clear(sch); 251 cc = cio_clear(sch);
@@ -653,13 +635,13 @@ __chp_add(struct subchannel_id schid, void *data)
653 if (sch->schib.pmcw.chpid[i] == chp->id) { 635 if (sch->schib.pmcw.chpid[i] == chp->id) {
654 if (stsch(sch->schid, &sch->schib) != 0) { 636 if (stsch(sch->schid, &sch->schib) != 0) {
655 /* Endgame. */ 637 /* Endgame. */
656 spin_unlock(&sch->lock); 638 spin_unlock_irq(&sch->lock);
657 return -ENXIO; 639 return -ENXIO;
658 } 640 }
659 break; 641 break;
660 } 642 }
661 if (i==8) { 643 if (i==8) {
662 spin_unlock(&sch->lock); 644 spin_unlock_irq(&sch->lock);
663 return 0; 645 return 0;
664 } 646 }
665 sch->lpm = ((sch->schib.pmcw.pim & 647 sch->lpm = ((sch->schib.pmcw.pim &
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 814f9258ce00..96f519281d92 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -38,6 +38,7 @@
38#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <linux/proc_fs.h> 39#include <linux/proc_fs.h>
40#include <linux/timer.h> 40#include <linux/timer.h>
41#include <linux/mempool.h>
41 42
42#include <asm/ccwdev.h> 43#include <asm/ccwdev.h>
43#include <asm/io.h> 44#include <asm/io.h>
@@ -80,6 +81,8 @@ static int indicator_used[INDICATORS_PER_CACHELINE];
80static __u32 * volatile indicators; 81static __u32 * volatile indicators;
81static __u32 volatile spare_indicator; 82static __u32 volatile spare_indicator;
82static atomic_t spare_indicator_usecount; 83static atomic_t spare_indicator_usecount;
84#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
85static mempool_t *qdio_mempool_scssc;
83 86
84static debug_info_t *qdio_dbf_setup; 87static debug_info_t *qdio_dbf_setup;
85static debug_info_t *qdio_dbf_sbal; 88static debug_info_t *qdio_dbf_sbal;
@@ -1637,7 +1640,7 @@ next:
1637 1640
1638 } 1641 }
1639 kfree(irq_ptr->qdr); 1642 kfree(irq_ptr->qdr);
1640 kfree(irq_ptr); 1643 free_page((unsigned long) irq_ptr);
1641} 1644}
1642 1645
1643static void 1646static void
@@ -2304,7 +2307,7 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
2304 2307
2305 QDIO_DBF_TEXT0(0,setup,"getssqd"); 2308 QDIO_DBF_TEXT0(0,setup,"getssqd");
2306 qdioac = 0; 2309 qdioac = 0;
2307 ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2310 ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2308 if (!ssqd_area) { 2311 if (!ssqd_area) {
2309 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ 2312 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
2310 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no); 2313 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
@@ -2364,7 +2367,7 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
2364out: 2367out:
2365 qdio_check_subchannel_qebsm(irq_ptr, qdioac, 2368 qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2366 ssqd_area->sch_token); 2369 ssqd_area->sch_token);
2367 free_page ((unsigned long) ssqd_area); 2370 mempool_free(ssqd_area, qdio_mempool_scssc);
2368 irq_ptr->qdioac = qdioac; 2371 irq_ptr->qdioac = qdioac;
2369} 2372}
2370 2373
@@ -2458,7 +2461,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2458 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); 2461 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2459 } 2462 }
2460 2463
2461 scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2464 scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2462 if (!scssc_area) { 2465 if (!scssc_area) {
2463 QDIO_PRINT_WARN("No memory for setting indicators on " \ 2466 QDIO_PRINT_WARN("No memory for setting indicators on " \
2464 "subchannel 0.%x.%x.\n", 2467 "subchannel 0.%x.%x.\n",
@@ -2514,7 +2517,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2514 QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long)); 2517 QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
2515 result = 0; 2518 result = 0;
2516out: 2519out:
2517 free_page ((unsigned long) scssc_area); 2520 mempool_free(scssc_area, qdio_mempool_scssc);
2518 return result; 2521 return result;
2519 2522
2520} 2523}
@@ -2543,7 +2546,7 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2543 if (!irq_ptr->is_thinint_irq) 2546 if (!irq_ptr->is_thinint_irq)
2544 return -ENODEV; 2547 return -ENODEV;
2545 2548
2546 scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2549 scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2547 if (!scsscf_area) { 2550 if (!scsscf_area) {
2548 QDIO_PRINT_WARN("No memory for setting delay target on " \ 2551 QDIO_PRINT_WARN("No memory for setting delay target on " \
2549 "subchannel 0.%x.%x.\n", 2552 "subchannel 0.%x.%x.\n",
@@ -2581,7 +2584,7 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2581 QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long)); 2584 QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
2582 result = 0; /* not critical */ 2585 result = 0; /* not critical */
2583out: 2586out:
2584 free_page ((unsigned long) scsscf_area); 2587 mempool_free(scsscf_area, qdio_mempool_scssc);
2585 return result; 2588 return result;
2586} 2589}
2587 2590
@@ -2980,7 +2983,7 @@ qdio_allocate(struct qdio_initialize *init_data)
2980 qdio_allocate_do_dbf(init_data); 2983 qdio_allocate_do_dbf(init_data);
2981 2984
2982 /* create irq */ 2985 /* create irq */
2983 irq_ptr = kzalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA); 2986 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2984 2987
2985 QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); 2988 QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2986 QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); 2989 QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
@@ -2995,7 +2998,7 @@ qdio_allocate(struct qdio_initialize *init_data)
2995 /* QDR must be in DMA area since CCW data address is only 32 bit */ 2998 /* QDR must be in DMA area since CCW data address is only 32 bit */
2996 irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA); 2999 irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
2997 if (!(irq_ptr->qdr)) { 3000 if (!(irq_ptr->qdr)) {
2998 kfree(irq_ptr); 3001 free_page((unsigned long) irq_ptr);
2999 QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n"); 3002 QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
3000 return -ENOMEM; 3003 return -ENOMEM;
3001 } 3004 }
@@ -3780,6 +3783,16 @@ oom:
3780 return -ENOMEM; 3783 return -ENOMEM;
3781} 3784}
3782 3785
3786static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
3787{
3788 return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
3789}
3790
3791static void qdio_mempool_free(void *element, void *size)
3792{
3793 free_page((unsigned long) element);
3794}
3795
3783static int __init 3796static int __init
3784init_QDIO(void) 3797init_QDIO(void)
3785{ 3798{
@@ -3809,6 +3822,10 @@ init_QDIO(void)
3809 3822
3810 qdio_add_procfs_entry(); 3823 qdio_add_procfs_entry();
3811 3824
3825 qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
3826 qdio_mempool_alloc,
3827 qdio_mempool_free, NULL);
3828
3812 if (tiqdio_check_chsc_availability()) 3829 if (tiqdio_check_chsc_availability())
3813 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); 3830 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3814 3831
@@ -3824,6 +3841,7 @@ cleanup_QDIO(void)
3824 qdio_remove_procfs_entry(); 3841 qdio_remove_procfs_entry();
3825 qdio_release_qdio_memory(); 3842 qdio_release_qdio_memory();
3826 qdio_unregister_dbf_views(); 3843 qdio_unregister_dbf_views();
3844 mempool_destroy(qdio_mempool_scssc);
3827 3845
3828 printk("qdio: %s: module removed\n",version); 3846 printk("qdio: %s: module removed\n",version);
3829} 3847}
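The qdio changes switch the CHSC scratch areas from get_zeroed_page() at call time to a small mempool created at module init, so allocations made with GFP_ATOMIC in those paths can fall back to a preallocated reserve under memory pressure. A simplified, single-threaded sketch of the reserve idea (not the mempool API itself):

#include <stdlib.h>

#define RESERVE_ELEMENTS 2	/* mirrors QDIO_MEMPOOL_SCSSC_ELEMENTS */

static void *reserve[RESERVE_ELEMENTS];
static int reserved;

static void pool_init(size_t size)
{
	while (reserved < RESERVE_ELEMENTS)
		reserve[reserved++] = calloc(1, size);
}

static void *pool_alloc(size_t size)
{
	void *p = calloc(1, size);		/* try the allocator first */

	if (!p && reserved)
		p = reserve[--reserved];	/* fall back to the reserve */
	return p;
}

static void pool_free(void *p)
{
	if (reserved < RESERVE_ELEMENTS)
		reserve[reserved++] = p;	/* refill the reserve */
	else
		free(p);
}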
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index b3c6e7907790..cb14642d97aa 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -8014,7 +8014,6 @@ static int (*qeth_old_arp_constructor) (struct neighbour *);
8014 8014
8015static struct neigh_ops arp_direct_ops_template = { 8015static struct neigh_ops arp_direct_ops_template = {
8016 .family = AF_INET, 8016 .family = AF_INET,
8017 .destructor = NULL,
8018 .solicit = NULL, 8017 .solicit = NULL,
8019 .error_report = NULL, 8018 .error_report = NULL,
8020 .output = dev_queue_xmit, 8019 .output = dev_queue_xmit,
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 3bf466603512..f99e55308b32 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -13,6 +13,7 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/time.h>
16 17
17#include <asm/lowcore.h> 18#include <asm/lowcore.h>
18 19
@@ -362,12 +363,19 @@ s390_revalidate_registers(struct mci *mci)
362 return kill_task; 363 return kill_task;
363} 364}
364 365
366#define MAX_IPD_COUNT 29
367#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
368
365/* 369/*
366 * machine check handler. 370 * machine check handler.
367 */ 371 */
368void 372void
369s390_do_machine_check(struct pt_regs *regs) 373s390_do_machine_check(struct pt_regs *regs)
370{ 374{
375 static DEFINE_SPINLOCK(ipd_lock);
376 static unsigned long long last_ipd;
377 static int ipd_count;
378 unsigned long long tmp;
371 struct mci *mci; 379 struct mci *mci;
372 struct mcck_struct *mcck; 380 struct mcck_struct *mcck;
373 int umode; 381 int umode;
@@ -404,11 +412,27 @@ s390_do_machine_check(struct pt_regs *regs)
404 s390_handle_damage("processing backup machine " 412 s390_handle_damage("processing backup machine "
405 "check with damage."); 413 "check with damage.");
406 } 414 }
407 if (!umode) 415
408 s390_handle_damage("processing backup machine " 416 /*
409 "check in kernel mode."); 417 * Nullifying exigent condition, therefore we might
410 mcck->kill_task = 1; 418 * retry this instruction.
411 mcck->mcck_code = *(unsigned long long *) mci; 419 */
420
421 spin_lock(&ipd_lock);
422
423 tmp = get_clock();
424
425 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
426 ipd_count++;
427 else
428 ipd_count = 1;
429
430 last_ipd = tmp;
431
432 if (ipd_count == MAX_IPD_COUNT)
433 s390_handle_damage("too many ipd retries.");
434
435 spin_unlock(&ipd_lock);
412 } 436 }
413 else { 437 else {
414 /* Processing damage -> stopping machine */ 438 /* Processing damage -> stopping machine */
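Instead of treating every kernel-mode instruction-processing-damage check as fatal, the s390mach.c change retries and only calls s390_handle_damage() once MAX_IPD_COUNT retries land inside the MAX_IPD_TIME window. A stand-alone sketch of that windowed retry counter (the driver serialises it with ipd_lock; this sketch is single-threaded):

#include <time.h>

#define MAX_RETRIES	29		/* mirrors MAX_IPD_COUNT */
#define WINDOW_SECONDS	(5 * 60)	/* mirrors MAX_IPD_TIME */

static int too_many_retries(void)
{
	static time_t last;
	static int count;
	time_t now = time(NULL);

	if (last && now - last < WINDOW_SECONDS)
		count++;		/* still inside the window */
	else
		count = 1;		/* window expired, start over */
	last = now;

	return count == MAX_RETRIES;
}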
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3e7302692dbe..a480a3742d47 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -446,7 +446,9 @@ config SCSI_DPT_I2O
446 446
447config SCSI_ADVANSYS 447config SCSI_ADVANSYS
448 tristate "AdvanSys SCSI support" 448 tristate "AdvanSys SCSI support"
449 depends on (ISA || EISA || PCI) && SCSI && BROKEN 449 depends on SCSI
450 depends on ISA || EISA || PCI
451 depends on BROKEN || X86_32
450 help 452 help
451 This is a driver for all SCSI host adapters manufactured by 453 This is a driver for all SCSI host adapters manufactured by
452 AdvanSys. It is documented in the kernel source in 454 AdvanSys. It is documented in the kernel source in
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 28b93057b607..2a419634b256 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2051,7 +2051,7 @@ STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
2051#define ADV_VADDR_TO_U32 virt_to_bus 2051#define ADV_VADDR_TO_U32 virt_to_bus
2052#define ADV_U32_TO_VADDR bus_to_virt 2052#define ADV_U32_TO_VADDR bus_to_virt
2053 2053
2054#define AdvPortAddr ulong /* Virtual memory address size */ 2054#define AdvPortAddr void __iomem * /* Virtual memory address size */
2055 2055
2056/* 2056/*
2057 * Define Adv Library required memory access macros. 2057 * Define Adv Library required memory access macros.
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index cb30d9c1153d..0c9c2f400bf6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -219,6 +219,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
219 ahc->flags |= AHC_39BIT_ADDRESSING; 219 ahc->flags |= AHC_39BIT_ADDRESSING;
220 } else { 220 } else {
221 if (dma_set_mask(dev, DMA_32BIT_MASK)) { 221 if (dma_set_mask(dev, DMA_32BIT_MASK)) {
222 ahc_free(ahc);
222 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); 223 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
223 return (-ENODEV); 224 return (-ENODEV);
224 } 225 }
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 5f586140e057..3adecef21783 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -2036,12 +2036,12 @@ ahc_pci_resume(struct ahc_softc *ahc)
2036 * that the OS doesn't know about and rely on our chip 2036 * that the OS doesn't know about and rely on our chip
2037 * reset handler to handle the rest. 2037 * reset handler to handle the rest.
2038 */ 2038 */
2039 ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4, 2039 ahc_pci_write_config(ahc->dev_softc, DEVCONFIG,
2040 ahc->bus_softc.pci_softc.devconfig); 2040 ahc->bus_softc.pci_softc.devconfig, /*bytes*/4);
2041 ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1, 2041 ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND,
2042 ahc->bus_softc.pci_softc.command); 2042 ahc->bus_softc.pci_softc.command, /*bytes*/1);
2043 ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1, 2043 ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME,
2044 ahc->bus_softc.pci_softc.csize_lattime); 2044 ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1);
2045 if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) { 2045 if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) {
2046 struct seeprom_descriptor sd; 2046 struct seeprom_descriptor sd;
2047 u_int sxfrctl1; 2047 u_int sxfrctl1;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 0a8ad37ae899..2e9be83a697f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -739,7 +739,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
739{ 739{
740 struct viosrp_adapter_info *req; 740 struct viosrp_adapter_info *req;
741 struct srp_event_struct *evt_struct; 741 struct srp_event_struct *evt_struct;
742 742 dma_addr_t addr;
743
743 evt_struct = get_event_struct(&hostdata->pool); 744 evt_struct = get_event_struct(&hostdata->pool);
744 if (!evt_struct) { 745 if (!evt_struct) {
745 printk(KERN_ERR "ibmvscsi: couldn't allocate an event " 746 printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
@@ -757,10 +758,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
757 758
758 req->common.type = VIOSRP_ADAPTER_INFO_TYPE; 759 req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
759 req->common.length = sizeof(hostdata->madapter_info); 760 req->common.length = sizeof(hostdata->madapter_info);
760 req->buffer = dma_map_single(hostdata->dev, 761 req->buffer = addr = dma_map_single(hostdata->dev,
761 &hostdata->madapter_info, 762 &hostdata->madapter_info,
762 sizeof(hostdata->madapter_info), 763 sizeof(hostdata->madapter_info),
763 DMA_BIDIRECTIONAL); 764 DMA_BIDIRECTIONAL);
764 765
765 if (dma_mapping_error(req->buffer)) { 766 if (dma_mapping_error(req->buffer)) {
766 printk(KERN_ERR 767 printk(KERN_ERR
@@ -770,8 +771,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
770 return; 771 return;
771 } 772 }
772 773
773 if (ibmvscsi_send_srp_event(evt_struct, hostdata)) 774 if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
774 printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n"); 775 printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
776 dma_unmap_single(hostdata->dev,
777 addr,
778 sizeof(hostdata->madapter_info),
779 DMA_BIDIRECTIONAL);
780 }
775}; 781};
776 782
777/** 783/**
@@ -1259,6 +1265,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1259{ 1265{
1260 struct viosrp_host_config *host_config; 1266 struct viosrp_host_config *host_config;
1261 struct srp_event_struct *evt_struct; 1267 struct srp_event_struct *evt_struct;
1268 dma_addr_t addr;
1262 int rc; 1269 int rc;
1263 1270
1264 evt_struct = get_event_struct(&hostdata->pool); 1271 evt_struct = get_event_struct(&hostdata->pool);
@@ -1279,8 +1286,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1279 memset(host_config, 0x00, sizeof(*host_config)); 1286 memset(host_config, 0x00, sizeof(*host_config));
1280 host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; 1287 host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
1281 host_config->common.length = length; 1288 host_config->common.length = length;
1282 host_config->buffer = dma_map_single(hostdata->dev, buffer, length, 1289 host_config->buffer = addr = dma_map_single(hostdata->dev, buffer,
1283 DMA_BIDIRECTIONAL); 1290 length,
1291 DMA_BIDIRECTIONAL);
1284 1292
1285 if (dma_mapping_error(host_config->buffer)) { 1293 if (dma_mapping_error(host_config->buffer)) {
1286 printk(KERN_ERR 1294 printk(KERN_ERR
@@ -1291,11 +1299,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1291 1299
1292 init_completion(&evt_struct->comp); 1300 init_completion(&evt_struct->comp);
1293 rc = ibmvscsi_send_srp_event(evt_struct, hostdata); 1301 rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
1294 if (rc == 0) { 1302 if (rc == 0)
1295 wait_for_completion(&evt_struct->comp); 1303 wait_for_completion(&evt_struct->comp);
1296 dma_unmap_single(hostdata->dev, host_config->buffer, 1304 dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
1297 length, DMA_BIDIRECTIONAL);
1298 }
1299 1305
1300 return rc; 1306 return rc;
1301} 1307}
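Both ibmvscsi hunks keep the freshly mapped DMA address in a local variable and unmap with that copy rather than re-reading req->buffer or host_config->buffer, since the firmware response can overwrite the request by the time cleanup runs; the send-failure path now also unmaps. A hedged sketch of keeping the handle in a local (types invented):

typedef unsigned long dma_handle_t;	/* stand-in for dma_addr_t */

struct request_stub {
	dma_handle_t buffer;		/* may be overwritten by the reply */
};

static dma_handle_t map_single(void *p, unsigned long len)
{
	(void)len;
	return (dma_handle_t)p;
}

static void unmap_single(dma_handle_t h, unsigned long len)
{
	(void)h; (void)len;
}

static int send_event(struct request_stub *req)
{
	(void)req;
	return -1;			/* assume the send fails */
}

static void issue(struct request_stub *req, void *data, unsigned long len)
{
	dma_handle_t addr;

	req->buffer = addr = map_single(data, len);
	if (send_event(req))
		unmap_single(addr, len);	/* use the local, not req->buffer */
}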
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fad607b2e6f4..ee22173fce43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -27,7 +27,6 @@ void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
27int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); 27int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
28void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 28void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
29void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 29void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
30void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
31int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *, 30int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
32 uint32_t); 31 uint32_t);
33void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); 32void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 8932b1be2b60..41cf5d3ea6ce 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -113,6 +113,7 @@ struct lpfc_nodelist {
113#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from 113#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
114 NPR list */ 114 NPR list */
115#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */ 115#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
116#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
116 117
117/* Defines for list searchs */ 118/* Defines for list searchs */
118#define NLP_SEARCH_MAPPED 0x1 /* search mapped */ 119#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4813beaaca8f..283b7d824c34 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -302,10 +302,6 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
302 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0)) 302 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
303 goto fail_free_mbox; 303 goto fail_free_mbox;
304 304
305 /*
306 * set_slim mailbox command needs to execute first,
307 * queue this command to be processed later.
308 */
309 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 305 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
310 mbox->context2 = ndlp; 306 mbox->context2 = ndlp;
311 307
@@ -781,25 +777,26 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
781 if (disc && phba->num_disc_nodes) { 777 if (disc && phba->num_disc_nodes) {
782 /* Check to see if there are more PLOGIs to be sent */ 778 /* Check to see if there are more PLOGIs to be sent */
783 lpfc_more_plogi(phba); 779 lpfc_more_plogi(phba);
784 }
785 780
786 if (phba->num_disc_nodes == 0) { 781 if (phba->num_disc_nodes == 0) {
787 spin_lock_irq(phba->host->host_lock); 782 spin_lock_irq(phba->host->host_lock);
788 phba->fc_flag &= ~FC_NDISC_ACTIVE; 783 phba->fc_flag &= ~FC_NDISC_ACTIVE;
789 spin_unlock_irq(phba->host->host_lock); 784 spin_unlock_irq(phba->host->host_lock);
790 785
791 lpfc_can_disctmo(phba); 786 lpfc_can_disctmo(phba);
792 if (phba->fc_flag & FC_RSCN_MODE) { 787 if (phba->fc_flag & FC_RSCN_MODE) {
793 /* Check to see if more RSCNs came in while we were 788 /*
794 * processing this one. 789 * Check to see if more RSCNs came in while
795 */ 790 * we were processing this one.
796 if ((phba->fc_rscn_id_cnt == 0) && 791 */
797 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { 792 if ((phba->fc_rscn_id_cnt == 0) &&
798 spin_lock_irq(phba->host->host_lock); 793 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
799 phba->fc_flag &= ~FC_RSCN_MODE; 794 spin_lock_irq(phba->host->host_lock);
800 spin_unlock_irq(phba->host->host_lock); 795 phba->fc_flag &= ~FC_RSCN_MODE;
801 } else { 796 spin_unlock_irq(phba->host->host_lock);
802 lpfc_els_handle_rscn(phba); 797 } else {
798 lpfc_els_handle_rscn(phba);
799 }
803 } 800 }
804 } 801 }
805 } 802 }
@@ -1263,7 +1260,7 @@ lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1263 psli = &phba->sli; 1260 psli = &phba->sli;
1264 pring = &psli->ring[LPFC_ELS_RING]; 1261 pring = &psli->ring[LPFC_ELS_RING];
1265 1262
1266 cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name)); 1263 cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
1267 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 1264 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
1268 ndlp->nlp_DID, ELS_CMD_LOGO); 1265 ndlp->nlp_DID, ELS_CMD_LOGO);
1269 if (!elsiocb) 1266 if (!elsiocb)
@@ -1451,22 +1448,23 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
1451 * PLOGIs to be sent 1448 * PLOGIs to be sent
1452 */ 1449 */
1453 lpfc_more_plogi(phba); 1450 lpfc_more_plogi(phba);
1454 }
1455 1451
1456 if (phba->num_disc_nodes == 0) { 1452 if (phba->num_disc_nodes == 0) {
1457 phba->fc_flag &= ~FC_NDISC_ACTIVE; 1453 phba->fc_flag &= ~FC_NDISC_ACTIVE;
1458 lpfc_can_disctmo(phba); 1454 lpfc_can_disctmo(phba);
1459 if (phba->fc_flag & FC_RSCN_MODE) { 1455 if (phba->fc_flag & FC_RSCN_MODE) {
1460 /* Check to see if more RSCNs 1456 /*
1461 * came in while we were 1457 * Check to see if more RSCNs
1462 * processing this one. 1458 * came in while we were
1463 */ 1459 * processing this one.
1464 if((phba->fc_rscn_id_cnt==0) && 1460 */
1465 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { 1461 if((phba->fc_rscn_id_cnt==0) &&
1466 phba->fc_flag &= ~FC_RSCN_MODE; 1462 !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
1467 } 1463 phba->fc_flag &= ~FC_RSCN_MODE;
1468 else { 1464 }
1469 lpfc_els_handle_rscn(phba); 1465 else {
1466 lpfc_els_handle_rscn(phba);
1467 }
1470 } 1468 }
1471 } 1469 }
1472 } 1470 }
@@ -1872,9 +1870,6 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1872 if (mbox) { 1870 if (mbox) {
1873 if ((rspiocb->iocb.ulpStatus == 0) 1871 if ((rspiocb->iocb.ulpStatus == 0)
1874 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 1872 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1875 /* set_slim mailbox command needs to execute first,
1876 * queue this command to be processed later.
1877 */
1878 lpfc_unreg_rpi(phba, ndlp); 1873 lpfc_unreg_rpi(phba, ndlp);
1879 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 1874 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1880 mbox->context2 = ndlp; 1875 mbox->context2 = ndlp;
@@ -1920,6 +1915,7 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1920 uint8_t *pcmd; 1915 uint8_t *pcmd;
1921 uint16_t cmdsize; 1916 uint16_t cmdsize;
1922 int rc; 1917 int rc;
1918 ELS_PKT *els_pkt_ptr;
1923 1919
1924 psli = &phba->sli; 1920 psli = &phba->sli;
1925 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1921 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
@@ -1958,6 +1954,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1958 pcmd += sizeof (uint32_t); 1954 pcmd += sizeof (uint32_t);
1959 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); 1955 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
1960 break; 1956 break;
1957 case ELS_CMD_PRLO:
1958 cmdsize = sizeof (uint32_t) + sizeof (PRLO);
1959 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1960 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
1961 if (!elsiocb)
1962 return 1;
1963
1964 icmd = &elsiocb->iocb;
1965 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1966 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1967
1968 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
1969 sizeof (uint32_t) + sizeof (PRLO));
1970 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
1971 els_pkt_ptr = (ELS_PKT *) pcmd;
1972 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
1973 break;
1961 default: 1974 default:
1962 return 1; 1975 return 1;
1963 } 1976 }
@@ -2498,7 +2511,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2498 /* If we are about to begin discovery, just ACC the RSCN. 2511 /* If we are about to begin discovery, just ACC the RSCN.
2499 * Discovery processing will satisfy it. 2512 * Discovery processing will satisfy it.
2500 */ 2513 */
2501 if (phba->hba_state < LPFC_NS_QRY) { 2514 if (phba->hba_state <= LPFC_NS_QRY) {
2502 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 2515 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2503 newnode); 2516 newnode);
2504 return 0; 2517 return 0;
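
In the lpfc_els.c hunks above (lpfc_cmpl_els_plogi and lpfc_cancel_retry_delay_tmo) the closing brace of the "send more PLOGIs" block is moved up, so the "discovery finished" cleanup now runs whenever num_disc_nodes reaches zero, not only when this particular completion was for a discovery node. A condensed, non-authoritative sketch of the corrected flow; the locking around the RSCN flag update is elided here but present in the hunk.

static void plogi_done_sketch(struct lpfc_hba *phba, int disc)
{
        if (disc && phba->num_disc_nodes)
                lpfc_more_plogi(phba);          /* kick off the next batch */
        /* the block ends here now, so the check below always runs */

        if (phba->num_disc_nodes == 0) {
                phba->fc_flag &= ~FC_NDISC_ACTIVE;  /* under host_lock in the driver */
                lpfc_can_disctmo(phba);

                if (phba->fc_flag & FC_RSCN_MODE) {
                        /* more RSCNs may have arrived while this one was
                         * being processed */
                        if (phba->fc_rscn_id_cnt == 0 &&
                            !(phba->fc_flag & FC_RSCN_DISCOVERY))
                                phba->fc_flag &= ~FC_RSCN_MODE;
                        else
                                lpfc_els_handle_rscn(phba);
                }
        }
}
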
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 6721e679df62..adb086009ae0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -311,8 +311,8 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
311 evtp->evt_arg2 = arg2; 311 evtp->evt_arg2 = arg2;
312 evtp->evt = evt; 312 evtp->evt = evt;
313 313
314 list_add_tail(&evtp->evt_listp, &phba->work_list);
315 spin_lock_irq(phba->host->host_lock); 314 spin_lock_irq(phba->host->host_lock);
315 list_add_tail(&evtp->evt_listp, &phba->work_list);
316 if (phba->work_wait) 316 if (phba->work_wait)
317 wake_up(phba->work_wait); 317 wake_up(phba->work_wait);
318 spin_unlock_irq(phba->host->host_lock); 318 spin_unlock_irq(phba->host->host_lock);
@@ -1071,10 +1071,6 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1071 /* initialize static port data */ 1071 /* initialize static port data */
1072 rport->maxframe_size = ndlp->nlp_maxframe; 1072 rport->maxframe_size = ndlp->nlp_maxframe;
1073 rport->supported_classes = ndlp->nlp_class_sup; 1073 rport->supported_classes = ndlp->nlp_class_sup;
1074 if ((rport->scsi_target_id != -1) &&
1075 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1076 ndlp->nlp_sid = rport->scsi_target_id;
1077 }
1078 rdata = rport->dd_data; 1074 rdata = rport->dd_data;
1079 rdata->pnode = ndlp; 1075 rdata->pnode = ndlp;
1080 1076
@@ -1087,6 +1083,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1087 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 1083 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1088 fc_remote_port_rolechg(rport, rport_ids.roles); 1084 fc_remote_port_rolechg(rport, rport_ids.roles);
1089 1085
1086 if ((rport->scsi_target_id != -1) &&
1087 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1088 ndlp->nlp_sid = rport->scsi_target_id;
1089 }
1090 1090
1091 return; 1091 return;
1092} 1092}
@@ -1238,6 +1238,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1238 evt_listp); 1238 evt_listp);
1239 1239
1240 } 1240 }
1241 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1241 nlp->nlp_type |= NLP_FC_NODE; 1242 nlp->nlp_type |= NLP_FC_NODE;
1242 break; 1243 break;
1243 case NLP_MAPPED_LIST: 1244 case NLP_MAPPED_LIST:
@@ -1258,6 +1259,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1258 evt_listp); 1259 evt_listp);
1259 1260
1260 } 1261 }
1262 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1261 break; 1263 break;
1262 case NLP_NPR_LIST: 1264 case NLP_NPR_LIST:
1263 nlp->nlp_flag |= list; 1265 nlp->nlp_flag |= list;
@@ -1402,6 +1404,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1402 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) 1404 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1403 return 1; 1405 return 1;
1404 case CMD_ELS_REQUEST64_CR: 1406 case CMD_ELS_REQUEST64_CR:
1407 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1408 return 1;
1405 case CMD_XMIT_ELS_RSP64_CX: 1409 case CMD_XMIT_ELS_RSP64_CX:
1406 if (iocb->context1 == (uint8_t *) ndlp) 1410 if (iocb->context1 == (uint8_t *) ndlp)
1407 return 1; 1411 return 1;
@@ -1901,10 +1905,8 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1901 */ 1905 */
1902 if (ndlp->nlp_flag & NLP_DELAY_TMO) 1906 if (ndlp->nlp_flag & NLP_DELAY_TMO)
1903 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1907 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1904 } else { 1908 } else
1905 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1906 ndlp = NULL; 1909 ndlp = NULL;
1907 }
1908 } else { 1910 } else {
1909 flg = ndlp->nlp_flag & NLP_LIST_MASK; 1911 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1910 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST)) 1912 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
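
lpfc_workq_post_event() now adds the event to phba->work_list with host_lock held, so the worker thread can never observe, or be woken for, a half-linked entry. A generic sketch of the pattern; the lock, list, wait queue and struct work_evt below are placeholders, not lpfc symbols.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct work_evt {
        struct list_head evt_listp;
};

static DEFINE_SPINLOCK(work_lock);
static LIST_HEAD(work_list);
static wait_queue_head_t *work_wait;

static void post_event(struct work_evt *evtp)
{
        spin_lock_irq(&work_lock);
        list_add_tail(&evtp->evt_listp, &work_list);  /* publish while locked */
        if (work_wait)
                wake_up(work_wait);   /* worker only ever sees a fully linked entry */
        spin_unlock_irq(&work_lock);
}
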
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 54d04188f7cc..eedf98801366 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -449,6 +449,7 @@ struct serv_parm { /* Structure is in Big Endian format */
449#define ELS_CMD_RRQ 0x12000000 449#define ELS_CMD_RRQ 0x12000000
450#define ELS_CMD_PRLI 0x20100014 450#define ELS_CMD_PRLI 0x20100014
451#define ELS_CMD_PRLO 0x21100014 451#define ELS_CMD_PRLO 0x21100014
452#define ELS_CMD_PRLO_ACC 0x02100014
452#define ELS_CMD_PDISC 0x50000000 453#define ELS_CMD_PDISC 0x50000000
453#define ELS_CMD_FDISC 0x51000000 454#define ELS_CMD_FDISC 0x51000000
454#define ELS_CMD_ADISC 0x52000000 455#define ELS_CMD_ADISC 0x52000000
@@ -484,6 +485,7 @@ struct serv_parm { /* Structure is in Big Endian format */
484#define ELS_CMD_RRQ 0x12 485#define ELS_CMD_RRQ 0x12
485#define ELS_CMD_PRLI 0x14001020 486#define ELS_CMD_PRLI 0x14001020
486#define ELS_CMD_PRLO 0x14001021 487#define ELS_CMD_PRLO 0x14001021
488#define ELS_CMD_PRLO_ACC 0x14001002
487#define ELS_CMD_PDISC 0x50 489#define ELS_CMD_PDISC 0x50
488#define ELS_CMD_FDISC 0x51 490#define ELS_CMD_FDISC 0x51
489#define ELS_CMD_ADISC 0x52 491#define ELS_CMD_ADISC 0x52
@@ -1539,6 +1541,7 @@ typedef struct {
1539 1541
1540#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ 1542#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
1541#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */ 1543#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */
1544#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */
1542 1545
1543 uint32_t link_speed; 1546 uint32_t link_speed;
1544#define LINK_SPEED_AUTO 0 /* Auto selection */ 1547#define LINK_SPEED_AUTO 0 /* Auto selection */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 66d5d003555d..908d0f27706f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -294,15 +294,6 @@ lpfc_config_port_post(struct lpfc_hba * phba)
294 } 294 }
295 } 295 }
296 296
297 /* This should turn on DELAYED ABTS for ELS timeouts */
298 lpfc_set_slim(phba, pmb, 0x052198, 0x1);
299 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
300 phba->hba_state = LPFC_HBA_ERROR;
301 mempool_free( pmb, phba->mbox_mem_pool);
302 return -EIO;
303 }
304
305
306 lpfc_read_config(phba, pmb); 297 lpfc_read_config(phba, pmb);
307 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 298 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
308 lpfc_printf_log(phba, 299 lpfc_printf_log(phba,
@@ -804,7 +795,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
804 int max_speed; 795 int max_speed;
805 char * ports; 796 char * ports;
806 char * bus; 797 char * bus;
807 } m; 798 } m = {"<Unknown>", 0, "", ""};
808 799
809 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 800 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
810 ports = (hdrtype == 0x80) ? "2-port " : ""; 801 ports = (hdrtype == 0x80) ? "2-port " : "";
@@ -1627,7 +1618,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1627 1618
1628 error = lpfc_alloc_sysfs_attr(phba); 1619 error = lpfc_alloc_sysfs_attr(phba);
1629 if (error) 1620 if (error)
1630 goto out_kthread_stop; 1621 goto out_remove_host;
1631 1622
1632 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ, 1623 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ,
1633 LPFC_DRIVER_NAME, phba); 1624 LPFC_DRIVER_NAME, phba);
@@ -1644,8 +1635,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1644 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 1635 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1645 1636
1646 error = lpfc_sli_hba_setup(phba); 1637 error = lpfc_sli_hba_setup(phba);
1647 if (error) 1638 if (error) {
1639 error = -ENODEV;
1648 goto out_free_irq; 1640 goto out_free_irq;
1641 }
1649 1642
1650 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1643 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1651 spin_lock_irq(phba->host->host_lock); 1644 spin_lock_irq(phba->host->host_lock);
@@ -1700,6 +1693,9 @@ out_free_irq:
1700 free_irq(phba->pcidev->irq, phba); 1693 free_irq(phba->pcidev->irq, phba);
1701out_free_sysfs_attr: 1694out_free_sysfs_attr:
1702 lpfc_free_sysfs_attr(phba); 1695 lpfc_free_sysfs_attr(phba);
1696out_remove_host:
1697 fc_remove_host(phba->host);
1698 scsi_remove_host(phba->host);
1703out_kthread_stop: 1699out_kthread_stop:
1704 kthread_stop(phba->worker_thread); 1700 kthread_stop(phba->worker_thread);
1705out_free_iocbq: 1701out_free_iocbq:
@@ -1721,12 +1717,14 @@ out_iounmap_slim:
1721out_idr_remove: 1717out_idr_remove:
1722 idr_remove(&lpfc_hba_index, phba->brd_no); 1718 idr_remove(&lpfc_hba_index, phba->brd_no);
1723out_put_host: 1719out_put_host:
1720 phba->host = NULL;
1724 scsi_host_put(host); 1721 scsi_host_put(host);
1725out_release_regions: 1722out_release_regions:
1726 pci_release_regions(pdev); 1723 pci_release_regions(pdev);
1727out_disable_device: 1724out_disable_device:
1728 pci_disable_device(pdev); 1725 pci_disable_device(pdev);
1729out: 1726out:
1727 pci_set_drvdata(pdev, NULL);
1730 return error; 1728 return error;
1731} 1729}
1732 1730
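
The lpfc_pci_probe_one() hunks extend the usual goto-based unwind: each failure jumps to a label that undoes exactly what has been set up so far, and the new out_remove_host label tears the SCSI host back down before the worker kthread is stopped. A generic sketch of that ordering under stated assumptions; setup_a..c and teardown_a..b are placeholders, not lpfc functions.

static int setup_a(void), setup_b(void), setup_c(void);
static void teardown_a(void), teardown_b(void);

static int my_probe(void)
{
        int error;

        error = setup_a();
        if (error)
                goto out;

        error = setup_b();
        if (error)
                goto out_undo_a;

        error = setup_c();
        if (error) {
                error = -ENODEV;        /* normalize internal codes for the caller */
                goto out_undo_b;
        }
        return 0;

out_undo_b:
        teardown_b();                   /* newest resource first */
out_undo_a:
        teardown_a();
out:
        return error;
}
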
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index c585e2b2e589..e42f22aaf71b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -200,6 +200,9 @@ lpfc_init_link(struct lpfc_hba * phba,
200 break; 200 break;
201 } 201 }
202 202
203 /* Enable asynchronous ABTS responses from firmware */
204 mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
205
203 /* NEW_FEATURE 206 /* NEW_FEATURE
204 * Setting up the link speed 207 * Setting up the link speed
205 */ 208 */
@@ -292,36 +295,6 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
292 return; 295 return;
293} 296}
294 297
295/***********************************************/
296
297/* command to write slim */
298/***********************************************/
299void
300lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
301 uint32_t value)
302{
303 MAILBOX_t *mb;
304
305 mb = &pmb->mb;
306 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
307
308 /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
309 /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
310
311 /*
312 * Always turn on DELAYED ABTS for ELS timeouts
313 */
314 if ((addr == 0x052198) && (value == 0))
315 value = 1;
316
317 mb->un.varWords[0] = addr;
318 mb->un.varWords[1] = value;
319
320 mb->mbxCommand = MBX_SET_SLIM;
321 mb->mbxOwner = OWN_HOST;
322 return;
323}
324
325/**********************************************/ 298/**********************************************/
326/* lpfc_read_nv Issue a READ CONFIG */ 299/* lpfc_read_nv Issue a READ CONFIG */
327/* mailbox command */ 300/* mailbox command */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 3d77bd999b70..27d60ad897cd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -465,14 +465,18 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
465static int 465static int
466lpfc_rcv_logo(struct lpfc_hba * phba, 466lpfc_rcv_logo(struct lpfc_hba * phba,
467 struct lpfc_nodelist * ndlp, 467 struct lpfc_nodelist * ndlp,
468 struct lpfc_iocbq *cmdiocb) 468 struct lpfc_iocbq *cmdiocb,
469 uint32_t els_cmd)
469{ 470{
470 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */ 471 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
471 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary 472 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
472 * PLOGIs during LOGO storms from a device. 473 * PLOGIs during LOGO storms from a device.
473 */ 474 */
474 ndlp->nlp_flag |= NLP_LOGO_ACC; 475 ndlp->nlp_flag |= NLP_LOGO_ACC;
475 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 476 if (els_cmd == ELS_CMD_PRLO)
477 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
478 else
479 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
476 480
477 if (!(ndlp->nlp_type & NLP_FABRIC) || 481 if (!(ndlp->nlp_type & NLP_FABRIC) ||
478 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 482 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
@@ -681,7 +685,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
681 /* software abort outstanding PLOGI */ 685 /* software abort outstanding PLOGI */
682 lpfc_els_abort(phba, ndlp, 1); 686 lpfc_els_abort(phba, ndlp, 1);
683 687
684 lpfc_rcv_logo(phba, ndlp, cmdiocb); 688 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
685 return ndlp->nlp_state; 689 return ndlp->nlp_state;
686} 690}
687 691
@@ -788,10 +792,6 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
788 if (lpfc_reg_login 792 if (lpfc_reg_login
789 (phba, irsp->un.elsreq64.remoteID, 793 (phba, irsp->un.elsreq64.remoteID,
790 (uint8_t *) sp, mbox, 0) == 0) { 794 (uint8_t *) sp, mbox, 0) == 0) {
791 /* set_slim mailbox command needs to
792 * execute first, queue this command to
793 * be processed later.
794 */
795 switch (ndlp->nlp_DID) { 795 switch (ndlp->nlp_DID) {
796 case NameServer_DID: 796 case NameServer_DID:
797 mbox->mbox_cmpl = 797 mbox->mbox_cmpl =
@@ -832,11 +832,17 @@ static uint32_t
832lpfc_device_rm_plogi_issue(struct lpfc_hba * phba, 832lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
833 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 833 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
834{ 834{
835 /* software abort outstanding PLOGI */ 835 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
836 lpfc_els_abort(phba, ndlp, 1); 836 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
837 return ndlp->nlp_state;
838 }
839 else {
840 /* software abort outstanding PLOGI */
841 lpfc_els_abort(phba, ndlp, 1);
837 842
838 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 843 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
839 return NLP_STE_FREED_NODE; 844 return NLP_STE_FREED_NODE;
845 }
840} 846}
841 847
842static uint32_t 848static uint32_t
@@ -851,7 +857,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
851 ndlp->nlp_state = NLP_STE_NPR_NODE; 857 ndlp->nlp_state = NLP_STE_NPR_NODE;
852 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 858 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
853 spin_lock_irq(phba->host->host_lock); 859 spin_lock_irq(phba->host->host_lock);
854 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 860 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
855 spin_unlock_irq(phba->host->host_lock); 861 spin_unlock_irq(phba->host->host_lock);
856 862
857 return ndlp->nlp_state; 863 return ndlp->nlp_state;
@@ -905,7 +911,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
905 /* software abort outstanding ADISC */ 911 /* software abort outstanding ADISC */
906 lpfc_els_abort(phba, ndlp, 0); 912 lpfc_els_abort(phba, ndlp, 0);
907 913
908 lpfc_rcv_logo(phba, ndlp, cmdiocb); 914 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
909 return ndlp->nlp_state; 915 return ndlp->nlp_state;
910} 916}
911 917
@@ -932,7 +938,7 @@ lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
932 cmdiocb = (struct lpfc_iocbq *) arg; 938 cmdiocb = (struct lpfc_iocbq *) arg;
933 939
934 /* Treat like rcv logo */ 940 /* Treat like rcv logo */
935 lpfc_rcv_logo(phba, ndlp, cmdiocb); 941 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
936 return ndlp->nlp_state; 942 return ndlp->nlp_state;
937} 943}
938 944
@@ -987,11 +993,17 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
987 struct lpfc_nodelist * ndlp, void *arg, 993 struct lpfc_nodelist * ndlp, void *arg,
988 uint32_t evt) 994 uint32_t evt)
989{ 995{
990 /* software abort outstanding ADISC */ 996 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
991 lpfc_els_abort(phba, ndlp, 1); 997 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
998 return ndlp->nlp_state;
999 }
1000 else {
1001 /* software abort outstanding ADISC */
1002 lpfc_els_abort(phba, ndlp, 1);
992 1003
993 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1004 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
994 return NLP_STE_FREED_NODE; 1005 return NLP_STE_FREED_NODE;
1006 }
995} 1007}
996 1008
997static uint32_t 1009static uint32_t
@@ -1006,7 +1018,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
1006 ndlp->nlp_state = NLP_STE_NPR_NODE; 1018 ndlp->nlp_state = NLP_STE_NPR_NODE;
1007 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1019 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1008 spin_lock_irq(phba->host->host_lock); 1020 spin_lock_irq(phba->host->host_lock);
1009 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1021 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1010 ndlp->nlp_flag |= NLP_NPR_ADISC; 1022 ndlp->nlp_flag |= NLP_NPR_ADISC;
1011 spin_unlock_irq(phba->host->host_lock); 1023 spin_unlock_irq(phba->host->host_lock);
1012 1024
@@ -1048,7 +1060,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1048 1060
1049 cmdiocb = (struct lpfc_iocbq *) arg; 1061 cmdiocb = (struct lpfc_iocbq *) arg;
1050 1062
1051 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1063 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1052 return ndlp->nlp_state; 1064 return ndlp->nlp_state;
1053} 1065}
1054 1066
@@ -1073,7 +1085,7 @@ lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
1073 struct lpfc_iocbq *cmdiocb; 1085 struct lpfc_iocbq *cmdiocb;
1074 1086
1075 cmdiocb = (struct lpfc_iocbq *) arg; 1087 cmdiocb = (struct lpfc_iocbq *) arg;
1076 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 1088 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1077 return ndlp->nlp_state; 1089 return ndlp->nlp_state;
1078} 1090}
1079 1091
@@ -1133,8 +1145,14 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
1133 struct lpfc_nodelist * ndlp, void *arg, 1145 struct lpfc_nodelist * ndlp, void *arg,
1134 uint32_t evt) 1146 uint32_t evt)
1135{ 1147{
1136 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1148 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1137 return NLP_STE_FREED_NODE; 1149 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1150 return ndlp->nlp_state;
1151 }
1152 else {
1153 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1154 return NLP_STE_FREED_NODE;
1155 }
1138} 1156}
1139 1157
1140static uint32_t 1158static uint32_t
@@ -1146,7 +1164,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
1146 ndlp->nlp_state = NLP_STE_NPR_NODE; 1164 ndlp->nlp_state = NLP_STE_NPR_NODE;
1147 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1165 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1148 spin_lock_irq(phba->host->host_lock); 1166 spin_lock_irq(phba->host->host_lock);
1149 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1167 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1150 spin_unlock_irq(phba->host->host_lock); 1168 spin_unlock_irq(phba->host->host_lock);
1151 return ndlp->nlp_state; 1169 return ndlp->nlp_state;
1152} 1170}
@@ -1186,7 +1204,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
1186 /* Software abort outstanding PRLI before sending acc */ 1204 /* Software abort outstanding PRLI before sending acc */
1187 lpfc_els_abort(phba, ndlp, 1); 1205 lpfc_els_abort(phba, ndlp, 1);
1188 1206
1189 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1207 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1190 return ndlp->nlp_state; 1208 return ndlp->nlp_state;
1191} 1209}
1192 1210
@@ -1214,7 +1232,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
1214 struct lpfc_iocbq *cmdiocb; 1232 struct lpfc_iocbq *cmdiocb;
1215 1233
1216 cmdiocb = (struct lpfc_iocbq *) arg; 1234 cmdiocb = (struct lpfc_iocbq *) arg;
1217 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 1235 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1218 return ndlp->nlp_state; 1236 return ndlp->nlp_state;
1219} 1237}
1220 1238
@@ -1278,11 +1296,17 @@ static uint32_t
1278lpfc_device_rm_prli_issue(struct lpfc_hba * phba, 1296lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
1279 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1297 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1280{ 1298{
1281 /* software abort outstanding PRLI */ 1299 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1282 lpfc_els_abort(phba, ndlp, 1); 1300 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1301 return ndlp->nlp_state;
1302 }
1303 else {
1304 /* software abort outstanding PLOGI */
1305 lpfc_els_abort(phba, ndlp, 1);
1283 1306
1284 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1307 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1285 return NLP_STE_FREED_NODE; 1308 return NLP_STE_FREED_NODE;
1309 }
1286} 1310}
1287 1311
1288 1312
@@ -1313,7 +1337,7 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
1313 ndlp->nlp_state = NLP_STE_NPR_NODE; 1337 ndlp->nlp_state = NLP_STE_NPR_NODE;
1314 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1338 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1315 spin_lock_irq(phba->host->host_lock); 1339 spin_lock_irq(phba->host->host_lock);
1316 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1340 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1317 spin_unlock_irq(phba->host->host_lock); 1341 spin_unlock_irq(phba->host->host_lock);
1318 return ndlp->nlp_state; 1342 return ndlp->nlp_state;
1319} 1343}
@@ -1351,7 +1375,7 @@ lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
1351 1375
1352 cmdiocb = (struct lpfc_iocbq *) arg; 1376 cmdiocb = (struct lpfc_iocbq *) arg;
1353 1377
1354 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1378 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1355 return ndlp->nlp_state; 1379 return ndlp->nlp_state;
1356} 1380}
1357 1381
@@ -1375,7 +1399,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
1375 1399
1376 cmdiocb = (struct lpfc_iocbq *) arg; 1400 cmdiocb = (struct lpfc_iocbq *) arg;
1377 1401
1378 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 1402 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1379 return ndlp->nlp_state; 1403 return ndlp->nlp_state;
1380} 1404}
1381 1405
@@ -1386,7 +1410,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
1386 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; 1410 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1387 ndlp->nlp_state = NLP_STE_NPR_NODE; 1411 ndlp->nlp_state = NLP_STE_NPR_NODE;
1388 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1412 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1389 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1413 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1390 lpfc_disc_set_adisc(phba, ndlp); 1414 lpfc_disc_set_adisc(phba, ndlp);
1391 1415
1392 return ndlp->nlp_state; 1416 return ndlp->nlp_state;
@@ -1424,7 +1448,7 @@ lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
1424 1448
1425 cmdiocb = (struct lpfc_iocbq *) arg; 1449 cmdiocb = (struct lpfc_iocbq *) arg;
1426 1450
1427 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1451 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1428 return ndlp->nlp_state; 1452 return ndlp->nlp_state;
1429} 1453}
1430 1454
@@ -1456,7 +1480,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
1456 spin_unlock_irq(phba->host->host_lock); 1480 spin_unlock_irq(phba->host->host_lock);
1457 1481
1458 /* Treat like rcv logo */ 1482 /* Treat like rcv logo */
1459 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1483 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
1460 return ndlp->nlp_state; 1484 return ndlp->nlp_state;
1461} 1485}
1462 1486
@@ -1469,7 +1493,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
1469 ndlp->nlp_state = NLP_STE_NPR_NODE; 1493 ndlp->nlp_state = NLP_STE_NPR_NODE;
1470 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); 1494 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1471 spin_lock_irq(phba->host->host_lock); 1495 spin_lock_irq(phba->host->host_lock);
1472 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1496 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1473 spin_unlock_irq(phba->host->host_lock); 1497 spin_unlock_irq(phba->host->host_lock);
1474 lpfc_disc_set_adisc(phba, ndlp); 1498 lpfc_disc_set_adisc(phba, ndlp);
1475 return ndlp->nlp_state; 1499 return ndlp->nlp_state;
@@ -1551,7 +1575,7 @@ lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
1551 1575
1552 cmdiocb = (struct lpfc_iocbq *) arg; 1576 cmdiocb = (struct lpfc_iocbq *) arg;
1553 1577
1554 lpfc_rcv_logo(phba, ndlp, cmdiocb); 1578 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1555 return ndlp->nlp_state; 1579 return ndlp->nlp_state;
1556} 1580}
1557 1581
@@ -1617,9 +1641,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
1617 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1641 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1618{ 1642{
1619 struct lpfc_iocbq *cmdiocb, *rspiocb; 1643 struct lpfc_iocbq *cmdiocb, *rspiocb;
1644 IOCB_t *irsp;
1620 1645
1621 cmdiocb = (struct lpfc_iocbq *) arg; 1646 cmdiocb = (struct lpfc_iocbq *) arg;
1622 rspiocb = cmdiocb->context_un.rsp_iocb; 1647 rspiocb = cmdiocb->context_un.rsp_iocb;
1648
1649 irsp = &rspiocb->iocb;
1650 if (irsp->ulpStatus) {
1651 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1652 return NLP_STE_FREED_NODE;
1653 }
1623 return ndlp->nlp_state; 1654 return ndlp->nlp_state;
1624} 1655}
1625 1656
@@ -1628,9 +1659,16 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
1628 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1659 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1629{ 1660{
1630 struct lpfc_iocbq *cmdiocb, *rspiocb; 1661 struct lpfc_iocbq *cmdiocb, *rspiocb;
1662 IOCB_t *irsp;
1631 1663
1632 cmdiocb = (struct lpfc_iocbq *) arg; 1664 cmdiocb = (struct lpfc_iocbq *) arg;
1633 rspiocb = cmdiocb->context_un.rsp_iocb; 1665 rspiocb = cmdiocb->context_un.rsp_iocb;
1666
1667 irsp = &rspiocb->iocb;
1668 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1669 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1670 return NLP_STE_FREED_NODE;
1671 }
1634 return ndlp->nlp_state; 1672 return ndlp->nlp_state;
1635} 1673}
1636 1674
@@ -1649,9 +1687,16 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
1649 uint32_t evt) 1687 uint32_t evt)
1650{ 1688{
1651 struct lpfc_iocbq *cmdiocb, *rspiocb; 1689 struct lpfc_iocbq *cmdiocb, *rspiocb;
1690 IOCB_t *irsp;
1652 1691
1653 cmdiocb = (struct lpfc_iocbq *) arg; 1692 cmdiocb = (struct lpfc_iocbq *) arg;
1654 rspiocb = cmdiocb->context_un.rsp_iocb; 1693 rspiocb = cmdiocb->context_un.rsp_iocb;
1694
1695 irsp = &rspiocb->iocb;
1696 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1697 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1698 return NLP_STE_FREED_NODE;
1699 }
1655 return ndlp->nlp_state; 1700 return ndlp->nlp_state;
1656} 1701}
1657 1702
@@ -1668,7 +1713,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1668 1713
1669 if (!mb->mbxStatus) 1714 if (!mb->mbxStatus)
1670 ndlp->nlp_rpi = mb->un.varWords[0]; 1715 ndlp->nlp_rpi = mb->un.varWords[0];
1671 1716 else {
1717 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1718 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1719 return NLP_STE_FREED_NODE;
1720 }
1721 }
1672 return ndlp->nlp_state; 1722 return ndlp->nlp_state;
1673} 1723}
1674 1724
@@ -1677,6 +1727,10 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba,
1677 struct lpfc_nodelist * ndlp, void *arg, 1727 struct lpfc_nodelist * ndlp, void *arg,
1678 uint32_t evt) 1728 uint32_t evt)
1679{ 1729{
1730 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1731 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1732 return ndlp->nlp_state;
1733 }
1680 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1734 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1681 return NLP_STE_FREED_NODE; 1735 return NLP_STE_FREED_NODE;
1682} 1736}
@@ -1687,7 +1741,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1687 uint32_t evt) 1741 uint32_t evt)
1688{ 1742{
1689 spin_lock_irq(phba->host->host_lock); 1743 spin_lock_irq(phba->host->host_lock);
1690 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1744 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1691 spin_unlock_irq(phba->host->host_lock); 1745 spin_unlock_irq(phba->host->host_lock);
1692 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1746 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1693 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1747 lpfc_cancel_retry_delay_tmo(phba, ndlp);
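
Several lpfc_nportdisc.c handlers above now defer node removal while discovery still wants the node: instead of freeing it immediately they set NLP_NODEV_REMOVE, and the later completion paths free the node once the exchange fails or discovery lets go of it. A condensed sketch of that pattern; the real state-machine handlers take (void *arg, uint32_t evt) and are abbreviated here.

static uint32_t device_rm_handler(struct lpfc_hba *phba,
                                  struct lpfc_nodelist *ndlp)
{
        if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
                /* discovery still needs this node: mark it and keep it */
                ndlp->nlp_flag |= NLP_NODEV_REMOVE;
                return ndlp->nlp_state;
        }
        /* otherwise remove it right away */
        lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
        return NLP_STE_FREED_NODE;
}

/* ...and a completion handler finishes the job once the exchange fails: */
static uint32_t cmpl_handler(struct lpfc_hba *phba,
                             struct lpfc_nodelist *ndlp, IOCB_t *irsp)
{
        if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
                lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
                return NLP_STE_FREED_NODE;
        }
        return ndlp->nlp_state;
}
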
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f93799873721..7dc4c2e6bed2 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -629,8 +629,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
629 struct lpfc_iocbq *piocbq; 629 struct lpfc_iocbq *piocbq;
630 IOCB_t *piocb; 630 IOCB_t *piocb;
631 struct fcp_cmnd *fcp_cmnd; 631 struct fcp_cmnd *fcp_cmnd;
632 struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device; 632 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
633 struct lpfc_rport_data *rdata = scsi_dev->hostdata;
634 struct lpfc_nodelist *ndlp = rdata->pnode; 633 struct lpfc_nodelist *ndlp = rdata->pnode;
635 634
636 if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 635 if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
@@ -665,56 +664,18 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
665 piocb->ulpTimeout = lpfc_cmd->timeout; 664 piocb->ulpTimeout = lpfc_cmd->timeout;
666 } 665 }
667 666
668 lpfc_cmd->rdata = rdata;
669
670 switch (task_mgmt_cmd) {
671 case FCP_LUN_RESET:
672 /* Issue LUN Reset to TGT <num> LUN <num> */
673 lpfc_printf_log(phba,
674 KERN_INFO,
675 LOG_FCP,
676 "%d:0703 Issue LUN Reset to TGT %d LUN %d "
677 "Data: x%x x%x\n",
678 phba->brd_no,
679 scsi_dev->id, scsi_dev->lun,
680 ndlp->nlp_rpi, ndlp->nlp_flag);
681
682 break;
683 case FCP_ABORT_TASK_SET:
684 /* Issue Abort Task Set to TGT <num> LUN <num> */
685 lpfc_printf_log(phba,
686 KERN_INFO,
687 LOG_FCP,
688 "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
689 "Data: x%x x%x\n",
690 phba->brd_no,
691 scsi_dev->id, scsi_dev->lun,
692 ndlp->nlp_rpi, ndlp->nlp_flag);
693
694 break;
695 case FCP_TARGET_RESET:
696 /* Issue Target Reset to TGT <num> */
697 lpfc_printf_log(phba,
698 KERN_INFO,
699 LOG_FCP,
700 "%d:0702 Issue Target Reset to TGT %d "
701 "Data: x%x x%x\n",
702 phba->brd_no,
703 scsi_dev->id, ndlp->nlp_rpi,
704 ndlp->nlp_flag);
705 break;
706 }
707
708 return (1); 667 return (1);
709} 668}
710 669
711static int 670static int
712lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) 671lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
672 unsigned tgt_id, struct lpfc_rport_data *rdata)
713{ 673{
714 struct lpfc_iocbq *iocbq; 674 struct lpfc_iocbq *iocbq;
715 struct lpfc_iocbq *iocbqrsp; 675 struct lpfc_iocbq *iocbqrsp;
716 int ret; 676 int ret;
717 677
678 lpfc_cmd->rdata = rdata;
718 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); 679 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
719 if (!ret) 680 if (!ret)
720 return FAILED; 681 return FAILED;
@@ -726,6 +687,13 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
726 if (!iocbqrsp) 687 if (!iocbqrsp)
727 return FAILED; 688 return FAILED;
728 689
690 /* Issue Target Reset to TGT <num> */
691 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
692 "%d:0702 Issue Target Reset to TGT %d "
693 "Data: x%x x%x\n",
694 phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
695 rdata->pnode->nlp_flag);
696
729 ret = lpfc_sli_issue_iocb_wait(phba, 697 ret = lpfc_sli_issue_iocb_wait(phba,
730 &phba->sli.ring[phba->sli.fcp_ring], 698 &phba->sli.ring[phba->sli.fcp_ring],
731 iocbq, iocbqrsp, lpfc_cmd->timeout); 699 iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -1021,6 +989,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1021 lpfc_cmd->pCmd = cmnd; 989 lpfc_cmd->pCmd = cmnd;
1022 lpfc_cmd->timeout = 60; 990 lpfc_cmd->timeout = 60;
1023 lpfc_cmd->scsi_hba = phba; 991 lpfc_cmd->scsi_hba = phba;
992 lpfc_cmd->rdata = rdata;
1024 993
1025 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); 994 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
1026 if (!ret) 995 if (!ret)
@@ -1033,6 +1002,11 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1033 if (iocbqrsp == NULL) 1002 if (iocbqrsp == NULL)
1034 goto out_free_scsi_buf; 1003 goto out_free_scsi_buf;
1035 1004
1005 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1006 "%d:0703 Issue LUN Reset to TGT %d LUN %d "
1007 "Data: x%x x%x\n", phba->brd_no, cmnd->device->id,
1008 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1009
1036 ret = lpfc_sli_issue_iocb_wait(phba, 1010 ret = lpfc_sli_issue_iocb_wait(phba,
1037 &phba->sli.ring[phba->sli.fcp_ring], 1011 &phba->sli.ring[phba->sli.fcp_ring],
1038 iocbq, iocbqrsp, lpfc_cmd->timeout); 1012 iocbq, iocbqrsp, lpfc_cmd->timeout);
@@ -1104,7 +1078,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1104 int match; 1078 int match;
1105 int ret = FAILED, i, err_count = 0; 1079 int ret = FAILED, i, err_count = 0;
1106 int cnt, loopcnt; 1080 int cnt, loopcnt;
1107 unsigned int midlayer_id = 0;
1108 struct lpfc_scsi_buf * lpfc_cmd; 1081 struct lpfc_scsi_buf * lpfc_cmd;
1109 1082
1110 lpfc_block_requests(phba); 1083 lpfc_block_requests(phba);
@@ -1124,7 +1097,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1124 * targets known to the driver. Should any target reset 1097 * targets known to the driver. Should any target reset
1125 * fail, this routine returns failure to the midlayer. 1098 * fail, this routine returns failure to the midlayer.
1126 */ 1099 */
1127 midlayer_id = cmnd->device->id;
1128 for (i = 0; i < MAX_FCP_TARGET; i++) { 1100 for (i = 0; i < MAX_FCP_TARGET; i++) {
1129 /* Search the mapped list for this target ID */ 1101 /* Search the mapped list for this target ID */
1130 match = 0; 1102 match = 0;
@@ -1137,9 +1109,8 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1137 if (!match) 1109 if (!match)
1138 continue; 1110 continue;
1139 1111
1140 lpfc_cmd->pCmd->device->id = i; 1112 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
1141 lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data; 1113 i, ndlp->rport->dd_data);
1142 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
1143 if (ret != SUCCESS) { 1114 if (ret != SUCCESS) {
1144 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1115 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1145 "%d:0713 Bus Reset on target %d failed\n", 1116 "%d:0713 Bus Reset on target %d failed\n",
@@ -1158,7 +1129,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1158 * the targets. Unfortunately, some targets do not abide by 1129 * the targets. Unfortunately, some targets do not abide by
1159 * this forcing the driver to double check. 1130 * this forcing the driver to double check.
1160 */ 1131 */
1161 cmnd->device->id = midlayer_id;
1162 cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 1132 cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1163 0, 0, LPFC_CTX_HOST); 1133 0, 0, LPFC_CTX_HOST);
1164 if (cnt) 1134 if (cnt)
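
The lpfc_scsi.c hunks stop abusing the midlayer's scsi_device for target reset (temporarily rewriting cmnd->device->id and ->hostdata): the target id and rport data now travel as explicit parameters to lpfc_scsi_tgt_reset(), and the log messages move to the call sites. An abbreviated fragment of the reworked bus-reset loop; find_mapped_node() stands in for the driver's walk of the mapped node list and is not an lpfc function.

for (i = 0; i < MAX_FCP_TARGET; i++) {
        struct lpfc_nodelist *ndlp = find_mapped_node(phba, i); /* placeholder lookup */

        if (!ndlp || !ndlp->rport)
                continue;

        /* target id and rport data are passed explicitly; the scsi_cmnd
         * that triggered the bus reset is no longer rewritten */
        ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, ndlp->rport->dd_data);
        if (ret != SUCCESS)
                err_count++;
}
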
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4cf1366108b7..6b737568b831 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.4" 21#define LPFC_DRIVER_VERSION "8.1.6"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 80b68a2481b3..de35ffe2f79d 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4471,7 +4471,6 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4471{ 4471{
4472 Scsi_Cmnd *scmd; 4472 Scsi_Cmnd *scmd;
4473 struct scsi_device *sdev; 4473 struct scsi_device *sdev;
4474 unsigned long flags = 0;
4475 scb_t *scb; 4474 scb_t *scb;
4476 int rval; 4475 int rval;
4477 4476
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index c11e5ce6865e..bec1424eda85 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mbox.c 12 * FILE : megaraid_mbox.c
13 * Version : v2.20.4.7 (Nov 14 2005) 13 * Version : v2.20.4.8 (Apr 11 2006)
14 * 14 *
15 * Authors: 15 * Authors:
16 * Atul Mukker <Atul.Mukker@lsil.com> 16 * Atul Mukker <Atul.Mukker@lsil.com>
@@ -2278,6 +2278,7 @@ megaraid_mbox_dpc(unsigned long devp)
2278 unsigned long flags; 2278 unsigned long flags;
2279 uint8_t c; 2279 uint8_t c;
2280 int status; 2280 int status;
2281 uioc_t *kioc;
2281 2282
2282 2283
2283 if (!adapter) return; 2284 if (!adapter) return;
@@ -2320,6 +2321,9 @@ megaraid_mbox_dpc(unsigned long devp)
2320 // remove from local clist 2321 // remove from local clist
2321 list_del_init(&scb->list); 2322 list_del_init(&scb->list);
2322 2323
2324 kioc = (uioc_t *)scb->gp;
2325 kioc->status = 0;
2326
2323 megaraid_mbox_mm_done(adapter, scb); 2327 megaraid_mbox_mm_done(adapter, scb);
2324 2328
2325 continue; 2329 continue;
@@ -2636,6 +2640,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2636 int recovery_window; 2640 int recovery_window;
2637 int recovering; 2641 int recovering;
2638 int i; 2642 int i;
2643 uioc_t *kioc;
2639 2644
2640 adapter = SCP2ADAPTER(scp); 2645 adapter = SCP2ADAPTER(scp);
2641 raid_dev = ADAP2RAIDDEV(adapter); 2646 raid_dev = ADAP2RAIDDEV(adapter);
@@ -2655,32 +2660,51 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2655 // Also, reset all the commands currently owned by the driver 2660 // Also, reset all the commands currently owned by the driver
2656 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); 2661 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2657 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { 2662 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2658
2659 list_del_init(&scb->list); // from pending list 2663 list_del_init(&scb->list); // from pending list
2660 2664
2661 con_log(CL_ANN, (KERN_WARNING 2665 if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2662 "megaraid: %ld:%d[%d:%d], reset from pending list\n", 2666 con_log(CL_ANN, (KERN_WARNING
2663 scp->serial_number, scb->sno, 2667 "megaraid: IOCTL packet with %d[%d:%d] being reset\n",
2664 scb->dev_channel, scb->dev_target)); 2668 scb->sno, scb->dev_channel, scb->dev_target));
2665 2669
2666 scp->result = (DID_RESET << 16); 2670 scb->status = -1;
2667 scp->scsi_done(scp);
2668 2671
2669 megaraid_dealloc_scb(adapter, scb); 2672 kioc = (uioc_t *)scb->gp;
2673 kioc->status = -EFAULT;
2674
2675 megaraid_mbox_mm_done(adapter, scb);
2676 } else {
2677 if (scb->scp == scp) { // Found command
2678 con_log(CL_ANN, (KERN_WARNING
2679 "megaraid: %ld:%d[%d:%d], reset from pending list\n",
2680 scp->serial_number, scb->sno,
2681 scb->dev_channel, scb->dev_target));
2682 } else {
2683 con_log(CL_ANN, (KERN_WARNING
2684 "megaraid: IO packet with %d[%d:%d] being reset\n",
2685 scb->sno, scb->dev_channel, scb->dev_target));
2686 }
2687
2688 scb->scp->result = (DID_RESET << 16);
2689 scb->scp->scsi_done(scb->scp);
2690
2691 megaraid_dealloc_scb(adapter, scb);
2692 }
2670 } 2693 }
2671 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); 2694 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2672 2695
2673 if (adapter->outstanding_cmds) { 2696 if (adapter->outstanding_cmds) {
2674 con_log(CL_ANN, (KERN_NOTICE 2697 con_log(CL_ANN, (KERN_NOTICE
2675 "megaraid: %d outstanding commands. Max wait %d sec\n", 2698 "megaraid: %d outstanding commands. Max wait %d sec\n",
2676 adapter->outstanding_cmds, MBOX_RESET_WAIT)); 2699 adapter->outstanding_cmds,
2700 (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
2677 } 2701 }
2678 2702
2679 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; 2703 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
2680 2704
2681 recovering = adapter->outstanding_cmds; 2705 recovering = adapter->outstanding_cmds;
2682 2706
2683 for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) { 2707 for (i = 0; i < recovery_window; i++) {
2684 2708
2685 megaraid_ack_sequence(adapter); 2709 megaraid_ack_sequence(adapter);
2686 2710
@@ -2689,12 +2713,11 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
2689 con_log(CL_ANN, ( 2713 con_log(CL_ANN, (
2690 "megaraid mbox: Wait for %d commands to complete:%d\n", 2714 "megaraid mbox: Wait for %d commands to complete:%d\n",
2691 adapter->outstanding_cmds, 2715 adapter->outstanding_cmds,
2692 MBOX_RESET_WAIT - i)); 2716 (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
2693 } 2717 }
2694 2718
2695 // bailout if no recovery happended in reset time 2719 // bailout if no recovery happended in reset time
2696 if ((i == MBOX_RESET_WAIT) && 2720 if (adapter->outstanding_cmds == 0) {
2697 (recovering == adapter->outstanding_cmds)) {
2698 break; 2721 break;
2699 } 2722 }
2700 2723
@@ -2918,12 +2941,13 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
2918 wmb(); 2941 wmb();
2919 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); 2942 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2920 2943
2921 for (i = 0; i < 0xFFFFF; i++) { 2944 for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
2922 if (mbox->numstatus != 0xFF) break; 2945 if (mbox->numstatus != 0xFF) break;
2923 rmb(); 2946 rmb();
2947 udelay(MBOX_SYNC_DELAY_200);
2924 } 2948 }
2925 2949
2926 if (i == 0xFFFFF) { 2950 if (i == MBOX_SYNC_WAIT_CNT) {
2927 // We may need to re-calibrate the counter 2951 // We may need to re-calibrate the counter
2928 con_log(CL_ANN, (KERN_CRIT 2952 con_log(CL_ANN, (KERN_CRIT
2929 "megaraid: fast sync command timed out\n")); 2953 "megaraid: fast sync command timed out\n"));
@@ -3475,7 +3499,7 @@ megaraid_cmm_register(adapter_t *adapter)
3475 adp.drvr_data = (unsigned long)adapter; 3499 adp.drvr_data = (unsigned long)adapter;
3476 adp.pdev = adapter->pdev; 3500 adp.pdev = adapter->pdev;
3477 adp.issue_uioc = megaraid_mbox_mm_handler; 3501 adp.issue_uioc = megaraid_mbox_mm_handler;
3478 adp.timeout = 300; 3502 adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
3479 adp.max_kioc = MBOX_MAX_USER_CMDS; 3503 adp.max_kioc = MBOX_MAX_USER_CMDS;
3480 3504
3481 if ((rval = mraid_mm_register_adp(&adp)) != 0) { 3505 if ((rval = mraid_mm_register_adp(&adp)) != 0) {
@@ -3702,7 +3726,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3702 unsigned long flags; 3726 unsigned long flags;
3703 3727
3704 kioc = (uioc_t *)scb->gp; 3728 kioc = (uioc_t *)scb->gp;
3705 kioc->status = 0;
3706 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; 3729 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3707 mbox64->mbox32.status = scb->status; 3730 mbox64->mbox32.status = scb->status;
3708 raw_mbox = (uint8_t *)&mbox64->mbox32; 3731 raw_mbox = (uint8_t *)&mbox64->mbox32;
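
mbox_post_sync_cmd_fast() now polls with a named iteration limit and a fixed udelay() per pass instead of a bare 0xFFFFF spin, which caps the wait in real time (0xFFFF iterations of 200 microseconds is roughly 13 seconds) and makes the bound tunable from megaraid_mbox.h. A sketch of that loop, mirroring the hunk above.

for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
        if (mbox->numstatus != 0xFF)    /* firmware has posted a status */
                break;
        rmb();                          /* force a fresh read on the next pass */
        udelay(MBOX_SYNC_DELAY_200);    /* 200 microseconds per iteration */
}

if (i == MBOX_SYNC_WAIT_CNT) {
        /* timed out: the driver logs this and treats the command as failed */
}
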
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 882fb1a0b575..868fb0ec93e7 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
21#include "megaraid_ioctl.h" 21#include "megaraid_ioctl.h"
22 22
23 23
24#define MEGARAID_VERSION "2.20.4.7" 24#define MEGARAID_VERSION "2.20.4.8"
25#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)" 25#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)"
26 26
27 27
28/* 28/*
@@ -100,6 +100,9 @@
100#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox 100#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox
101#define MBOX_RESET_WAIT 180 // wait these many seconds in reset 101#define MBOX_RESET_WAIT 180 // wait these many seconds in reset
102#define MBOX_RESET_EXT_WAIT 120 // extended wait reset 102#define MBOX_RESET_EXT_WAIT 120 // extended wait reset
103#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode
104
105#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds
103 106
104/* 107/*
105 * maximum transfer that can happen through the firmware commands issued 108 * maximum transfer that can happen through the firmware commands issued
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 8f3ce0432295..e8f534fb336b 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -898,10 +898,8 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
898 898
899 adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); 899 adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
900 900
901 if (!adapter) { 901 if (!adapter)
902 rval = -ENOMEM; 902 return -ENOMEM;
903 goto memalloc_error;
904 }
905 903
906 memset(adapter, 0, sizeof(mraid_mmadp_t)); 904 memset(adapter, 0, sizeof(mraid_mmadp_t));
907 905
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 017729c59a49..584fe5d8e507 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -599,6 +599,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
599* Either SUCCESS or FAILED. 599* Either SUCCESS or FAILED.
600* 600*
601* Note: 601* Note:
602* Only return FAILED if command not returned by firmware.
602**************************************************************************/ 603**************************************************************************/
603int 604int
604qla2xxx_eh_abort(struct scsi_cmnd *cmd) 605qla2xxx_eh_abort(struct scsi_cmnd *cmd)
@@ -609,11 +610,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
609 unsigned int id, lun; 610 unsigned int id, lun;
610 unsigned long serial; 611 unsigned long serial;
611 unsigned long flags; 612 unsigned long flags;
613 int wait = 0;
612 614
613 if (!CMD_SP(cmd)) 615 if (!CMD_SP(cmd))
614 return FAILED; 616 return SUCCESS;
615 617
616 ret = FAILED; 618 ret = SUCCESS;
617 619
618 id = cmd->device->id; 620 id = cmd->device->id;
619 lun = cmd->device->lun; 621 lun = cmd->device->lun;
@@ -642,7 +644,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
642 } else { 644 } else {
643 DEBUG3(printk("%s(%ld): abort_command " 645 DEBUG3(printk("%s(%ld): abort_command "
644 "mbx success.\n", __func__, ha->host_no)); 646 "mbx success.\n", __func__, ha->host_no));
645 ret = SUCCESS; 647 wait = 1;
646 } 648 }
647 spin_lock_irqsave(&ha->hardware_lock, flags); 649 spin_lock_irqsave(&ha->hardware_lock, flags);
648 650
@@ -651,17 +653,18 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
651 spin_unlock_irqrestore(&ha->hardware_lock, flags); 653 spin_unlock_irqrestore(&ha->hardware_lock, flags);
652 654
653 /* Wait for the command to be returned. */ 655 /* Wait for the command to be returned. */
654 if (ret == SUCCESS) { 656 if (wait) {
655 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 657 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
656 qla_printk(KERN_ERR, ha, 658 qla_printk(KERN_ERR, ha,
657 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 659 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
658 "%x.\n", ha->host_no, id, lun, serial, ret); 660 "%x.\n", ha->host_no, id, lun, serial, ret);
661 ret = FAILED;
659 } 662 }
660 } 663 }
661 664
662 qla_printk(KERN_INFO, ha, 665 qla_printk(KERN_INFO, ha,
663 "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no, 666 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
664 id, lun, serial, ret); 667 ha->host_no, id, lun, wait, serial, ret);
665 668
666 return ret; 669 return ret;
667} 670}
@@ -1700,8 +1703,8 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1700 ha->flags.online = 0; 1703 ha->flags.online = 0;
1701 1704
1702 /* Detach interrupts */ 1705 /* Detach interrupts */
1703 if (ha->pdev->irq) 1706 if (ha->host->irq)
1704 free_irq(ha->pdev->irq, ha); 1707 free_irq(ha->host->irq, ha);
1705 1708
1706 /* release io space registers */ 1709 /* release io space registers */
1707 if (ha->iobase) 1710 if (ha->iobase)
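
qla2xxx_eh_abort() is reworked so that FAILED is returned only when the command was never given back by the firmware: ret starts as SUCCESS, a separate wait flag records whether the abort mailbox succeeded, and only a timed-out wait downgrades the result. A condensed fragment of that logic; the mailbox call itself is elided, and abort_mbx_ok stands in for "the abort mailbox command succeeded" rather than being a qla2xxx symbol.

int ret = SUCCESS;      /* FAILED is now reserved for "firmware kept the command" */
int wait = 0;

if (!CMD_SP(cmd))
        return SUCCESS; /* command already completed: nothing to abort */

if (abort_mbx_ok)
        wait = 1;       /* abort accepted: expect the command back */

if (wait && qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS)
        ret = FAILED;   /* the firmware never returned the command */

return ret;
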
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c750d3399a97..941c1e15c899 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -56,6 +56,8 @@ static struct {
56 {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ 56 {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */
57 {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ 57 {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */
58 {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ 58 {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */
59 {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */
60 {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */
59 {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ 61 {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */
60 {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ 62 {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */
61 {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ 63 {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7b0f9a3810d2..764a8b375ead 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1067,16 +1067,29 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
1067 break; 1067 break;
1068 case NOT_READY: 1068 case NOT_READY:
1069 /* 1069 /*
1070 * If the device is in the process of becoming ready, 1070 * If the device is in the process of becoming
1071 * retry. 1071 * ready, or has a temporary blockage, retry.
1072 */ 1072 */
1073 if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { 1073 if (sshdr.asc == 0x04) {
1074 scsi_requeue_command(q, cmd); 1074 switch (sshdr.ascq) {
1075 return; 1075 case 0x01: /* becoming ready */
1076 case 0x04: /* format in progress */
1077 case 0x05: /* rebuild in progress */
1078 case 0x06: /* recalculation in progress */
1079 case 0x07: /* operation in progress */
1080 case 0x08: /* Long write in progress */
1081 case 0x09: /* self test in progress */
1082 scsi_requeue_command(q, cmd);
1083 return;
1084 default:
1085 break;
1086 }
1076 } 1087 }
1077 if (!(req->flags & REQ_QUIET)) 1088 if (!(req->flags & REQ_QUIET)) {
1078 scmd_printk(KERN_INFO, cmd, 1089 scmd_printk(KERN_INFO, cmd,
1079 "Device not ready.\n"); 1090 "Device not ready: ");
1091 scsi_print_sense_hdr("", &sshdr);
1092 }
1080 scsi_end_request(cmd, 0, this_count, 1); 1093 scsi_end_request(cmd, 0, this_count, 1);
1081 return; 1094 return;
1082 case VOLUME_OVERFLOW: 1095 case VOLUME_OVERFLOW:
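The scsi_lib.c hunk widens the NOT_READY handling: any sense with ASC 0x04 ("logical unit not ready") whose ASCQ names a transient condition (becoming ready, format, rebuild, self test, and so on) is requeued instead of failed, and the non-retryable cases now also print the sense header. A standalone sketch of that retry decision follows; should_retry_not_ready() is a hypothetical helper, not the kernel's scsi_io_completion() path.

#include <stdbool.h>
#include <stdio.h>

/* ASC 0x04: logical unit not ready; the ASCQ selects the reason. */
static bool should_retry_not_ready(unsigned char asc, unsigned char ascq)
{
        if (asc != 0x04)
                return false;

        switch (ascq) {
        case 0x01:      /* becoming ready */
        case 0x04:      /* format in progress */
        case 0x05:      /* rebuild in progress */
        case 0x06:      /* recalculation in progress */
        case 0x07:      /* operation in progress */
        case 0x08:      /* long write in progress */
        case 0x09:      /* self test in progress */
                return true;    /* transient: requeue and retry later */
        default:
                return false;   /* not transient: fail the request */
        }
}

int main(void)
{
        printf("04/01 retry: %d\n", should_retry_not_ready(0x04, 0x01));
        printf("04/02 retry: %d\n", should_retry_not_ready(0x04, 0x02));
        return 0;
}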
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 3274ab76c8d3..255886a9ac55 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -75,7 +75,7 @@ param_setup(char *str)
75 else if(!strncmp(pos, "id:", 3)) { 75 else if(!strncmp(pos, "id:", 3)) {
76 if(slot == -1) { 76 if(slot == -1) {
77 printk(KERN_WARNING "sim710: Must specify slot for id parameter\n"); 77 printk(KERN_WARNING "sim710: Must specify slot for id parameter\n");
78 } else if(slot > MAX_SLOTS) { 78 } else if(slot >= MAX_SLOTS) {
79 printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val); 79 printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val);
80 } else { 80 } else {
81 id_array[slot] = val; 81 id_array[slot] = val;
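The sim710 change replaces the check slot > MAX_SLOTS with slot >= MAX_SLOTS, closing an off-by-one that allowed writing one element past id_array[]. A tiny sketch of the corrected bounds check, with MAX_SLOTS and id_array as stand-ins rather than the driver's own definitions:

#include <stdio.h>

#define MAX_SLOTS 8

static int id_array[MAX_SLOTS];

/* Valid indices are 0 .. MAX_SLOTS - 1, so reject slot >= MAX_SLOTS. */
static int set_slot_id(int slot, int val)
{
        if (slot < 0 || slot >= MAX_SLOTS) {
                fprintf(stderr, "illegal slot %d for id %d\n", slot, val);
                return -1;
        }
        id_array[slot] = val;
        return 0;
}

int main(void)
{
        set_slot_id(MAX_SLOTS, 7);      /* rejected: one past the end */
        set_slot_id(MAX_SLOTS - 1, 7);  /* accepted: last valid slot */
        return 0;
}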
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 674b15c78f68..bbf78aaf9e01 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -362,6 +362,40 @@ serial_out(struct uart_8250_port *up, int offset, int value)
362#define serial_inp(up, offset) serial_in(up, offset) 362#define serial_inp(up, offset) serial_in(up, offset)
363#define serial_outp(up, offset, value) serial_out(up, offset, value) 363#define serial_outp(up, offset, value) serial_out(up, offset, value)
364 364
365/* Uart divisor latch read */
366static inline int _serial_dl_read(struct uart_8250_port *up)
367{
368 return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8;
369}
370
371/* Uart divisor latch write */
372static inline void _serial_dl_write(struct uart_8250_port *up, int value)
373{
374 serial_outp(up, UART_DLL, value & 0xff);
375 serial_outp(up, UART_DLM, value >> 8 & 0xff);
376}
377
378#ifdef CONFIG_SERIAL_8250_AU1X00
 379/* The Au1x00 hasn't got a standard divisor latch */
380static int serial_dl_read(struct uart_8250_port *up)
381{
382 if (up->port.iotype == UPIO_AU)
383 return __raw_readl(up->port.membase + 0x28);
384 else
385 return _serial_dl_read(up);
386}
387
388static void serial_dl_write(struct uart_8250_port *up, int value)
389{
390 if (up->port.iotype == UPIO_AU)
391 __raw_writel(value, up->port.membase + 0x28);
392 else
393 _serial_dl_write(up, value);
394}
395#else
396#define serial_dl_read(up) _serial_dl_read(up)
397#define serial_dl_write(up, value) _serial_dl_write(up, value)
398#endif
365 399
366/* 400/*
367 * For the 16C950 401 * For the 16C950
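The new divisor-latch helpers hide the 16-bit baud divisor behind serial_dl_read()/serial_dl_write(), so the Au1x00 UART, which keeps its divisor in one memory-mapped word rather than the DLL/DLM byte pair, can simply override them. A userspace sketch of the byte-pair packing against a fake register file; the register names here are illustrative, not the 8250 register map.

#include <stdio.h>

enum { REG_DLL, REG_DLM, NR_REGS };     /* illustrative register indices */

static unsigned char regs[NR_REGS];

/* Read the 16-bit divisor from its low and high byte registers. */
static int serial_dl_read(void)
{
        return regs[REG_DLL] | regs[REG_DLM] << 8;
}

/* Write a 16-bit divisor as separate low and high bytes. */
static void serial_dl_write(int value)
{
        regs[REG_DLL] = value & 0xff;
        regs[REG_DLM] = (value >> 8) & 0xff;
}

int main(void)
{
        serial_dl_write(0x1234);
        printf("DLL=%#x DLM=%#x readback=%#x\n",
               regs[REG_DLL], regs[REG_DLM], serial_dl_read());
        return 0;
}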
@@ -494,7 +528,8 @@ static void disable_rsa(struct uart_8250_port *up)
494 */ 528 */
495static int size_fifo(struct uart_8250_port *up) 529static int size_fifo(struct uart_8250_port *up)
496{ 530{
497 unsigned char old_fcr, old_mcr, old_dll, old_dlm, old_lcr; 531 unsigned char old_fcr, old_mcr, old_lcr;
532 unsigned short old_dl;
498 int count; 533 int count;
499 534
500 old_lcr = serial_inp(up, UART_LCR); 535 old_lcr = serial_inp(up, UART_LCR);
@@ -505,10 +540,8 @@ static int size_fifo(struct uart_8250_port *up)
505 UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); 540 UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
506 serial_outp(up, UART_MCR, UART_MCR_LOOP); 541 serial_outp(up, UART_MCR, UART_MCR_LOOP);
507 serial_outp(up, UART_LCR, UART_LCR_DLAB); 542 serial_outp(up, UART_LCR, UART_LCR_DLAB);
508 old_dll = serial_inp(up, UART_DLL); 543 old_dl = serial_dl_read(up);
509 old_dlm = serial_inp(up, UART_DLM); 544 serial_dl_write(up, 0x0001);
510 serial_outp(up, UART_DLL, 0x01);
511 serial_outp(up, UART_DLM, 0x00);
512 serial_outp(up, UART_LCR, 0x03); 545 serial_outp(up, UART_LCR, 0x03);
513 for (count = 0; count < 256; count++) 546 for (count = 0; count < 256; count++)
514 serial_outp(up, UART_TX, count); 547 serial_outp(up, UART_TX, count);
@@ -519,8 +552,7 @@ static int size_fifo(struct uart_8250_port *up)
519 serial_outp(up, UART_FCR, old_fcr); 552 serial_outp(up, UART_FCR, old_fcr);
520 serial_outp(up, UART_MCR, old_mcr); 553 serial_outp(up, UART_MCR, old_mcr);
521 serial_outp(up, UART_LCR, UART_LCR_DLAB); 554 serial_outp(up, UART_LCR, UART_LCR_DLAB);
522 serial_outp(up, UART_DLL, old_dll); 555 serial_dl_write(up, old_dl);
523 serial_outp(up, UART_DLM, old_dlm);
524 serial_outp(up, UART_LCR, old_lcr); 556 serial_outp(up, UART_LCR, old_lcr);
525 557
526 return count; 558 return count;
@@ -750,8 +782,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
750 782
751 serial_outp(up, UART_LCR, 0xE0); 783 serial_outp(up, UART_LCR, 0xE0);
752 784
753 quot = serial_inp(up, UART_DLM) << 8; 785 quot = serial_dl_read(up);
754 quot += serial_inp(up, UART_DLL);
755 quot <<= 3; 786 quot <<= 3;
756 787
757 status1 = serial_in(up, 0x04); /* EXCR1 */ 788 status1 = serial_in(up, 0x04); /* EXCR1 */
@@ -759,8 +790,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
759 status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */ 790 status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
760 serial_outp(up, 0x04, status1); 791 serial_outp(up, 0x04, status1);
761 792
762 serial_outp(up, UART_DLL, quot & 0xff); 793 serial_dl_write(up, quot);
763 serial_outp(up, UART_DLM, quot >> 8);
764 794
765 serial_outp(up, UART_LCR, 0); 795 serial_outp(up, UART_LCR, 0);
766 796
@@ -1862,8 +1892,7 @@ serial8250_set_termios(struct uart_port *port, struct termios *termios,
1862 serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */ 1892 serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
1863 } 1893 }
1864 1894
1865 serial_outp(up, UART_DLL, quot & 0xff); /* LS of divisor */ 1895 serial_dl_write(up, quot);
1866 serial_outp(up, UART_DLM, quot >> 8); /* MS of divisor */
1867 1896
1868 /* 1897 /*
1869 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR 1898 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
@@ -1906,6 +1935,9 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
1906 int ret = 0; 1935 int ret = 0;
1907 1936
1908 switch (up->port.iotype) { 1937 switch (up->port.iotype) {
1938 case UPIO_AU:
1939 size = 0x100000;
1940 /* fall thru */
1909 case UPIO_MEM: 1941 case UPIO_MEM:
1910 if (!up->port.mapbase) 1942 if (!up->port.mapbase)
1911 break; 1943 break;
@@ -1938,6 +1970,9 @@ static void serial8250_release_std_resource(struct uart_8250_port *up)
1938 unsigned int size = 8 << up->port.regshift; 1970 unsigned int size = 8 << up->port.regshift;
1939 1971
1940 switch (up->port.iotype) { 1972 switch (up->port.iotype) {
1973 case UPIO_AU:
1974 size = 0x100000;
1975 /* fall thru */
1941 case UPIO_MEM: 1976 case UPIO_MEM:
1942 if (!up->port.mapbase) 1977 if (!up->port.mapbase)
1943 break; 1978 break;
@@ -2200,10 +2235,17 @@ static void
2200serial8250_console_write(struct console *co, const char *s, unsigned int count) 2235serial8250_console_write(struct console *co, const char *s, unsigned int count)
2201{ 2236{
2202 struct uart_8250_port *up = &serial8250_ports[co->index]; 2237 struct uart_8250_port *up = &serial8250_ports[co->index];
2238 unsigned long flags;
2203 unsigned int ier; 2239 unsigned int ier;
2240 int locked = 1;
2204 2241
2205 touch_nmi_watchdog(); 2242 touch_nmi_watchdog();
2206 2243
2244 if (oops_in_progress) {
2245 locked = spin_trylock_irqsave(&up->port.lock, flags);
2246 } else
2247 spin_lock_irqsave(&up->port.lock, flags);
2248
2207 /* 2249 /*
2208 * First save the IER then disable the interrupts 2250 * First save the IER then disable the interrupts
2209 */ 2251 */
@@ -2221,8 +2263,10 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
2221 * and restore the IER 2263 * and restore the IER
2222 */ 2264 */
2223 wait_for_xmitr(up, BOTH_EMPTY); 2265 wait_for_xmitr(up, BOTH_EMPTY);
2224 up->ier |= UART_IER_THRI; 2266 serial_out(up, UART_IER, ier);
2225 serial_out(up, UART_IER, ier | UART_IER_THRI); 2267
2268 if (locked)
2269 spin_unlock_irqrestore(&up->port.lock, flags);
2226} 2270}
2227 2271
2228static int serial8250_console_setup(struct console *co, char *options) 2272static int serial8250_console_setup(struct console *co, char *options)
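serial8250_console_write() now takes the port lock around the IER save/restore, but only try-locks when an oops is in progress, so a crash from a path that already holds the lock still gets its message out instead of deadlocking. A userspace analogue of that pattern using POSIX mutexes in place of spin_trylock_irqsave(); the console_write() and oops_in_progress handling below are simplified for illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static int oops_in_progress;    /* set when the system is already dying */

static void console_write(const char *s)
{
        int locked = 1;

        if (oops_in_progress)
                /* Don't deadlock on a lock the oops path may already hold. */
                locked = (pthread_mutex_trylock(&port_lock) == 0);
        else
                pthread_mutex_lock(&port_lock);

        /* ... disable interrupts, push characters, restore IER ... */
        printf("console: %s\n", s);

        if (locked)
                pthread_mutex_unlock(&port_lock);
}

int main(void)
{
        console_write("normal message");
        oops_in_progress = 1;
        console_write("message printed while oopsing");
        return 0;
}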
diff --git a/drivers/serial/8250_au1x00.c b/drivers/serial/8250_au1x00.c
index 3d1bfd07208d..58015fd14be9 100644
--- a/drivers/serial/8250_au1x00.c
+++ b/drivers/serial/8250_au1x00.c
@@ -30,13 +30,12 @@
30 { \ 30 { \
31 .iobase = _base, \ 31 .iobase = _base, \
32 .membase = (void __iomem *)_base,\ 32 .membase = (void __iomem *)_base,\
33 .mapbase = _base, \ 33 .mapbase = CPHYSADDR(_base), \
34 .irq = _irq, \ 34 .irq = _irq, \
35 .uartclk = 0, /* filled */ \ 35 .uartclk = 0, /* filled */ \
36 .regshift = 2, \ 36 .regshift = 2, \
37 .iotype = UPIO_AU, \ 37 .iotype = UPIO_AU, \
38 .flags = UPF_SKIP_TEST | \ 38 .flags = UPF_SKIP_TEST \
39 UPF_IOREMAP, \
40 } 39 }
41 40
42static struct plat_serial8250_port au1x00_data[] = { 41static struct plat_serial8250_port au1x00_data[] = {
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/serial/cpm_uart/cpm_uart.h
index 73c8a088c160..3b35cb779539 100644
--- a/drivers/serial/cpm_uart/cpm_uart.h
+++ b/drivers/serial/cpm_uart/cpm_uart.h
@@ -5,11 +5,20 @@
5 * 5 *
6 * Copyright (C) 2004 Freescale Semiconductor, Inc. 6 * Copyright (C) 2004 Freescale Semiconductor, Inc.
7 * 7 *
8 * 2006 (c) MontaVista Software, Inc.
9 * Vitaly Bordug <vbordug@ru.mvista.com>
10 *
11 * This file is licensed under the terms of the GNU General Public License
12 * version 2. This program is licensed "as is" without any warranty of any
13 * kind, whether express or implied.
14 *
8 */ 15 */
9#ifndef CPM_UART_H 16#ifndef CPM_UART_H
10#define CPM_UART_H 17#define CPM_UART_H
11 18
12#include <linux/config.h> 19#include <linux/config.h>
20#include <linux/platform_device.h>
21#include <linux/fs_uart_pd.h>
13 22
14#if defined(CONFIG_CPM2) 23#if defined(CONFIG_CPM2)
15#include "cpm_uart_cpm2.h" 24#include "cpm_uart_cpm2.h"
@@ -26,14 +35,14 @@
26#define FLAG_SMC 0x00000002 35#define FLAG_SMC 0x00000002
27#define FLAG_CONSOLE 0x00000001 36#define FLAG_CONSOLE 0x00000001
28 37
29#define UART_SMC1 0 38#define UART_SMC1 fsid_smc1_uart
30#define UART_SMC2 1 39#define UART_SMC2 fsid_smc2_uart
31#define UART_SCC1 2 40#define UART_SCC1 fsid_scc1_uart
32#define UART_SCC2 3 41#define UART_SCC2 fsid_scc2_uart
33#define UART_SCC3 4 42#define UART_SCC3 fsid_scc3_uart
34#define UART_SCC4 5 43#define UART_SCC4 fsid_scc4_uart
35 44
36#define UART_NR 6 45#define UART_NR fs_uart_nr
37 46
38#define RX_NUM_FIFO 4 47#define RX_NUM_FIFO 4
39#define RX_BUF_SIZE 32 48#define RX_BUF_SIZE 32
@@ -64,6 +73,7 @@ struct uart_cpm_port {
64 uint dp_addr; 73 uint dp_addr;
65 void *mem_addr; 74 void *mem_addr;
66 dma_addr_t dma_addr; 75 dma_addr_t dma_addr;
76 u32 mem_size;
67 /* helpers */ 77 /* helpers */
68 int baud; 78 int baud;
69 int bits; 79 int bits;
@@ -90,4 +100,38 @@ void scc2_lineif(struct uart_cpm_port *pinfo);
90void scc3_lineif(struct uart_cpm_port *pinfo); 100void scc3_lineif(struct uart_cpm_port *pinfo);
91void scc4_lineif(struct uart_cpm_port *pinfo); 101void scc4_lineif(struct uart_cpm_port *pinfo);
92 102
103/*
 104 virtual to physical translation
105*/
106static inline unsigned long cpu2cpm_addr(void* addr, struct uart_cpm_port *pinfo)
107{
108 int offset;
109 u32 val = (u32)addr;
 110 /* sanity check */
111 if (likely((val >= (u32)pinfo->mem_addr)) &&
112 (val<((u32)pinfo->mem_addr + pinfo->mem_size))) {
113 offset = val - (u32)pinfo->mem_addr;
114 return pinfo->dma_addr+offset;
115 }
116 /* something nasty happened */
117 BUG();
118 return 0;
119}
120
121static inline void *cpm2cpu_addr(unsigned long addr, struct uart_cpm_port *pinfo)
122{
123 int offset;
124 u32 val = addr;
 125 /* sanity check */
126 if (likely((val >= pinfo->dma_addr) &&
127 (val<(pinfo->dma_addr + pinfo->mem_size)))) {
128 offset = val - (u32)pinfo->dma_addr;
129 return (void*)(pinfo->mem_addr+offset);
130 }
131 /* something nasty happened */
132 BUG();
133 return 0;
134}
135
136
93#endif /* CPM_UART_H */ 137#endif /* CPM_UART_H */
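cpu2cpm_addr() and cpm2cpu_addr() now translate between the driver's virtual buffer and the address the CPM sees by taking an offset inside the port's known buffer window, with a range check, in place of the old global virt_to_bus() conversion. A self-contained sketch of that offset-based translation; the field names echo the patch, but the buffer below is plain heap memory and the helper names are hypothetical.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct uart_buf {
        unsigned char *mem_addr;        /* virtual base of the buffer */
        uintptr_t dma_addr;             /* what the device would see */
        size_t mem_size;
};

/* Virtual -> device address: only valid inside the managed window. */
static uintptr_t cpu2dev_addr(void *addr, struct uart_buf *b)
{
        uintptr_t va = (uintptr_t)addr;

        assert(va >= (uintptr_t)b->mem_addr &&
               va < (uintptr_t)b->mem_addr + b->mem_size);
        return b->dma_addr + (va - (uintptr_t)b->mem_addr);
}

/* Device -> virtual address, the inverse mapping. */
static void *dev2cpu_addr(uintptr_t addr, struct uart_buf *b)
{
        assert(addr >= b->dma_addr && addr < b->dma_addr + b->mem_size);
        return b->mem_addr + (addr - b->dma_addr);
}

int main(void)
{
        struct uart_buf b;
        uintptr_t d;

        b.mem_size = 256;
        b.mem_addr = malloc(b.mem_size);
        b.dma_addr = 0x10000000;        /* pretend bus address */

        d = cpu2dev_addr(b.mem_addr + 32, &b);
        printf("dev addr %#lx maps back ok: %d\n", (unsigned long)d,
               dev2cpu_addr(d, &b) == b.mem_addr + 32);
        free(b.mem_addr);
        return 0;
}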
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index b7bf4c698a47..969f94900431 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -12,7 +12,8 @@
12 * 12 *
13 * Copyright (C) 2004 Freescale Semiconductor, Inc. 13 * Copyright (C) 2004 Freescale Semiconductor, Inc.
14 * (C) 2004 Intracom, S.A. 14 * (C) 2004 Intracom, S.A.
15 * (C) 2005 MontaVista Software, Inc. by Vitaly Bordug <vbordug@ru.mvista.com> 15 * (C) 2005-2006 MontaVista Software, Inc.
16 * Vitaly Bordug <vbordug@ru.mvista.com>
16 * 17 *
17 * This program is free software; you can redistribute it and/or modify 18 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by 19 * it under the terms of the GNU General Public License as published by
@@ -41,6 +42,7 @@
41#include <linux/device.h> 42#include <linux/device.h>
42#include <linux/bootmem.h> 43#include <linux/bootmem.h>
43#include <linux/dma-mapping.h> 44#include <linux/dma-mapping.h>
45#include <linux/fs_uart_pd.h>
44 46
45#include <asm/io.h> 47#include <asm/io.h>
46#include <asm/irq.h> 48#include <asm/irq.h>
@@ -60,7 +62,7 @@
60/* Track which ports are configured as uarts */ 62/* Track which ports are configured as uarts */
61int cpm_uart_port_map[UART_NR]; 63int cpm_uart_port_map[UART_NR];
62/* How many ports did we config as uarts */ 64/* How many ports did we config as uarts */
63int cpm_uart_nr; 65int cpm_uart_nr = 0;
64 66
65/**************************************************************/ 67/**************************************************************/
66 68
@@ -71,18 +73,51 @@ static void cpm_uart_initbd(struct uart_cpm_port *pinfo);
71 73
72/**************************************************************/ 74/**************************************************************/
73 75
74static inline unsigned long cpu2cpm_addr(void *addr) 76
77/* Place-holder for board-specific stuff */
78struct platform_device* __attribute__ ((weak)) __init
79early_uart_get_pdev(int index)
80{
81 return NULL;
82}
83
84
85static void cpm_uart_count(void)
75{ 86{
76 if ((unsigned long)addr >= CPM_ADDR) 87 cpm_uart_nr = 0;
77 return (unsigned long)addr; 88#ifdef CONFIG_SERIAL_CPM_SMC1
78 return virt_to_bus(addr); 89 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1;
90#endif
91#ifdef CONFIG_SERIAL_CPM_SMC2
92 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2;
93#endif
94#ifdef CONFIG_SERIAL_CPM_SCC1
95 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1;
96#endif
97#ifdef CONFIG_SERIAL_CPM_SCC2
98 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2;
99#endif
100#ifdef CONFIG_SERIAL_CPM_SCC3
101 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3;
102#endif
103#ifdef CONFIG_SERIAL_CPM_SCC4
104 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4;
105#endif
79} 106}
80 107
81static inline void *cpm2cpu_addr(unsigned long addr) 108/* Get UART number by its id */
109static int cpm_uart_id2nr(int id)
82{ 110{
83 if (addr >= CPM_ADDR) 111 int i;
84 return (void *)addr; 112 if (id < UART_NR) {
85 return bus_to_virt(addr); 113 for (i=0; i<UART_NR; i++) {
114 if (cpm_uart_port_map[i] == id)
115 return i;
116 }
117 }
118
119 /* not found or invalid argument */
120 return -1;
86} 121}
87 122
88/* 123/*
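cpm_uart_count() builds the port map at runtime from the enabled SMC/SCC config options, and cpm_uart_id2nr() performs the reverse lookup from a fixed device id to its slot in cpm_uart_port_map[]. A trivial sketch of that reverse mapping over a small array; the ids and map contents are made up for the example.

#include <stdio.h>

#define UART_NR 6

/* port_map[i] holds the device id configured at slot i; -1 means unused. */
static int port_map[UART_NR] = { 0, 2, 3, -1, -1, -1 };

/* Return the slot index for a device id, or -1 if it is not registered. */
static int id2nr(int id)
{
        int i;

        if (id < 0 || id >= UART_NR)
                return -1;
        for (i = 0; i < UART_NR; i++)
                if (port_map[i] == id)
                        return i;
        return -1;
}

int main(void)
{
        printf("id 3 -> slot %d\n", id2nr(3));
        printf("id 5 -> slot %d\n", id2nr(5));
        return 0;
}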
@@ -258,7 +293,7 @@ static void cpm_uart_int_rx(struct uart_port *port, struct pt_regs *regs)
258 } 293 }
259 294
260 /* get pointer */ 295 /* get pointer */
261 cp = cpm2cpu_addr(bdp->cbd_bufaddr); 296 cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
262 297
263 /* loop through the buffer */ 298 /* loop through the buffer */
264 while (i-- > 0) { 299 while (i-- > 0) {
@@ -438,7 +473,11 @@ static void cpm_uart_shutdown(struct uart_port *port)
438 } 473 }
439 474
440 /* Shut them really down and reinit buffer descriptors */ 475 /* Shut them really down and reinit buffer descriptors */
441 cpm_line_cr_cmd(line, CPM_CR_STOP_TX); 476 if (IS_SMC(pinfo))
477 cpm_line_cr_cmd(line, CPM_CR_STOP_TX);
478 else
479 cpm_line_cr_cmd(line, CPM_CR_GRA_STOP_TX);
480
442 cpm_uart_initbd(pinfo); 481 cpm_uart_initbd(pinfo);
443 } 482 }
444} 483}
@@ -601,7 +640,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
601 /* Pick next descriptor and fill from buffer */ 640 /* Pick next descriptor and fill from buffer */
602 bdp = pinfo->tx_cur; 641 bdp = pinfo->tx_cur;
603 642
604 p = cpm2cpu_addr(bdp->cbd_bufaddr); 643 p = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
605 644
606 *p++ = port->x_char; 645 *p++ = port->x_char;
607 bdp->cbd_datlen = 1; 646 bdp->cbd_datlen = 1;
@@ -628,7 +667,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
628 667
629 while (!(bdp->cbd_sc & BD_SC_READY) && (xmit->tail != xmit->head)) { 668 while (!(bdp->cbd_sc & BD_SC_READY) && (xmit->tail != xmit->head)) {
630 count = 0; 669 count = 0;
631 p = cpm2cpu_addr(bdp->cbd_bufaddr); 670 p = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
632 while (count < pinfo->tx_fifosize) { 671 while (count < pinfo->tx_fifosize) {
633 *p++ = xmit->buf[xmit->tail]; 672 *p++ = xmit->buf[xmit->tail];
634 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 673 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -677,12 +716,12 @@ static void cpm_uart_initbd(struct uart_cpm_port *pinfo)
677 mem_addr = pinfo->mem_addr; 716 mem_addr = pinfo->mem_addr;
678 bdp = pinfo->rx_cur = pinfo->rx_bd_base; 717 bdp = pinfo->rx_cur = pinfo->rx_bd_base;
679 for (i = 0; i < (pinfo->rx_nrfifos - 1); i++, bdp++) { 718 for (i = 0; i < (pinfo->rx_nrfifos - 1); i++, bdp++) {
680 bdp->cbd_bufaddr = cpu2cpm_addr(mem_addr); 719 bdp->cbd_bufaddr = cpu2cpm_addr(mem_addr, pinfo);
681 bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT; 720 bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT;
682 mem_addr += pinfo->rx_fifosize; 721 mem_addr += pinfo->rx_fifosize;
683 } 722 }
684 723
685 bdp->cbd_bufaddr = cpu2cpm_addr(mem_addr); 724 bdp->cbd_bufaddr = cpu2cpm_addr(mem_addr, pinfo);
686 bdp->cbd_sc = BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT; 725 bdp->cbd_sc = BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT;
687 726
688 /* Set the physical address of the host memory 727 /* Set the physical address of the host memory
@@ -692,12 +731,12 @@ static void cpm_uart_initbd(struct uart_cpm_port *pinfo)
692 mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize); 731 mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize);
693 bdp = pinfo->tx_cur = pinfo->tx_bd_base; 732 bdp = pinfo->tx_cur = pinfo->tx_bd_base;
694 for (i = 0; i < (pinfo->tx_nrfifos - 1); i++, bdp++) { 733 for (i = 0; i < (pinfo->tx_nrfifos - 1); i++, bdp++) {
695 bdp->cbd_bufaddr = cpu2cpm_addr(mem_addr); 734 bdp->cbd_bufaddr = cpu2cpm_addr(mem_addr, pinfo);
696 bdp->cbd_sc = BD_SC_INTRPT; 735 bdp->cbd_sc = BD_SC_INTRPT;
697 mem_addr += pinfo->tx_fifosize; 736 mem_addr += pinfo->tx_fifosize;
698 } 737 }
699 738
700 bdp->cbd_bufaddr = cpu2cpm_addr(mem_addr); 739 bdp->cbd_bufaddr = cpu2cpm_addr(mem_addr, pinfo);
701 bdp->cbd_sc = BD_SC_WRAP | BD_SC_INTRPT; 740 bdp->cbd_sc = BD_SC_WRAP | BD_SC_INTRPT;
702} 741}
703 742
@@ -829,14 +868,6 @@ static int cpm_uart_request_port(struct uart_port *port)
829 if (pinfo->flags & FLAG_CONSOLE) 868 if (pinfo->flags & FLAG_CONSOLE)
830 return 0; 869 return 0;
831 870
832 /*
833 * Setup any port IO, connect any baud rate generators,
834 * etc. This is expected to be handled by board
835 * dependant code
836 */
837 if (pinfo->set_lineif)
838 pinfo->set_lineif(pinfo);
839
840 if (IS_SMC(pinfo)) { 871 if (IS_SMC(pinfo)) {
841 pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX); 872 pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
842 pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); 873 pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
@@ -988,6 +1019,58 @@ struct uart_cpm_port cpm_uart_ports[UART_NR] = {
988 }, 1019 },
989}; 1020};
990 1021
1022int cpm_uart_drv_get_platform_data(struct platform_device *pdev, int is_con)
1023{
1024 struct resource *r;
1025 struct fs_uart_platform_info *pdata = pdev->dev.platform_data;
1026 int idx = pdata->fs_no; /* It is UART_SMCx or UART_SCCx index */
1027 struct uart_cpm_port *pinfo;
1028 int line;
1029 u32 mem, pram;
1030
1031 line = cpm_uart_id2nr(idx);
1032 if(line < 0) {
1033 printk(KERN_ERR"%s(): port %d is not registered", __FUNCTION__, idx);
1034 return -1;
1035 }
1036
1037 pinfo = (struct uart_cpm_port *) &cpm_uart_ports[idx];
1038
1039 pinfo->brg = pdata->brg;
1040
1041 if (!is_con) {
1042 pinfo->port.line = line;
1043 pinfo->port.flags = UPF_BOOT_AUTOCONF;
1044 }
1045
1046 if (!(r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs")))
1047 return -EINVAL;
1048 mem = r->start;
1049
1050 if (!(r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram")))
1051 return -EINVAL;
1052 pram = r->start;
1053
1054 if(idx > fsid_smc2_uart) {
1055 pinfo->sccp = (scc_t *)mem;
1056 pinfo->sccup = (scc_uart_t *)pram;
1057 } else {
1058 pinfo->smcp = (smc_t *)mem;
1059 pinfo->smcup = (smc_uart_t *)pram;
1060 }
1061 pinfo->tx_nrfifos = pdata->tx_num_fifo;
1062 pinfo->tx_fifosize = pdata->tx_buf_size;
1063
1064 pinfo->rx_nrfifos = pdata->rx_num_fifo;
1065 pinfo->rx_fifosize = pdata->rx_buf_size;
1066
1067 pinfo->port.uartclk = pdata->uart_clk;
1068 pinfo->port.mapbase = (unsigned long)mem;
1069 pinfo->port.irq = platform_get_irq(pdev, 0);
1070
1071 return 0;
1072}
1073
991#ifdef CONFIG_SERIAL_CPM_CONSOLE 1074#ifdef CONFIG_SERIAL_CPM_CONSOLE
992/* 1075/*
993 * Print a string to the serial port trying not to disturb 1076 * Print a string to the serial port trying not to disturb
@@ -1027,7 +1110,7 @@ static void cpm_uart_console_write(struct console *co, const char *s,
1027 * If the buffer address is in the CPM DPRAM, don't 1110 * If the buffer address is in the CPM DPRAM, don't
1028 * convert it. 1111 * convert it.
1029 */ 1112 */
1030 cp = cpm2cpu_addr(bdp->cbd_bufaddr); 1113 cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
1031 1114
1032 *cp = *s; 1115 *cp = *s;
1033 1116
@@ -1044,7 +1127,7 @@ static void cpm_uart_console_write(struct console *co, const char *s,
1044 while ((bdp->cbd_sc & BD_SC_READY) != 0) 1127 while ((bdp->cbd_sc & BD_SC_READY) != 0)
1045 ; 1128 ;
1046 1129
1047 cp = cpm2cpu_addr(bdp->cbd_bufaddr); 1130 cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
1048 1131
1049 *cp = 13; 1132 *cp = 13;
1050 bdp->cbd_datlen = 1; 1133 bdp->cbd_datlen = 1;
@@ -1067,9 +1150,7 @@ static void cpm_uart_console_write(struct console *co, const char *s,
1067 pinfo->tx_cur = (volatile cbd_t *) bdp; 1150 pinfo->tx_cur = (volatile cbd_t *) bdp;
1068} 1151}
1069 1152
1070/* 1153
1071 * Setup console. Be careful is called early !
1072 */
1073static int __init cpm_uart_console_setup(struct console *co, char *options) 1154static int __init cpm_uart_console_setup(struct console *co, char *options)
1074{ 1155{
1075 struct uart_port *port; 1156 struct uart_port *port;
@@ -1080,9 +1161,27 @@ static int __init cpm_uart_console_setup(struct console *co, char *options)
1080 int flow = 'n'; 1161 int flow = 'n';
1081 int ret; 1162 int ret;
1082 1163
1164 struct fs_uart_platform_info *pdata;
1165 struct platform_device* pdev = early_uart_get_pdev(co->index);
1166
1083 port = 1167 port =
1084 (struct uart_port *)&cpm_uart_ports[cpm_uart_port_map[co->index]]; 1168 (struct uart_port *)&cpm_uart_ports[cpm_uart_port_map[co->index]];
1085 pinfo = (struct uart_cpm_port *)port; 1169 pinfo = (struct uart_cpm_port *)port;
1170 if (!pdev) {
1171 pr_info("cpm_uart: console: compat mode\n");
1172 /* compatibility - will be cleaned up */
1173 cpm_uart_init_portdesc();
1174
1175 if (pinfo->set_lineif)
1176 pinfo->set_lineif(pinfo);
1177 } else {
1178 pdata = pdev->dev.platform_data;
1179 if (pdata)
1180 if (pdata->init_ioports)
1181 pdata->init_ioports();
1182
1183 cpm_uart_drv_get_platform_data(pdev, 1);
1184 }
1086 1185
1087 pinfo->flags |= FLAG_CONSOLE; 1186 pinfo->flags |= FLAG_CONSOLE;
1088 1187
@@ -1097,14 +1196,6 @@ static int __init cpm_uart_console_setup(struct console *co, char *options)
1097 baud = 9600; 1196 baud = 9600;
1098 } 1197 }
1099 1198
1100 /*
1101 * Setup any port IO, connect any baud rate generators,
1102 * etc. This is expected to be handled by board
1103 * dependant code
1104 */
1105 if (pinfo->set_lineif)
1106 pinfo->set_lineif(pinfo);
1107
1108 if (IS_SMC(pinfo)) { 1199 if (IS_SMC(pinfo)) {
1109 pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX); 1200 pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX);
1110 pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); 1201 pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
@@ -1143,11 +1234,8 @@ static struct console cpm_scc_uart_console = {
1143 1234
1144int __init cpm_uart_console_init(void) 1235int __init cpm_uart_console_init(void)
1145{ 1236{
1146 int ret = cpm_uart_init_portdesc(); 1237 register_console(&cpm_scc_uart_console);
1147 1238 return 0;
1148 if (!ret)
1149 register_console(&cpm_scc_uart_console);
1150 return ret;
1151} 1239}
1152 1240
1153console_initcall(cpm_uart_console_init); 1241console_initcall(cpm_uart_console_init);
@@ -1165,44 +1253,129 @@ static struct uart_driver cpm_reg = {
1165 .minor = SERIAL_CPM_MINOR, 1253 .minor = SERIAL_CPM_MINOR,
1166 .cons = CPM_UART_CONSOLE, 1254 .cons = CPM_UART_CONSOLE,
1167}; 1255};
1168 1256static int cpm_uart_drv_probe(struct device *dev)
1169static int __init cpm_uart_init(void)
1170{ 1257{
1171 int ret, i; 1258 struct platform_device *pdev = to_platform_device(dev);
1172 1259 struct fs_uart_platform_info *pdata;
1173 printk(KERN_INFO "Serial: CPM driver $Revision: 0.01 $\n"); 1260 int ret = -ENODEV;
1174 1261
1175#ifndef CONFIG_SERIAL_CPM_CONSOLE 1262 if(!pdev) {
1176 ret = cpm_uart_init_portdesc(); 1263 printk(KERN_ERR"CPM UART: platform data missing!\n");
1177 if (ret)
1178 return ret; 1264 return ret;
1179#endif 1265 }
1180 1266
1181 cpm_reg.nr = cpm_uart_nr; 1267 pdata = pdev->dev.platform_data;
1182 ret = uart_register_driver(&cpm_reg); 1268 pr_debug("cpm_uart_drv_probe: Adding CPM UART %d\n", cpm_uart_id2nr(pdata->fs_no));
1183 1269
1184 if (ret) 1270 if ((ret = cpm_uart_drv_get_platform_data(pdev, 0)))
1185 return ret; 1271 return ret;
1186 1272
1187 for (i = 0; i < cpm_uart_nr; i++) { 1273 if (pdata->init_ioports)
1188 int con = cpm_uart_port_map[i]; 1274 pdata->init_ioports();
1189 cpm_uart_ports[con].port.line = i;
1190 cpm_uart_ports[con].port.flags = UPF_BOOT_AUTOCONF;
1191 uart_add_one_port(&cpm_reg, &cpm_uart_ports[con].port);
1192 }
1193 1275
1194 return ret; 1276 ret = uart_add_one_port(&cpm_reg, &cpm_uart_ports[pdata->fs_no].port);
1277
1278 return ret;
1195} 1279}
1196 1280
1197static void __exit cpm_uart_exit(void) 1281static int cpm_uart_drv_remove(struct device *dev)
1198{ 1282{
1283 struct platform_device *pdev = to_platform_device(dev);
1284 struct fs_uart_platform_info *pdata = pdev->dev.platform_data;
1285
1286 pr_debug("cpm_uart_drv_remove: Removing CPM UART %d\n",
1287 cpm_uart_id2nr(pdata->fs_no));
1288
1289 uart_remove_one_port(&cpm_reg, &cpm_uart_ports[pdata->fs_no].port);
1290 return 0;
1291}
1292
1293static struct device_driver cpm_smc_uart_driver = {
1294 .name = "fsl-cpm-smc:uart",
1295 .bus = &platform_bus_type,
1296 .probe = cpm_uart_drv_probe,
1297 .remove = cpm_uart_drv_remove,
1298};
1299
1300static struct device_driver cpm_scc_uart_driver = {
1301 .name = "fsl-cpm-scc:uart",
1302 .bus = &platform_bus_type,
1303 .probe = cpm_uart_drv_probe,
1304 .remove = cpm_uart_drv_remove,
1305};
1306
1307/*
1308 This is supposed to match uart devices on platform bus,
1309 */
1310static int match_is_uart (struct device* dev, void* data)
1311{
1312 struct platform_device* pdev = container_of(dev, struct platform_device, dev);
1313 int ret = 0;
1314 /* this was setfunc as uart */
1315 if(strstr(pdev->name,":uart")) {
1316 ret = 1;
1317 }
1318 return ret;
1319}
1320
1321
1322static int cpm_uart_init(void) {
1323
1324 int ret;
1199 int i; 1325 int i;
1326 struct device *dev;
1327 printk(KERN_INFO "Serial: CPM driver $Revision: 0.02 $\n");
1328
1329 /* lookup the bus for uart devices */
1330 dev = bus_find_device(&platform_bus_type, NULL, 0, match_is_uart);
1331
1332 /* There are devices on the bus - all should be OK */
1333 if (dev) {
1334 cpm_uart_count();
1335 cpm_reg.nr = cpm_uart_nr;
1336
1337 if (!(ret = uart_register_driver(&cpm_reg))) {
1338 if ((ret = driver_register(&cpm_smc_uart_driver))) {
1339 uart_unregister_driver(&cpm_reg);
1340 return ret;
1341 }
1342 if ((ret = driver_register(&cpm_scc_uart_driver))) {
1343 driver_unregister(&cpm_scc_uart_driver);
1344 uart_unregister_driver(&cpm_reg);
1345 }
1346 }
1347 } else {
1348 /* No capable platform devices found - falling back to legacy mode */
1349 pr_info("cpm_uart: WARNING: no UART devices found on platform bus!\n");
1350 pr_info(
1351 "cpm_uart: the driver will guess configuration, but this mode is no longer supported.\n");
1352#ifndef CONFIG_SERIAL_CPM_CONSOLE
1353 ret = cpm_uart_init_portdesc();
1354 if (ret)
1355 return ret;
1356#endif
1357
1358 cpm_reg.nr = cpm_uart_nr;
1359 ret = uart_register_driver(&cpm_reg);
1360
1361 if (ret)
1362 return ret;
1363
1364 for (i = 0; i < cpm_uart_nr; i++) {
1365 int con = cpm_uart_port_map[i];
1366 cpm_uart_ports[con].port.line = i;
1367 cpm_uart_ports[con].port.flags = UPF_BOOT_AUTOCONF;
1368 uart_add_one_port(&cpm_reg, &cpm_uart_ports[con].port);
1369 }
1200 1370
1201 for (i = 0; i < cpm_uart_nr; i++) {
1202 int con = cpm_uart_port_map[i];
1203 uart_remove_one_port(&cpm_reg, &cpm_uart_ports[con].port);
1204 } 1371 }
1372 return ret;
1373}
1205 1374
1375static void __exit cpm_uart_exit(void)
1376{
1377 driver_unregister(&cpm_scc_uart_driver);
1378 driver_unregister(&cpm_smc_uart_driver);
1206 uart_unregister_driver(&cpm_reg); 1379 uart_unregister_driver(&cpm_reg);
1207} 1380}
1208 1381
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
index d789ee55cbb7..17406a05ce1f 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
@@ -8,6 +8,8 @@
8 * 8 *
9 * Copyright (C) 2004 Freescale Semiconductor, Inc. 9 * Copyright (C) 2004 Freescale Semiconductor, Inc.
10 * (C) 2004 Intracom, S.A. 10 * (C) 2004 Intracom, S.A.
11 * (C) 2006 MontaVista Software, Inc.
12 * Vitaly Bordug <vbordug@ru.mvista.com>
11 * 13 *
12 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
@@ -81,58 +83,11 @@ void cpm_line_cr_cmd(int line, int cmd)
81 83
82void smc1_lineif(struct uart_cpm_port *pinfo) 84void smc1_lineif(struct uart_cpm_port *pinfo)
83{ 85{
84 volatile cpm8xx_t *cp = cpmp;
85
86 (void)cp; /* fix warning */
87#if defined (CONFIG_MPC885ADS)
88 /* Enable SMC1 transceivers */
89 {
90 cp->cp_pepar |= 0x000000c0;
91 cp->cp_pedir &= ~0x000000c0;
92 cp->cp_peso &= ~0x00000040;
93 cp->cp_peso |= 0x00000080;
94 }
95#elif defined (CONFIG_MPC86XADS)
96 unsigned int iobits = 0x000000c0;
97
98 if (!pinfo->is_portb) {
99 cp->cp_pbpar |= iobits;
100 cp->cp_pbdir &= ~iobits;
101 cp->cp_pbodr &= ~iobits;
102 } else {
103 ((immap_t *)IMAP_ADDR)->im_ioport.iop_papar |= iobits;
104 ((immap_t *)IMAP_ADDR)->im_ioport.iop_padir &= ~iobits;
105 ((immap_t *)IMAP_ADDR)->im_ioport.iop_paodr &= ~iobits;
106 }
107#endif
108 pinfo->brg = 1; 86 pinfo->brg = 1;
109} 87}
110 88
111void smc2_lineif(struct uart_cpm_port *pinfo) 89void smc2_lineif(struct uart_cpm_port *pinfo)
112{ 90{
113 volatile cpm8xx_t *cp = cpmp;
114
115 (void)cp; /* fix warning */
116#if defined (CONFIG_MPC885ADS)
117 cp->cp_pepar |= 0x00000c00;
118 cp->cp_pedir &= ~0x00000c00;
119 cp->cp_peso &= ~0x00000400;
120 cp->cp_peso |= 0x00000800;
121#elif defined (CONFIG_MPC86XADS)
122 unsigned int iobits = 0x00000c00;
123
124 if (!pinfo->is_portb) {
125 cp->cp_pbpar |= iobits;
126 cp->cp_pbdir &= ~iobits;
127 cp->cp_pbodr &= ~iobits;
128 } else {
129 ((immap_t *)IMAP_ADDR)->im_ioport.iop_papar |= iobits;
130 ((immap_t *)IMAP_ADDR)->im_ioport.iop_padir &= ~iobits;
131 ((immap_t *)IMAP_ADDR)->im_ioport.iop_paodr &= ~iobits;
132 }
133
134#endif
135
136 pinfo->brg = 2; 91 pinfo->brg = 2;
137} 92}
138 93
@@ -191,7 +146,7 @@ int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
191 /* was hostalloc but changed cause it blows away the */ 146 /* was hostalloc but changed cause it blows away the */
192 /* large tlb mapping when pinning the kernel area */ 147 /* large tlb mapping when pinning the kernel area */
193 mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8)); 148 mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8));
194 dma_addr = 0; 149 dma_addr = (u32)mem_addr;
195 } else 150 } else
196 mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr, 151 mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr,
197 GFP_KERNEL); 152 GFP_KERNEL);
@@ -204,8 +159,9 @@ int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
204 } 159 }
205 160
206 pinfo->dp_addr = dp_offset; 161 pinfo->dp_addr = dp_offset;
207 pinfo->mem_addr = mem_addr; 162 pinfo->mem_addr = mem_addr; /* virtual address*/
208 pinfo->dma_addr = dma_addr; 163 pinfo->dma_addr = dma_addr; /* physical address*/
164 pinfo->mem_size = memsz;
209 165
210 pinfo->rx_buf = mem_addr; 166 pinfo->rx_buf = mem_addr;
211 pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos 167 pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
index fd9e53ed3feb..4b2de08f46d0 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
@@ -8,6 +8,8 @@
8 * 8 *
9 * Copyright (C) 2004 Freescale Semiconductor, Inc. 9 * Copyright (C) 2004 Freescale Semiconductor, Inc.
10 * (C) 2004 Intracom, S.A. 10 * (C) 2004 Intracom, S.A.
11 * (C) 2006 MontaVista Software, Inc.
12 * Vitaly Bordug <vbordug@ru.mvista.com>
11 * 13 *
12 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
@@ -142,14 +144,6 @@ void scc2_lineif(struct uart_cpm_port *pinfo)
142 * be supported in a sane fashion. 144 * be supported in a sane fashion.
143 */ 145 */
144#ifndef CONFIG_STX_GP3 146#ifndef CONFIG_STX_GP3
145#ifdef CONFIG_MPC8560_ADS
146 volatile iop_cpm2_t *io = &cpm2_immr->im_ioport;
147 io->iop_ppard |= 0x00000018;
148 io->iop_psord &= ~0x00000008; /* Rx */
149 io->iop_psord &= ~0x00000010; /* Tx */
150 io->iop_pdird &= ~0x00000008; /* Rx */
151 io->iop_pdird |= 0x00000010; /* Tx */
152#else
153 volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; 147 volatile iop_cpm2_t *io = &cpm2_immr->im_ioport;
154 io->iop_pparb |= 0x008b0000; 148 io->iop_pparb |= 0x008b0000;
155 io->iop_pdirb |= 0x00880000; 149 io->iop_pdirb |= 0x00880000;
@@ -157,7 +151,6 @@ void scc2_lineif(struct uart_cpm_port *pinfo)
157 io->iop_pdirb &= ~0x00030000; 151 io->iop_pdirb &= ~0x00030000;
158 io->iop_psorb &= ~0x00030000; 152 io->iop_psorb &= ~0x00030000;
159#endif 153#endif
160#endif
161 cpm2_immr->im_cpmux.cmx_scr &= 0xff00ffff; 154 cpm2_immr->im_cpmux.cmx_scr &= 0xff00ffff;
162 cpm2_immr->im_cpmux.cmx_scr |= 0x00090000; 155 cpm2_immr->im_cpmux.cmx_scr |= 0x00090000;
163 pinfo->brg = 2; 156 pinfo->brg = 2;
@@ -218,8 +211,10 @@ int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
218 211
219 memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + 212 memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
220 L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); 213 L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
221 if (is_con) 214 if (is_con) {
222 mem_addr = alloc_bootmem(memsz); 215 mem_addr = alloc_bootmem(memsz);
216 dma_addr = mem_addr;
217 }
223 else 218 else
224 mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr, 219 mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr,
225 GFP_KERNEL); 220 GFP_KERNEL);
@@ -234,6 +229,7 @@ int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
234 pinfo->dp_addr = dp_offset; 229 pinfo->dp_addr = dp_offset;
235 pinfo->mem_addr = mem_addr; 230 pinfo->mem_addr = mem_addr;
236 pinfo->dma_addr = dma_addr; 231 pinfo->dma_addr = dma_addr;
232 pinfo->mem_size = memsz;
237 233
238 pinfo->rx_buf = mem_addr; 234 pinfo->rx_buf = mem_addr;
239 pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos 235 pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index c3b7a6673e9c..d202eb4f3848 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -45,6 +45,7 @@
45#include <asm/io.h> 45#include <asm/io.h>
46#include <asm/irq.h> 46#include <asm/irq.h>
47#include <asm/hardware.h> 47#include <asm/hardware.h>
48#include <asm/arch/imx-uart.h>
48 49
49/* We've been assigned a range on the "Low-density serial ports" major */ 50/* We've been assigned a range on the "Low-density serial ports" major */
50#define SERIAL_IMX_MAJOR 204 51#define SERIAL_IMX_MAJOR 204
@@ -73,7 +74,8 @@ struct imx_port {
73 struct uart_port port; 74 struct uart_port port;
74 struct timer_list timer; 75 struct timer_list timer;
75 unsigned int old_status; 76 unsigned int old_status;
76 int txirq,rxirq,rtsirq; 77 int txirq,rxirq,rtsirq;
78 int have_rtscts:1;
77}; 79};
78 80
79/* 81/*
@@ -491,8 +493,12 @@ imx_set_termios(struct uart_port *port, struct termios *termios,
491 ucr2 = UCR2_SRST | UCR2_IRTS; 493 ucr2 = UCR2_SRST | UCR2_IRTS;
492 494
493 if (termios->c_cflag & CRTSCTS) { 495 if (termios->c_cflag & CRTSCTS) {
494 ucr2 &= ~UCR2_IRTS; 496 if( sport->have_rtscts ) {
495 ucr2 |= UCR2_CTSC; 497 ucr2 &= ~UCR2_IRTS;
498 ucr2 |= UCR2_CTSC;
499 } else {
500 termios->c_cflag &= ~CRTSCTS;
501 }
496 } 502 }
497 503
498 if (termios->c_cflag & CSTOPB) 504 if (termios->c_cflag & CSTOPB)
@@ -719,27 +725,6 @@ static void __init imx_init_ports(void)
719 imx_ports[i].timer.function = imx_timeout; 725 imx_ports[i].timer.function = imx_timeout;
720 imx_ports[i].timer.data = (unsigned long)&imx_ports[i]; 726 imx_ports[i].timer.data = (unsigned long)&imx_ports[i];
721 } 727 }
722
723 imx_gpio_mode(PC9_PF_UART1_CTS);
724 imx_gpio_mode(PC10_PF_UART1_RTS);
725 imx_gpio_mode(PC11_PF_UART1_TXD);
726 imx_gpio_mode(PC12_PF_UART1_RXD);
727 imx_gpio_mode(PB28_PF_UART2_CTS);
728 imx_gpio_mode(PB29_PF_UART2_RTS);
729
730 imx_gpio_mode(PB30_PF_UART2_TXD);
731 imx_gpio_mode(PB31_PF_UART2_RXD);
732
733#if 0 /* We don't need these, on the mx1 the _modem_ side of the uart
734 * is implemented.
735 */
736 imx_gpio_mode(PD7_AF_UART2_DTR);
737 imx_gpio_mode(PD8_AF_UART2_DCD);
738 imx_gpio_mode(PD9_AF_UART2_RI);
739 imx_gpio_mode(PD10_AF_UART2_DSR);
740#endif
741
742
743} 728}
744 729
745#ifdef CONFIG_SERIAL_IMX_CONSOLE 730#ifdef CONFIG_SERIAL_IMX_CONSOLE
@@ -932,7 +917,14 @@ static int serial_imx_resume(struct platform_device *dev)
932 917
933static int serial_imx_probe(struct platform_device *dev) 918static int serial_imx_probe(struct platform_device *dev)
934{ 919{
920 struct imxuart_platform_data *pdata;
921
935 imx_ports[dev->id].port.dev = &dev->dev; 922 imx_ports[dev->id].port.dev = &dev->dev;
923
924 pdata = (struct imxuart_platform_data *)dev->dev.platform_data;
925 if(pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
926 imx_ports[dev->id].have_rtscts = 1;
927
936 uart_add_one_port(&imx_reg, &imx_ports[dev->id].port); 928 uart_add_one_port(&imx_reg, &imx_ports[dev->id].port);
937 platform_set_drvdata(dev, &imx_ports[dev->id]); 929 platform_set_drvdata(dev, &imx_ports[dev->id]);
938 return 0; 930 return 0;
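The imx driver now records a have_rtscts capability from platform data, and set_termios strips CRTSCTS when the board has no RTS/CTS wiring rather than enabling flow control that can never be honoured. A short sketch of that gating with a plain flag word; the flag value and struct are illustrative, not the i.MX definitions.

#include <stdio.h>

#define CRTSCTS_FLAG 0x1        /* illustrative: "caller asked for flow control" */

struct port {
        int have_rtscts;        /* the board actually wires RTS/CTS */
};

/* Keep CRTSCTS only if the hardware can honour it. */
static unsigned int apply_flow_control(struct port *p, unsigned int cflag)
{
        if ((cflag & CRTSCTS_FLAG) && !p->have_rtscts)
                cflag &= ~CRTSCTS_FLAG;
        return cflag;
}

int main(void)
{
        struct port with = { .have_rtscts = 1 };
        struct port without = { .have_rtscts = 0 };

        printf("with rtscts:    %#x\n", apply_flow_control(&with, CRTSCTS_FLAG));
        printf("without rtscts: %#x\n", apply_flow_control(&without, CRTSCTS_FLAG));
        return 0;
}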
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index fcd7744c4253..aeb8153ccf24 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1500,20 +1500,18 @@ uart_block_til_ready(struct file *filp, struct uart_state *state)
1500static struct uart_state *uart_get(struct uart_driver *drv, int line) 1500static struct uart_state *uart_get(struct uart_driver *drv, int line)
1501{ 1501{
1502 struct uart_state *state; 1502 struct uart_state *state;
1503 int ret = 0;
1503 1504
1504 mutex_lock(&port_mutex);
1505 state = drv->state + line; 1505 state = drv->state + line;
1506 if (mutex_lock_interruptible(&state->mutex)) { 1506 if (mutex_lock_interruptible(&state->mutex)) {
1507 state = ERR_PTR(-ERESTARTSYS); 1507 ret = -ERESTARTSYS;
1508 goto out; 1508 goto err;
1509 } 1509 }
1510 1510
1511 state->count++; 1511 state->count++;
1512 if (!state->port) { 1512 if (!state->port || state->port->flags & UPF_DEAD) {
1513 state->count--; 1513 ret = -ENXIO;
1514 mutex_unlock(&state->mutex); 1514 goto err_unlock;
1515 state = ERR_PTR(-ENXIO);
1516 goto out;
1517 } 1515 }
1518 1516
1519 if (!state->info) { 1517 if (!state->info) {
@@ -1531,15 +1529,17 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
1531 tasklet_init(&state->info->tlet, uart_tasklet_action, 1529 tasklet_init(&state->info->tlet, uart_tasklet_action,
1532 (unsigned long)state); 1530 (unsigned long)state);
1533 } else { 1531 } else {
1534 state->count--; 1532 ret = -ENOMEM;
1535 mutex_unlock(&state->mutex); 1533 goto err_unlock;
1536 state = ERR_PTR(-ENOMEM);
1537 } 1534 }
1538 } 1535 }
1539
1540 out:
1541 mutex_unlock(&port_mutex);
1542 return state; 1536 return state;
1537
1538 err_unlock:
1539 state->count--;
1540 mutex_unlock(&state->mutex);
1541 err:
1542 return ERR_PTR(ret);
1543} 1543}
1544 1544
1545/* 1545/*
@@ -2085,45 +2085,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
2085 } 2085 }
2086} 2086}
2087 2087
2088/*
2089 * This reverses the effects of uart_configure_port, hanging up the
2090 * port before removal.
2091 */
2092static void
2093uart_unconfigure_port(struct uart_driver *drv, struct uart_state *state)
2094{
2095 struct uart_port *port = state->port;
2096 struct uart_info *info = state->info;
2097
2098 if (info && info->tty)
2099 tty_vhangup(info->tty);
2100
2101 mutex_lock(&state->mutex);
2102
2103 state->info = NULL;
2104
2105 /*
2106 * Free the port IO and memory resources, if any.
2107 */
2108 if (port->type != PORT_UNKNOWN)
2109 port->ops->release_port(port);
2110
2111 /*
2112 * Indicate that there isn't a port here anymore.
2113 */
2114 port->type = PORT_UNKNOWN;
2115
2116 /*
2117 * Kill the tasklet, and free resources.
2118 */
2119 if (info) {
2120 tasklet_kill(&info->tlet);
2121 kfree(info);
2122 }
2123
2124 mutex_unlock(&state->mutex);
2125}
2126
2127static struct tty_operations uart_ops = { 2088static struct tty_operations uart_ops = {
2128 .open = uart_open, 2089 .open = uart_open,
2129 .close = uart_close, 2090 .close = uart_close,
@@ -2270,6 +2231,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
2270 state = drv->state + port->line; 2231 state = drv->state + port->line;
2271 2232
2272 mutex_lock(&port_mutex); 2233 mutex_lock(&port_mutex);
2234 mutex_lock(&state->mutex);
2273 if (state->port) { 2235 if (state->port) {
2274 ret = -EINVAL; 2236 ret = -EINVAL;
2275 goto out; 2237 goto out;
@@ -2304,7 +2266,13 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
2304 port->cons && !(port->cons->flags & CON_ENABLED)) 2266 port->cons && !(port->cons->flags & CON_ENABLED))
2305 register_console(port->cons); 2267 register_console(port->cons);
2306 2268
2269 /*
2270 * Ensure UPF_DEAD is not set.
2271 */
2272 port->flags &= ~UPF_DEAD;
2273
2307 out: 2274 out:
2275 mutex_unlock(&state->mutex);
2308 mutex_unlock(&port_mutex); 2276 mutex_unlock(&port_mutex);
2309 2277
2310 return ret; 2278 return ret;
@@ -2322,6 +2290,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
2322int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) 2290int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
2323{ 2291{
2324 struct uart_state *state = drv->state + port->line; 2292 struct uart_state *state = drv->state + port->line;
2293 struct uart_info *info;
2325 2294
2326 BUG_ON(in_interrupt()); 2295 BUG_ON(in_interrupt());
2327 2296
@@ -2332,11 +2301,48 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
2332 mutex_lock(&port_mutex); 2301 mutex_lock(&port_mutex);
2333 2302
2334 /* 2303 /*
2304 * Mark the port "dead" - this prevents any opens from
2305 * succeeding while we shut down the port.
2306 */
2307 mutex_lock(&state->mutex);
2308 port->flags |= UPF_DEAD;
2309 mutex_unlock(&state->mutex);
2310
2311 /*
2335 * Remove the devices from devfs 2312 * Remove the devices from devfs
2336 */ 2313 */
2337 tty_unregister_device(drv->tty_driver, port->line); 2314 tty_unregister_device(drv->tty_driver, port->line);
2338 2315
2339 uart_unconfigure_port(drv, state); 2316 info = state->info;
2317 if (info && info->tty)
2318 tty_vhangup(info->tty);
2319
2320 /*
2321 * All users of this port should now be disconnected from
2322 * this driver, and the port shut down. We should be the
2323 * only thread fiddling with this port from now on.
2324 */
2325 state->info = NULL;
2326
2327 /*
2328 * Free the port IO and memory resources, if any.
2329 */
2330 if (port->type != PORT_UNKNOWN)
2331 port->ops->release_port(port);
2332
2333 /*
2334 * Indicate that there isn't a port here anymore.
2335 */
2336 port->type = PORT_UNKNOWN;
2337
2338 /*
2339 * Kill the tasklet, and free resources.
2340 */
2341 if (info) {
2342 tasklet_kill(&info->tlet);
2343 kfree(info);
2344 }
2345
2340 state->port = NULL; 2346 state->port = NULL;
2341 mutex_unlock(&port_mutex); 2347 mutex_unlock(&port_mutex);
2342 2348
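The serial_core changes close a race between open and port removal: uart_remove_one_port() marks the port UPF_DEAD under the per-port mutex before tearing it down, and uart_get() now rejects ports carrying that flag, so no new open can slip in mid-teardown. A compressed userspace analogue of mark-dead-then-teardown using a pthread mutex; the names below are hypothetical.

#include <pthread.h>
#include <stdio.h>

struct port {
        pthread_mutex_t mutex;
        int dead;               /* set before teardown; blocks new opens */
        int count;              /* open references */
};

/* Fails once the port has been marked dead. */
static int port_get(struct port *p)
{
        int ret = 0;

        pthread_mutex_lock(&p->mutex);
        if (p->dead)
                ret = -1;
        else
                p->count++;
        pthread_mutex_unlock(&p->mutex);
        return ret;
}

static void port_remove(struct port *p)
{
        /* Mark dead first so no new open can succeed during teardown. */
        pthread_mutex_lock(&p->mutex);
        p->dead = 1;
        pthread_mutex_unlock(&p->mutex);

        /* ... hang up existing users, release I/O regions, free state ... */
}

int main(void)
{
        struct port p = { .dead = 0, .count = 0 };

        pthread_mutex_init(&p.mutex, NULL);
        printf("open before removal: %d\n", port_get(&p));
        port_remove(&p);
        printf("open after removal:  %d\n", port_get(&p));
        return 0;
}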
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 0b49ff78efc1..501316b198e5 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -678,7 +678,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
678 /* Track PCI-device specific data */ 678 /* Track PCI-device specific data */
679 pci_set_drvdata(pdev, idd); 679 pci_set_drvdata(pdev, idd);
680 down_write(&ioc3_devices_rwsem); 680 down_write(&ioc3_devices_rwsem);
681 list_add(&idd->list, &ioc3_devices); 681 list_add_tail(&idd->list, &ioc3_devices);
682 idd->id = ioc3_counter++; 682 idd->id = ioc3_counter++;
683 up_write(&ioc3_devices_rwsem); 683 up_write(&ioc3_devices_rwsem);
684 684
diff --git a/drivers/sn/ioc4.c b/drivers/sn/ioc4.c
index 67140a5804f5..cdeff909403e 100644
--- a/drivers/sn/ioc4.c
+++ b/drivers/sn/ioc4.c
@@ -310,7 +310,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
310 pci_set_drvdata(idd->idd_pdev, idd); 310 pci_set_drvdata(idd->idd_pdev, idd);
311 311
312 mutex_lock(&ioc4_mutex); 312 mutex_lock(&ioc4_mutex);
313 list_add(&idd->idd_list, &ioc4_devices); 313 list_add_tail(&idd->idd_list, &ioc4_devices);
314 314
315 /* Add this IOC4 to all submodules */ 315 /* Add this IOC4 to all submodules */
316 list_for_each_entry(is, &ioc4_submodules, is_list) { 316 list_for_each_entry(is, &ioc4_submodules, is_list) {
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 6a4b93ad1082..0b9293493957 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -2166,7 +2166,7 @@ static void handle_ep_small (struct net2280_ep *ep)
2166 ep->stopped = 1; 2166 ep->stopped = 1;
2167 set_halt (ep); 2167 set_halt (ep);
2168 mode = 2; 2168 mode = 2;
2169 } else if (!req && ep->stopped) 2169 } else if (!req && !ep->stopped)
2170 write_fifo (ep, NULL); 2170 write_fifo (ep, NULL);
2171 } 2171 }
2172 } else { 2172 } else {
@@ -2280,9 +2280,7 @@ static void handle_ep_small (struct net2280_ep *ep)
2280 /* if we wrote it all, we're usually done */ 2280 /* if we wrote it all, we're usually done */
2281 if (req->req.actual == req->req.length) { 2281 if (req->req.actual == req->req.length) {
2282 if (ep->num == 0) { 2282 if (ep->num == 0) {
2283 /* wait for control status */ 2283 /* send zlps until the status stage */
2284 if (mode != 2)
2285 req = NULL;
2286 } else if (!req->req.zero || len != ep->ep.maxpacket) 2284 } else if (!req->req.zero || len != ep->ep.maxpacket)
2287 mode = 2; 2285 mode = 2;
2288 } 2286 }
@@ -2744,6 +2742,10 @@ static irqreturn_t net2280_irq (int irq, void *_dev, struct pt_regs * r)
2744{ 2742{
2745 struct net2280 *dev = _dev; 2743 struct net2280 *dev = _dev;
2746 2744
2745 /* shared interrupt, not ours */
2746 if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED)))
2747 return IRQ_NONE;
2748
2747 spin_lock (&dev->lock); 2749 spin_lock (&dev->lock);
2748 2750
2749 /* handle disconnect, dma, and more */ 2751 /* handle disconnect, dma, and more */
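The net2280 handler now checks INTA_ASSERTED in irqstat0 and returns IRQ_NONE when a shared interrupt line fired for some other device, before taking its spinlock. A small sketch of that shared-interrupt early return against a fake status register; the bit position and names are placeholders.

#include <stdio.h>

#define INTA_ASSERTED_BIT (1u << 9)     /* placeholder bit position */

enum irqreturn { IRQ_NONE_RET, IRQ_HANDLED_RET };

static unsigned int fake_irqstat;       /* stands in for the device register */

static enum irqreturn device_irq(void)
{
        /* Shared interrupt line: bail out quickly if it is not ours. */
        if (!(fake_irqstat & INTA_ASSERTED_BIT))
                return IRQ_NONE_RET;

        /* ... take the lock, dispatch the real interrupt sources ... */
        fake_irqstat &= ~INTA_ASSERTED_BIT;
        return IRQ_HANDLED_RET;
}

int main(void)
{
        printf("not ours: %d\n", device_irq());
        fake_irqstat = INTA_ASSERTED_BIT;
        printf("ours:     %d\n", device_irq());
        return 0;
}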
@@ -2831,13 +2833,13 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
2831 } 2833 }
2832 2834
2833 /* alloc, and start init */ 2835 /* alloc, and start init */
2834 dev = kmalloc (sizeof *dev, SLAB_KERNEL); 2836 dev = kzalloc (sizeof *dev, SLAB_KERNEL);
2835 if (dev == NULL){ 2837 if (dev == NULL){
2836 retval = -ENOMEM; 2838 retval = -ENOMEM;
2837 goto done; 2839 goto done;
2838 } 2840 }
2839 2841
2840 memset (dev, 0, sizeof *dev); 2842 pci_set_drvdata (pdev, dev);
2841 spin_lock_init (&dev->lock); 2843 spin_lock_init (&dev->lock);
2842 dev->pdev = pdev; 2844 dev->pdev = pdev;
2843 dev->gadget.ops = &net2280_ops; 2845 dev->gadget.ops = &net2280_ops;
@@ -2950,7 +2952,6 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
2950 dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff; 2952 dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
2951 2953
2952 /* done */ 2954 /* done */
2953 pci_set_drvdata (pdev, dev);
2954 INFO (dev, "%s\n", driver_desc); 2955 INFO (dev, "%s\n", driver_desc);
2955 INFO (dev, "irq %s, pci mem %p, chip rev %04x\n", 2956 INFO (dev, "irq %s, pci mem %p, chip rev %04x\n",
2956 bufp, base, dev->chiprev); 2957 bufp, base, dev->chiprev);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 1e03f1a5a5fd..a1bd2bea6deb 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -350,7 +350,7 @@ static const struct hc_driver ehci_pci_hc_driver = {
350/* PCI driver selection metadata; PCI hotplugging uses this */ 350/* PCI driver selection metadata; PCI hotplugging uses this */
351static const struct pci_device_id pci_ids [] = { { 351static const struct pci_device_id pci_ids [] = { {
352 /* handle any USB 2.0 EHCI controller */ 352 /* handle any USB 2.0 EHCI controller */
353 PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x20), ~0), 353 PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
354 .driver_data = (unsigned long) &ehci_pci_hc_driver, 354 .driver_data = (unsigned long) &ehci_pci_hc_driver,
355 }, 355 },
356 { /* end: all zeroes */ } 356 { /* end: all zeroes */ }
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 1bfe96f4d045..b268537e389e 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -206,7 +206,7 @@ static const struct hc_driver ohci_pci_hc_driver = {
206 206
207static const struct pci_device_id pci_ids [] = { { 207static const struct pci_device_id pci_ids [] = { {
208 /* handle any USB OHCI controller */ 208 /* handle any USB OHCI controller */
209 PCI_DEVICE_CLASS((PCI_CLASS_SERIAL_USB << 8) | 0x10, ~0), 209 PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
210 .driver_data = (unsigned long) &ohci_pci_hc_driver, 210 .driver_data = (unsigned long) &ohci_pci_hc_driver,
211 }, { /* end: all zeroes */ } 211 }, { /* end: all zeroes */ }
212}; 212};
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index c0c4db78b590..d225e11f4055 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -858,7 +858,7 @@ static const struct hc_driver uhci_driver = {
858 858
859static const struct pci_device_id uhci_pci_ids[] = { { 859static const struct pci_device_id uhci_pci_ids[] = { {
860 /* handle any USB UHCI controller */ 860 /* handle any USB UHCI controller */
861 PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0), 861 PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
862 .driver_data = (unsigned long) &uhci_driver, 862 .driver_data = (unsigned long) &uhci_driver,
863 }, { /* end: all zeroes */ } 863 }, { /* end: all zeroes */ }
864}; 864};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index f5851db67f5b..82151207d814 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -308,6 +308,7 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
308 308
309static struct usb_device_id id_table_combined [] = { 309static struct usb_device_id id_table_combined [] = {
310 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, 310 { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) },
311 { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) },
311 { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) }, 312 { USB_DEVICE(FTDI_VID, FTDI_SIO_PID) },
312 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) }, 313 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
313 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, 314 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
@@ -493,6 +494,8 @@ static struct usb_device_id id_table_combined [] = {
493 { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_777_PID) }, 494 { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_777_PID) },
494 { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_8900F_PID) }, 495 { USB_DEVICE(FTDI_VID, FTDI_WESTREX_MODEL_8900F_PID) },
495 { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, 496 { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
497 { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
498 { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
496 { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) }, 499 { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
497 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) }, 500 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) },
498 { }, /* Optional parameter entry */ 501 { }, /* Optional parameter entry */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 2155f0e4a378..2c55a5ea9c99 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -39,6 +39,9 @@
39/* www.thoughttechnology.com/ TT-USB provide with procomp use ftdi_sio */ 39/* www.thoughttechnology.com/ TT-USB provide with procomp use ftdi_sio */
40#define FTDI_TTUSB_PID 0xFF20 /* Product Id */ 40#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
41 41
42/* iPlus device */
43#define FTDI_IPLUS_PID 0xD070 /* Product Id */
44
42/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */ 45/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */
43/* they use the ftdi chipset for the USB interface and the vendor id is the same */ 46/* they use the ftdi chipset for the USB interface and the vendor id is the same */
44#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */ 47#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
@@ -153,6 +156,11 @@
153#define ICOM_ID1_PID 0x0004 156#define ICOM_ID1_PID 0x0004
154 157
155/* 158/*
159 * ASK.fr devices
160 */
161#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
162
163/*
156 * DSS-20 Sync Station for Sony Ericsson P800 164 * DSS-20 Sync Station for Sony Ericsson P800
157 */ 165 */
158 166
@@ -400,6 +408,11 @@
400#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */ 408#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */
401 409
402/* 410/*
411 * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
412 */
413#define FTDI_RRCIRKITS_LOCOBUFFER_PID 0xc7d0 /* LocoBuffer USB */
414
415/*
403 * Eclo (http://www.eclo.pt/) product IDs. 416 * Eclo (http://www.eclo.pt/) product IDs.
404 * PID 0xEA90 submitted by Martin Grill. 417 * PID 0xEA90 submitted by Martin Grill.
405 */ 418 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index ccf746b27d4e..c96714bb1cb8 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -61,6 +61,7 @@ static struct usb_device_id id_table [] = {
61 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, 61 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
62 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) }, 62 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
63 { USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID) }, 63 { USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID) },
64 { USB_DEVICE(ITEGNO_VENDOR_ID, ITEGNO_PRODUCT_ID_2080) },
64 { USB_DEVICE(MA620_VENDOR_ID, MA620_PRODUCT_ID) }, 65 { USB_DEVICE(MA620_VENDOR_ID, MA620_PRODUCT_ID) },
65 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID) }, 66 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID) },
66 { USB_DEVICE(TRIPP_VENDOR_ID, TRIPP_PRODUCT_ID) }, 67 { USB_DEVICE(TRIPP_VENDOR_ID, TRIPP_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09f379b19e98..7f29e81d3e35 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -26,6 +26,7 @@
26 26
27#define ITEGNO_VENDOR_ID 0x0eba 27#define ITEGNO_VENDOR_ID 0x0eba
28#define ITEGNO_PRODUCT_ID 0x1080 28#define ITEGNO_PRODUCT_ID 0x1080
29#define ITEGNO_PRODUCT_ID_2080 0x2080
30
31#define MA620_VENDOR_ID 0x0df7
32#define MA620_PRODUCT_ID 0x0620
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 557411c6e7c7..f806553cd9a4 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -508,6 +508,7 @@ no_firmware:
508 err("%s: Unable to retrieve firmware version, try replugging\n", serial->type->description);
509 err("%s: If the firmware is not running (status led not blinking)\n", serial->type->description);
510 err("%s: please contact support@connecttech.com\n", serial->type->description);
511 kfree(result);
512 return -ENODEV;
513
514no_command_private:
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c4a9dcff5f2b..aec5ea8682d5 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -411,7 +411,7 @@ UNUSUAL_DEV( 0x050d, 0x0115, 0x0133, 0x0133,
411UNUSUAL_DEV( 0x0525, 0xa140, 0x0100, 0x0100,
412 "Iomega",
413 "USB Clik! 40",
414 US_SC_8070, US_PR_BULK, NULL, 414 US_SC_8070, US_PR_DEVICE, NULL,
415 US_FL_FIX_INQUIRY ),
416
417/* Yakumo Mega Image 37
@@ -773,6 +773,13 @@ UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001,
773 US_SC_DEVICE, US_PR_DEVICE, NULL,
774 US_FL_FIX_CAPACITY ),
775
776/* Reported by Olivier Blondeau <zeitoun@gmail.com> */
777UNUSUAL_DEV( 0x0727, 0x0306, 0x0100, 0x0100,
778 "ATMEL",
779 "SND1 Storage",
780 US_SC_DEVICE, US_PR_DEVICE, NULL,
781 US_FL_IGNORE_RESIDUE),
782
783/* Submitted by Roman Hodek <roman@hodek.net> */
784UNUSUAL_DEV( 0x0781, 0x0001, 0x0200, 0x0200,
785 "Sandisk",
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 9060e7137441..4587087d777a 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -400,6 +400,8 @@ config FB_ASILIANT
400 select FB_CFB_FILLRECT
401 select FB_CFB_COPYAREA
402 select FB_CFB_IMAGEBLIT
403 help
404 This is the frame buffer device driver for the Asiliant 69030 chipset
405
406config FB_IMSTT
407 bool "IMS Twin Turbo display support"
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index b367de30b98c..600d3e0e08b7 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1920,1925 +1920,3 @@ module_exit(au1200fb_cleanup);
1920
1921MODULE_DESCRIPTION(DRIVER_DESC);
1922MODULE_LICENSE("GPL");
1923/*
1924 * BRIEF MODULE DESCRIPTION
1925 * Au1200 LCD Driver.
1926 *
1927 * Copyright 2004-2005 AMD
1928 * Author: AMD
1929 *
1930 * Based on:
1931 * linux/drivers/video/skeletonfb.c -- Skeleton for a frame buffer device
1932 * Created 28 Dec 1997 by Geert Uytterhoeven
1933 *
1934 * This program is free software; you can redistribute it and/or modify it
1935 * under the terms of the GNU General Public License as published by the
1936 * Free Software Foundation; either version 2 of the License, or (at your
1937 * option) any later version.
1938 *
1939 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
1940 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
1941 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
1942 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1943 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1944 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
1945 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
1946 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1947 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1948 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1949 *
1950 * You should have received a copy of the GNU General Public License along
1951 * with this program; if not, write to the Free Software Foundation, Inc.,
1952 * 675 Mass Ave, Cambridge, MA 02139, USA.
1953 */
1954
1955#include <linux/module.h>
1956#include <linux/platform_device.h>
1957#include <linux/kernel.h>
1958#include <linux/errno.h>
1959#include <linux/string.h>
1960#include <linux/mm.h>
1961#include <linux/fb.h>
1962#include <linux/init.h>
1963#include <linux/interrupt.h>
1964#include <linux/ctype.h>
1965#include <linux/dma-mapping.h>
1966
1967#include <asm/mach-au1x00/au1000.h>
1968#include "au1200fb.h"
1969
1970#ifdef CONFIG_PM
1971#include <asm/mach-au1x00/au1xxx_pm.h>
1972#endif
1973
1974#ifndef CONFIG_FB_AU1200_DEVS
1975#define CONFIG_FB_AU1200_DEVS 4
1976#endif
1977
1978#define DRIVER_NAME "au1200fb"
1979#define DRIVER_DESC "LCD controller driver for AU1200 processors"
1980
1981#define DEBUG 1
1982
1983#define print_err(f, arg...) printk(KERN_ERR DRIVER_NAME ": " f "\n", ## arg)
1984#define print_warn(f, arg...) printk(KERN_WARNING DRIVER_NAME ": " f "\n", ## arg)
1985#define print_info(f, arg...) printk(KERN_INFO DRIVER_NAME ": " f "\n", ## arg)
1986
1987#if DEBUG
1988#define print_dbg(f, arg...) printk(KERN_DEBUG __FILE__ ": " f "\n", ## arg)
1989#else
1990#define print_dbg(f, arg...) do {} while (0)
1991#endif
1992
1993
1994#define AU1200_LCD_FB_IOCTL 0x46FF
1995
1996#define AU1200_LCD_SET_SCREEN 1
1997#define AU1200_LCD_GET_SCREEN 2
1998#define AU1200_LCD_SET_WINDOW 3
1999#define AU1200_LCD_GET_WINDOW 4
2000#define AU1200_LCD_SET_PANEL 5
2001#define AU1200_LCD_GET_PANEL 6
2002
2003#define SCREEN_SIZE (1<< 1)
2004#define SCREEN_BACKCOLOR (1<< 2)
2005#define SCREEN_BRIGHTNESS (1<< 3)
2006#define SCREEN_COLORKEY (1<< 4)
2007#define SCREEN_MASK (1<< 5)
2008
2009struct au1200_lcd_global_regs_t {
2010 unsigned int flags;
2011 unsigned int xsize;
2012 unsigned int ysize;
2013 unsigned int backcolor;
2014 unsigned int brightness;
2015 unsigned int colorkey;
2016 unsigned int mask;
2017 unsigned int panel_choice;
2018 char panel_desc[80];
2019
2020};
2021
2022#define WIN_POSITION (1<< 0)
2023#define WIN_ALPHA_COLOR (1<< 1)
2024#define WIN_ALPHA_MODE (1<< 2)
2025#define WIN_PRIORITY (1<< 3)
2026#define WIN_CHANNEL (1<< 4)
2027#define WIN_BUFFER_FORMAT (1<< 5)
2028#define WIN_COLOR_ORDER (1<< 6)
2029#define WIN_PIXEL_ORDER (1<< 7)
2030#define WIN_SIZE (1<< 8)
2031#define WIN_COLORKEY_MODE (1<< 9)
2032#define WIN_DOUBLE_BUFFER_MODE (1<< 10)
2033#define WIN_RAM_ARRAY_MODE (1<< 11)
2034#define WIN_BUFFER_SCALE (1<< 12)
2035#define WIN_ENABLE (1<< 13)
2036
2037struct au1200_lcd_window_regs_t {
2038 unsigned int flags;
2039 unsigned int xpos;
2040 unsigned int ypos;
2041 unsigned int alpha_color;
2042 unsigned int alpha_mode;
2043 unsigned int priority;
2044 unsigned int channel;
2045 unsigned int buffer_format;
2046 unsigned int color_order;
2047 unsigned int pixel_order;
2048 unsigned int xsize;
2049 unsigned int ysize;
2050 unsigned int colorkey_mode;
2051 unsigned int double_buffer_mode;
2052 unsigned int ram_array_mode;
2053 unsigned int xscale;
2054 unsigned int yscale;
2055 unsigned int enable;
2056};
2057
2058
2059struct au1200_lcd_iodata_t {
2060 unsigned int subcmd;
2061 struct au1200_lcd_global_regs_t global;
2062 struct au1200_lcd_window_regs_t window;
2063};
2064
2065#if defined(__BIG_ENDIAN)
2066#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_11
2067#else
2068#define LCD_CONTROL_DEFAULT_PO LCD_CONTROL_PO_00
2069#endif
2070#define LCD_CONTROL_DEFAULT_SBPPF LCD_CONTROL_SBPPF_565
2071
2072/* Private, per-framebuffer management information (independent of the panel itself) */
2073struct au1200fb_device {
2074 struct fb_info fb_info; /* FB driver info record */
2075
2076 int plane;
2077 unsigned char* fb_mem; /* FrameBuffer memory map */
2078 unsigned int fb_len;
2079 dma_addr_t fb_phys;
2080};
2081
2082static struct au1200fb_device _au1200fb_devices[CONFIG_FB_AU1200_DEVS];
2083/********************************************************************/
2084
2085/* LCD controller restrictions */
2086#define AU1200_LCD_MAX_XRES 1280
2087#define AU1200_LCD_MAX_YRES 1024
2088#define AU1200_LCD_MAX_BPP 32
2089#define AU1200_LCD_MAX_CLK 96000000 /* fixme: this needs to go away ? */
2090#define AU1200_LCD_NBR_PALETTE_ENTRIES 256
2091
2092/* Default number of visible screen buffer to allocate */
2093#define AU1200FB_NBR_VIDEO_BUFFERS 1
2094
2095/********************************************************************/
2096
2097static struct au1200_lcd *lcd = (struct au1200_lcd *) AU1200_LCD_ADDR;
2098static int window_index = 2; /* default window configuration (windows[2]) */
2099static int panel_index = 2; /* default panel (known_lcd_panels[2]) */
2100static struct window_settings *win;
2101static struct panel_settings *panel;
2102static int noblanking = 1;
2103static int nohwcursor = 0;
2104
2105struct window_settings {
2106 unsigned char name[64];
2107 uint32 mode_backcolor;
2108 uint32 mode_colorkey;
2109 uint32 mode_colorkeymsk;
2110 struct {
2111 int xres;
2112 int yres;
2113 int xpos;
2114 int ypos;
2115 uint32 mode_winctrl1; /* winctrl1[FRM,CCO,PO,PIPE] */
2116 uint32 mode_winenable;
2117 } w[4];
2118};
2119
2120#if defined(__BIG_ENDIAN)
2121#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_00
2122#else
2123#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01
2124#endif
2125
2126extern int board_au1200fb_panel_init (void);
2127extern int board_au1200fb_panel_shutdown (void);
2128
2129#ifdef CONFIG_PM
2130int au1200fb_pm_callback(au1xxx_power_dev_t *dev,
2131 au1xxx_request_t request, void *data);
2132au1xxx_power_dev_t *LCD_pm_dev;
2133#endif
2134
2135/*
2136 * Default window configurations
2137 */
2138static struct window_settings windows[] = {
2139 { /* Index 0 */
2140 "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
2141 /* mode_backcolor */ 0x006600ff,
2142 /* mode_colorkey,msk*/ 0, 0,
2143 {
2144 {
2145 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
2146 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2147 LCD_WINCTRL1_PO_16BPP,
2148 /* mode_winenable*/ LCD_WINENABLE_WEN0,
2149 },
2150 {
2151 /* xres, yres, xpos, ypos */ 100, 100, 100, 100,
2152 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2153 LCD_WINCTRL1_PO_16BPP |
2154 LCD_WINCTRL1_PIPE,
2155 /* mode_winenable*/ LCD_WINENABLE_WEN1,
2156 },
2157 {
2158 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
2159 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2160 LCD_WINCTRL1_PO_16BPP,
2161 /* mode_winenable*/ 0,
2162 },
2163 {
2164 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
2165 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2166 LCD_WINCTRL1_PO_16BPP |
2167 LCD_WINCTRL1_PIPE,
2168 /* mode_winenable*/ 0,
2169 },
2170 },
2171 },
2172
2173 { /* Index 1 */
2174 "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
2175 /* mode_backcolor */ 0x006600ff,
2176 /* mode_colorkey,msk*/ 0, 0,
2177 {
2178 {
2179 /* xres, yres, xpos, ypos */ 320, 240, 5, 5,
2180 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_24BPP |
2181 LCD_WINCTRL1_PO_00,
2182 /* mode_winenable*/ LCD_WINENABLE_WEN0,
2183 },
2184 {
2185 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
2186 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565
2187 | LCD_WINCTRL1_PO_16BPP,
2188 /* mode_winenable*/ 0,
2189 },
2190 {
2191 /* xres, yres, xpos, ypos */ 100, 100, 0, 0,
2192 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2193 LCD_WINCTRL1_PO_16BPP |
2194 LCD_WINCTRL1_PIPE,
2195 /* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
2196 },
2197 {
2198 /* xres, yres, xpos, ypos */ 200, 25, 0, 0,
2199 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2200 LCD_WINCTRL1_PO_16BPP |
2201 LCD_WINCTRL1_PIPE,
2202 /* mode_winenable*/ 0,
2203 },
2204 },
2205 },
2206 { /* Index 2 */
2207 "0-FS gfx, 1-video, 2-ovly gfx, 3-ovly gfx",
2208 /* mode_backcolor */ 0x006600ff,
2209 /* mode_colorkey,msk*/ 0, 0,
2210 {
2211 {
2212 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
2213 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2214 LCD_WINCTRL1_PO_16BPP,
2215 /* mode_winenable*/ LCD_WINENABLE_WEN0,
2216 },
2217 {
2218 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
2219 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2220 LCD_WINCTRL1_PO_16BPP,
2221 /* mode_winenable*/ 0,
2222 },
2223 {
2224 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
2225 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_32BPP |
2226 LCD_WINCTRL1_PO_00|LCD_WINCTRL1_PIPE,
2227 /* mode_winenable*/ 0/*LCD_WINENABLE_WEN2*/,
2228 },
2229 {
2230 /* xres, yres, xpos, ypos */ 0, 0, 0, 0,
2231 /* mode_winctrl1 */ LCD_WINCTRL1_FRM_16BPP565 |
2232 LCD_WINCTRL1_PO_16BPP |
2233 LCD_WINCTRL1_PIPE,
2234 /* mode_winenable*/ 0,
2235 },
2236 },
2237 },
2238 /* Need VGA 640 @ 24bpp, @ 32bpp */
2239 /* Need VGA 800 @ 24bpp, @ 32bpp */
2240 /* Need VGA 1024 @ 24bpp, @ 32bpp */
2241};
2242
2243/*
2244 * Controller configurations for various panels.
2245 */
2246
2247struct panel_settings
2248{
2249 const char name[25]; /* Full name <vendor>_<model> */
2250
2251 struct fb_monspecs monspecs; /* FB monitor specs */
2252
2253 /* panel timings */
2254 uint32 mode_screen;
2255 uint32 mode_horztiming;
2256 uint32 mode_verttiming;
2257 uint32 mode_clkcontrol;
2258 uint32 mode_pwmdiv;
2259 uint32 mode_pwmhi;
2260 uint32 mode_outmask;
2261 uint32 mode_fifoctrl;
2262 uint32 mode_toyclksrc;
2263 uint32 mode_backlight;
2264 uint32 mode_auxpll;
2265 int (*device_init)(void);
2266 int (*device_shutdown)(void);
2267#define Xres min_xres
2268#define Yres min_yres
2269 u32 min_xres; /* Minimum horizontal resolution */
2270 u32 max_xres; /* Maximum horizontal resolution */
2271 u32 min_yres; /* Minimum vertical resolution */
2272 u32 max_yres; /* Maximum vertical resolution */
2273};
2274
2275/********************************************************************/
2276/* fixme: Maybe a modedb for the CRT ? otherwise panels should be as-is */
2277
2278/* List of panels known to work with the AU1200 LCD controller.
2279 * To add a new panel, enter the same specifications as the
2280 * Generic_TFT one, and MAKE SURE that it doesn't conflict
2281 * with the controller restrictions. Restrictions are:
2282 *
2283 * STN color panels: max_bpp <= 12
2284 * STN mono panels: max_bpp <= 4
2285 * TFT panels: max_bpp <= 16
2286 * max_xres <= 800
2287 * max_yres <= 600
2288 */
2289static struct panel_settings known_lcd_panels[] =
2290{
2291 [0] = { /* QVGA 320x240 H:33.3kHz V:110Hz */
2292 .name = "QVGA_320x240",
2293 .monspecs = {
2294 .modedb = NULL,
2295 .modedb_len = 0,
2296 .hfmin = 30000,
2297 .hfmax = 70000,
2298 .vfmin = 60,
2299 .vfmax = 60,
2300 .dclkmin = 6000000,
2301 .dclkmax = 28000000,
2302 .input = FB_DISP_RGB,
2303 },
2304 .mode_screen = LCD_SCREEN_SX_N(320) |
2305 LCD_SCREEN_SY_N(240),
2306 .mode_horztiming = 0x00c4623b,
2307 .mode_verttiming = 0x00502814,
2308 .mode_clkcontrol = 0x00020002, /* /4=24Mhz */
2309 .mode_pwmdiv = 0x00000000,
2310 .mode_pwmhi = 0x00000000,
2311 .mode_outmask = 0x00FFFFFF,
2312 .mode_fifoctrl = 0x2f2f2f2f,
2313 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2314 .mode_backlight = 0x00000000,
2315 .mode_auxpll = 8, /* 96MHz AUXPLL */
2316 .device_init = NULL,
2317 .device_shutdown = NULL,
2318 320, 320,
2319 240, 240,
2320 },
2321
2322 [1] = { /* VGA 640x480 H:30.3kHz V:58Hz */
2323 .name = "VGA_640x480",
2324 .monspecs = {
2325 .modedb = NULL,
2326 .modedb_len = 0,
2327 .hfmin = 30000,
2328 .hfmax = 70000,
2329 .vfmin = 60,
2330 .vfmax = 60,
2331 .dclkmin = 6000000,
2332 .dclkmax = 28000000,
2333 .input = FB_DISP_RGB,
2334 },
2335 .mode_screen = 0x13f9df80,
2336 .mode_horztiming = 0x003c5859,
2337 .mode_verttiming = 0x00741201,
2338 .mode_clkcontrol = 0x00020001, /* /4=24Mhz */
2339 .mode_pwmdiv = 0x00000000,
2340 .mode_pwmhi = 0x00000000,
2341 .mode_outmask = 0x00FFFFFF,
2342 .mode_fifoctrl = 0x2f2f2f2f,
2343 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2344 .mode_backlight = 0x00000000,
2345 .mode_auxpll = 8, /* 96MHz AUXPLL */
2346 .device_init = NULL,
2347 .device_shutdown = NULL,
2348 640, 480,
2349 640, 480,
2350 },
2351
2352 [2] = { /* SVGA 800x600 H:46.1kHz V:69Hz */
2353 .name = "SVGA_800x600",
2354 .monspecs = {
2355 .modedb = NULL,
2356 .modedb_len = 0,
2357 .hfmin = 30000,
2358 .hfmax = 70000,
2359 .vfmin = 60,
2360 .vfmax = 60,
2361 .dclkmin = 6000000,
2362 .dclkmax = 28000000,
2363 .input = FB_DISP_RGB,
2364 },
2365 .mode_screen = 0x18fa5780,
2366 .mode_horztiming = 0x00dc7e77,
2367 .mode_verttiming = 0x00584805,
2368 .mode_clkcontrol = 0x00020000, /* /2=48Mhz */
2369 .mode_pwmdiv = 0x00000000,
2370 .mode_pwmhi = 0x00000000,
2371 .mode_outmask = 0x00FFFFFF,
2372 .mode_fifoctrl = 0x2f2f2f2f,
2373 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2374 .mode_backlight = 0x00000000,
2375 .mode_auxpll = 8, /* 96MHz AUXPLL */
2376 .device_init = NULL,
2377 .device_shutdown = NULL,
2378 800, 800,
2379 600, 600,
2380 },
2381
2382 [3] = { /* XVGA 1024x768 H:56.2kHz V:70Hz */
2383 .name = "XVGA_1024x768",
2384 .monspecs = {
2385 .modedb = NULL,
2386 .modedb_len = 0,
2387 .hfmin = 30000,
2388 .hfmax = 70000,
2389 .vfmin = 60,
2390 .vfmax = 60,
2391 .dclkmin = 6000000,
2392 .dclkmax = 28000000,
2393 .input = FB_DISP_RGB,
2394 },
2395 .mode_screen = 0x1ffaff80,
2396 .mode_horztiming = 0x007d0e57,
2397 .mode_verttiming = 0x00740a01,
2398 .mode_clkcontrol = 0x000A0000, /* /1 */
2399 .mode_pwmdiv = 0x00000000,
2400 .mode_pwmhi = 0x00000000,
2401 .mode_outmask = 0x00FFFFFF,
2402 .mode_fifoctrl = 0x2f2f2f2f,
2403 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2404 .mode_backlight = 0x00000000,
2405 .mode_auxpll = 6, /* 72MHz AUXPLL */
2406 .device_init = NULL,
2407 .device_shutdown = NULL,
2408 1024, 1024,
2409 768, 768,
2410 },
2411
2412 [4] = { /* XVGA XVGA 1280x1024 H:68.5kHz V:65Hz */
2413 .name = "XVGA_1280x1024",
2414 .monspecs = {
2415 .modedb = NULL,
2416 .modedb_len = 0,
2417 .hfmin = 30000,
2418 .hfmax = 70000,
2419 .vfmin = 60,
2420 .vfmax = 60,
2421 .dclkmin = 6000000,
2422 .dclkmax = 28000000,
2423 .input = FB_DISP_RGB,
2424 },
2425 .mode_screen = 0x27fbff80,
2426 .mode_horztiming = 0x00cdb2c7,
2427 .mode_verttiming = 0x00600002,
2428 .mode_clkcontrol = 0x000A0000, /* /1 */
2429 .mode_pwmdiv = 0x00000000,
2430 .mode_pwmhi = 0x00000000,
2431 .mode_outmask = 0x00FFFFFF,
2432 .mode_fifoctrl = 0x2f2f2f2f,
2433 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2434 .mode_backlight = 0x00000000,
2435 .mode_auxpll = 10, /* 120MHz AUXPLL */
2436 .device_init = NULL,
2437 .device_shutdown = NULL,
2438 1280, 1280,
2439 1024, 1024,
2440 },
2441
2442 [5] = { /* Samsung 1024x768 TFT */
2443 .name = "Samsung_1024x768_TFT",
2444 .monspecs = {
2445 .modedb = NULL,
2446 .modedb_len = 0,
2447 .hfmin = 30000,
2448 .hfmax = 70000,
2449 .vfmin = 60,
2450 .vfmax = 60,
2451 .dclkmin = 6000000,
2452 .dclkmax = 28000000,
2453 .input = FB_DISP_RGB,
2454 },
2455 .mode_screen = 0x1ffaff80,
2456 .mode_horztiming = 0x018cc677,
2457 .mode_verttiming = 0x00241217,
2458 .mode_clkcontrol = 0x00000000, /* SCB 0x1 /4=24Mhz */
2459 .mode_pwmdiv = 0x8000063f, /* SCB 0x0 */
2460 .mode_pwmhi = 0x03400000, /* SCB 0x0 */
2461 .mode_outmask = 0x00FFFFFF,
2462 .mode_fifoctrl = 0x2f2f2f2f,
2463 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2464 .mode_backlight = 0x00000000,
2465 .mode_auxpll = 8, /* 96MHz AUXPLL */
2466 .device_init = board_au1200fb_panel_init,
2467 .device_shutdown = board_au1200fb_panel_shutdown,
2468 1024, 1024,
2469 768, 768,
2470 },
2471
2472 [6] = { /* Toshiba 640x480 TFT */
2473 .name = "Toshiba_640x480_TFT",
2474 .monspecs = {
2475 .modedb = NULL,
2476 .modedb_len = 0,
2477 .hfmin = 30000,
2478 .hfmax = 70000,
2479 .vfmin = 60,
2480 .vfmax = 60,
2481 .dclkmin = 6000000,
2482 .dclkmax = 28000000,
2483 .input = FB_DISP_RGB,
2484 },
2485 .mode_screen = LCD_SCREEN_SX_N(640) |
2486 LCD_SCREEN_SY_N(480),
2487 .mode_horztiming = LCD_HORZTIMING_HPW_N(96) |
2488 LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(51),
2489 .mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
2490 LCD_VERTTIMING_VND1_N(11) | LCD_VERTTIMING_VND2_N(32),
2491 .mode_clkcontrol = 0x00000000, /* /4=24Mhz */
2492 .mode_pwmdiv = 0x8000063f,
2493 .mode_pwmhi = 0x03400000,
2494 .mode_outmask = 0x00fcfcfc,
2495 .mode_fifoctrl = 0x2f2f2f2f,
2496 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2497 .mode_backlight = 0x00000000,
2498 .mode_auxpll = 8, /* 96MHz AUXPLL */
2499 .device_init = board_au1200fb_panel_init,
2500 .device_shutdown = board_au1200fb_panel_shutdown,
2501 640, 480,
2502 640, 480,
2503 },
2504
2505 [7] = { /* Sharp 320x240 TFT */
2506 .name = "Sharp_320x240_TFT",
2507 .monspecs = {
2508 .modedb = NULL,
2509 .modedb_len = 0,
2510 .hfmin = 12500,
2511 .hfmax = 20000,
2512 .vfmin = 38,
2513 .vfmax = 81,
2514 .dclkmin = 4500000,
2515 .dclkmax = 6800000,
2516 .input = FB_DISP_RGB,
2517 },
2518 .mode_screen = LCD_SCREEN_SX_N(320) |
2519 LCD_SCREEN_SY_N(240),
2520 .mode_horztiming = LCD_HORZTIMING_HPW_N(60) |
2521 LCD_HORZTIMING_HND1_N(13) | LCD_HORZTIMING_HND2_N(2),
2522 .mode_verttiming = LCD_VERTTIMING_VPW_N(2) |
2523 LCD_VERTTIMING_VND1_N(2) | LCD_VERTTIMING_VND2_N(5),
2524 .mode_clkcontrol = LCD_CLKCONTROL_PCD_N(7), /*16=6Mhz*/
2525 .mode_pwmdiv = 0x8000063f,
2526 .mode_pwmhi = 0x03400000,
2527 .mode_outmask = 0x00fcfcfc,
2528 .mode_fifoctrl = 0x2f2f2f2f,
2529 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2530 .mode_backlight = 0x00000000,
2531 .mode_auxpll = 8, /* 96MHz AUXPLL */
2532 .device_init = board_au1200fb_panel_init,
2533 .device_shutdown = board_au1200fb_panel_shutdown,
2534 320, 320,
2535 240, 240,
2536 },
2537
2538 [8] = { /* Toppoly TD070WGCB2 7" 856x480 TFT */
2539 .name = "Toppoly_TD070WGCB2",
2540 .monspecs = {
2541 .modedb = NULL,
2542 .modedb_len = 0,
2543 .hfmin = 30000,
2544 .hfmax = 70000,
2545 .vfmin = 60,
2546 .vfmax = 60,
2547 .dclkmin = 6000000,
2548 .dclkmax = 28000000,
2549 .input = FB_DISP_RGB,
2550 },
2551 .mode_screen = LCD_SCREEN_SX_N(856) |
2552 LCD_SCREEN_SY_N(480),
2553 .mode_horztiming = LCD_HORZTIMING_HND2_N(43) |
2554 LCD_HORZTIMING_HND1_N(43) | LCD_HORZTIMING_HPW_N(114),
2555 .mode_verttiming = LCD_VERTTIMING_VND2_N(20) |
2556 LCD_VERTTIMING_VND1_N(21) | LCD_VERTTIMING_VPW_N(4),
2557 .mode_clkcontrol = 0x00020001, /* /4=24Mhz */
2558 .mode_pwmdiv = 0x8000063f,
2559 .mode_pwmhi = 0x03400000,
2560 .mode_outmask = 0x00fcfcfc,
2561 .mode_fifoctrl = 0x2f2f2f2f,
2562 .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
2563 .mode_backlight = 0x00000000,
2564 .mode_auxpll = 8, /* 96MHz AUXPLL */
2565 .device_init = board_au1200fb_panel_init,
2566 .device_shutdown = board_au1200fb_panel_shutdown,
2567 856, 856,
2568 480, 480,
2569 },
2570};
2571
2572#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels))
2573
2574/********************************************************************/
2575
2576#ifdef CONFIG_PM
2577static int set_brightness(unsigned int brightness)
2578{
2579 unsigned int hi1, divider;
2580
2581 /* limit brightness pwm duty to >= 30/1600 */
2582 if (brightness < 30) {
2583 brightness = 30;
2584 }
2585 divider = (lcd->pwmdiv & 0x3FFFF) + 1;
2586 hi1 = (lcd->pwmhi >> 16) + 1;
2587 hi1 = (((brightness & 0xFF) + 1) * divider >> 8);
2588 lcd->pwmhi &= 0xFFFF;
2589 lcd->pwmhi |= (hi1 << 16);
2590
2591 return brightness;
2592}
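/*
 * Rough worked example of the duty-cycle math above (informal): for a panel
 * with mode_pwmdiv = 0x8000063f, the divider is 0x63f + 1 = 1600, so
 * hi1 = ((brightness + 1) * 1600) >> 8, i.e. roughly brightness/256 of the
 * PWM period.  brightness = 128 gives hi1 = 806 (about 50% duty) and
 * brightness = 255 gives hi1 = 1600 (fully on).
 */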
2593#endif /* CONFIG_PM */
2594
2595static int winbpp (unsigned int winctrl1)
2596{
2597 int bits = 0;
2598
2599 /* how many bits are needed for each pixel format */
2600 switch (winctrl1 & LCD_WINCTRL1_FRM) {
2601 case LCD_WINCTRL1_FRM_1BPP:
2602 bits = 1;
2603 break;
2604 case LCD_WINCTRL1_FRM_2BPP:
2605 bits = 2;
2606 break;
2607 case LCD_WINCTRL1_FRM_4BPP:
2608 bits = 4;
2609 break;
2610 case LCD_WINCTRL1_FRM_8BPP:
2611 bits = 8;
2612 break;
2613 case LCD_WINCTRL1_FRM_12BPP:
2614 case LCD_WINCTRL1_FRM_16BPP655:
2615 case LCD_WINCTRL1_FRM_16BPP565:
2616 case LCD_WINCTRL1_FRM_16BPP556:
2617 case LCD_WINCTRL1_FRM_16BPPI1555:
2618 case LCD_WINCTRL1_FRM_16BPPI5551:
2619 case LCD_WINCTRL1_FRM_16BPPA1555:
2620 case LCD_WINCTRL1_FRM_16BPPA5551:
2621 bits = 16;
2622 break;
2623 case LCD_WINCTRL1_FRM_24BPP:
2624 case LCD_WINCTRL1_FRM_32BPP:
2625 bits = 32;
2626 break;
2627 }
2628
2629 return bits;
2630}
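/*
 * Note that winbpp() returns the per-pixel storage width, not the color
 * depth: 12bpp frames are stored in 16 bits, and both 24bpp and 32bpp frames
 * are stored in 32 bits, which is what the line-length and buffer-size math
 * elsewhere in this driver relies on.
 */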
2631
2632static int fbinfo2index (struct fb_info *fb_info)
2633{
2634 int i;
2635
2636 for (i = 0; i < CONFIG_FB_AU1200_DEVS; ++i) {
2637 if (fb_info == (struct fb_info *)(&_au1200fb_devices[i].fb_info))
2638 return i;
2639 }
2640 printk("au1200fb: ERROR: fbinfo2index failed!\n");
2641 return -1;
2642}
2643
2644static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
2645 int xpos, int ypos)
2646{
2647 uint32 winctrl0, winctrl1, winenable, fb_offset = 0;
2648 int xsz, ysz;
2649
2650 /* FIX!!! NOT CHECKING FOR COMPLETE OFFSCREEN YET */
2651
2652 winctrl0 = lcd->window[plane].winctrl0;
2653 winctrl1 = lcd->window[plane].winctrl1;
2654 winctrl0 &= (LCD_WINCTRL0_A | LCD_WINCTRL0_AEN);
2655 winctrl1 &= ~(LCD_WINCTRL1_SZX | LCD_WINCTRL1_SZY);
2656
2657 /* Check for off-screen adjustments */
2658 xsz = win->w[plane].xres;
2659 ysz = win->w[plane].yres;
2660 if ((xpos + win->w[plane].xres) > panel->Xres) {
2661 /* Off-screen to the right */
2662 xsz = panel->Xres - xpos; /* off by 1 ??? */
2663 /*printk("off screen right\n");*/
2664 }
2665
2666 if ((ypos + win->w[plane].yres) > panel->Yres) {
2667 /* Off-screen to the bottom */
2668 ysz = panel->Yres - ypos; /* off by 1 ??? */
2669 /*printk("off screen bottom\n");*/
2670 }
2671
2672 if (xpos < 0) {
2673 /* Off-screen to the left */
2674 xsz = win->w[plane].xres + xpos;
2675 fb_offset += (((0 - xpos) * winbpp(lcd->window[plane].winctrl1))/8);
2676 xpos = 0;
2677 /*printk("off screen left\n");*/
2678 }
2679
2680 if (ypos < 0) {
2681 /* Off-screen to the top */
2682 ysz = win->w[plane].yres + ypos;
2683 /* fixme: fb_offset += ((0-ypos)*fb_pars[plane].line_length); */
2684 ypos = 0;
2685 /*printk("off screen top\n");*/
2686 }
2687
2688 /* record settings */
2689 win->w[plane].xpos = xpos;
2690 win->w[plane].ypos = ypos;
2691
2692 xsz -= 1;
2693 ysz -= 1;
2694 winctrl0 |= (xpos << 21);
2695 winctrl0 |= (ypos << 10);
2696 winctrl1 |= (xsz << 11);
2697 winctrl1 |= (ysz << 0);
2698
2699 /* Disable the window while making changes, then restore WINEN */
2700 winenable = lcd->winenable & (1 << plane);
2701 au_sync();
2702 lcd->winenable &= ~(1 << plane);
2703 lcd->window[plane].winctrl0 = winctrl0;
2704 lcd->window[plane].winctrl1 = winctrl1;
2705 lcd->window[plane].winbuf0 =
2706 lcd->window[plane].winbuf1 = fbdev->fb_phys;
2707 lcd->window[plane].winbufctrl = 0; /* select winbuf0 */
2708 lcd->winenable |= winenable;
2709 au_sync();
2710
2711 return 0;
2712}
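/*
 * Clipping example (informal): a 320-pixel-wide window at xpos = -40 on a
 * 320-pixel panel ends up with xsz = 280 and xpos clamped to 0; the computed
 * fb_offset (40 * bpp / 8 bytes in that case) is currently not applied to
 * winbuf0/winbuf1, which are programmed with fb_phys directly.  The SZX/SZY
 * hardware fields hold size - 1, hence the xsz/ysz decrements above.
 */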
2713
2714static void au1200_setpanel (struct panel_settings *newpanel)
2715{
2716 /*
2717 * Perform global setup/init of LCD controller
2718 */
2719 uint32 winenable;
2720
2721 /* Make sure all windows disabled */
2722 winenable = lcd->winenable;
2723 lcd->winenable = 0;
2724 au_sync();
2725 /*
2726 * Ensure everything is disabled before reconfiguring
2727 */
2728 if (lcd->screen & LCD_SCREEN_SEN) {
2729 /* Wait for vertical sync period */
2730 lcd->intstatus = LCD_INT_SS;
2731 while ((lcd->intstatus & LCD_INT_SS) == 0) {
2732 au_sync();
2733 }
2734
2735 lcd->screen &= ~LCD_SCREEN_SEN; /*disable the controller*/
2736
2737 do {
2738 lcd->intstatus = lcd->intstatus; /*clear interrupts*/
2739 au_sync();
2740 /*wait for controller to shut down*/
2741 } while ((lcd->intstatus & LCD_INT_SD) == 0);
2742
2743 /* Call shutdown of current panel (if up) */
2744 /* this must occur last, because if an external clock is driving
2745 the controller, the clock cannot be turned off before first
2746 shutting down the controller.
2747 */
2748 if (panel->device_shutdown != NULL)
2749 panel->device_shutdown();
2750 }
2751
2752 /* Newpanel == NULL indicates a shutdown operation only */
2753 if (newpanel == NULL)
2754 return;
2755
2756 panel = newpanel;
2757
2758 printk("Panel(%s), %dx%d\n", panel->name, panel->Xres, panel->Yres);
2759
2760 /*
2761 * Setup clocking if internal LCD clock source (assumes sys_auxpll valid)
2762 */
2763 if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT))
2764 {
2765 uint32 sys_clksrc;
2766 au_writel(panel->mode_auxpll, SYS_AUXPLL);
2767 sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f;
2768 sys_clksrc |= panel->mode_toyclksrc;
2769 au_writel(sys_clksrc, SYS_CLKSRC);
2770 }
2771
2772 /*
2773 * Configure panel timings
2774 */
2775 lcd->screen = panel->mode_screen;
2776 lcd->horztiming = panel->mode_horztiming;
2777 lcd->verttiming = panel->mode_verttiming;
2778 lcd->clkcontrol = panel->mode_clkcontrol;
2779 lcd->pwmdiv = panel->mode_pwmdiv;
2780 lcd->pwmhi = panel->mode_pwmhi;
2781 lcd->outmask = panel->mode_outmask;
2782 lcd->fifoctrl = panel->mode_fifoctrl;
2783 au_sync();
2784
2785 /* fixme: Check window settings to make sure still valid
2786 * for new geometry */
2787#if 0
2788 au1200_setlocation(fbdev, 0, win->w[0].xpos, win->w[0].ypos);
2789 au1200_setlocation(fbdev, 1, win->w[1].xpos, win->w[1].ypos);
2790 au1200_setlocation(fbdev, 2, win->w[2].xpos, win->w[2].ypos);
2791 au1200_setlocation(fbdev, 3, win->w[3].xpos, win->w[3].ypos);
2792#endif
2793 lcd->winenable = winenable;
2794
2795 /*
2796 * Re-enable screen now that it is configured
2797 */
2798 lcd->screen |= LCD_SCREEN_SEN;
2799 au_sync();
2800
2801 /* Call init of panel */
2802 if (panel->device_init != NULL) panel->device_init();
2803
2804 /* FIX!!!! not appropriate on panel change!!! Global setup/init */
2805 lcd->intenable = 0;
2806 lcd->intstatus = ~0;
2807 lcd->backcolor = win->mode_backcolor;
2808
2809 /* Setup Color Key - FIX!!! */
2810 lcd->colorkey = win->mode_colorkey;
2811 lcd->colorkeymsk = win->mode_colorkeymsk;
2812
2813 /* Setup HWCursor - FIX!!! Need to support this eventually */
2814 lcd->hwc.cursorctrl = 0;
2815 lcd->hwc.cursorpos = 0;
2816 lcd->hwc.cursorcolor0 = 0;
2817 lcd->hwc.cursorcolor1 = 0;
2818 lcd->hwc.cursorcolor2 = 0;
2819 lcd->hwc.cursorcolor3 = 0;
2820
2821
2822#if 0
2823#define D(X) printk("%25s: %08X\n", #X, X)
2824 D(lcd->screen);
2825 D(lcd->horztiming);
2826 D(lcd->verttiming);
2827 D(lcd->clkcontrol);
2828 D(lcd->pwmdiv);
2829 D(lcd->pwmhi);
2830 D(lcd->outmask);
2831 D(lcd->fifoctrl);
2832 D(lcd->window[0].winctrl0);
2833 D(lcd->window[0].winctrl1);
2834 D(lcd->window[0].winctrl2);
2835 D(lcd->window[0].winbuf0);
2836 D(lcd->window[0].winbuf1);
2837 D(lcd->window[0].winbufctrl);
2838 D(lcd->window[1].winctrl0);
2839 D(lcd->window[1].winctrl1);
2840 D(lcd->window[1].winctrl2);
2841 D(lcd->window[1].winbuf0);
2842 D(lcd->window[1].winbuf1);
2843 D(lcd->window[1].winbufctrl);
2844 D(lcd->window[2].winctrl0);
2845 D(lcd->window[2].winctrl1);
2846 D(lcd->window[2].winctrl2);
2847 D(lcd->window[2].winbuf0);
2848 D(lcd->window[2].winbuf1);
2849 D(lcd->window[2].winbufctrl);
2850 D(lcd->window[3].winctrl0);
2851 D(lcd->window[3].winctrl1);
2852 D(lcd->window[3].winctrl2);
2853 D(lcd->window[3].winbuf0);
2854 D(lcd->window[3].winbuf1);
2855 D(lcd->window[3].winbufctrl);
2856 D(lcd->winenable);
2857 D(lcd->intenable);
2858 D(lcd->intstatus);
2859 D(lcd->backcolor);
2860 D(lcd->winenable);
2861 D(lcd->colorkey);
2862 D(lcd->colorkeymsk);
2863 D(lcd->hwc.cursorctrl);
2864 D(lcd->hwc.cursorpos);
2865 D(lcd->hwc.cursorcolor0);
2866 D(lcd->hwc.cursorcolor1);
2867 D(lcd->hwc.cursorcolor2);
2868 D(lcd->hwc.cursorcolor3);
2869#endif
2870}
2871
2872static void au1200_setmode(struct au1200fb_device *fbdev)
2873{
2874 int plane = fbdev->plane;
2875 /* Window/plane setup */
2876 lcd->window[plane].winctrl1 = ( 0
2877 | LCD_WINCTRL1_PRI_N(plane)
2878 | win->w[plane].mode_winctrl1 /* FRM,CCO,PO,PIPE */
2879 ) ;
2880
2881 au1200_setlocation(fbdev, plane, win->w[plane].xpos, win->w[plane].ypos);
2882
2883 lcd->window[plane].winctrl2 = ( 0
2884 | LCD_WINCTRL2_CKMODE_00
2885 | LCD_WINCTRL2_DBM
2886 | LCD_WINCTRL2_BX_N( fbdev->fb_info.fix.line_length)
2887 | LCD_WINCTRL2_SCX_1
2888 | LCD_WINCTRL2_SCY_1
2889 ) ;
2890 lcd->winenable |= win->w[plane].mode_winenable;
2891 au_sync();
2892}
2893
2894
2895/* Inline helpers */
2896
2897/*#define panel_is_dual(panel) ((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/
2898/*#define panel_is_active(panel)((panel->mode_screen & LCD_SCREEN_PT) == LCD_SCREEN_PT_010)*/
2899
2900#define panel_is_color(panel) ((panel->mode_screen & LCD_SCREEN_PT) <= LCD_SCREEN_PT_CDSTN)
2901
2902/* Bitfields format supported by the controller. */
2903static struct fb_bitfield rgb_bitfields[][4] = {
2904 /* Red, Green, Blue, Transp */
2905 [LCD_WINCTRL1_FRM_16BPP655 >> 25] =
2906 { { 10, 6, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
2907
2908 [LCD_WINCTRL1_FRM_16BPP565 >> 25] =
2909 { { 11, 5, 0 }, { 5, 6, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
2910
2911 [LCD_WINCTRL1_FRM_16BPP556 >> 25] =
2912 { { 11, 5, 0 }, { 6, 5, 0 }, { 0, 6, 0 }, { 0, 0, 0 } },
2913
2914 [LCD_WINCTRL1_FRM_16BPPI1555 >> 25] =
2915 { { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 0, 0, 0 } },
2916
2917 [LCD_WINCTRL1_FRM_16BPPI5551 >> 25] =
2918 { { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 0, 0 } },
2919
2920 [LCD_WINCTRL1_FRM_16BPPA1555 >> 25] =
2921 { { 10, 5, 0 }, { 5, 5, 0 }, { 0, 5, 0 }, { 15, 1, 0 } },
2922
2923 [LCD_WINCTRL1_FRM_16BPPA5551 >> 25] =
2924 { { 11, 5, 0 }, { 6, 5, 0 }, { 1, 5, 0 }, { 0, 1, 0 } },
2925
2926 [LCD_WINCTRL1_FRM_24BPP >> 25] =
2927 { { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 0, 0, 0 } },
2928
2929 [LCD_WINCTRL1_FRM_32BPP >> 25] =
2930 { { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 0, 0 } },
2931};
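/*
 * The table above is indexed with the FRM field of winctrl1 shifted right by
 * 25, exactly as done in the 16bpp/32bpp cases of au1200fb_fb_check_var()
 * below; each entry supplies the {offset, length, msb_right} triples that end
 * up in var->red/green/blue/transp.
 */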
2932
2933/*-------------------------------------------------------------------------*/
2934
2935/* Helpers */
2936
2937static void au1200fb_update_fbinfo(struct fb_info *fbi)
2938{
2939 /* FIX!!!! This also needs to take the window pixel format into account!!! */
2940
2941 /* Update var-dependent FB info */
2942 if (panel_is_color(panel)) {
2943 if (fbi->var.bits_per_pixel <= 8) {
2944 /* palettized */
2945 fbi->fix.visual = FB_VISUAL_PSEUDOCOLOR;
2946 fbi->fix.line_length = fbi->var.xres_virtual /
2947 (8/fbi->var.bits_per_pixel);
2948 } else {
2949 /* non-palettized */
2950 fbi->fix.visual = FB_VISUAL_TRUECOLOR;
2951 fbi->fix.line_length = fbi->var.xres_virtual * (fbi->var.bits_per_pixel / 8);
2952 }
2953 } else {
2954 /* mono FIX!!! mono 8 and 4 bits */
2955 fbi->fix.visual = FB_VISUAL_MONO10;
2956 fbi->fix.line_length = fbi->var.xres_virtual / 8;
2957 }
2958
2959 fbi->screen_size = fbi->fix.line_length * fbi->var.yres_virtual;
2960 print_dbg("line length: %d\n", fbi->fix.line_length);
2961 print_dbg("bits_per_pixel: %d\n", fbi->var.bits_per_pixel);
2962}
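/*
 * Informal example of the line_length math above: an 800-pixel-wide virtual
 * screen needs 1600 bytes per line at 16bpp (800 * 16 / 8) and 400 bytes per
 * line at 4bpp (800 / (8 / 4)); screen_size is then line_length *
 * yres_virtual.
 */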
2963
2964/*-------------------------------------------------------------------------*/
2965
2966/* AU1200 framebuffer driver */
2967
2968/* fb_check_var
2969 * Validate var settings with hardware restrictions and modify it if necessary
2970 */
2971static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
2972 struct fb_info *fbi)
2973{
2974 struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi;
2975 u32 pixclock;
2976 int screen_size, plane;
2977
2978 plane = fbdev->plane;
2979
2980 /* Make sure that the mode respects all LCD controller and
2981 * panel restrictions. */
2982 var->xres = win->w[plane].xres;
2983 var->yres = win->w[plane].yres;
2984
2985 /* No need for virtual resolution support */
2986 var->xres_virtual = var->xres;
2987 var->yres_virtual = var->yres;
2988
2989 var->bits_per_pixel = winbpp(win->w[plane].mode_winctrl1);
2990
2991 screen_size = var->xres_virtual * var->yres_virtual;
2992 if (var->bits_per_pixel > 8) screen_size *= (var->bits_per_pixel / 8);
2993 else screen_size /= (8/var->bits_per_pixel);
2994
2995 if (fbdev->fb_len < screen_size)
2996 return -EINVAL; /* Virtual screen is too big, abort */
2997
2998 /* FIX!!!! what are the implications of ignoring this for windows ??? */
2999 /* The max LCD clock is fixed to 48MHz (value of AUX_CLK). The pixel
3000 * clock can only be obtained by dividing this value by an even integer.
3001 * Fallback to a slower pixel clock if necessary. */
3002 pixclock = max((u32)(PICOS2KHZ(var->pixclock) * 1000), fbi->monspecs.dclkmin);
3003 pixclock = min(pixclock, min(fbi->monspecs.dclkmax, (u32)AU1200_LCD_MAX_CLK/2));
3004
3005 if (AU1200_LCD_MAX_CLK % pixclock) {
3006 int diff = AU1200_LCD_MAX_CLK % pixclock;
3007 pixclock -= diff;
3008 }
3009
3010 var->pixclock = KHZ2PICOS(pixclock/1000);
3011#if 0
3012 if (!panel_is_active(panel)) {
3013 int pcd = AU1200_LCD_MAX_CLK / (pixclock * 2) - 1;
3014
3015 if (!panel_is_color(panel)
3016 && (panel->control_base & LCD_CONTROL_MPI) && (pcd < 3)) {
3017 /* STN 8bit mono panel support is up to 6MHz pixclock */
3018 var->pixclock = KHZ2PICOS(6000);
3019 } else if (!pcd) {
3020 /* Other STN panel support is up to 12MHz */
3021 var->pixclock = KHZ2PICOS(12000);
3022 }
3023 }
3024#endif
3025 /* Set bitfield accordingly */
3026 switch (var->bits_per_pixel) {
3027 case 16:
3028 {
3029 /* 16bpp True color.
3030 * These must be set to MATCH WINCTRL[FORM] */
3031 int idx;
3032 idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
3033 var->red = rgb_bitfields[idx][0];
3034 var->green = rgb_bitfields[idx][1];
3035 var->blue = rgb_bitfields[idx][2];
3036 var->transp = rgb_bitfields[idx][3];
3037 break;
3038 }
3039
3040 case 32:
3041 {
3042 /* 32bpp True color.
3043 * These must be set to MATCH WINCTRL[FORM] */
3044 int idx;
3045 idx = (win->w[0].mode_winctrl1 & LCD_WINCTRL1_FRM) >> 25;
3046 var->red = rgb_bitfields[idx][0];
3047 var->green = rgb_bitfields[idx][1];
3048 var->blue = rgb_bitfields[idx][2];
3049 var->transp = rgb_bitfields[idx][3];
3050 break;
3051 }
3052 default:
3053 print_dbg("Unsupported depth %dbpp", var->bits_per_pixel);
3054 return -EINVAL;
3055 }
3056
3057 return 0;
3058}
3059
3060/* fb_set_par
3061 * Set hardware with var settings. This will enable the controller with a
3062 * specific mode, normally validated with the fb_check_var method
3063 */
3064static int au1200fb_fb_set_par(struct fb_info *fbi)
3065{
3066 struct au1200fb_device *fbdev = (struct au1200fb_device *)fbi;
3067
3068 au1200fb_update_fbinfo(fbi);
3069 au1200_setmode(fbdev);
3070
3071 return 0;
3072}
3073
3074/* fb_setcolreg
3075 * Set color in LCD palette.
3076 */
3077static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
3078 unsigned blue, unsigned transp, struct fb_info *fbi)
3079{
3080 volatile u32 *palette = lcd->palette;
3081 u32 value;
3082
3083 if (regno > (AU1200_LCD_NBR_PALETTE_ENTRIES - 1))
3084 return -EINVAL;
3085
3086 if (fbi->var.grayscale) {
3087 /* Convert color to grayscale */
3088 red = green = blue =
3089 (19595 * red + 38470 * green + 7471 * blue) >> 16;
3090 }
3091
3092 if (fbi->fix.visual == FB_VISUAL_TRUECOLOR) {
3093 /* Place color in the pseudopalette */
3094 if (regno >= 16)
3095 return -EINVAL;
3096
3097 palette = (u32*) fbi->pseudo_palette;
3098
3099 red >>= (16 - fbi->var.red.length);
3100 green >>= (16 - fbi->var.green.length);
3101 blue >>= (16 - fbi->var.blue.length);
3102
3103 value = (red << fbi->var.red.offset) |
3104 (green << fbi->var.green.offset)|
3105 (blue << fbi->var.blue.offset);
3106 value &= 0xFFFF;
3107
3108 } else if (1 /*FIX!!! panel_is_active(fbdev->panel)*/) {
3109 /* COLOR TFT PALLETTIZED (use RGB 565) */
3110 value = (red & 0xF800)|((green >> 5) &
3111 0x07E0)|((blue >> 11) & 0x001F);
3112 value &= 0xFFFF;
3113
3114 } else if (0 /*panel_is_color(fbdev->panel)*/) {
3115 /* COLOR STN MODE */
3116 value = 0x1234;
3117 value &= 0xFFF;
3118 } else {
3119 /* MONOCHROME MODE */
3120 value = (green >> 12) & 0x000F;
3121 value &= 0xF;
3122 }
3123
3124 palette[regno] = value;
3125
3126 return 0;
3127}
3128
3129/* fb_blank
3130 * Blank the screen. Depending on the mode, the screen will be
3131 * activated with the backlight color, or deactivated
3132 */
3133static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
3134{
3135 /* Short-circuit screen blanking */
3136 if (noblanking)
3137 return 0;
3138
3139 switch (blank_mode) {
3140
3141 case FB_BLANK_UNBLANK:
3142 case FB_BLANK_NORMAL:
3143 /* printk("turn on panel\n"); */
3144 au1200_setpanel(panel);
3145 break;
3146 case FB_BLANK_VSYNC_SUSPEND:
3147 case FB_BLANK_HSYNC_SUSPEND:
3148 case FB_BLANK_POWERDOWN:
3149 /* printk("turn off panel\n"); */
3150 au1200_setpanel(NULL);
3151 break;
3152 default:
3153 break;
3154
3155 }
3156
3157 /* FB_BLANK_NORMAL is a soft blank */
3158 return (blank_mode == FB_BLANK_NORMAL) ? -EINVAL : 0;
3159}
3160
3161/* fb_mmap
3162 * Map video memory in user space. We don't use the generic fb_mmap
3163 * method mainly to allow the use of the TLB streaming flag (CCA=6)
3164 */
3165static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
3166
3167{
3168 unsigned int len;
3169 unsigned long start=0, off;
3170 struct au1200fb_device *fbdev = (struct au1200fb_device *) info;
3171
3172#ifdef CONFIG_PM
3173 au1xxx_pm_access(LCD_pm_dev);
3174#endif
3175
3176 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
3177 return -EINVAL;
3178 }
3179
3180 start = fbdev->fb_phys & PAGE_MASK;
3181 len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
3182
3183 off = vma->vm_pgoff << PAGE_SHIFT;
3184
3185 if ((vma->vm_end - vma->vm_start + off) > len) {
3186 return -EINVAL;
3187 }
3188
3189 off += start;
3190 vma->vm_pgoff = off >> PAGE_SHIFT;
3191
3192 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3193 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
3194
3195 vma->vm_flags |= VM_IO;
3196
3197 return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
3198 vma->vm_end - vma->vm_start,
3199 vma->vm_page_prot);
3200
3201 return 0;
3202}
3203
3204static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
3205{
3206
3207 unsigned int hi1, divider;
3208
3209 /* SCREEN_SIZE: user cannot reset size, must switch panel choice */
3210
3211 if (pdata->flags & SCREEN_BACKCOLOR)
3212 lcd->backcolor = pdata->backcolor;
3213
3214 if (pdata->flags & SCREEN_BRIGHTNESS) {
3215
3216 // limit brightness pwm duty to >= 30/1600
3217 if (pdata->brightness < 30) {
3218 pdata->brightness = 30;
3219 }
3220 divider = (lcd->pwmdiv & 0x3FFFF) + 1;
3221 hi1 = (lcd->pwmhi >> 16) + 1;
3222 hi1 = (((pdata->brightness & 0xFF)+1) * divider >> 8);
3223 lcd->pwmhi &= 0xFFFF;
3224 lcd->pwmhi |= (hi1 << 16);
3225 }
3226
3227 if (pdata->flags & SCREEN_COLORKEY)
3228 lcd->colorkey = pdata->colorkey;
3229
3230 if (pdata->flags & SCREEN_MASK)
3231 lcd->colorkeymsk = pdata->mask;
3232 au_sync();
3233}
3234
3235static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
3236{
3237 unsigned int hi1, divider;
3238
3239 pdata->xsize = ((lcd->screen & LCD_SCREEN_SX) >> 19) + 1;
3240 pdata->ysize = ((lcd->screen & LCD_SCREEN_SY) >> 8) + 1;
3241
3242 pdata->backcolor = lcd->backcolor;
3243 pdata->colorkey = lcd->colorkey;
3244 pdata->mask = lcd->colorkeymsk;
3245
3246 // brightness
3247 hi1 = (lcd->pwmhi >> 16) + 1;
3248 divider = (lcd->pwmdiv & 0x3FFFF) + 1;
3249 pdata->brightness = ((hi1 << 8) / divider) - 1;
3250 au_sync();
3251}
3252
3253static void set_window(unsigned int plane,
3254 struct au1200_lcd_window_regs_t *pdata)
3255{
3256 unsigned int val, bpp;
3257
3258 /* Window control register 0 */
3259 if (pdata->flags & WIN_POSITION) {
3260 val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_OX |
3261 LCD_WINCTRL0_OY);
3262 val |= ((pdata->xpos << 21) & LCD_WINCTRL0_OX);
3263 val |= ((pdata->ypos << 10) & LCD_WINCTRL0_OY);
3264 lcd->window[plane].winctrl0 = val;
3265 }
3266 if (pdata->flags & WIN_ALPHA_COLOR) {
3267 val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_A);
3268 val |= ((pdata->alpha_color << 2) & LCD_WINCTRL0_A);
3269 lcd->window[plane].winctrl0 = val;
3270 }
3271 if (pdata->flags & WIN_ALPHA_MODE) {
3272 val = lcd->window[plane].winctrl0 & ~(LCD_WINCTRL0_AEN);
3273 val |= ((pdata->alpha_mode << 1) & LCD_WINCTRL0_AEN);
3274 lcd->window[plane].winctrl0 = val;
3275 }
3276
3277 /* Window control register 1 */
3278 if (pdata->flags & WIN_PRIORITY) {
3279 val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PRI);
3280 val |= ((pdata->priority << 30) & LCD_WINCTRL1_PRI);
3281 lcd->window[plane].winctrl1 = val;
3282 }
3283 if (pdata->flags & WIN_CHANNEL) {
3284 val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PIPE);
3285 val |= ((pdata->channel << 29) & LCD_WINCTRL1_PIPE);
3286 lcd->window[plane].winctrl1 = val;
3287 }
3288 if (pdata->flags & WIN_BUFFER_FORMAT) {
3289 val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_FRM);
3290 val |= ((pdata->buffer_format << 25) & LCD_WINCTRL1_FRM);
3291 lcd->window[plane].winctrl1 = val;
3292 }
3293 if (pdata->flags & WIN_COLOR_ORDER) {
3294 val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_CCO);
3295 val |= ((pdata->color_order << 24) & LCD_WINCTRL1_CCO);
3296 lcd->window[plane].winctrl1 = val;
3297 }
3298 if (pdata->flags & WIN_PIXEL_ORDER) {
3299 val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_PO);
3300 val |= ((pdata->pixel_order << 22) & LCD_WINCTRL1_PO);
3301 lcd->window[plane].winctrl1 = val;
3302 }
3303 if (pdata->flags & WIN_SIZE) {
3304 val = lcd->window[plane].winctrl1 & ~(LCD_WINCTRL1_SZX |
3305 LCD_WINCTRL1_SZY);
3306 val |= (((pdata->xsize << 11) - 1) & LCD_WINCTRL1_SZX);
3307 val |= (((pdata->ysize) - 1) & LCD_WINCTRL1_SZY);
3308 lcd->window[plane].winctrl1 = val;
3309 /* program buffer line width */
3310 bpp = winbpp(val) / 8;
3311 val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_BX);
3312 val |= (((pdata->xsize * bpp) << 8) & LCD_WINCTRL2_BX);
3313 lcd->window[plane].winctrl2 = val;
3314 }
3315
3316 /* Window control register 2 */
3317 if (pdata->flags & WIN_COLORKEY_MODE) {
3318 val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_CKMODE);
3319 val |= ((pdata->colorkey_mode << 24) & LCD_WINCTRL2_CKMODE);
3320 lcd->window[plane].winctrl2 = val;
3321 }
3322 if (pdata->flags & WIN_DOUBLE_BUFFER_MODE) {
3323 val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_DBM);
3324 val |= ((pdata->double_buffer_mode << 23) & LCD_WINCTRL2_DBM);
3325 lcd->window[plane].winctrl2 = val;
3326 }
3327 if (pdata->flags & WIN_RAM_ARRAY_MODE) {
3328 val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_RAM);
3329 val |= ((pdata->ram_array_mode << 21) & LCD_WINCTRL2_RAM);
3330 lcd->window[plane].winctrl2 = val;
3331 }
3332
3333 /* Buffer line width programmed with WIN_SIZE */
3334
3335 if (pdata->flags & WIN_BUFFER_SCALE) {
3336 val = lcd->window[plane].winctrl2 & ~(LCD_WINCTRL2_SCX |
3337 LCD_WINCTRL2_SCY);
3338 val |= ((pdata->xsize << 11) & LCD_WINCTRL2_SCX);
3339 val |= ((pdata->ysize) & LCD_WINCTRL2_SCY);
3340 lcd->window[plane].winctrl2 = val;
3341 }
3342
3343 if (pdata->flags & WIN_ENABLE) {
3344 val = lcd->winenable;
3345 val &= ~(1<<plane);
3346 val |= (pdata->enable & 1) << plane;
3347 lcd->winenable = val;
3348 }
3349 au_sync();
3350}
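/*
 * Each WIN_* flag above is handled as a read-modify-write of only its own
 * bitfield in winctrl0/1/2.  WIN_SIZE additionally reprograms the buffer
 * line width: e.g. xsize = 320 at 16bpp gives 320 * 2 = 640 bytes, which is
 * written into the BX field of winctrl2.
 */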
3351
3352static void get_window(unsigned int plane,
3353 struct au1200_lcd_window_regs_t *pdata)
3354{
3355 /* Window control register 0 */
3356 pdata->xpos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OX) >> 21;
3357 pdata->ypos = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_OY) >> 10;
3358 pdata->alpha_color = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_A) >> 2;
3359 pdata->alpha_mode = (lcd->window[plane].winctrl0 & LCD_WINCTRL0_AEN) >> 1;
3360
3361 /* Window control register 1 */
3362 pdata->priority = (lcd->window[plane].winctrl1& LCD_WINCTRL1_PRI) >> 30;
3363 pdata->channel = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PIPE) >> 29;
3364 pdata->buffer_format = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_FRM) >> 25;
3365 pdata->color_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_CCO) >> 24;
3366 pdata->pixel_order = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_PO) >> 22;
3367 pdata->xsize = ((lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZX) >> 11) + 1;
3368 pdata->ysize = (lcd->window[plane].winctrl1 & LCD_WINCTRL1_SZY) + 1;
3369
3370 /* Window control register 2 */
3371 pdata->colorkey_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_CKMODE) >> 24;
3372 pdata->double_buffer_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_DBM) >> 23;
3373 pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21;
3374
3375 pdata->enable = (lcd->winenable >> plane) & 1;
3376 au_sync();
3377}
3378
3379static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
3380 unsigned long arg)
3381{
3382 int plane;
3383 int val;
3384
3385#ifdef CONFIG_PM
3386 au1xxx_pm_access(LCD_pm_dev);
3387#endif
3388
3389 plane = fbinfo2index(info);
3390 print_dbg("au1200fb: ioctl %d on plane %d\n", cmd, plane);
3391
3392 if (cmd == AU1200_LCD_FB_IOCTL) {
3393 struct au1200_lcd_iodata_t iodata;
3394
3395 if (copy_from_user(&iodata, (void __user *) arg, sizeof(iodata)))
3396 return -EFAULT;
3397
3398 print_dbg("FB IOCTL called\n");
3399
3400 switch (iodata.subcmd) {
3401 case AU1200_LCD_SET_SCREEN:
3402 print_dbg("AU1200_LCD_SET_SCREEN\n");
3403 set_global(cmd, &iodata.global);
3404 break;
3405
3406 case AU1200_LCD_GET_SCREEN:
3407 print_dbg("AU1200_LCD_GET_SCREEN\n");
3408 get_global(cmd, &iodata.global);
3409 break;
3410
3411 case AU1200_LCD_SET_WINDOW:
3412 print_dbg("AU1200_LCD_SET_WINDOW\n");
3413 set_window(plane, &iodata.window);
3414 break;
3415
3416 case AU1200_LCD_GET_WINDOW:
3417 print_dbg("AU1200_LCD_GET_WINDOW\n");
3418 get_window(plane, &iodata.window);
3419 break;
3420
3421 case AU1200_LCD_SET_PANEL:
3422 print_dbg("AU1200_LCD_SET_PANEL\n");
3423 if ((iodata.global.panel_choice >= 0) &&
3424 (iodata.global.panel_choice <
3425 NUM_PANELS))
3426 {
3427 struct panel_settings *newpanel;
3428 panel_index = iodata.global.panel_choice;
3429 newpanel = &known_lcd_panels[panel_index];
3430 au1200_setpanel(newpanel);
3431 }
3432 break;
3433
3434 case AU1200_LCD_GET_PANEL:
3435 print_dbg("AU1200_LCD_GET_PANEL\n");
3436 iodata.global.panel_choice = panel_index;
3437 break;
3438
3439 default:
3440 return -EINVAL;
3441 }
3442
3443 val = copy_to_user((void __user *) arg, &iodata, sizeof(iodata));
3444 if (val) {
3445 print_dbg("error: could not copy %d bytes\n", val);
3446 return -EFAULT;
3447 }
3448 }
3449
3450 return 0;
3451}
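/*
 * Minimal user-space sketch of the private ioctl above (an illustration only;
 * it assumes this plane is exposed as /dev/fb0, that the AU1200_LCD_*
 * constants and struct au1200_lcd_iodata_t are shared with the application,
 * and it omits error handling and the usual <fcntl.h>/<sys/ioctl.h> includes):
 *
 *	struct au1200_lcd_iodata_t io;
 *	int fd = open("/dev/fb0", O_RDWR);
 *
 *	memset(&io, 0, sizeof(io));
 *	io.subcmd = AU1200_LCD_GET_SCREEN;
 *	if (ioctl(fd, AU1200_LCD_FB_IOCTL, &io) == 0)
 *		printf("%ux%u, brightness %u\n", io.global.xsize,
 *		       io.global.ysize, io.global.brightness);
 *
 *	io.subcmd = AU1200_LCD_SET_SCREEN;
 *	io.global.flags = SCREEN_BRIGHTNESS;
 *	io.global.brightness = 128;
 *	ioctl(fd, AU1200_LCD_FB_IOCTL, &io);
 */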
3452
3453
3454static struct fb_ops au1200fb_fb_ops = {
3455 .owner = THIS_MODULE,
3456 .fb_check_var = au1200fb_fb_check_var,
3457 .fb_set_par = au1200fb_fb_set_par,
3458 .fb_setcolreg = au1200fb_fb_setcolreg,
3459 .fb_blank = au1200fb_fb_blank,
3460 .fb_fillrect = cfb_fillrect,
3461 .fb_copyarea = cfb_copyarea,
3462 .fb_imageblit = cfb_imageblit,
3463 .fb_sync = NULL,
3464 .fb_ioctl = au1200fb_ioctl,
3465 .fb_mmap = au1200fb_fb_mmap,
3466};
3467
3468/*-------------------------------------------------------------------------*/
3469
3470static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id, struct pt_regs *regs)
3471{
3472 /* Nothing to do for now, just clear any pending interrupt */
3473 lcd->intstatus = lcd->intstatus;
3474 au_sync();
3475
3476 return IRQ_HANDLED;
3477}
3478
3479/*-------------------------------------------------------------------------*/
3480
3481/* AU1200 LCD device probe helpers */
3482
3483static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
3484{
3485 struct fb_info *fbi = &fbdev->fb_info;
3486 int bpp;
3487
3488 memset(fbi, 0, sizeof(struct fb_info));
3489 fbi->fbops = &au1200fb_fb_ops;
3490
3491 bpp = winbpp(win->w[fbdev->plane].mode_winctrl1);
3492
3493 /* Copy monitor specs from panel data */
3494 /* fixme: we're setting up LCD controller windows, so these don't give a
3495 damn as to what the monitor specs are (the panel itself does, but that
3496 isn't done here...so maybe need a generic catchall monitor setting??? */
3497 memcpy(&fbi->monspecs, &panel->monspecs, sizeof(struct fb_monspecs));
3498
3499 /* We first try the user mode passed in argument. If that failed,
3500 * or if none has been specified, we default to the first mode of the
3501 * panel list. Note that after this call, var data will be set */
3502 if (!fb_find_mode(&fbi->var,
3503 fbi,
3504 NULL, /* drv_info.opt_mode, */
3505 fbi->monspecs.modedb,
3506 fbi->monspecs.modedb_len,
3507 fbi->monspecs.modedb,
3508 bpp)) {
3509
3510 print_err("Cannot find valid mode for panel %s", panel->name);
3511 return -EFAULT;
3512 }
3513
3514 fbi->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
3515 if (!fbi->pseudo_palette) {
3516 return -ENOMEM;
3517 }
3518 memset(fbi->pseudo_palette, 0, sizeof(u32) * 16);
3519
3520 if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
3521 print_err("Fail to allocate colormap (%d entries)",
3522 AU1200_LCD_NBR_PALETTE_ENTRIES);
3523 kfree(fbi->pseudo_palette);
3524 return -EFAULT;
3525 }
3526
3527 strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
3528 fbi->fix.smem_start = fbdev->fb_phys;
3529 fbi->fix.smem_len = fbdev->fb_len;
3530 fbi->fix.type = FB_TYPE_PACKED_PIXELS;
3531 fbi->fix.xpanstep = 0;
3532 fbi->fix.ypanstep = 0;
3533 fbi->fix.mmio_start = 0;
3534 fbi->fix.mmio_len = 0;
3535 fbi->fix.accel = FB_ACCEL_NONE;
3536
3537 fbi->screen_base = (char __iomem *) fbdev->fb_mem;
3538
3539 au1200fb_update_fbinfo(fbi);
3540
3541 return 0;
3542}
3543
3544/*-------------------------------------------------------------------------*/
3545
3546/* AU1200 LCD controller device driver */
3547
3548static int au1200fb_drv_probe(struct device *dev)
3549{
3550 struct au1200fb_device *fbdev;
3551 unsigned long page;
3552 int bpp, plane, ret;
3553
3554 if (!dev)
3555 return -EINVAL;
3556
3557 for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) {
3558 bpp = winbpp(win->w[plane].mode_winctrl1);
3559 if (win->w[plane].xres == 0)
3560 win->w[plane].xres = panel->Xres;
3561 if (win->w[plane].yres == 0)
3562 win->w[plane].yres = panel->Yres;
3563
3564 fbdev = &_au1200fb_devices[plane];
3565 memset(fbdev, 0, sizeof(struct au1200fb_device));
3566 fbdev->plane = plane;
3567
3568 /* Allocate the framebuffer to the maximum screen size */
3569 fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
3570
3571 fbdev->fb_mem = dma_alloc_noncoherent(dev,
3572 PAGE_ALIGN(fbdev->fb_len),
3573 &fbdev->fb_phys, GFP_KERNEL);
3574 if (!fbdev->fb_mem) {
3575 print_err("fail to allocate framebuffer (size: %dK)",
3576 fbdev->fb_len / 1024);
3577 return -ENOMEM;
3578 }
3579
3580 /*
3581 * Set page reserved so that mmap will work. This is necessary
3582 * since we'll be remapping normal memory.
3583 */
3584 for (page = (unsigned long)fbdev->fb_phys;
3585 page < PAGE_ALIGN((unsigned long)fbdev->fb_phys +
3586 fbdev->fb_len);
3587 page += PAGE_SIZE) {
3588 SetPageReserved(pfn_to_page(page >> PAGE_SHIFT)); /* LCD DMA is NOT coherent on Au1200 */
3589 }
3590 print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
3591 print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
3592
3593 /* Init FB data */
3594 if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
3595 goto failed;
3596
3597 /* Register new framebuffer */
3598 if ((ret = register_framebuffer(&fbdev->fb_info)) < 0) {
3599 print_err("cannot register new framebuffer");
3600 goto failed;
3601 }
3602
3603 au1200fb_fb_set_par(&fbdev->fb_info);
3604
3605#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
3606 if (plane == 0)
3607 if (fb_prepare_logo(&fbdev->fb_info, FB_ROTATE_UR)) {
3608 /* Start display and show logo on boot */
3609 fb_set_cmap(&fbdev->fb_info.cmap,
3610 &fbdev->fb_info);
3611
3612 fb_show_logo(&fbdev->fb_info, FB_ROTATE_UR);
3613 }
3614#endif
3615 }
3616
3617 /* Now hook interrupt too */
3618 if ((ret = request_irq(AU1200_LCD_INT, au1200fb_handle_irq,
3619 SA_INTERRUPT | SA_SHIRQ, "lcd", (void *)dev)) < 0) {
3620 print_err("fail to request interrupt line %d (err: %d)",
3621 AU1200_LCD_INT, ret);
3622 goto failed;
3623 }
3624
3625 return 0;
3626
3627failed:
3628 /* NOTE: This only does the current plane/window that failed; others are still active */
3629 if (fbdev->fb_mem)
3630 dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len),
3631 fbdev->fb_mem, fbdev->fb_phys);
3632 if (fbdev->fb_info.cmap.len != 0)
3633 fb_dealloc_cmap(&fbdev->fb_info.cmap);
3634 if (fbdev->fb_info.pseudo_palette)
3635 kfree(fbdev->fb_info.pseudo_palette);
3636 if (plane == 0)
3637 free_irq(AU1200_LCD_INT, (void*)dev);
3638 return ret;
3639}
3640
3641static int au1200fb_drv_remove(struct device *dev)
3642{
3643 struct au1200fb_device *fbdev;
3644 int plane;
3645
3646 if (!dev)
3647 return -ENODEV;
3648
3649 /* Turn off the panel */
3650 au1200_setpanel(NULL);
3651
3652 for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane)
3653 {
3654 fbdev = &_au1200fb_devices[plane];
3655
3656 /* Clean up all probe data */
3657 unregister_framebuffer(&fbdev->fb_info);
3658 if (fbdev->fb_mem)
3659 dma_free_noncoherent(dev, PAGE_ALIGN(fbdev->fb_len),
3660 fbdev->fb_mem, fbdev->fb_phys);
3661 if (fbdev->fb_info.cmap.len != 0)
3662 fb_dealloc_cmap(&fbdev->fb_info.cmap);
3663 if (fbdev->fb_info.pseudo_palette)
3664 kfree(fbdev->fb_info.pseudo_palette);
3665 }
3666
3667 free_irq(AU1200_LCD_INT, (void *)dev);
3668
3669 return 0;
3670}
3671
3672#ifdef CONFIG_PM
3673static int au1200fb_drv_suspend(struct device *dev, u32 state, u32 level)
3674{
3675 /* TODO */
3676 return 0;
3677}
3678
3679static int au1200fb_drv_resume(struct device *dev, u32 level)
3680{
3681 /* TODO */
3682 return 0;
3683}
3684#endif /* CONFIG_PM */
3685
3686static struct device_driver au1200fb_driver = {
3687 .name = "au1200-lcd",
3688 .bus = &platform_bus_type,
3689 .probe = au1200fb_drv_probe,
3690 .remove = au1200fb_drv_remove,
3691#ifdef CONFIG_PM
3692 .suspend = au1200fb_drv_suspend,
3693 .resume = au1200fb_drv_resume,
3694#endif
3695};
3696
3697/*-------------------------------------------------------------------------*/
3698
3699/* Kernel driver */
3700
3701static void au1200fb_setup(void)
3702{
3703	char *options = NULL;
3704	char *this_opt;
3705 int num_panels = ARRAY_SIZE(known_lcd_panels);
3706 int panel_idx = -1;
3707
3708 fb_get_options(DRIVER_NAME, &options);
3709
3710 if (options) {
3711 while ((this_opt = strsep(&options,",")) != NULL) {
3712 /* Panel option - can be panel name,
3713 * "bs" for board-switch, or number/index */
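			/* Illustrative only -- assuming DRIVER_NAME expands to
			 * "au1200fb", booting with video=au1200fb:panel:2,nohwcursor
			 * would select known_lcd_panels[2] and disable the HW cursor. */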
3714 if (!strncmp(this_opt, "panel:", 6)) {
3715 int i;
3716 long int li;
3717 char *endptr;
3718 this_opt += 6;
3719				/* First check for an index, which lets us
3720				 * short-circuit this mess */
3721 li = simple_strtol(this_opt, &endptr, 0);
3722 if (*endptr == '\0') {
3723 panel_idx = (int)li;
3724 }
3725 else if (strcmp(this_opt, "bs") == 0) {
3726 extern int board_au1200fb_panel(void);
3727 panel_idx = board_au1200fb_panel();
3728 }
3729
3730 else
3731 for (i = 0; i < num_panels; i++) {
3732 if (!strcmp(this_opt, known_lcd_panels[i].name)) {
3733 panel_idx = i;
3734 break;
3735 }
3736 }
3737
3738 if ((panel_idx < 0) || (panel_idx >= num_panels)) {
3739 print_warn("Panel %s not supported!", this_opt);
3740 }
3741 else
3742 panel_index = panel_idx;
3743 }
3744
3745 else if (strncmp(this_opt, "nohwcursor", 10) == 0) {
3746 nohwcursor = 1;
3747 }
3748
3749 /* Unsupported option */
3750 else {
3751 print_warn("Unsupported option \"%s\"", this_opt);
3752 }
3753 }
3754 }
3755}
3756
3757#ifdef CONFIG_PM
3758static int au1200fb_pm_callback(au1xxx_power_dev_t *dev,
3759 au1xxx_request_t request, void *data) {
3760 int retval = -1;
3761 unsigned int d = 0;
3762 unsigned int brightness = 0;
3763
3764 if (request == AU1XXX_PM_SLEEP) {
3765 board_au1200fb_panel_shutdown();
3766 }
3767 else if (request == AU1XXX_PM_WAKEUP) {
3768		if (dev->prev_state == SLEEP_STATE)
3769 {
3770 int plane;
3771 au1200_setpanel(panel);
3772 for (plane = 0; plane < CONFIG_FB_AU1200_DEVS; ++plane) {
3773 struct au1200fb_device *fbdev;
3774 fbdev = &_au1200fb_devices[plane];
3775 au1200fb_fb_set_par(&fbdev->fb_info);
3776 }
3777 }
3778
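		/* The PM request presumably carries a 0-100 brightness percentage;
		 * map it onto the panel's 0-255 range in roughly 10% steps. */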
3779		d = *((unsigned int *)data);
3780		if (d <= 10) brightness = 26;
3781		else if (d <= 20) brightness = 51;
3782		else if (d <= 30) brightness = 77;
3783		else if (d <= 40) brightness = 102;
3784		else if (d <= 50) brightness = 128;
3785		else if (d <= 60) brightness = 153;
3786		else if (d <= 70) brightness = 179;
3787		else if (d <= 80) brightness = 204;
3788		else if (d <= 90) brightness = 230;
3789		else brightness = 255;
3790 set_brightness(brightness);
3791 } else if (request == AU1XXX_PM_GETSTATUS) {
3792 return dev->cur_state;
3793 } else if (request == AU1XXX_PM_ACCESS) {
3794 if (dev->cur_state != SLEEP_STATE)
3795 return retval;
3796 else {
3797 au1200_setpanel(panel);
3798 }
3799 } else if (request == AU1XXX_PM_IDLE) {
3800 } else if (request == AU1XXX_PM_CLEANUP) {
3801 }
3802
3803 return retval;
3804}
3805#endif
3806
3807static int __init au1200fb_init(void)
3808{
3809 print_info("" DRIVER_DESC "");
3810
3811 /* Setup driver with options */
3812 au1200fb_setup();
3813
3814 /* Point to the panel selected */
3815 panel = &known_lcd_panels[panel_index];
3816 win = &windows[window_index];
3817
3818	printk(KERN_INFO DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
3819	printk(KERN_INFO DRIVER_NAME ": Win %d %s\n", window_index, win->name);
3820
3821 /* Kickstart the panel, the framebuffers/windows come soon enough */
3822 au1200_setpanel(panel);
3823
3824 #ifdef CONFIG_PM
3825 LCD_pm_dev = new_au1xxx_power_device("LCD", &au1200fb_pm_callback, NULL);
3826	if (LCD_pm_dev == NULL)
3827 printk(KERN_INFO "Unable to create a power management device entry for the au1200fb.\n");
3828 else
3829 printk(KERN_INFO "Power management device entry for the au1200fb loaded.\n");
3830 #endif
3831
3832 return driver_register(&au1200fb_driver);
3833}
3834
3835static void __exit au1200fb_cleanup(void)
3836{
3837 driver_unregister(&au1200fb_driver);
3838}
3839
3840module_init(au1200fb_init);
3841module_exit(au1200fb_cleanup);
3842
3843MODULE_DESCRIPTION(DRIVER_DESC);
3844MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index b72b05250a9d..34e07399756b 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -305,94 +305,6 @@ static ssize_t show_stride(struct class_device *class_device, char *buf)
305	return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->fix.line_length);
306}
307
308/* Format for cmap is "%02x%c%4x%4x%4x\n" */
309/* %02x entry %c transp %4x red %4x blue %4x green \n */
310/* 256 rows at 16 chars equals 4096, the normal page size */
311/* the code will automatically adjust for different page sizes */
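/* A hypothetical example row: "05*0fff00ff0a0a\n" would set entry 0x05 with
 * transparency, red 0x0fff, blue 0x00ff and green 0x0a0a. */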
312static ssize_t store_cmap(struct class_device *class_device, const char *buf,
313 size_t count)
314{
315 struct fb_info *fb_info = class_get_devdata(class_device);
316 int rc, i, start, length, transp = 0;
317
318 if ((count > PAGE_SIZE) || ((count % 16) != 0))
319 return -EINVAL;
320
321 if (!fb_info->fbops->fb_setcolreg && !fb_info->fbops->fb_setcmap)
322 return -EINVAL;
323
324 sscanf(buf, "%02x", &start);
325 length = count / 16;
326
327 for (i = 0; i < length; i++)
328 if (buf[i * 16 + 2] != ' ')
329 transp = 1;
330
331 /* If we can batch, do it */
332 if (fb_info->fbops->fb_setcmap && length > 1) {
333 struct fb_cmap umap;
334
335 memset(&umap, 0, sizeof(umap));
336 if ((rc = fb_alloc_cmap(&umap, length, transp)))
337 return rc;
338
339 umap.start = start;
340 for (i = 0; i < length; i++) {
341 sscanf(&buf[i * 16 + 3], "%4hx", &umap.red[i]);
342 sscanf(&buf[i * 16 + 7], "%4hx", &umap.blue[i]);
343 sscanf(&buf[i * 16 + 11], "%4hx", &umap.green[i]);
344 if (transp)
345 umap.transp[i] = (buf[i * 16 + 2] != ' ');
346 }
347 rc = fb_info->fbops->fb_setcmap(&umap, fb_info);
348 fb_copy_cmap(&umap, &fb_info->cmap);
349 fb_dealloc_cmap(&umap);
350
351 return rc ?: count;
352 }
353 for (i = 0; i < length; i++) {
354 u16 red, blue, green, tsp;
355
356 sscanf(&buf[i * 16 + 3], "%4hx", &red);
357 sscanf(&buf[i * 16 + 7], "%4hx", &blue);
358 sscanf(&buf[i * 16 + 11], "%4hx", &green);
359 tsp = (buf[i * 16 + 2] != ' ');
360 if ((rc = fb_info->fbops->fb_setcolreg(start++,
361 red, green, blue, tsp, fb_info)))
362 return rc;
363
364 fb_info->cmap.red[i] = red;
365 fb_info->cmap.blue[i] = blue;
366 fb_info->cmap.green[i] = green;
367 if (transp)
368 fb_info->cmap.transp[i] = tsp;
369 }
370 return count;
371}
372
373static ssize_t show_cmap(struct class_device *class_device, char *buf)
374{
375 struct fb_info *fb_info = class_get_devdata(class_device);
376 unsigned int i;
377
378 if (!fb_info->cmap.red || !fb_info->cmap.blue ||
379 !fb_info->cmap.green)
380 return -EINVAL;
381
382 if (fb_info->cmap.len > PAGE_SIZE / 16)
383 return -EINVAL;
384
385 /* don't mess with the format, the buffer is PAGE_SIZE */
386 /* 256 entries at 16 chars per line equals 4096 = PAGE_SIZE */
387 for (i = 0; i < fb_info->cmap.len; i++) {
388 snprintf(&buf[ i * 16], PAGE_SIZE - i * 16, "%02x%c%4x%4x%4x\n", i + fb_info->cmap.start,
389 ((fb_info->cmap.transp && fb_info->cmap.transp[i]) ? '*' : ' '),
390 fb_info->cmap.red[i], fb_info->cmap.blue[i],
391 fb_info->cmap.green[i]);
392 }
393 return 16 * fb_info->cmap.len;
394}
395
396static ssize_t store_blank(struct class_device *class_device, const char * buf,
397			size_t count)
398{
@@ -502,10 +414,12 @@ static ssize_t show_fbstate(struct class_device *class_device, char *buf)
502	return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state);
503}
504
417/* When cmap is added back in it should be a binary attribute
418 * not a text one. Consideration should also be given to converting
419 * fbdev to use configfs instead of sysfs */
505static struct class_device_attribute class_device_attrs[] = {
506	__ATTR(bits_per_pixel, S_IRUGO|S_IWUSR, show_bpp, store_bpp),
507	__ATTR(blank, S_IRUGO|S_IWUSR, show_blank, store_blank),
508 __ATTR(color_map, S_IRUGO|S_IWUSR, show_cmap, store_cmap),
509	__ATTR(console, S_IRUGO|S_IWUSR, show_console, store_console),
510	__ATTR(cursor, S_IRUGO|S_IWUSR, show_cursor, store_cursor),
511	__ATTR(mode, S_IRUGO|S_IWUSR, show_mode, store_mode),
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 4ef5cd19609d..b985dfad6c63 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -34,7 +34,7 @@ extra-y += $(call logo-cfiles,_clut224,ppm)
34extra-y += $(call logo-cfiles,_gray256,pgm)
35
36# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
37quiet_cmd_logo = LOGO $@
38      cmd_logo = scripts/pnmtologo \
39		-t $(patsubst $*_%,%,$(notdir $(basename $<))) \
40		-n $(notdir $(basename $<)) -o $@ $<