aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/agp/hp-agp.c5
-rw-r--r--drivers/char/dtlk.c6
-rw-r--r--drivers/char/isicom.c2
-rw-r--r--drivers/char/istallion.c2
-rw-r--r--drivers/char/mem.c115
-rw-r--r--drivers/char/moxa.c7
-rw-r--r--drivers/char/ppdev.c29
-rw-r--r--drivers/char/sysrq.c15
-rw-r--r--drivers/clocksource/sh_cmt.c4
-rw-r--r--drivers/clocksource/sh_mtu2.c1
-rw-r--r--drivers/clocksource/sh_tmu.c4
-rw-r--r--drivers/edac/Kconfig11
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/amd8111_edac.c3
-rw-r--r--drivers/edac/cell_edac.c2
-rw-r--r--drivers/edac/cpc925_edac.c1017
-rw-r--r--drivers/edac/edac_core.h1
-rw-r--r--drivers/edac/edac_device.c14
-rw-r--r--drivers/firmware/pcdp.c4
-rw-r--r--drivers/gpio/Kconfig6
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/max7301.c2
-rw-r--r--drivers/gpio/pca953x.c80
-rw-r--r--drivers/gpio/pl061.c341
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/md/faulty.c21
-rw-r--r--drivers/md/linear.c218
-rw-r--r--drivers/md/linear.h12
-rw-r--r--drivers/md/md.c196
-rw-r--r--drivers/md/md.h14
-rw-r--r--drivers/md/multipath.c23
-rw-r--r--drivers/md/multipath.h6
-rw-r--r--drivers/md/raid0.c403
-rw-r--r--drivers/md/raid0.h10
-rw-r--r--drivers/md/raid1.c46
-rw-r--r--drivers/md/raid1.h6
-rw-r--r--drivers/md/raid10.c62
-rw-r--r--drivers/md/raid10.h6
-rw-r--r--drivers/md/raid5.c218
-rw-r--r--drivers/md/raid5.h8
-rw-r--r--drivers/media/video/ov772x.c6
-rw-r--r--drivers/media/video/tw9910.c6
-rw-r--r--drivers/message/fusion/mptbase.c8
-rw-r--r--drivers/mfd/Kconfig24
-rw-r--r--drivers/mfd/Makefile5
-rw-r--r--drivers/mfd/ab3100-core.c991
-rw-r--r--drivers/mfd/asic3.c312
-rw-r--r--drivers/mfd/da903x.c2
-rw-r--r--drivers/mfd/ezx-pcap.c505
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/pcf50633-gpio.c3
-rw-r--r--drivers/mfd/t7l66xb.c2
-rw-r--r--drivers/mfd/tc6387xb.c2
-rw-r--r--drivers/mfd/tc6393xb.c2
-rw-r--r--drivers/mfd/twl4030-core.c2
-rw-r--r--drivers/mfd/twl4030-irq.c2
-rw-r--r--drivers/mfd/wm8350-regmap.c4
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/misc/sgi-gru/Makefile2
-rw-r--r--drivers/misc/sgi-gru/gru_instructions.h68
-rw-r--r--drivers/misc/sgi-gru/grufault.c118
-rw-r--r--drivers/misc/sgi-gru/grufile.c69
-rw-r--r--drivers/misc/sgi-gru/gruhandles.c17
-rw-r--r--drivers/misc/sgi-gru/gruhandles.h30
-rw-r--r--drivers/misc/sgi-gru/grukdump.c232
-rw-r--r--drivers/misc/sgi-gru/grukservices.c562
-rw-r--r--drivers/misc/sgi-gru/grukservices.h51
-rw-r--r--drivers/misc/sgi-gru/grulib.h69
-rw-r--r--drivers/misc/sgi-gru/grumain.c187
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c17
-rw-r--r--drivers/misc/sgi-gru/grutables.h60
-rw-r--r--drivers/net/bnx2.c4
-rw-r--r--drivers/net/davinci_emac.c1
-rw-r--r--drivers/net/e100.c11
-rw-r--r--drivers/net/e1000/e1000_main.c4
-rw-r--r--drivers/net/forcedeth.c46
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hp100.c35
-rw-r--r--drivers/net/igbvf/netdev.c13
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/mv643xx_eth.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h11
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h1
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/netxen/netxen_nic_init.c11
-rw-r--r--drivers/net/netxen/netxen_nic_main.c32
-rw-r--r--drivers/net/niu.c4
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/r8169.c19
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sky2.c161
-rw-r--r--drivers/net/sky2.h1
-rw-r--r--drivers/net/sonic.c2
-rw-r--r--drivers/net/ucc_geth.c113
-rw-r--r--drivers/net/ucc_geth.h2
-rw-r--r--drivers/net/via-velocity.c4
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/vxge/vxge-config.c12
-rw-r--r--drivers/net/vxge/vxge-main.c13
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/lapbether.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c130
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c141
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c4
-rw-r--r--drivers/net/wireless/libertas/if_spi.c11
-rw-r--r--drivers/pci/setup-res.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c5
-rw-r--r--drivers/pps/Kconfig33
-rw-r--r--drivers/pps/Makefile8
-rw-r--r--drivers/pps/kapi.c329
-rw-r--r--drivers/pps/pps.c312
-rw-r--r--drivers/pps/sysfs.c98
-rw-r--r--drivers/rtc/Kconfig9
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c12
-rw-r--r--drivers/rtc/rtc-dev.c6
-rw-r--r--drivers/rtc/rtc-ds1305.c3
-rw-r--r--drivers/rtc/rtc-ds1307.c46
-rw-r--r--drivers/rtc/rtc-ds1374.c5
-rw-r--r--drivers/rtc/rtc-ds1553.c3
-rw-r--r--drivers/rtc/rtc-ds1742.c31
-rw-r--r--drivers/rtc/rtc-rx8025.c688
-rw-r--r--drivers/rtc/rtc-test.c2
-rw-r--r--drivers/rtc/rtc-tx4939.c4
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/serial/sh-sci.c18
-rw-r--r--drivers/sh/intc.c13
-rw-r--r--drivers/spi/Kconfig16
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel_spi.c14
-rw-r--r--drivers/spi/au1550_spi.c14
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c12
-rw-r--r--drivers/spi/omap2_mcspi.c16
-rw-r--r--drivers/spi/omap_uwire.c14
-rw-r--r--drivers/spi/orion_spi.c12
-rw-r--r--drivers/spi/pxa2xx_spi.c23
-rw-r--r--drivers/spi/spi.c70
-rw-r--r--drivers/spi/spi_bfin5xx.c15
-rw-r--r--drivers/spi/spi_bitbang.c16
-rw-r--r--drivers/spi/spi_imx.c17
-rw-r--r--drivers/spi/spi_mpc8xxx.c (renamed from drivers/spi/spi_mpc83xx.c)529
-rw-r--r--drivers/spi/spi_s3c24xx.c19
-rw-r--r--drivers/spi/spi_txx9.c11
-rw-r--r--drivers/spi/xilinx_spi.c18
-rw-r--r--drivers/video/Kconfig6
-rw-r--r--drivers/w1/masters/w1-gpio.c35
-rw-r--r--drivers/watchdog/alim7101_wdt.c15
-rw-r--r--drivers/watchdog/ar7_wdt.c3
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c3
-rw-r--r--drivers/watchdog/at91sam9_wdt.c3
-rw-r--r--drivers/watchdog/bfin_wdt.c14
-rw-r--r--drivers/watchdog/cpwd.c6
-rw-r--r--drivers/watchdog/davinci_wdt.c6
-rw-r--r--drivers/watchdog/hpwdt.c59
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c88
-rw-r--r--drivers/watchdog/iTCO_wdt.c36
-rw-r--r--drivers/watchdog/indydog.c4
-rw-r--r--drivers/watchdog/it8712f_wdt.c3
-rw-r--r--drivers/watchdog/ks8695_wdt.c4
-rw-r--r--drivers/watchdog/machzwd.c9
-rw-r--r--drivers/watchdog/mpcore_wdt.c7
-rw-r--r--drivers/watchdog/mtx-1_wdt.c6
-rw-r--r--drivers/watchdog/pnx4008_wdt.c6
-rw-r--r--drivers/watchdog/rdc321x_wdt.c4
-rw-r--r--drivers/watchdog/rm9k_wdt.c6
-rw-r--r--drivers/watchdog/s3c2410_wdt.c32
-rw-r--r--drivers/watchdog/sb_wdog.c9
-rw-r--r--drivers/watchdog/sbc60xxwdt.c5
-rw-r--r--drivers/watchdog/sbc8360.c4
-rw-r--r--drivers/watchdog/sbc_epx_c3.c12
-rw-r--r--drivers/watchdog/scx200_wdt.c7
-rw-r--r--drivers/watchdog/shwdt.c4
-rw-r--r--drivers/watchdog/softdog.c7
-rw-r--r--drivers/watchdog/w83697hf_wdt.c3
-rw-r--r--drivers/watchdog/wdrtas.c7
187 files changed, 7851 insertions, 2193 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index a442c8f29fc1..48bbdbe43e69 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig"
52 52
53source "drivers/spi/Kconfig" 53source "drivers/spi/Kconfig"
54 54
55source "drivers/pps/Kconfig"
56
55source "drivers/gpio/Kconfig" 57source "drivers/gpio/Kconfig"
56 58
57source "drivers/w1/Kconfig" 59source "drivers/w1/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 00b44f4ccf03..bc4205d2fc3c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_INPUT) += input/
72obj-$(CONFIG_I2O) += message/ 72obj-$(CONFIG_I2O) += message/
73obj-$(CONFIG_RTC_LIB) += rtc/ 73obj-$(CONFIG_RTC_LIB) += rtc/
74obj-y += i2c/ media/ 74obj-y += i2c/ media/
75obj-$(CONFIG_PPS) += pps/
75obj-$(CONFIG_W1) += w1/ 76obj-$(CONFIG_W1) += w1/
76obj-$(CONFIG_POWER_SUPPLY) += power/ 77obj-$(CONFIG_POWER_SUPPLY) += power/
77obj-$(CONFIG_HWMON) += hwmon/ 78obj-$(CONFIG_HWMON) += hwmon/
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 30bae6de6a0d..0bd01f49cfd8 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -807,7 +807,7 @@ if RTC_LIB=n
807config RTC 807config RTC
808 tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)" 808 tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
809 depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \ 809 depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
810 && !ARM && !SUPERH && !S390 && !AVR32 810 && !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN
811 ---help--- 811 ---help---
812 If you say Y here and create a character special file /dev/rtc with 812 If you say Y here and create a character special file /dev/rtc with
813 major number 10 and minor number 135 using mknod ("man mknod"), you 813 major number 10 and minor number 135 using mknod ("man mknod"), you
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 183ac3fe44fb..9c7e2343c399 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -518,8 +518,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
518 if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) 518 if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
519 return AE_OK; 519 return AE_OK;
520 520
521 printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset (ioc=%lx, lba=%lx)\n", 521 printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
522 (char *) context, sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa); 522 "(ioc=%llx, lba=%llx)\n", (char *)context,
523 sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
523 524
524 hp_zx1_gart_found = 1; 525 hp_zx1_gart_found = 1;
525 return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */ 526 return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 6b900b297cc6..52e06589821d 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -571,7 +571,7 @@ static char dtlk_read_tts(void)
571 portval = inb_p(dtlk_port_tts); 571 portval = inb_p(dtlk_port_tts);
572 } while ((portval & TTS_READABLE) == 0 && 572 } while ((portval & TTS_READABLE) == 0 &&
573 retries++ < DTLK_MAX_RETRIES); 573 retries++ < DTLK_MAX_RETRIES);
574 if (retries == DTLK_MAX_RETRIES) 574 if (retries > DTLK_MAX_RETRIES)
575 printk(KERN_ERR "dtlk_read_tts() timeout\n"); 575 printk(KERN_ERR "dtlk_read_tts() timeout\n");
576 576
577 ch = inb_p(dtlk_port_tts); /* input from TTS port */ 577 ch = inb_p(dtlk_port_tts); /* input from TTS port */
@@ -583,7 +583,7 @@ static char dtlk_read_tts(void)
583 portval = inb_p(dtlk_port_tts); 583 portval = inb_p(dtlk_port_tts);
584 } while ((portval & TTS_READABLE) != 0 && 584 } while ((portval & TTS_READABLE) != 0 &&
585 retries++ < DTLK_MAX_RETRIES); 585 retries++ < DTLK_MAX_RETRIES);
586 if (retries == DTLK_MAX_RETRIES) 586 if (retries > DTLK_MAX_RETRIES)
587 printk(KERN_ERR "dtlk_read_tts() timeout\n"); 587 printk(KERN_ERR "dtlk_read_tts() timeout\n");
588 588
589 TRACE_RET; 589 TRACE_RET;
@@ -640,7 +640,7 @@ static char dtlk_write_tts(char ch)
640 while ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0 && 640 while ((inb_p(dtlk_port_tts) & TTS_WRITABLE) == 0 &&
641 retries++ < DTLK_MAX_RETRIES) /* DT ready? */ 641 retries++ < DTLK_MAX_RETRIES) /* DT ready? */
642 ; 642 ;
643 if (retries == DTLK_MAX_RETRIES) 643 if (retries > DTLK_MAX_RETRIES)
644 printk(KERN_ERR "dtlk_write_tts() timeout\n"); 644 printk(KERN_ERR "dtlk_write_tts() timeout\n");
645 645
646 outb_p(ch, dtlk_port_tts); /* output to TTS port */ 646 outb_p(ch, dtlk_port_tts); /* output to TTS port */
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 4d745a89504f..4159292e35cf 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1593,7 +1593,7 @@ static unsigned int card_count;
1593static int __devinit isicom_probe(struct pci_dev *pdev, 1593static int __devinit isicom_probe(struct pci_dev *pdev,
1594 const struct pci_device_id *ent) 1594 const struct pci_device_id *ent)
1595{ 1595{
1596 unsigned int signature, index; 1596 unsigned int uninitialized_var(signature), index;
1597 int retval = -EPERM; 1597 int retval = -EPERM;
1598 struct isi_board *board = NULL; 1598 struct isi_board *board = NULL;
1599 1599
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index e18800c400b1..0c999f5bb3db 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -3785,7 +3785,7 @@ err:
3785 return retval; 3785 return retval;
3786} 3786}
3787 3787
3788static void stli_pciremove(struct pci_dev *pdev) 3788static void __devexit stli_pciremove(struct pci_dev *pdev)
3789{ 3789{
3790 struct stlibrd *brdp = pci_get_drvdata(pdev); 3790 struct stlibrd *brdp = pci_get_drvdata(pdev);
3791 3791
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f96d0bef855e..afa8813e737a 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -863,59 +863,58 @@ static const struct file_operations kmsg_fops = {
863 .write = kmsg_write, 863 .write = kmsg_write,
864}; 864};
865 865
866static int memory_open(struct inode * inode, struct file * filp) 866static const struct {
867{ 867 unsigned int minor;
868 int ret = 0; 868 char *name;
869 869 umode_t mode;
870 lock_kernel(); 870 const struct file_operations *fops;
871 switch (iminor(inode)) { 871 struct backing_dev_info *dev_info;
872 case 1: 872} devlist[] = { /* list of minor devices */
873 filp->f_op = &mem_fops; 873 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops,
874 filp->f_mapping->backing_dev_info = 874 &directly_mappable_cdev_bdi},
875 &directly_mappable_cdev_bdi;
876 break;
877#ifdef CONFIG_DEVKMEM 875#ifdef CONFIG_DEVKMEM
878 case 2: 876 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops,
879 filp->f_op = &kmem_fops; 877 &directly_mappable_cdev_bdi},
880 filp->f_mapping->backing_dev_info =
881 &directly_mappable_cdev_bdi;
882 break;
883#endif 878#endif
884 case 3: 879 {3, "null", S_IRUGO | S_IWUGO, &null_fops, NULL},
885 filp->f_op = &null_fops;
886 break;
887#ifdef CONFIG_DEVPORT 880#ifdef CONFIG_DEVPORT
888 case 4: 881 {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops, NULL},
889 filp->f_op = &port_fops;
890 break;
891#endif 882#endif
892 case 5: 883 {5, "zero", S_IRUGO | S_IWUGO, &zero_fops, &zero_bdi},
893 filp->f_mapping->backing_dev_info = &zero_bdi; 884 {7, "full", S_IRUGO | S_IWUGO, &full_fops, NULL},
894 filp->f_op = &zero_fops; 885 {8, "random", S_IRUGO | S_IWUSR, &random_fops, NULL},
895 break; 886 {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops, NULL},
896 case 7: 887 {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops, NULL},
897 filp->f_op = &full_fops;
898 break;
899 case 8:
900 filp->f_op = &random_fops;
901 break;
902 case 9:
903 filp->f_op = &urandom_fops;
904 break;
905 case 11:
906 filp->f_op = &kmsg_fops;
907 break;
908#ifdef CONFIG_CRASH_DUMP 888#ifdef CONFIG_CRASH_DUMP
909 case 12: 889 {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops, NULL},
910 filp->f_op = &oldmem_fops;
911 break;
912#endif 890#endif
913 default: 891};
914 unlock_kernel(); 892
915 return -ENXIO; 893static int memory_open(struct inode *inode, struct file *filp)
894{
895 int ret = 0;
896 int i;
897
898 lock_kernel();
899
900 for (i = 0; i < ARRAY_SIZE(devlist); i++) {
901 if (devlist[i].minor == iminor(inode)) {
902 filp->f_op = devlist[i].fops;
903 if (devlist[i].dev_info) {
904 filp->f_mapping->backing_dev_info =
905 devlist[i].dev_info;
906 }
907
908 break;
909 }
916 } 910 }
917 if (filp->f_op && filp->f_op->open) 911
918 ret = filp->f_op->open(inode,filp); 912 if (i == ARRAY_SIZE(devlist))
913 ret = -ENXIO;
914 else
915 if (filp->f_op && filp->f_op->open)
916 ret = filp->f_op->open(inode, filp);
917
919 unlock_kernel(); 918 unlock_kernel();
920 return ret; 919 return ret;
921} 920}
@@ -924,30 +923,6 @@ static const struct file_operations memory_fops = {
924 .open = memory_open, /* just a selector for the real open */ 923 .open = memory_open, /* just a selector for the real open */
925}; 924};
926 925
927static const struct {
928 unsigned int minor;
929 char *name;
930 umode_t mode;
931 const struct file_operations *fops;
932} devlist[] = { /* list of minor devices */
933 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
934#ifdef CONFIG_DEVKMEM
935 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
936#endif
937 {3, "null", S_IRUGO | S_IWUGO, &null_fops},
938#ifdef CONFIG_DEVPORT
939 {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
940#endif
941 {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
942 {7, "full", S_IRUGO | S_IWUGO, &full_fops},
943 {8, "random", S_IRUGO | S_IWUSR, &random_fops},
944 {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
945 {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
946#ifdef CONFIG_CRASH_DUMP
947 {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
948#endif
949};
950
951static struct class *mem_class; 926static struct class *mem_class;
952 927
953static int __init chr_dev_init(void) 928static int __init chr_dev_init(void)
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 65b6ff2442c6..6799588b0099 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1189,6 +1189,11 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
1189 return -ENODEV; 1189 return -ENODEV;
1190 } 1190 }
1191 1191
1192 if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
1193 retval = -ENODEV;
1194 goto out_unlock;
1195 }
1196
1192 ch = &brd->ports[port % MAX_PORTS_PER_BOARD]; 1197 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
1193 ch->port.count++; 1198 ch->port.count++;
1194 tty->driver_data = ch; 1199 tty->driver_data = ch;
@@ -1213,8 +1218,8 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
1213 moxa_close_port(tty); 1218 moxa_close_port(tty);
1214 } else 1219 } else
1215 ch->port.flags |= ASYNC_NORMAL_ACTIVE; 1220 ch->port.flags |= ASYNC_NORMAL_ACTIVE;
1221out_unlock:
1216 mutex_unlock(&moxa_openlock); 1222 mutex_unlock(&moxa_openlock);
1217
1218 return retval; 1223 return retval;
1219} 1224}
1220 1225
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index c84c34fb1231..432655bcb04c 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -114,8 +114,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
114 114
115 if (!(pp->flags & PP_CLAIMED)) { 115 if (!(pp->flags & PP_CLAIMED)) {
116 /* Don't have the port claimed */ 116 /* Don't have the port claimed */
117 printk (KERN_DEBUG CHRDEV "%x: claim the port first\n", 117 pr_debug(CHRDEV "%x: claim the port first\n", minor);
118 minor);
119 return -EINVAL; 118 return -EINVAL;
120 } 119 }
121 120
@@ -198,8 +197,7 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
198 197
199 if (!(pp->flags & PP_CLAIMED)) { 198 if (!(pp->flags & PP_CLAIMED)) {
200 /* Don't have the port claimed */ 199 /* Don't have the port claimed */
201 printk (KERN_DEBUG CHRDEV "%x: claim the port first\n", 200 pr_debug(CHRDEV "%x: claim the port first\n", minor);
202 minor);
203 return -EINVAL; 201 return -EINVAL;
204 } 202 }
205 203
@@ -313,7 +311,7 @@ static int register_device (int minor, struct pp_struct *pp)
313 } 311 }
314 312
315 pp->pdev = pdev; 313 pp->pdev = pdev;
316 printk (KERN_DEBUG "%s: registered pardevice\n", name); 314 pr_debug("%s: registered pardevice\n", name);
317 return 0; 315 return 0;
318} 316}
319 317
@@ -343,8 +341,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
343 int ret; 341 int ret;
344 342
345 if (pp->flags & PP_CLAIMED) { 343 if (pp->flags & PP_CLAIMED) {
346 printk (KERN_DEBUG CHRDEV 344 pr_debug(CHRDEV "%x: you've already got it!\n", minor);
347 "%x: you've already got it!\n", minor);
348 return -EINVAL; 345 return -EINVAL;
349 } 346 }
350 347
@@ -379,7 +376,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
379 } 376 }
380 case PPEXCL: 377 case PPEXCL:
381 if (pp->pdev) { 378 if (pp->pdev) {
382 printk (KERN_DEBUG CHRDEV "%x: too late for PPEXCL; " 379 pr_debug(CHRDEV "%x: too late for PPEXCL; "
383 "already registered\n", minor); 380 "already registered\n", minor);
384 if (pp->flags & PP_EXCL) 381 if (pp->flags & PP_EXCL)
385 /* But it's not really an error. */ 382 /* But it's not really an error. */
@@ -491,8 +488,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
491 /* Everything else requires the port to be claimed, so check 488 /* Everything else requires the port to be claimed, so check
492 * that now. */ 489 * that now. */
493 if ((pp->flags & PP_CLAIMED) == 0) { 490 if ((pp->flags & PP_CLAIMED) == 0) {
494 printk (KERN_DEBUG CHRDEV "%x: claim the port first\n", 491 pr_debug(CHRDEV "%x: claim the port first\n", minor);
495 minor);
496 return -EINVAL; 492 return -EINVAL;
497 } 493 }
498 494
@@ -624,8 +620,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
624 return 0; 620 return 0;
625 621
626 default: 622 default:
627 printk (KERN_DEBUG CHRDEV "%x: What? (cmd=0x%x)\n", minor, 623 pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
628 cmd);
629 return -EINVAL; 624 return -EINVAL;
630 } 625 }
631 626
@@ -698,9 +693,8 @@ static int pp_release (struct inode * inode, struct file * file)
698 } 693 }
699 if (compat_negot) { 694 if (compat_negot) {
700 parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); 695 parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
701 printk (KERN_DEBUG CHRDEV 696 pr_debug(CHRDEV "%x: negotiated back to compatibility "
702 "%x: negotiated back to compatibility mode because " 697 "mode because user-space forgot\n", minor);
703 "user-space forgot\n", minor);
704 } 698 }
705 699
706 if (pp->flags & PP_CLAIMED) { 700 if (pp->flags & PP_CLAIMED) {
@@ -713,7 +707,7 @@ static int pp_release (struct inode * inode, struct file * file)
713 info->phase = pp->saved_state.phase; 707 info->phase = pp->saved_state.phase;
714 parport_release (pp->pdev); 708 parport_release (pp->pdev);
715 if (compat_negot != 1) { 709 if (compat_negot != 1) {
716 printk (KERN_DEBUG CHRDEV "%x: released pardevice " 710 pr_debug(CHRDEV "%x: released pardevice "
717 "because user-space forgot\n", minor); 711 "because user-space forgot\n", minor);
718 } 712 }
719 } 713 }
@@ -723,8 +717,7 @@ static int pp_release (struct inode * inode, struct file * file)
723 parport_unregister_device (pp->pdev); 717 parport_unregister_device (pp->pdev);
724 kfree (name); 718 kfree (name);
725 pp->pdev = NULL; 719 pp->pdev = NULL;
726 printk (KERN_DEBUG CHRDEV "%x: unregistered pardevice\n", 720 pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
727 minor);
728 } 721 }
729 722
730 kfree (pp); 723 kfree (pp);
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 39a05b5fa9cb..0db35857e4d8 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -121,20 +121,17 @@ static struct sysrq_key_op sysrq_unraw_op = {
121#define sysrq_unraw_op (*(struct sysrq_key_op *)0) 121#define sysrq_unraw_op (*(struct sysrq_key_op *)0)
122#endif /* CONFIG_VT */ 122#endif /* CONFIG_VT */
123 123
124#ifdef CONFIG_KEXEC 124static void sysrq_handle_crash(int key, struct tty_struct *tty)
125static void sysrq_handle_crashdump(int key, struct tty_struct *tty)
126{ 125{
127 crash_kexec(get_irq_regs()); 126 char *killer = NULL;
127 *killer = 1;
128} 128}
129static struct sysrq_key_op sysrq_crashdump_op = { 129static struct sysrq_key_op sysrq_crashdump_op = {
130 .handler = sysrq_handle_crashdump, 130 .handler = sysrq_handle_crash,
131 .help_msg = "Crashdump", 131 .help_msg = "Crash",
132 .action_msg = "Trigger a crashdump", 132 .action_msg = "Trigger a crash",
133 .enable_mask = SYSRQ_ENABLE_DUMP, 133 .enable_mask = SYSRQ_ENABLE_DUMP,
134}; 134};
135#else
136#define sysrq_crashdump_op (*(struct sysrq_key_op *)0)
137#endif
138 135
139static void sysrq_handle_reboot(int key, struct tty_struct *tty) 136static void sysrq_handle_reboot(int key, struct tty_struct *tty)
140{ 137{
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index cf56a2af5fe1..2964f5f4a7ef 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -184,6 +184,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
184 /* disable channel */ 184 /* disable channel */
185 sh_cmt_start_stop_ch(p, 0); 185 sh_cmt_start_stop_ch(p, 0);
186 186
187 /* disable interrupts in CMT block */
188 sh_cmt_write(p, CMCSR, 0);
189
187 /* stop clock */ 190 /* stop clock */
188 clk_disable(p->clk); 191 clk_disable(p->clk);
189} 192}
@@ -599,7 +602,6 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
599 p->irqaction.handler = sh_cmt_interrupt; 602 p->irqaction.handler = sh_cmt_interrupt;
600 p->irqaction.dev_id = p; 603 p->irqaction.dev_id = p;
601 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; 604 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
602 p->irqaction.mask = CPU_MASK_NONE;
603 ret = setup_irq(irq, &p->irqaction); 605 ret = setup_irq(irq, &p->irqaction);
604 if (ret) { 606 if (ret) {
605 pr_err("sh_cmt: failed to request irq %d\n", irq); 607 pr_err("sh_cmt: failed to request irq %d\n", irq);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index d1ae75454d10..973e714d6051 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -283,7 +283,6 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
283 p->irqaction.dev_id = p; 283 p->irqaction.dev_id = p;
284 p->irqaction.irq = irq; 284 p->irqaction.irq = irq;
285 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; 285 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
286 p->irqaction.mask = CPU_MASK_NONE;
287 286
288 /* get hold of clock */ 287 /* get hold of clock */
289 p->clk = clk_get(&p->pdev->dev, cfg->clk); 288 p->clk = clk_get(&p->pdev->dev, cfg->clk);
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index d6ea4398bf62..9ffb05f4095d 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -138,6 +138,9 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
138 /* disable channel */ 138 /* disable channel */
139 sh_tmu_start_stop_ch(p, 0); 139 sh_tmu_start_stop_ch(p, 0);
140 140
141 /* disable interrupts in TMU block */
142 sh_tmu_write(p, TCR, 0x0000);
143
141 /* stop clock */ 144 /* stop clock */
142 clk_disable(p->clk); 145 clk_disable(p->clk);
143} 146}
@@ -385,7 +388,6 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
385 p->irqaction.dev_id = p; 388 p->irqaction.dev_id = p;
386 p->irqaction.irq = irq; 389 p->irqaction.irq = irq;
387 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL; 390 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
388 p->irqaction.mask = CPU_MASK_NONE;
389 391
390 /* get hold of clock */ 392 /* get hold of clock */
391 p->clk = clk_get(&p->pdev->dev, cfg->clk); 393 p->clk = clk_get(&p->pdev->dev, cfg->clk);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index ab4f3592a11c..4339b1a879cd 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -5,7 +5,7 @@
5# 5#
6 6
7menuconfig EDAC 7menuconfig EDAC
8 bool "EDAC - error detection and reporting" 8 bool "EDAC (Error Detection And Correction) reporting"
9 depends on HAS_IOMEM 9 depends on HAS_IOMEM
10 depends on X86 || PPC 10 depends on X86 || PPC
11 help 11 help
@@ -232,4 +232,13 @@ config EDAC_AMD8111
232 Note, add more Kconfig dependency if it's adopted 232 Note, add more Kconfig dependency if it's adopted
233 on some machine other than Maple. 233 on some machine other than Maple.
234 234
235config EDAC_CPC925
236 tristate "IBM CPC925 Memory Controller (PPC970FX)"
237 depends on EDAC_MM_EDAC && PPC64
238 help
239 Support for error detection and correction on the
240 IBM CPC925 Bridge and Memory Controller, which is
241 a companion chip to the PowerPC 970 family of
242 processors.
243
235endif # EDAC 244endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 633dc5604ee3..98aa4a7db412 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -18,6 +18,7 @@ edac_core-objs += edac_pci.o edac_pci_sysfs.o
18endif 18endif
19 19
20obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o 20obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
21obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
21obj-$(CONFIG_EDAC_I5000) += i5000_edac.o 22obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
22obj-$(CONFIG_EDAC_I5100) += i5100_edac.o 23obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
23obj-$(CONFIG_EDAC_I5400) += i5400_edac.o 24obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 2cb58ef743e0..35b78d04bbfa 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -37,7 +37,6 @@
37#define AMD8111_EDAC_MOD_STR "amd8111_edac" 37#define AMD8111_EDAC_MOD_STR "amd8111_edac"
38 38
39#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 39#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
40static int edac_dev_idx;
41 40
42enum amd8111_edac_devs { 41enum amd8111_edac_devs {
43 LPC_BRIDGE = 0, 42 LPC_BRIDGE = 0,
@@ -377,7 +376,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
377 * edac_device_ctl_info, but make use of existing 376 * edac_device_ctl_info, but make use of existing
378 * one instead. 377 * one instead.
379 */ 378 */
380 dev_info->edac_idx = edac_dev_idx++; 379 dev_info->edac_idx = edac_device_alloc_index();
381 dev_info->edac_dev = 380 dev_info->edac_dev =
382 edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1, 381 edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
383 NULL, 0, 0, 382 NULL, 0, 0,
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index cb0f639f049d..c973004c002c 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -227,7 +227,7 @@ static struct platform_driver cell_edac_driver = {
227 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
228 }, 228 },
229 .probe = cell_edac_probe, 229 .probe = cell_edac_probe,
230 .remove = cell_edac_remove, 230 .remove = __devexit_p(cell_edac_remove),
231}; 231};
232 232
233static int __init cell_edac_init(void) 233static int __init cell_edac_init(void)
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
new file mode 100644
index 000000000000..8c54196b5aba
--- /dev/null
+++ b/drivers/edac/cpc925_edac.c
@@ -0,0 +1,1017 @@
1/*
2 * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
3 *
4 * Copyright (c) 2008 Wind River Systems, Inc.
5 *
6 * Authors: Cao Qingtao <qingtao.cao@windriver.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 * See the GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/io.h>
25#include <linux/edac.h>
26#include <linux/of.h>
27#include <linux/platform_device.h>
28
29#include "edac_core.h"
30#include "edac_module.h"
31
32#define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__
33#define CPC925_EDAC_MOD_STR "cpc925_edac"
34
35#define cpc925_printk(level, fmt, arg...) \
36 edac_printk(level, "CPC925", fmt, ##arg)
37
38#define cpc925_mc_printk(mci, level, fmt, arg...) \
39 edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
40
41/*
42 * CPC925 registers are of 32 bits with bit0 defined at the
43 * most significant bit and bit31 at that of least significant.
44 */
45#define CPC925_BITS_PER_REG 32
46#define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr))
47
48/*
49 * EDAC device names for the error detections of
50 * CPU Interface and Hypertransport Link.
51 */
52#define CPC925_CPU_ERR_DEV "cpu"
53#define CPC925_HT_LINK_DEV "htlink"
54
55/* Suppose DDR Refresh cycle is 15.6 microsecond */
56#define CPC925_REF_FREQ 0xFA69
57#define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */
58#define CPC925_NR_CSROWS 8
59
60/*
61 * All registers and bits definitions are taken from
62 * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
63 */
64
65/*
66 * CPU and Memory Controller Registers
67 */
68/************************************************************
69 * Processor Interface Exception Mask Register (APIMASK)
70 ************************************************************/
71#define REG_APIMASK_OFFSET 0x30070
72enum apimask_bits {
73 APIMASK_DART = CPC925_BIT(0), /* DART Exception */
74 APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
75 APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
76 APIMASK_STAT = CPC925_BIT(3), /* Status Exception */
77 APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */
78 APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
79 APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
80 /* BIT(7) Reserved */
81 APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
82 APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
83 APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
84 APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
85
86 CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
87 APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
88 APIMASK_ADRS1),
89 ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
90 APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
91};
92
93/************************************************************
94 * Processor Interface Exception Register (APIEXCP)
95 ************************************************************/
96#define REG_APIEXCP_OFFSET 0x30060
97enum apiexcp_bits {
98 APIEXCP_DART = CPC925_BIT(0), /* DART Exception */
99 APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
100 APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
101 APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */
102 APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */
103 APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
104 APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
105 /* BIT(7) Reserved */
106 APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
107 APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
108 APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
109 APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
110
111 CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
112 APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
113 APIEXCP_ADRS1),
114 UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
115 CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
116 ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
117};
118
119/************************************************************
120 * Memory Bus Configuration Register (MBCR)
121************************************************************/
122#define REG_MBCR_OFFSET 0x2190
123#define MBCR_64BITCFG_SHIFT 23
124#define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT)
125#define MBCR_64BITBUS_SHIFT 22
126#define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT)
127
128/************************************************************
129 * Memory Bank Mode Register (MBMR)
130************************************************************/
131#define REG_MBMR_OFFSET 0x21C0
132#define MBMR_MODE_MAX_VALUE 0xF
133#define MBMR_MODE_SHIFT 25
134#define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
135#define MBMR_BBA_SHIFT 24
136#define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT)
137
138/************************************************************
139 * Memory Bank Boundary Address Register (MBBAR)
140 ************************************************************/
141#define REG_MBBAR_OFFSET 0x21D0
142#define MBBAR_BBA_MAX_VALUE 0xFF
143#define MBBAR_BBA_SHIFT 24
144#define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
145
146/************************************************************
147 * Memory Scrub Control Register (MSCR)
148 ************************************************************/
149#define REG_MSCR_OFFSET 0x2400
150#define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/
151#define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */
152#define MSCR_SI_SHIFT 16 /* si - bit8:15*/
153#define MSCR_SI_MAX_VALUE 0xFF
154#define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
155
156/************************************************************
157 * Memory Scrub Range Start Register (MSRSR)
158 ************************************************************/
159#define REG_MSRSR_OFFSET 0x2410
160
161/************************************************************
162 * Memory Scrub Range End Register (MSRER)
163 ************************************************************/
164#define REG_MSRER_OFFSET 0x2420
165
166/************************************************************
167 * Memory Scrub Pattern Register (MSPR)
168 ************************************************************/
169#define REG_MSPR_OFFSET 0x2430
170
171/************************************************************
172 * Memory Check Control Register (MCCR)
173 ************************************************************/
174#define REG_MCCR_OFFSET 0x2440
175enum mccr_bits {
176 MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */
177};
178
179/************************************************************
180 * Memory Check Range End Register (MCRER)
181 ************************************************************/
182#define REG_MCRER_OFFSET 0x2450
183
184/************************************************************
185 * Memory Error Address Register (MEAR)
186 ************************************************************/
187#define REG_MEAR_OFFSET 0x2460
188#define MEAR_BCNT_MAX_VALUE 0x3
189#define MEAR_BCNT_SHIFT 30
190#define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
191#define MEAR_RANK_MAX_VALUE 0x7
192#define MEAR_RANK_SHIFT 27
193#define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
194#define MEAR_COL_MAX_VALUE 0x7FF
195#define MEAR_COL_SHIFT 16
196#define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
197#define MEAR_BANK_MAX_VALUE 0x3
198#define MEAR_BANK_SHIFT 14
199#define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
200#define MEAR_ROW_MASK 0x00003FFF
201
202/************************************************************
203 * Memory Error Syndrome Register (MESR)
204 ************************************************************/
205#define REG_MESR_OFFSET 0x2470
206#define MESR_ECC_SYN_H_MASK 0xFF00
207#define MESR_ECC_SYN_L_MASK 0x00FF
208
209/************************************************************
210 * Memory Mode Control Register (MMCR)
211 ************************************************************/
212#define REG_MMCR_OFFSET 0x2500
213enum mmcr_bits {
214 MMCR_REG_DIMM_MODE = CPC925_BIT(3),
215};
216
217/*
218 * HyperTransport Link Registers
219 */
220/************************************************************
221 * Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
222 ************************************************************/
223#define REG_ERRCTRL_OFFSET 0x70140
224enum errctrl_bits { /* nonfatal interrupts for */
225 ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */
226 ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */
227 ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */
228 ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */
229 ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */
230 ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */
231
232 ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */
233 ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
234
235 HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
236 ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
237 ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
238 HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
239};
240
241/************************************************************
242 * Link Configuration and Link Control Register (LINKCTRL)
243 ************************************************************/
244#define REG_LINKCTRL_OFFSET 0x70110
245enum linkctrl_bits {
246 LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)),
247 LINKCTRL_LINK_FAIL = CPC925_BIT(27),
248
249 HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
250};
251
252/************************************************************
253 * Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
254 ************************************************************/
255#define REG_LINKERR_OFFSET 0x70120
256enum linkerr_bits {
257 LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */
258 LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */
259 LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */
260
261 HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
262 LINKERR_PROT_ERR),
263};
264
265/************************************************************
266 * Bridge Control Register (BRGCTRL)
267 ************************************************************/
268#define REG_BRGCTRL_OFFSET 0x70300
269enum brgctrl_bits {
270 BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
271 BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
272};
273
274/* Private structure for edac memory controller */
275struct cpc925_mc_pdata {
276 void __iomem *vbase;
277 unsigned long total_mem;
278 const char *name;
279 int edac_idx;
280};
281
282/* Private structure for common edac device */
283struct cpc925_dev_info {
284 void __iomem *vbase;
285 struct platform_device *pdev;
286 char *ctl_name;
287 int edac_idx;
288 struct edac_device_ctl_info *edac_dev;
289 void (*init)(struct cpc925_dev_info *dev_info);
290 void (*exit)(struct cpc925_dev_info *dev_info);
291 void (*check)(struct edac_device_ctl_info *edac_dev);
292};
293
294/* Get total memory size from Open Firmware DTB */
295static void get_total_mem(struct cpc925_mc_pdata *pdata)
296{
297 struct device_node *np = NULL;
298 const unsigned int *reg, *reg_end;
299 int len, sw, aw;
300 unsigned long start, size;
301
302 np = of_find_node_by_type(NULL, "memory");
303 if (!np)
304 return;
305
306 aw = of_n_addr_cells(np);
307 sw = of_n_size_cells(np);
308 reg = (const unsigned int *)of_get_property(np, "reg", &len);
309 reg_end = reg + len/4;
310
311 pdata->total_mem = 0;
312 do {
313 start = of_read_number(reg, aw);
314 reg += aw;
315 size = of_read_number(reg, sw);
316 reg += sw;
317 debugf1("%s: start 0x%lx, size 0x%lx\n", __func__,
318 start, size);
319 pdata->total_mem += size;
320 } while (reg < reg_end);
321
322 of_node_put(np);
323 debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem);
324}
325
326static void cpc925_init_csrows(struct mem_ctl_info *mci)
327{
328 struct cpc925_mc_pdata *pdata = mci->pvt_info;
329 struct csrow_info *csrow;
330 int index;
331 u32 mbmr, mbbar, bba;
332 unsigned long row_size, last_nr_pages = 0;
333
334 get_total_mem(pdata);
335
336 for (index = 0; index < mci->nr_csrows; index++) {
337 mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
338 0x20 * index);
339 mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
340 0x20 + index);
341 bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
342 ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
343
344 if (bba == 0)
345 continue; /* not populated */
346
347 csrow = &mci->csrows[index];
348
349 row_size = bba * (1UL << 28); /* 256M */
350 csrow->first_page = last_nr_pages;
351 csrow->nr_pages = row_size >> PAGE_SHIFT;
352 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
353 last_nr_pages = csrow->last_page + 1;
354
355 csrow->mtype = MEM_RDDR;
356 csrow->edac_mode = EDAC_SECDED;
357
358 switch (csrow->nr_channels) {
359 case 1: /* Single channel */
360 csrow->grain = 32; /* four-beat burst of 32 bytes */
361 break;
362 case 2: /* Dual channel */
363 default:
364 csrow->grain = 64; /* four-beat burst of 64 bytes */
365 break;
366 }
367
368 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
369 case 6: /* 0110, no way to differentiate X8 VS X16 */
370 case 5: /* 0101 */
371 case 8: /* 1000 */
372 csrow->dtype = DEV_X16;
373 break;
374 case 7: /* 0111 */
375 case 9: /* 1001 */
376 csrow->dtype = DEV_X8;
377 break;
378 default:
379 csrow->dtype = DEV_UNKNOWN;
380 break;
381 }
382 }
383}
384
385/* Enable memory controller ECC detection */
386static void cpc925_mc_init(struct mem_ctl_info *mci)
387{
388 struct cpc925_mc_pdata *pdata = mci->pvt_info;
389 u32 apimask;
390 u32 mccr;
391
392 /* Enable various ECC error exceptions */
393 apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
394 if ((apimask & ECC_MASK_ENABLE) == 0) {
395 apimask |= ECC_MASK_ENABLE;
396 __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
397 }
398
399 /* Enable ECC detection */
400 mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
401 if ((mccr & MCCR_ECC_EN) == 0) {
402 mccr |= MCCR_ECC_EN;
403 __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
404 }
405}
406
/* Disable memory controller ECC detection */
static void cpc925_mc_exit(struct mem_ctl_info *mci)
{
	/*
	 * Intentionally a no-op.  Clearing the ECC detection bits here
	 * would be harmless by itself, but re-arming them later in
	 * cpc925_mc_init() (e.g. on module reload) triggers a machine
	 * check exception.  Leaving them enabled is safe: they come up
	 * as 1 out of reset and are also set by the boot loader.
	 */
}
423
/*
 * Revert DDR column/row/bank addresses into page frame number and
 * offset in page.
 *
 * Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs),
 * physical address(PA) bits to column address(CA) bits mappings are:
 * CA	0   1   2   3   4   5   6   7   8   9   10
 * PA	59  58  57  56  55  54  53  52  51  50  49
 *
 * physical address(PA) bits to bank address(BA) bits mappings are:
 * BA	0   1
 * PA	43  44
 *
 * physical address(PA) bits to row address(RA) bits mappings are:
 * RA	0   1   2   3   4   5   6   7   8   9   10  11  12
 * PA	36  35  34  48  47  46  45  40  41  42  39  38  37
 *
 * NOTE(review): the bit-scattering loops below assume the 0x0111
 * memory mode shown above — confirm against SA14-2761-02 for other
 * DIMM configurations.
 */
static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
		unsigned long *pfn, unsigned long *offset, int *csrow)
{
	u32 bcnt, rank, col, bank, row;
	u32 c;
	unsigned long pa;
	int i;

	/* Unpack the fields latched by hardware in MEAR */
	bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
	rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
	col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
	bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
	row = mear & MEAR_ROW_MASK;

	/* Rank maps 1:1 onto the csrow index */
	*csrow = rank;

#ifdef CONFIG_EDAC_DEBUG
	if (mci->csrows[rank].first_page == 0) {
		cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
			"non-populated csrow, broken hardware?\n");
		return;
	}
#endif

	/* Revert csrow number: start from the csrow's base address */
	pa = mci->csrows[rank].first_page << PAGE_SHIFT;

	/* Revert column address: CA bit i lands on PA bit (14 - i) */
	col += bcnt;
	for (i = 0; i < 11; i++) {
		c = col & 0x1;
		col >>= 1;
		pa |= c << (14 - i);
	}

	/* Revert bank address */
	pa |= bank << 19;

	/*
	 * Revert row address, in 4 steps: the RA bits are scattered
	 * across four non-contiguous PA bit groups (see table above),
	 * so each group is placed by its own loop, consuming RA bits
	 * LSB-first.
	 */
	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (26 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (21 + i);
	}

	for (i = 0; i < 4; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (18 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (29 - i);
	}

	/* Split the reconstructed physical address into pfn + offset */
	*offset = pa & (PAGE_SIZE - 1);
	*pfn = pa >> PAGE_SHIFT;

	debugf0("%s: ECC physical address 0x%lx\n", __func__, pa);
}
509
510static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
511{
512 if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
513 return 0;
514
515 if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
516 return 1;
517
518 cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
519 syndrome);
520 return 1;
521}
522
523/* Check memory controller registers for ECC errors */
524static void cpc925_mc_check(struct mem_ctl_info *mci)
525{
526 struct cpc925_mc_pdata *pdata = mci->pvt_info;
527 u32 apiexcp;
528 u32 mear;
529 u32 mesr;
530 u16 syndrome;
531 unsigned long pfn = 0, offset = 0;
532 int csrow = 0, channel = 0;
533
534 /* APIEXCP is cleared when read */
535 apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
536 if ((apiexcp & ECC_EXCP_DETECTED) == 0)
537 return;
538
539 mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
540 syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
541
542 mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
543
544 /* Revert column/row addresses into page frame number, etc */
545 cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
546
547 if (apiexcp & CECC_EXCP_DETECTED) {
548 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
549 channel = cpc925_mc_find_channel(mci, syndrome);
550 edac_mc_handle_ce(mci, pfn, offset, syndrome,
551 csrow, channel, mci->ctl_name);
552 }
553
554 if (apiexcp & UECC_EXCP_DETECTED) {
555 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
556 edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
557 }
558
559 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
560 cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n",
561 __raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
562 cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n",
563 apiexcp);
564 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n",
565 __raw_readl(pdata->vbase + REG_MSCR_OFFSET));
566 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n",
567 __raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
568 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n",
569 __raw_readl(pdata->vbase + REG_MSRER_OFFSET));
570 cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n",
571 __raw_readl(pdata->vbase + REG_MSPR_OFFSET));
572 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n",
573 __raw_readl(pdata->vbase + REG_MCCR_OFFSET));
574 cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n",
575 __raw_readl(pdata->vbase + REG_MCRER_OFFSET));
576 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n",
577 mesr);
578 cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n",
579 syndrome);
580}
581
582/******************** CPU err device********************************/
583/* Enable CPU Errors detection */
584static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
585{
586 u32 apimask;
587
588 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
589 if ((apimask & CPU_MASK_ENABLE) == 0) {
590 apimask |= CPU_MASK_ENABLE;
591 __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
592 }
593}
594
/* Disable CPU Errors detection */
static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
{
	/*
	 * Intentionally a no-op.  Clearing the CPU error detection bits
	 * would work here, but re-arming them in cpc925_cpu_init() on a
	 * later module reload triggers a machine check exception.
	 * Leaving them enabled is fine: they default to 1 out of reset.
	 */
}
611
612/* Check for CPU Errors */
613static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
614{
615 struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
616 u32 apiexcp;
617 u32 apimask;
618
619 /* APIEXCP is cleared when read */
620 apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
621 if ((apiexcp & CPU_EXCP_DETECTED) == 0)
622 return;
623
624 apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
625 cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
626 "Processor Interface register dump:\n");
627 cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask);
628 cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp);
629
630 edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
631}
632
633/******************** HT Link err device****************************/
634/* Enable HyperTransport Link Error detection */
635static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
636{
637 u32 ht_errctrl;
638
639 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
640 if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
641 ht_errctrl |= HT_ERRCTRL_ENABLE;
642 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
643 }
644}
645
646/* Disable HyperTransport Link Error detection */
647static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
648{
649 u32 ht_errctrl;
650
651 ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
652 ht_errctrl &= ~HT_ERRCTRL_ENABLE;
653 __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
654}
655
/*
 * Check for HyperTransport Link errors.
 *
 * Reads the four HT status registers, and if any error is latched,
 * dumps them, acknowledges each latched condition with a
 * write-1-to-clear, and reports a correctable event.  The write-back
 * order below (bridge, link ctrl, bus reset, errctrl, linkerr) is
 * deliberate — keep it.
 */
static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
{
	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
	u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
	u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
	u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
	u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);

	/* Bail out fast when nothing is latched in any register */
	if (!((brgctrl & BRGCTRL_DETSERR) ||
	      (linkctrl & HT_LINKCTRL_DETECTED) ||
	      (errctrl & HT_ERRCTRL_DETECTED) ||
	      (linkerr & HT_LINKERR_DETECTED)))
		return;

	cpc925_printk(KERN_INFO, "HT Link Fault\n"
				 "HT register dump:\n");
	cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n",
		      brgctrl);
	cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n",
		      linkctrl);
	cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n",
		      errctrl);
	cpc925_printk(KERN_INFO, "Link Error 0x%08x\n",
		      linkerr);

	/* Clear by write 1 */
	if (brgctrl & BRGCTRL_DETSERR)
		__raw_writel(BRGCTRL_DETSERR,
			     dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (linkctrl & HT_LINKCTRL_DETECTED)
		__raw_writel(HT_LINKCTRL_DETECTED,
			     dev_info->vbase + REG_LINKCTRL_OFFSET);

	/* Initiate Secondary Bus Reset to clear the chain failure */
	if (errctrl & ERRCTRL_CHN_FAL)
		__raw_writel(BRGCTRL_SECBUSRESET,
			     dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (errctrl & ERRCTRL_RSP_ERR)
		__raw_writel(ERRCTRL_RSP_ERR,
			     dev_info->vbase + REG_ERRCTRL_OFFSET);

	if (linkerr & HT_LINKERR_DETECTED)
		__raw_writel(HT_LINKERR_DETECTED,
			     dev_info->vbase + REG_LINKERR_OFFSET);

	/* HT link faults are reported as correctable events */
	edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
}
706
/*
 * Static descriptors for the companion EDAC devices (CPU interface
 * and HT link).  The runtime fields (.vbase, .pdev, .edac_idx,
 * .edac_dev) are filled in by cpc925_add_edac_devices(); iteration
 * stops at the all-zero sentinel entry (its NULL .init ends loops).
 */
static struct cpc925_dev_info cpc925_devs[] = {
	{
		.ctl_name = CPC925_CPU_ERR_DEV,
		.init = cpc925_cpu_init,
		.exit = cpc925_cpu_exit,
		.check = cpc925_cpu_check,
	},
	{
		.ctl_name = CPC925_HT_LINK_DEV,
		.init = cpc925_htlink_init,
		.exit = cpc925_htlink_exit,
		.check = cpc925_htlink_check,
	},
	{0}, /* Terminated by NULL */
};
722
/*
 * Add CPU Err detection and HyperTransport Link Err detection
 * as common "edac_device", they have no corresponding device
 * nodes in the Open Firmware DTB and we have to add platform
 * devices for them. Also, they will share the MMIO with that
 * of memory controller.
 *
 * Failure to register one device does not abort the others: the
 * loop unwinds that entry (err2/err1) and continues.
 */
static void cpc925_add_edac_devices(void __iomem *vbase)
{
	struct cpc925_dev_info *dev_info;

	if (!vbase) {
		cpc925_printk(KERN_ERR, "MMIO not established yet\n");
		return;
	}

	/* The all-zero sentinel in cpc925_devs[] has a NULL .init */
	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
		dev_info->vbase = vbase;
		dev_info->pdev = platform_device_register_simple(
			dev_info->ctl_name, 0, NULL, 0);
		if (IS_ERR(dev_info->pdev)) {
			cpc925_printk(KERN_ERR,
				"Can't register platform device for %s\n",
				dev_info->ctl_name);
			continue;
		}

		/*
		 * Don't have to allocate private structure but
		 * make use of cpc925_devs[] instead.
		 */
		dev_info->edac_idx = edac_device_alloc_index();
		dev_info->edac_dev =
			edac_device_alloc_ctl_info(0, dev_info->ctl_name,
				1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
		if (!dev_info->edac_dev) {
			cpc925_printk(KERN_ERR, "No memory for edac device\n");
			goto err1;
		}

		dev_info->edac_dev->pvt_info = dev_info;
		dev_info->edac_dev->dev = &dev_info->pdev->dev;
		dev_info->edac_dev->ctl_name = dev_info->ctl_name;
		dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
		dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);

		if (edac_op_state == EDAC_OPSTATE_POLL)
			dev_info->edac_dev->edac_check = dev_info->check;

		/* Arm the hardware before exposing the device */
		if (dev_info->init)
			dev_info->init(dev_info);

		/*
		 * NOTE(review): only positive returns are treated as
		 * failure here (matches edac_mc_add_mc() usage below) —
		 * confirm edac_device_add_device() cannot return a
		 * negative errno in this kernel version.
		 */
		if (edac_device_add_device(dev_info->edac_dev) > 0) {
			cpc925_printk(KERN_ERR,
				"Unable to add edac device for %s\n",
				dev_info->ctl_name);
			goto err2;
		}

		debugf0("%s: Successfully added edac device for %s\n",
			__func__, dev_info->ctl_name);

		continue;

/* Per-entry unwind: disarm hardware, free ctl info, drop the pdev */
err2:
		if (dev_info->exit)
			dev_info->exit(dev_info);
		edac_device_free_ctl_info(dev_info->edac_dev);
err1:
		platform_device_unregister(dev_info->pdev);
	}
}
795
/*
 * Delete the common "edac_device" for CPU Err Detection
 * and HyperTransport Link Err Detection.
 *
 * Entries whose registration failed have a NULL .edac_dev and are
 * skipped for the EDAC/platform teardown; their ->exit hook is still
 * invoked (both exit callbacks are safe to run unconditionally).
 */
static void cpc925_del_edac_devices(void)
{
	struct cpc925_dev_info *dev_info;

	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
		if (dev_info->edac_dev) {
			edac_device_del_device(dev_info->edac_dev->dev);
			edac_device_free_ctl_info(dev_info->edac_dev);
			platform_device_unregister(dev_info->pdev);
		}

		if (dev_info->exit)
			dev_info->exit(dev_info);

		debugf0("%s: Successfully deleted edac device for %s\n",
			__func__, dev_info->ctl_name);
	}
}
818
819/* Convert current back-ground scrub rate into byte/sec bandwith */
820static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
821{
822 struct cpc925_mc_pdata *pdata = mci->pvt_info;
823 u32 mscr;
824 u8 si;
825
826 mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
827 si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
828
829 debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr);
830
831 if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
832 (si == 0)) {
833 cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
834 *bw = 0;
835 } else
836 *bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
837
838 return 0;
839}
840
841/* Return 0 for single channel; 1 for dual channel */
842static int cpc925_mc_get_channels(void __iomem *vbase)
843{
844 int dual = 0;
845 u32 mbcr;
846
847 mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
848
849 /*
850 * Dual channel only when 128-bit wide physical bus
851 * and 128-bit configuration.
852 */
853 if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
854 ((mbcr & MBCR_64BITBUS_MASK) == 0))
855 dual = 1;
856
857 debugf0("%s: %s channel\n", __func__,
858 (dual > 0) ? "Dual" : "Single");
859
860 return dual;
861}
862
863static int __devinit cpc925_probe(struct platform_device *pdev)
864{
865 static int edac_mc_idx;
866 struct mem_ctl_info *mci;
867 void __iomem *vbase;
868 struct cpc925_mc_pdata *pdata;
869 struct resource *r;
870 int res = 0, nr_channels;
871
872 debugf0("%s: %s platform device found!\n", __func__, pdev->name);
873
874 if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
875 res = -ENOMEM;
876 goto out;
877 }
878
879 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
880 if (!r) {
881 cpc925_printk(KERN_ERR, "Unable to get resource\n");
882 res = -ENOENT;
883 goto err1;
884 }
885
886 if (!devm_request_mem_region(&pdev->dev,
887 r->start,
888 r->end - r->start + 1,
889 pdev->name)) {
890 cpc925_printk(KERN_ERR, "Unable to request mem region\n");
891 res = -EBUSY;
892 goto err1;
893 }
894
895 vbase = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1);
896 if (!vbase) {
897 cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
898 res = -ENOMEM;
899 goto err2;
900 }
901
902 nr_channels = cpc925_mc_get_channels(vbase);
903 mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
904 CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
905 if (!mci) {
906 cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
907 res = -ENOMEM;
908 goto err2;
909 }
910
911 pdata = mci->pvt_info;
912 pdata->vbase = vbase;
913 pdata->edac_idx = edac_mc_idx++;
914 pdata->name = pdev->name;
915
916 mci->dev = &pdev->dev;
917 platform_set_drvdata(pdev, mci);
918 mci->dev_name = dev_name(&pdev->dev);
919 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
920 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
921 mci->edac_cap = EDAC_FLAG_SECDED;
922 mci->mod_name = CPC925_EDAC_MOD_STR;
923 mci->mod_ver = CPC925_EDAC_REVISION;
924 mci->ctl_name = pdev->name;
925
926 if (edac_op_state == EDAC_OPSTATE_POLL)
927 mci->edac_check = cpc925_mc_check;
928
929 mci->ctl_page_to_phys = NULL;
930 mci->scrub_mode = SCRUB_SW_SRC;
931 mci->set_sdram_scrub_rate = NULL;
932 mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
933
934 cpc925_init_csrows(mci);
935
936 /* Setup memory controller registers */
937 cpc925_mc_init(mci);
938
939 if (edac_mc_add_mc(mci) > 0) {
940 cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
941 goto err3;
942 }
943
944 cpc925_add_edac_devices(vbase);
945
946 /* get this far and it's successful */
947 debugf0("%s: success\n", __func__);
948
949 res = 0;
950 goto out;
951
952err3:
953 cpc925_mc_exit(mci);
954 edac_mc_free(mci);
955err2:
956 devm_release_mem_region(&pdev->dev, r->start, r->end-r->start+1);
957err1:
958 devres_release_group(&pdev->dev, cpc925_probe);
959out:
960 return res;
961}
962
/*
 * Remove: tear down in strict reverse-dependency order and return 0.
 */
static int cpc925_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	/*
	 * Delete common edac devices before edac mc, because
	 * the former share the MMIO of the latter.
	 */
	cpc925_del_edac_devices();
	cpc925_mc_exit(mci);

	/* Unregister from the EDAC core, then release the ctl info */
	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}
979
/* Platform glue: bound by name to the "cpc925_edac" platform device */
static struct platform_driver cpc925_edac_driver = {
	.probe = cpc925_probe,
	.remove = cpc925_remove,
	.driver = {
		   .name = "cpc925_edac",
	}
};
987
988static int __init cpc925_edac_init(void)
989{
990 int ret = 0;
991
992 printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
993 printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
994
995 /* Only support POLL mode so far */
996 edac_op_state = EDAC_OPSTATE_POLL;
997
998 ret = platform_driver_register(&cpc925_edac_driver);
999 if (ret) {
1000 printk(KERN_WARNING "Failed to register %s\n",
1001 CPC925_EDAC_MOD_STR);
1002 }
1003
1004 return ret;
1005}
1006
1007static void __exit cpc925_edac_exit(void)
1008{
1009 platform_driver_unregister(&cpc925_edac_driver);
1010}
1011
1012module_init(cpc925_edac_init);
1013module_exit(cpc925_edac_exit);
1014
1015MODULE_LICENSE("GPL");
1016MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
1017MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 48d3b1409834..3493c6bdb820 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -841,6 +841,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
841 int inst_nr, int block_nr, const char *msg); 841 int inst_nr, int block_nr, const char *msg);
842extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, 842extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
843 int inst_nr, int block_nr, const char *msg); 843 int inst_nr, int block_nr, const char *msg);
844extern int edac_device_alloc_index(void);
844 845
845/* 846/*
846 * edac_pci APIs 847 * edac_pci APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index a7d2c717d033..b02a6a69a8f0 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -490,6 +490,20 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
490 mutex_unlock(&device_ctls_mutex); 490 mutex_unlock(&device_ctls_mutex);
491} 491}
492 492
493/*
494 * edac_device_alloc_index: Allocate a unique device index number
495 *
496 * Return:
497 * allocated index number
498 */
499int edac_device_alloc_index(void)
500{
501 static atomic_t device_indexes = ATOMIC_INIT(0);
502
503 return atomic_inc_return(&device_indexes) - 1;
504}
505EXPORT_SYMBOL_GPL(edac_device_alloc_index);
506
493/** 507/**
494 * edac_device_add_device: Insert the 'edac_dev' structure into the 508 * edac_device_add_device: Insert the 'edac_dev' structure into the
495 * edac_device global list and create sysfs entries associated with 509 * edac_device global list and create sysfs entries associated with
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 58e9f8e457f8..51e0e2d8fac6 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -28,10 +28,10 @@ setup_serial_console(struct pcdp_uart *uart)
28 char parity; 28 char parity;
29 29
30 mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY); 30 mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
31 p += sprintf(p, "uart8250,%s,0x%lx", 31 p += sprintf(p, "uart8250,%s,0x%llx",
32 mmio ? "mmio" : "io", uart->addr.address); 32 mmio ? "mmio" : "io", uart->addr.address);
33 if (uart->baud) { 33 if (uart->baud) {
34 p += sprintf(p, ",%lu", uart->baud); 34 p += sprintf(p, ",%llu", uart->baud);
35 if (uart->bits) { 35 if (uart->bits) {
36 switch (uart->parity) { 36 switch (uart->parity) {
37 case 0x2: parity = 'e'; break; 37 case 0x2: parity = 'e'; break;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 11f373971fa5..3582c39f9725 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -67,6 +67,12 @@ config GPIO_SYSFS
67 67
68comment "Memory mapped GPIO expanders:" 68comment "Memory mapped GPIO expanders:"
69 69
70config GPIO_PL061
71 bool "PrimeCell PL061 GPIO support"
72 depends on ARM_AMBA
73 help
74 Say yes here to support the PrimeCell PL061 GPIO device
75
70config GPIO_XILINX 76config GPIO_XILINX
71 bool "Xilinx GPIO support" 77 bool "Xilinx GPIO support"
72 depends on PPC_OF || MICROBLAZE 78 depends on PPC_OF || MICROBLAZE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 49ac64e515e6..ef90203e8f3c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
9obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o 9obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
10obj-$(CONFIG_GPIO_PCA953X) += pca953x.o 10obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
11obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o 11obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
12obj-$(CONFIG_GPIO_PL061) += pl061.o
12obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o 13obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
13obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o 14obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
14obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o 15obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
diff --git a/drivers/gpio/max7301.c b/drivers/gpio/max7301.c
index 3e7f4e06386e..7b82eaae2621 100644
--- a/drivers/gpio/max7301.c
+++ b/drivers/gpio/max7301.c
@@ -287,7 +287,7 @@ exit_destroy:
287 return ret; 287 return ret;
288} 288}
289 289
290static int max7301_remove(struct spi_device *spi) 290static int __devexit max7301_remove(struct spi_device *spi)
291{ 291{
292 struct max7301 *ts; 292 struct max7301 *ts;
293 int ret; 293 int ret;
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 8dc0164bd51e..cdb6574d25a6 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -15,6 +15,10 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/i2c.h> 16#include <linux/i2c.h>
17#include <linux/i2c/pca953x.h> 17#include <linux/i2c/pca953x.h>
18#ifdef CONFIG_OF_GPIO
19#include <linux/of_platform.h>
20#include <linux/of_gpio.h>
21#endif
18 22
19#include <asm/gpio.h> 23#include <asm/gpio.h>
20 24
@@ -32,6 +36,7 @@ static const struct i2c_device_id pca953x_id[] = {
32 { "pca9539", 16, }, 36 { "pca9539", 16, },
33 { "pca9554", 8, }, 37 { "pca9554", 8, },
34 { "pca9555", 16, }, 38 { "pca9555", 16, },
39 { "pca9556", 8, },
35 { "pca9557", 8, }, 40 { "pca9557", 8, },
36 41
37 { "max7310", 8, }, 42 { "max7310", 8, },
@@ -49,7 +54,9 @@ struct pca953x_chip {
49 uint16_t reg_direction; 54 uint16_t reg_direction;
50 55
51 struct i2c_client *client; 56 struct i2c_client *client;
57 struct pca953x_platform_data *dyn_pdata;
52 struct gpio_chip gpio_chip; 58 struct gpio_chip gpio_chip;
59 char **names;
53}; 60};
54 61
55static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) 62static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
@@ -192,8 +199,57 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
192 gc->label = chip->client->name; 199 gc->label = chip->client->name;
193 gc->dev = &chip->client->dev; 200 gc->dev = &chip->client->dev;
194 gc->owner = THIS_MODULE; 201 gc->owner = THIS_MODULE;
202 gc->names = chip->names;
195} 203}
196 204
205/*
206 * Handlers for alternative sources of platform_data
207 */
208#ifdef CONFIG_OF_GPIO
209/*
210 * Translate OpenFirmware node properties into platform_data
211 */
212static struct pca953x_platform_data *
213pca953x_get_alt_pdata(struct i2c_client *client)
214{
215 struct pca953x_platform_data *pdata;
216 struct device_node *node;
217 const uint16_t *val;
218
219 node = dev_archdata_get_node(&client->dev.archdata);
220 if (node == NULL)
221 return NULL;
222
223 pdata = kzalloc(sizeof(struct pca953x_platform_data), GFP_KERNEL);
224 if (pdata == NULL) {
225 dev_err(&client->dev, "Unable to allocate platform_data\n");
226 return NULL;
227 }
228
229 pdata->gpio_base = -1;
230 val = of_get_property(node, "linux,gpio-base", NULL);
231 if (val) {
232 if (*val < 0)
233 dev_warn(&client->dev,
234 "invalid gpio-base in device tree\n");
235 else
236 pdata->gpio_base = *val;
237 }
238
239 val = of_get_property(node, "polarity", NULL);
240 if (val)
241 pdata->invert = *val;
242
243 return pdata;
244}
245#else
246static struct pca953x_platform_data *
247pca953x_get_alt_pdata(struct i2c_client *client)
248{
249 return NULL;
250}
251#endif
252
197static int __devinit pca953x_probe(struct i2c_client *client, 253static int __devinit pca953x_probe(struct i2c_client *client,
198 const struct i2c_device_id *id) 254 const struct i2c_device_id *id)
199{ 255{
@@ -201,20 +257,32 @@ static int __devinit pca953x_probe(struct i2c_client *client,
201 struct pca953x_chip *chip; 257 struct pca953x_chip *chip;
202 int ret; 258 int ret;
203 259
260 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
261 if (chip == NULL)
262 return -ENOMEM;
263
204 pdata = client->dev.platform_data; 264 pdata = client->dev.platform_data;
205 if (pdata == NULL) { 265 if (pdata == NULL) {
206 dev_dbg(&client->dev, "no platform data\n"); 266 pdata = pca953x_get_alt_pdata(client);
207 return -EINVAL; 267 /*
268 * Unlike normal platform_data, this is allocated
269 * dynamically and must be freed in the driver
270 */
271 chip->dyn_pdata = pdata;
208 } 272 }
209 273
210 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); 274 if (pdata == NULL) {
211 if (chip == NULL) 275 dev_dbg(&client->dev, "no platform data\n");
212 return -ENOMEM; 276 ret = -EINVAL;
277 goto out_failed;
278 }
213 279
214 chip->client = client; 280 chip->client = client;
215 281
216 chip->gpio_start = pdata->gpio_base; 282 chip->gpio_start = pdata->gpio_base;
217 283
284 chip->names = pdata->names;
285
218 /* initialize cached registers from their original values. 286 /* initialize cached registers from their original values.
219 * we can't share this chip with another i2c master. 287 * we can't share this chip with another i2c master.
220 */ 288 */
@@ -249,6 +317,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
249 return 0; 317 return 0;
250 318
251out_failed: 319out_failed:
320 kfree(chip->dyn_pdata);
252 kfree(chip); 321 kfree(chip);
253 return ret; 322 return ret;
254} 323}
@@ -276,6 +345,7 @@ static int pca953x_remove(struct i2c_client *client)
276 return ret; 345 return ret;
277 } 346 }
278 347
348 kfree(chip->dyn_pdata);
279 kfree(chip); 349 kfree(chip);
280 return 0; 350 return 0;
281} 351}
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
new file mode 100644
index 000000000000..aa8e7cb020d9
--- /dev/null
+++ b/drivers/gpio/pl061.c
@@ -0,0 +1,341 @@
1/*
2 * linux/drivers/gpio/pl061.c
3 *
4 * Copyright (C) 2008, 2009 Provigent Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061)
11 *
12 * Data sheet: ARM DDI 0190B, September 2000
13 */
14#include <linux/spinlock.h>
15#include <linux/errno.h>
16#include <linux/module.h>
17#include <linux/list.h>
18#include <linux/io.h>
19#include <linux/ioport.h>
20#include <linux/irq.h>
21#include <linux/bitops.h>
22#include <linux/workqueue.h>
23#include <linux/gpio.h>
24#include <linux/device.h>
25#include <linux/amba/bus.h>
26#include <linux/amba/pl061.h>
27
28#define GPIODIR 0x400
29#define GPIOIS 0x404
30#define GPIOIBE 0x408
31#define GPIOIEV 0x40C
32#define GPIOIE 0x410
33#define GPIORIS 0x414
34#define GPIOMIS 0x418
35#define GPIOIC 0x41C
36
37#define PL061_GPIO_NR 8
38
39struct pl061_gpio {
40 /* We use a list of pl061_gpio structs for each trigger IRQ in the main
41 * interrupts controller of the system. We need this to support systems
42 * in which more that one PL061s are connected to the same IRQ. The ISR
43 * interates through this list to find the source of the interrupt.
44 */
45 struct list_head list;
46
47 /* Each of the two spinlocks protects a different set of hardware
48 * regiters and data structurs. This decouples the code of the IRQ from
49 * the GPIO code. This also makes the case of a GPIO routine call from
50 * the IRQ code simpler.
51 */
52 spinlock_t lock; /* GPIO registers */
53 spinlock_t irq_lock; /* IRQ registers */
54
55 void __iomem *base;
56 unsigned irq_base;
57 struct gpio_chip gc;
58};
59
60static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
61{
62 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
63 unsigned long flags;
64 unsigned char gpiodir;
65
66 if (offset >= gc->ngpio)
67 return -EINVAL;
68
69 spin_lock_irqsave(&chip->lock, flags);
70 gpiodir = readb(chip->base + GPIODIR);
71 gpiodir &= ~(1 << offset);
72 writeb(gpiodir, chip->base + GPIODIR);
73 spin_unlock_irqrestore(&chip->lock, flags);
74
75 return 0;
76}
77
78static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
79 int value)
80{
81 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
82 unsigned long flags;
83 unsigned char gpiodir;
84
85 if (offset >= gc->ngpio)
86 return -EINVAL;
87
88 spin_lock_irqsave(&chip->lock, flags);
89 writeb(!!value << offset, chip->base + (1 << (offset + 2)));
90 gpiodir = readb(chip->base + GPIODIR);
91 gpiodir |= 1 << offset;
92 writeb(gpiodir, chip->base + GPIODIR);
93 spin_unlock_irqrestore(&chip->lock, flags);
94
95 return 0;
96}
97
98static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
99{
100 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
101
102 return !!readb(chip->base + (1 << (offset + 2)));
103}
104
105static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
106{
107 struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
108
109 writeb(!!value << offset, chip->base + (1 << (offset + 2)));
110}
111
112/*
113 * PL061 GPIO IRQ
114 */
115static void pl061_irq_disable(unsigned irq)
116{
117 struct pl061_gpio *chip = get_irq_chip_data(irq);
118 int offset = irq - chip->irq_base;
119 unsigned long flags;
120 u8 gpioie;
121
122 spin_lock_irqsave(&chip->irq_lock, flags);
123 gpioie = readb(chip->base + GPIOIE);
124 gpioie &= ~(1 << offset);
125 writeb(gpioie, chip->base + GPIOIE);
126 spin_unlock_irqrestore(&chip->irq_lock, flags);
127}
128
129static void pl061_irq_enable(unsigned irq)
130{
131 struct pl061_gpio *chip = get_irq_chip_data(irq);
132 int offset = irq - chip->irq_base;
133 unsigned long flags;
134 u8 gpioie;
135
136 spin_lock_irqsave(&chip->irq_lock, flags);
137 gpioie = readb(chip->base + GPIOIE);
138 gpioie |= 1 << offset;
139 writeb(gpioie, chip->base + GPIOIE);
140 spin_unlock_irqrestore(&chip->irq_lock, flags);
141}
142
143static int pl061_irq_type(unsigned irq, unsigned trigger)
144{
145 struct pl061_gpio *chip = get_irq_chip_data(irq);
146 int offset = irq - chip->irq_base;
147 unsigned long flags;
148 u8 gpiois, gpioibe, gpioiev;
149
150 if (offset < 0 || offset > PL061_GPIO_NR)
151 return -EINVAL;
152
153 spin_lock_irqsave(&chip->irq_lock, flags);
154
155 gpioiev = readb(chip->base + GPIOIEV);
156
157 gpiois = readb(chip->base + GPIOIS);
158 if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
159 gpiois |= 1 << offset;
160 if (trigger & IRQ_TYPE_LEVEL_HIGH)
161 gpioiev |= 1 << offset;
162 else
163 gpioiev &= ~(1 << offset);
164 } else
165 gpiois &= ~(1 << offset);
166 writeb(gpiois, chip->base + GPIOIS);
167
168 gpioibe = readb(chip->base + GPIOIBE);
169 if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
170 gpioibe |= 1 << offset;
171 else {
172 gpioibe &= ~(1 << offset);
173 if (trigger & IRQ_TYPE_EDGE_RISING)
174 gpioiev |= 1 << offset;
175 else
176 gpioiev &= ~(1 << offset);
177 }
178 writeb(gpioibe, chip->base + GPIOIBE);
179
180 writeb(gpioiev, chip->base + GPIOIEV);
181
182 spin_unlock_irqrestore(&chip->irq_lock, flags);
183
184 return 0;
185}
186
187static struct irq_chip pl061_irqchip = {
188 .name = "GPIO",
189 .enable = pl061_irq_enable,
190 .disable = pl061_irq_disable,
191 .set_type = pl061_irq_type,
192};
193
194static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
195{
196 struct list_head *chip_list = get_irq_chip_data(irq);
197 struct list_head *ptr;
198 struct pl061_gpio *chip;
199
200 desc->chip->ack(irq);
201 list_for_each(ptr, chip_list) {
202 unsigned long pending;
203 int gpio;
204
205 chip = list_entry(ptr, struct pl061_gpio, list);
206 pending = readb(chip->base + GPIOMIS);
207 writeb(pending, chip->base + GPIOIC);
208
209 if (pending == 0)
210 continue;
211
212 for_each_bit(gpio, &pending, PL061_GPIO_NR)
213 generic_handle_irq(gpio_to_irq(gpio));
214 }
215 desc->chip->unmask(irq);
216}
217
218static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
219{
220 struct pl061_platform_data *pdata;
221 struct pl061_gpio *chip;
222 struct list_head *chip_list;
223 int ret, irq, i;
224 static unsigned long init_irq[BITS_TO_LONGS(NR_IRQS)];
225
226 pdata = dev->dev.platform_data;
227 if (pdata == NULL)
228 return -ENODEV;
229
230 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
231 if (chip == NULL)
232 return -ENOMEM;
233
234 if (!request_mem_region(dev->res.start,
235 resource_size(&dev->res), "pl061")) {
236 ret = -EBUSY;
237 goto free_mem;
238 }
239
240 chip->base = ioremap(dev->res.start, resource_size(&dev->res));
241 if (chip->base == NULL) {
242 ret = -ENOMEM;
243 goto release_region;
244 }
245
246 spin_lock_init(&chip->lock);
247 spin_lock_init(&chip->irq_lock);
248 INIT_LIST_HEAD(&chip->list);
249
250 chip->gc.direction_input = pl061_direction_input;
251 chip->gc.direction_output = pl061_direction_output;
252 chip->gc.get = pl061_get_value;
253 chip->gc.set = pl061_set_value;
254 chip->gc.base = pdata->gpio_base;
255 chip->gc.ngpio = PL061_GPIO_NR;
256 chip->gc.label = dev_name(&dev->dev);
257 chip->gc.dev = &dev->dev;
258 chip->gc.owner = THIS_MODULE;
259
260 chip->irq_base = pdata->irq_base;
261
262 ret = gpiochip_add(&chip->gc);
263 if (ret)
264 goto iounmap;
265
266 /*
267 * irq_chip support
268 */
269
270 if (chip->irq_base == (unsigned) -1)
271 return 0;
272
273 writeb(0, chip->base + GPIOIE); /* disable irqs */
274 irq = dev->irq[0];
275 if (irq < 0) {
276 ret = -ENODEV;
277 goto iounmap;
278 }
279 set_irq_chained_handler(irq, pl061_irq_handler);
280 if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
281 chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
282 if (chip_list == NULL) {
283 ret = -ENOMEM;
284 goto iounmap;
285 }
286 INIT_LIST_HEAD(chip_list);
287 set_irq_chip_data(irq, chip_list);
288 } else
289 chip_list = get_irq_chip_data(irq);
290 list_add(&chip->list, chip_list);
291
292 for (i = 0; i < PL061_GPIO_NR; i++) {
293 if (pdata->directions & (1 << i))
294 pl061_direction_output(&chip->gc, i,
295 pdata->values & (1 << i));
296 else
297 pl061_direction_input(&chip->gc, i);
298
299 set_irq_chip(i+chip->irq_base, &pl061_irqchip);
300 set_irq_handler(i+chip->irq_base, handle_simple_irq);
301 set_irq_flags(i+chip->irq_base, IRQF_VALID);
302 set_irq_chip_data(i+chip->irq_base, chip);
303 }
304
305 return 0;
306
307iounmap:
308 iounmap(chip->base);
309release_region:
310 release_mem_region(dev->res.start, resource_size(&dev->res));
311free_mem:
312 kfree(chip);
313
314 return ret;
315}
316
317static struct amba_id pl061_ids[] __initdata = {
318 {
319 .id = 0x00041061,
320 .mask = 0x000fffff,
321 },
322 { 0, 0 },
323};
324
325static struct amba_driver pl061_gpio_driver = {
326 .drv = {
327 .name = "pl061_gpio",
328 },
329 .id_table = pl061_ids,
330 .probe = pl061_probe,
331};
332
333static int __init pl061_gpio_init(void)
334{
335 return amba_driver_register(&pl061_gpio_driver);
336}
337subsys_initcall(pl061_gpio_init);
338
339MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
340MODULE_DESCRIPTION("PL061 GPIO driver");
341MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 34d54e7281fd..de4aad076ebc 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1300,7 +1300,7 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1300 netif_stop_queue(ndev); 1300 netif_stop_queue(ndev);
1301 } 1301 }
1302 } 1302 }
1303 return 1; 1303 return NETDEV_TX_BUSY;
1304} 1304}
1305 1305
1306/* 1306/*
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 8695809b24b0..87d88dbb667f 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -255,14 +255,14 @@ static void status(struct seq_file *seq, mddev_t *mddev)
255} 255}
256 256
257 257
258static int reconfig(mddev_t *mddev, int layout, int chunk_size) 258static int reshape(mddev_t *mddev)
259{ 259{
260 int mode = layout & ModeMask; 260 int mode = mddev->new_layout & ModeMask;
261 int count = layout >> ModeShift; 261 int count = mddev->new_layout >> ModeShift;
262 conf_t *conf = mddev->private; 262 conf_t *conf = mddev->private;
263 263
264 if (chunk_size != -1) 264 if (mddev->new_layout < 0)
265 return -EINVAL; 265 return 0;
266 266
267 /* new layout */ 267 /* new layout */
268 if (mode == ClearFaults) 268 if (mode == ClearFaults)
@@ -279,6 +279,7 @@ static int reconfig(mddev_t *mddev, int layout, int chunk_size)
279 atomic_set(&conf->counters[mode], count); 279 atomic_set(&conf->counters[mode], count);
280 } else 280 } else
281 return -EINVAL; 281 return -EINVAL;
282 mddev->new_layout = -1;
282 mddev->layout = -1; /* makes sure further changes come through */ 283 mddev->layout = -1; /* makes sure further changes come through */
283 return 0; 284 return 0;
284} 285}
@@ -298,8 +299,12 @@ static int run(mddev_t *mddev)
298{ 299{
299 mdk_rdev_t *rdev; 300 mdk_rdev_t *rdev;
300 int i; 301 int i;
302 conf_t *conf;
303
304 if (md_check_no_bitmap(mddev))
305 return -EINVAL;
301 306
302 conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL); 307 conf = kmalloc(sizeof(*conf), GFP_KERNEL);
303 if (!conf) 308 if (!conf)
304 return -ENOMEM; 309 return -ENOMEM;
305 310
@@ -315,7 +320,7 @@ static int run(mddev_t *mddev)
315 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); 320 md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
316 mddev->private = conf; 321 mddev->private = conf;
317 322
318 reconfig(mddev, mddev->layout, -1); 323 reshape(mddev);
319 324
320 return 0; 325 return 0;
321} 326}
@@ -338,7 +343,7 @@ static struct mdk_personality faulty_personality =
338 .run = run, 343 .run = run,
339 .stop = stop, 344 .stop = stop,
340 .status = status, 345 .status = status,
341 .reconfig = reconfig, 346 .check_reshape = reshape,
342 .size = faulty_size, 347 .size = faulty_size,
343}; 348};
344 349
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 64f1f3e046e0..15c8b7b25a9b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -27,19 +27,27 @@
27 */ 27 */
28static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector) 28static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
29{ 29{
30 dev_info_t *hash; 30 int lo, mid, hi;
31 linear_conf_t *conf = mddev_to_conf(mddev); 31 linear_conf_t *conf;
32 sector_t idx = sector >> conf->sector_shift; 32
33 lo = 0;
34 hi = mddev->raid_disks - 1;
35 conf = rcu_dereference(mddev->private);
33 36
34 /* 37 /*
35 * sector_div(a,b) returns the remainer and sets a to a/b 38 * Binary Search
36 */ 39 */
37 (void)sector_div(idx, conf->spacing);
38 hash = conf->hash_table[idx];
39 40
40 while (sector >= hash->num_sectors + hash->start_sector) 41 while (hi > lo) {
41 hash++; 42
42 return hash; 43 mid = (hi + lo) / 2;
44 if (sector < conf->disks[mid].end_sector)
45 hi = mid;
46 else
47 lo = mid + 1;
48 }
49
50 return conf->disks + lo;
43} 51}
44 52
45/** 53/**
@@ -59,8 +67,10 @@ static int linear_mergeable_bvec(struct request_queue *q,
59 unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9; 67 unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
60 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 68 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
61 69
70 rcu_read_lock();
62 dev0 = which_dev(mddev, sector); 71 dev0 = which_dev(mddev, sector);
63 maxsectors = dev0->num_sectors - (sector - dev0->start_sector); 72 maxsectors = dev0->end_sector - sector;
73 rcu_read_unlock();
64 74
65 if (maxsectors < bio_sectors) 75 if (maxsectors < bio_sectors)
66 maxsectors = 0; 76 maxsectors = 0;
@@ -79,46 +89,57 @@ static int linear_mergeable_bvec(struct request_queue *q,
79static void linear_unplug(struct request_queue *q) 89static void linear_unplug(struct request_queue *q)
80{ 90{
81 mddev_t *mddev = q->queuedata; 91 mddev_t *mddev = q->queuedata;
82 linear_conf_t *conf = mddev_to_conf(mddev); 92 linear_conf_t *conf;
83 int i; 93 int i;
84 94
95 rcu_read_lock();
96 conf = rcu_dereference(mddev->private);
97
85 for (i=0; i < mddev->raid_disks; i++) { 98 for (i=0; i < mddev->raid_disks; i++) {
86 struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); 99 struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
87 blk_unplug(r_queue); 100 blk_unplug(r_queue);
88 } 101 }
102 rcu_read_unlock();
89} 103}
90 104
91static int linear_congested(void *data, int bits) 105static int linear_congested(void *data, int bits)
92{ 106{
93 mddev_t *mddev = data; 107 mddev_t *mddev = data;
94 linear_conf_t *conf = mddev_to_conf(mddev); 108 linear_conf_t *conf;
95 int i, ret = 0; 109 int i, ret = 0;
96 110
111 rcu_read_lock();
112 conf = rcu_dereference(mddev->private);
113
97 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 114 for (i = 0; i < mddev->raid_disks && !ret ; i++) {
98 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); 115 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
99 ret |= bdi_congested(&q->backing_dev_info, bits); 116 ret |= bdi_congested(&q->backing_dev_info, bits);
100 } 117 }
118
119 rcu_read_unlock();
101 return ret; 120 return ret;
102} 121}
103 122
104static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks) 123static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
105{ 124{
106 linear_conf_t *conf = mddev_to_conf(mddev); 125 linear_conf_t *conf;
126 sector_t array_sectors;
107 127
128 rcu_read_lock();
129 conf = rcu_dereference(mddev->private);
108 WARN_ONCE(sectors || raid_disks, 130 WARN_ONCE(sectors || raid_disks,
109 "%s does not support generic reshape\n", __func__); 131 "%s does not support generic reshape\n", __func__);
132 array_sectors = conf->array_sectors;
133 rcu_read_unlock();
110 134
111 return conf->array_sectors; 135 return array_sectors;
112} 136}
113 137
114static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) 138static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
115{ 139{
116 linear_conf_t *conf; 140 linear_conf_t *conf;
117 dev_info_t **table;
118 mdk_rdev_t *rdev; 141 mdk_rdev_t *rdev;
119 int i, nb_zone, cnt; 142 int i, cnt;
120 sector_t min_sectors;
121 sector_t curr_sector;
122 143
123 conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), 144 conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
124 GFP_KERNEL); 145 GFP_KERNEL);
@@ -131,6 +152,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
131 list_for_each_entry(rdev, &mddev->disks, same_set) { 152 list_for_each_entry(rdev, &mddev->disks, same_set) {
132 int j = rdev->raid_disk; 153 int j = rdev->raid_disk;
133 dev_info_t *disk = conf->disks + j; 154 dev_info_t *disk = conf->disks + j;
155 sector_t sectors;
134 156
135 if (j < 0 || j >= raid_disks || disk->rdev) { 157 if (j < 0 || j >= raid_disks || disk->rdev) {
136 printk("linear: disk numbering problem. Aborting!\n"); 158 printk("linear: disk numbering problem. Aborting!\n");
@@ -138,6 +160,11 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
138 } 160 }
139 161
140 disk->rdev = rdev; 162 disk->rdev = rdev;
163 if (mddev->chunk_sectors) {
164 sectors = rdev->sectors;
165 sector_div(sectors, mddev->chunk_sectors);
166 rdev->sectors = sectors * mddev->chunk_sectors;
167 }
141 168
142 blk_queue_stack_limits(mddev->queue, 169 blk_queue_stack_limits(mddev->queue,
143 rdev->bdev->bd_disk->queue); 170 rdev->bdev->bd_disk->queue);
@@ -149,102 +176,24 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
149 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 176 queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
150 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 177 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
151 178
152 disk->num_sectors = rdev->sectors;
153 conf->array_sectors += rdev->sectors; 179 conf->array_sectors += rdev->sectors;
154
155 cnt++; 180 cnt++;
181
156 } 182 }
157 if (cnt != raid_disks) { 183 if (cnt != raid_disks) {
158 printk("linear: not enough drives present. Aborting!\n"); 184 printk("linear: not enough drives present. Aborting!\n");
159 goto out; 185 goto out;
160 } 186 }
161 187
162 min_sectors = conf->array_sectors;
163 sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
164 if (min_sectors == 0)
165 min_sectors = 1;
166
167 /* min_sectors is the minimum spacing that will fit the hash
168 * table in one PAGE. This may be much smaller than needed.
169 * We find the smallest non-terminal set of consecutive devices
170 * that is larger than min_sectors and use the size of that as
171 * the actual spacing
172 */
173 conf->spacing = conf->array_sectors;
174 for (i=0; i < cnt-1 ; i++) {
175 sector_t tmp = 0;
176 int j;
177 for (j = i; j < cnt - 1 && tmp < min_sectors; j++)
178 tmp += conf->disks[j].num_sectors;
179 if (tmp >= min_sectors && tmp < conf->spacing)
180 conf->spacing = tmp;
181 }
182
183 /* spacing may be too large for sector_div to work with,
184 * so we might need to pre-shift
185 */
186 conf->sector_shift = 0;
187 if (sizeof(sector_t) > sizeof(u32)) {
188 sector_t space = conf->spacing;
189 while (space > (sector_t)(~(u32)0)) {
190 space >>= 1;
191 conf->sector_shift++;
192 }
193 }
194 /* 188 /*
195 * This code was restructured to work around a gcc-2.95.3 internal 189 * Here we calculate the device offsets.
196 * compiler error. Alter it with care.
197 */ 190 */
198 { 191 conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
199 sector_t sz;
200 unsigned round;
201 unsigned long base;
202
203 sz = conf->array_sectors >> conf->sector_shift;
204 sz += 1; /* force round-up */
205 base = conf->spacing >> conf->sector_shift;
206 round = sector_div(sz, base);
207 nb_zone = sz + (round ? 1 : 0);
208 }
209 BUG_ON(nb_zone > PAGE_SIZE / sizeof(struct dev_info *));
210
211 conf->hash_table = kmalloc (sizeof (struct dev_info *) * nb_zone,
212 GFP_KERNEL);
213 if (!conf->hash_table)
214 goto out;
215 192
216 /*
217 * Here we generate the linear hash table
218 * First calculate the device offsets.
219 */
220 conf->disks[0].start_sector = 0;
221 for (i = 1; i < raid_disks; i++) 193 for (i = 1; i < raid_disks; i++)
222 conf->disks[i].start_sector = 194 conf->disks[i].end_sector =
223 conf->disks[i-1].start_sector + 195 conf->disks[i-1].end_sector +
224 conf->disks[i-1].num_sectors; 196 conf->disks[i].rdev->sectors;
225
226 table = conf->hash_table;
227 i = 0;
228 for (curr_sector = 0;
229 curr_sector < conf->array_sectors;
230 curr_sector += conf->spacing) {
231
232 while (i < raid_disks-1 &&
233 curr_sector >= conf->disks[i+1].start_sector)
234 i++;
235
236 *table ++ = conf->disks + i;
237 }
238
239 if (conf->sector_shift) {
240 conf->spacing >>= conf->sector_shift;
241 /* round spacing up so that when we divide by it,
242 * we err on the side of "too-low", which is safest.
243 */
244 conf->spacing++;
245 }
246
247 BUG_ON(table - conf->hash_table > nb_zone);
248 197
249 return conf; 198 return conf;
250 199
@@ -257,6 +206,8 @@ static int linear_run (mddev_t *mddev)
257{ 206{
258 linear_conf_t *conf; 207 linear_conf_t *conf;
259 208
209 if (md_check_no_bitmap(mddev))
210 return -EINVAL;
260 mddev->queue->queue_lock = &mddev->queue->__queue_lock; 211 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
261 conf = linear_conf(mddev, mddev->raid_disks); 212 conf = linear_conf(mddev, mddev->raid_disks);
262 213
@@ -272,6 +223,12 @@ static int linear_run (mddev_t *mddev)
272 return 0; 223 return 0;
273} 224}
274 225
226static void free_conf(struct rcu_head *head)
227{
228 linear_conf_t *conf = container_of(head, linear_conf_t, rcu);
229 kfree(conf);
230}
231
275static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) 232static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
276{ 233{
277 /* Adding a drive to a linear array allows the array to grow. 234 /* Adding a drive to a linear array allows the array to grow.
@@ -282,7 +239,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
282 * The current one is never freed until the array is stopped. 239 * The current one is never freed until the array is stopped.
283 * This avoids races. 240 * This avoids races.
284 */ 241 */
285 linear_conf_t *newconf; 242 linear_conf_t *newconf, *oldconf;
286 243
287 if (rdev->saved_raid_disk != mddev->raid_disks) 244 if (rdev->saved_raid_disk != mddev->raid_disks)
288 return -EINVAL; 245 return -EINVAL;
@@ -294,25 +251,29 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
294 if (!newconf) 251 if (!newconf)
295 return -ENOMEM; 252 return -ENOMEM;
296 253
297 newconf->prev = mddev_to_conf(mddev); 254 oldconf = rcu_dereference(mddev->private);
298 mddev->private = newconf;
299 mddev->raid_disks++; 255 mddev->raid_disks++;
256 rcu_assign_pointer(mddev->private, newconf);
300 md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); 257 md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
301 set_capacity(mddev->gendisk, mddev->array_sectors); 258 set_capacity(mddev->gendisk, mddev->array_sectors);
259 call_rcu(&oldconf->rcu, free_conf);
302 return 0; 260 return 0;
303} 261}
304 262
305static int linear_stop (mddev_t *mddev) 263static int linear_stop (mddev_t *mddev)
306{ 264{
307 linear_conf_t *conf = mddev_to_conf(mddev); 265 linear_conf_t *conf = mddev->private;
308 266
267 /*
268 * We do not require rcu protection here since
269 * we hold reconfig_mutex for both linear_add and
270 * linear_stop, so they cannot race.
271 * We should make sure any old 'conf's are properly
272 * freed though.
273 */
274 rcu_barrier();
309 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 275 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
310 do { 276 kfree(conf);
311 linear_conf_t *t = conf->prev;
312 kfree(conf->hash_table);
313 kfree(conf);
314 conf = t;
315 } while (conf);
316 277
317 return 0; 278 return 0;
318} 279}
@@ -322,6 +283,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
322 const int rw = bio_data_dir(bio); 283 const int rw = bio_data_dir(bio);
323 mddev_t *mddev = q->queuedata; 284 mddev_t *mddev = q->queuedata;
324 dev_info_t *tmp_dev; 285 dev_info_t *tmp_dev;
286 sector_t start_sector;
325 int cpu; 287 int cpu;
326 288
327 if (unlikely(bio_barrier(bio))) { 289 if (unlikely(bio_barrier(bio))) {
@@ -335,33 +297,36 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
335 bio_sectors(bio)); 297 bio_sectors(bio));
336 part_stat_unlock(); 298 part_stat_unlock();
337 299
300 rcu_read_lock();
338 tmp_dev = which_dev(mddev, bio->bi_sector); 301 tmp_dev = which_dev(mddev, bio->bi_sector);
339 302 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
340 if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors + 303
341 tmp_dev->start_sector) 304
342 || (bio->bi_sector < 305 if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
343 tmp_dev->start_sector))) { 306 || (bio->bi_sector < start_sector))) {
344 char b[BDEVNAME_SIZE]; 307 char b[BDEVNAME_SIZE];
345 308
346 printk("linear_make_request: Sector %llu out of bounds on " 309 printk("linear_make_request: Sector %llu out of bounds on "
347 "dev %s: %llu sectors, offset %llu\n", 310 "dev %s: %llu sectors, offset %llu\n",
348 (unsigned long long)bio->bi_sector, 311 (unsigned long long)bio->bi_sector,
349 bdevname(tmp_dev->rdev->bdev, b), 312 bdevname(tmp_dev->rdev->bdev, b),
350 (unsigned long long)tmp_dev->num_sectors, 313 (unsigned long long)tmp_dev->rdev->sectors,
351 (unsigned long long)tmp_dev->start_sector); 314 (unsigned long long)start_sector);
315 rcu_read_unlock();
352 bio_io_error(bio); 316 bio_io_error(bio);
353 return 0; 317 return 0;
354 } 318 }
355 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > 319 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
356 tmp_dev->start_sector + tmp_dev->num_sectors)) { 320 tmp_dev->end_sector)) {
357 /* This bio crosses a device boundary, so we have to 321 /* This bio crosses a device boundary, so we have to
358 * split it. 322 * split it.
359 */ 323 */
360 struct bio_pair *bp; 324 struct bio_pair *bp;
325 sector_t end_sector = tmp_dev->end_sector;
326
327 rcu_read_unlock();
361 328
362 bp = bio_split(bio, 329 bp = bio_split(bio, end_sector - bio->bi_sector);
363 tmp_dev->start_sector + tmp_dev->num_sectors
364 - bio->bi_sector);
365 330
366 if (linear_make_request(q, &bp->bio1)) 331 if (linear_make_request(q, &bp->bio1))
367 generic_make_request(&bp->bio1); 332 generic_make_request(&bp->bio1);
@@ -372,8 +337,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
372 } 337 }
373 338
374 bio->bi_bdev = tmp_dev->rdev->bdev; 339 bio->bi_bdev = tmp_dev->rdev->bdev;
375 bio->bi_sector = bio->bi_sector - tmp_dev->start_sector 340 bio->bi_sector = bio->bi_sector - start_sector
376 + tmp_dev->rdev->data_offset; 341 + tmp_dev->rdev->data_offset;
342 rcu_read_unlock();
377 343
378 return 1; 344 return 1;
379} 345}
@@ -381,7 +347,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
381static void linear_status (struct seq_file *seq, mddev_t *mddev) 347static void linear_status (struct seq_file *seq, mddev_t *mddev)
382{ 348{
383 349
384 seq_printf(seq, " %dk rounding", mddev->chunk_size/1024); 350 seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
385} 351}
386 352
387 353
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index bf8179587f95..0ce29b61605a 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -3,27 +3,19 @@
3 3
4struct dev_info { 4struct dev_info {
5 mdk_rdev_t *rdev; 5 mdk_rdev_t *rdev;
6 sector_t num_sectors; 6 sector_t end_sector;
7 sector_t start_sector;
8}; 7};
9 8
10typedef struct dev_info dev_info_t; 9typedef struct dev_info dev_info_t;
11 10
12struct linear_private_data 11struct linear_private_data
13{ 12{
14 struct linear_private_data *prev; /* earlier version */
15 dev_info_t **hash_table;
16 sector_t spacing;
17 sector_t array_sectors; 13 sector_t array_sectors;
18 int sector_shift; /* shift before dividing
19 * by spacing
20 */
21 dev_info_t disks[0]; 14 dev_info_t disks[0];
15 struct rcu_head rcu;
22}; 16};
23 17
24 18
25typedef struct linear_private_data linear_conf_t; 19typedef struct linear_private_data linear_conf_t;
26 20
27#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
28
29#endif 21#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 20f6ac338349..09be637d52cb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -440,15 +440,6 @@ static inline sector_t calc_dev_sboffset(struct block_device *bdev)
440 return MD_NEW_SIZE_SECTORS(num_sectors); 440 return MD_NEW_SIZE_SECTORS(num_sectors);
441} 441}
442 442
443static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
444{
445 sector_t num_sectors = rdev->sb_start;
446
447 if (chunk_size)
448 num_sectors &= ~((sector_t)chunk_size/512 - 1);
449 return num_sectors;
450}
451
452static int alloc_disk_sb(mdk_rdev_t * rdev) 443static int alloc_disk_sb(mdk_rdev_t * rdev)
453{ 444{
454 if (rdev->sb_page) 445 if (rdev->sb_page)
@@ -745,6 +736,24 @@ struct super_type {
745}; 736};
746 737
747/* 738/*
739 * Check that the given mddev has no bitmap.
740 *
741 * This function is called from the run method of all personalities that do not
742 * support bitmaps. It prints an error message and returns non-zero if mddev
743 * has a bitmap. Otherwise, it returns 0.
744 *
745 */
746int md_check_no_bitmap(mddev_t *mddev)
747{
748 if (!mddev->bitmap_file && !mddev->bitmap_offset)
749 return 0;
750 printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
751 mdname(mddev), mddev->pers->name);
752 return 1;
753}
754EXPORT_SYMBOL(md_check_no_bitmap);
755
756/*
748 * load_super for 0.90.0 757 * load_super for 0.90.0
749 */ 758 */
750static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 759static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
@@ -797,17 +806,6 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
797 rdev->data_offset = 0; 806 rdev->data_offset = 0;
798 rdev->sb_size = MD_SB_BYTES; 807 rdev->sb_size = MD_SB_BYTES;
799 808
800 if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
801 if (sb->level != 1 && sb->level != 4
802 && sb->level != 5 && sb->level != 6
803 && sb->level != 10) {
804 /* FIXME use a better test */
805 printk(KERN_WARNING
806 "md: bitmaps not supported for this level.\n");
807 goto abort;
808 }
809 }
810
811 if (sb->level == LEVEL_MULTIPATH) 809 if (sb->level == LEVEL_MULTIPATH)
812 rdev->desc_nr = -1; 810 rdev->desc_nr = -1;
813 else 811 else
@@ -836,7 +834,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
836 else 834 else
837 ret = 0; 835 ret = 0;
838 } 836 }
839 rdev->sectors = calc_num_sectors(rdev, sb->chunk_size); 837 rdev->sectors = rdev->sb_start;
840 838
841 if (rdev->sectors < sb->size * 2 && sb->level > 1) 839 if (rdev->sectors < sb->size * 2 && sb->level > 1)
842 /* "this cannot possibly happen" ... */ 840 /* "this cannot possibly happen" ... */
@@ -866,7 +864,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
866 mddev->minor_version = sb->minor_version; 864 mddev->minor_version = sb->minor_version;
867 mddev->patch_version = sb->patch_version; 865 mddev->patch_version = sb->patch_version;
868 mddev->external = 0; 866 mddev->external = 0;
869 mddev->chunk_size = sb->chunk_size; 867 mddev->chunk_sectors = sb->chunk_size >> 9;
870 mddev->ctime = sb->ctime; 868 mddev->ctime = sb->ctime;
871 mddev->utime = sb->utime; 869 mddev->utime = sb->utime;
872 mddev->level = sb->level; 870 mddev->level = sb->level;
@@ -883,13 +881,13 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
883 mddev->delta_disks = sb->delta_disks; 881 mddev->delta_disks = sb->delta_disks;
884 mddev->new_level = sb->new_level; 882 mddev->new_level = sb->new_level;
885 mddev->new_layout = sb->new_layout; 883 mddev->new_layout = sb->new_layout;
886 mddev->new_chunk = sb->new_chunk; 884 mddev->new_chunk_sectors = sb->new_chunk >> 9;
887 } else { 885 } else {
888 mddev->reshape_position = MaxSector; 886 mddev->reshape_position = MaxSector;
889 mddev->delta_disks = 0; 887 mddev->delta_disks = 0;
890 mddev->new_level = mddev->level; 888 mddev->new_level = mddev->level;
891 mddev->new_layout = mddev->layout; 889 mddev->new_layout = mddev->layout;
892 mddev->new_chunk = mddev->chunk_size; 890 mddev->new_chunk_sectors = mddev->chunk_sectors;
893 } 891 }
894 892
895 if (sb->state & (1<<MD_SB_CLEAN)) 893 if (sb->state & (1<<MD_SB_CLEAN))
@@ -1004,7 +1002,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1004 sb->new_level = mddev->new_level; 1002 sb->new_level = mddev->new_level;
1005 sb->delta_disks = mddev->delta_disks; 1003 sb->delta_disks = mddev->delta_disks;
1006 sb->new_layout = mddev->new_layout; 1004 sb->new_layout = mddev->new_layout;
1007 sb->new_chunk = mddev->new_chunk; 1005 sb->new_chunk = mddev->new_chunk_sectors << 9;
1008 } 1006 }
1009 mddev->minor_version = sb->minor_version; 1007 mddev->minor_version = sb->minor_version;
1010 if (mddev->in_sync) 1008 if (mddev->in_sync)
@@ -1018,7 +1016,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1018 sb->recovery_cp = 0; 1016 sb->recovery_cp = 0;
1019 1017
1020 sb->layout = mddev->layout; 1018 sb->layout = mddev->layout;
1021 sb->chunk_size = mddev->chunk_size; 1019 sb->chunk_size = mddev->chunk_sectors << 9;
1022 1020
1023 if (mddev->bitmap && mddev->bitmap_file == NULL) 1021 if (mddev->bitmap && mddev->bitmap_file == NULL)
1024 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1022 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
@@ -1185,17 +1183,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1185 bdevname(rdev->bdev,b)); 1183 bdevname(rdev->bdev,b));
1186 return -EINVAL; 1184 return -EINVAL;
1187 } 1185 }
1188 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1189 if (sb->level != cpu_to_le32(1) &&
1190 sb->level != cpu_to_le32(4) &&
1191 sb->level != cpu_to_le32(5) &&
1192 sb->level != cpu_to_le32(6) &&
1193 sb->level != cpu_to_le32(10)) {
1194 printk(KERN_WARNING
1195 "md: bitmaps not supported for this level.\n");
1196 return -EINVAL;
1197 }
1198 }
1199 1186
1200 rdev->preferred_minor = 0xffff; 1187 rdev->preferred_minor = 0xffff;
1201 rdev->data_offset = le64_to_cpu(sb->data_offset); 1188 rdev->data_offset = le64_to_cpu(sb->data_offset);
@@ -1248,9 +1235,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1248 if (rdev->sectors < le64_to_cpu(sb->data_size)) 1235 if (rdev->sectors < le64_to_cpu(sb->data_size))
1249 return -EINVAL; 1236 return -EINVAL;
1250 rdev->sectors = le64_to_cpu(sb->data_size); 1237 rdev->sectors = le64_to_cpu(sb->data_size);
1251 if (le32_to_cpu(sb->chunksize))
1252 rdev->sectors &= ~((sector_t)le32_to_cpu(sb->chunksize) - 1);
1253
1254 if (le64_to_cpu(sb->size) > rdev->sectors) 1238 if (le64_to_cpu(sb->size) > rdev->sectors)
1255 return -EINVAL; 1239 return -EINVAL;
1256 return ret; 1240 return ret;
@@ -1271,7 +1255,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1271 mddev->major_version = 1; 1255 mddev->major_version = 1;
1272 mddev->patch_version = 0; 1256 mddev->patch_version = 0;
1273 mddev->external = 0; 1257 mddev->external = 0;
1274 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; 1258 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1275 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1259 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1276 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1260 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1277 mddev->level = le32_to_cpu(sb->level); 1261 mddev->level = le32_to_cpu(sb->level);
@@ -1297,13 +1281,13 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1297 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1281 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1298 mddev->new_level = le32_to_cpu(sb->new_level); 1282 mddev->new_level = le32_to_cpu(sb->new_level);
1299 mddev->new_layout = le32_to_cpu(sb->new_layout); 1283 mddev->new_layout = le32_to_cpu(sb->new_layout);
1300 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9; 1284 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1301 } else { 1285 } else {
1302 mddev->reshape_position = MaxSector; 1286 mddev->reshape_position = MaxSector;
1303 mddev->delta_disks = 0; 1287 mddev->delta_disks = 0;
1304 mddev->new_level = mddev->level; 1288 mddev->new_level = mddev->level;
1305 mddev->new_layout = mddev->layout; 1289 mddev->new_layout = mddev->layout;
1306 mddev->new_chunk = mddev->chunk_size; 1290 mddev->new_chunk_sectors = mddev->chunk_sectors;
1307 } 1291 }
1308 1292
1309 } else if (mddev->pers == NULL) { 1293 } else if (mddev->pers == NULL) {
@@ -1375,7 +1359,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1375 1359
1376 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1360 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1377 sb->size = cpu_to_le64(mddev->dev_sectors); 1361 sb->size = cpu_to_le64(mddev->dev_sectors);
1378 sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9); 1362 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1379 sb->level = cpu_to_le32(mddev->level); 1363 sb->level = cpu_to_le32(mddev->level);
1380 sb->layout = cpu_to_le32(mddev->layout); 1364 sb->layout = cpu_to_le32(mddev->layout);
1381 1365
@@ -1402,7 +1386,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1402 sb->new_layout = cpu_to_le32(mddev->new_layout); 1386 sb->new_layout = cpu_to_le32(mddev->new_layout);
1403 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1387 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1404 sb->new_level = cpu_to_le32(mddev->new_level); 1388 sb->new_level = cpu_to_le32(mddev->new_level);
1405 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); 1389 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1406 } 1390 }
1407 1391
1408 max_dev = 0; 1392 max_dev = 0;
@@ -1897,6 +1881,7 @@ static void md_update_sb(mddev_t * mddev, int force_change)
1897 int sync_req; 1881 int sync_req;
1898 int nospares = 0; 1882 int nospares = 0;
1899 1883
1884 mddev->utime = get_seconds();
1900 if (mddev->external) 1885 if (mddev->external)
1901 return; 1886 return;
1902repeat: 1887repeat:
@@ -1926,7 +1911,6 @@ repeat:
1926 nospares = 0; 1911 nospares = 0;
1927 1912
1928 sync_req = mddev->in_sync; 1913 sync_req = mddev->in_sync;
1929 mddev->utime = get_seconds();
1930 1914
1931 /* If this is just a dirty<->clean transition, and the array is clean 1915 /* If this is just a dirty<->clean transition, and the array is clean
1932 * and 'events' is odd, we can roll back to the previous clean state */ 1916 * and 'events' is odd, we can roll back to the previous clean state */
@@ -2597,15 +2581,6 @@ static void analyze_sbs(mddev_t * mddev)
2597 clear_bit(In_sync, &rdev->flags); 2581 clear_bit(In_sync, &rdev->flags);
2598 } 2582 }
2599 } 2583 }
2600
2601
2602
2603 if (mddev->recovery_cp != MaxSector &&
2604 mddev->level >= 1)
2605 printk(KERN_ERR "md: %s: raid array is not clean"
2606 " -- starting background reconstruction\n",
2607 mdname(mddev));
2608
2609} 2584}
2610 2585
2611static void md_safemode_timeout(unsigned long data); 2586static void md_safemode_timeout(unsigned long data);
@@ -2746,7 +2721,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2746 if (IS_ERR(priv)) { 2721 if (IS_ERR(priv)) {
2747 mddev->new_level = mddev->level; 2722 mddev->new_level = mddev->level;
2748 mddev->new_layout = mddev->layout; 2723 mddev->new_layout = mddev->layout;
2749 mddev->new_chunk = mddev->chunk_size; 2724 mddev->new_chunk_sectors = mddev->chunk_sectors;
2750 mddev->raid_disks -= mddev->delta_disks; 2725 mddev->raid_disks -= mddev->delta_disks;
2751 mddev->delta_disks = 0; 2726 mddev->delta_disks = 0;
2752 module_put(pers->owner); 2727 module_put(pers->owner);
@@ -2764,7 +2739,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
2764 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2739 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2765 mddev->level = mddev->new_level; 2740 mddev->level = mddev->new_level;
2766 mddev->layout = mddev->new_layout; 2741 mddev->layout = mddev->new_layout;
2767 mddev->chunk_size = mddev->new_chunk; 2742 mddev->chunk_sectors = mddev->new_chunk_sectors;
2768 mddev->delta_disks = 0; 2743 mddev->delta_disks = 0;
2769 pers->run(mddev); 2744 pers->run(mddev);
2770 mddev_resume(mddev); 2745 mddev_resume(mddev);
@@ -2800,11 +2775,14 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
2800 2775
2801 if (mddev->pers) { 2776 if (mddev->pers) {
2802 int err; 2777 int err;
2803 if (mddev->pers->reconfig == NULL) 2778 if (mddev->pers->check_reshape == NULL)
2804 return -EBUSY; 2779 return -EBUSY;
2805 err = mddev->pers->reconfig(mddev, n, -1); 2780 mddev->new_layout = n;
2806 if (err) 2781 err = mddev->pers->check_reshape(mddev);
2782 if (err) {
2783 mddev->new_layout = mddev->layout;
2807 return err; 2784 return err;
2785 }
2808 } else { 2786 } else {
2809 mddev->new_layout = n; 2787 mddev->new_layout = n;
2810 if (mddev->reshape_position == MaxSector) 2788 if (mddev->reshape_position == MaxSector)
@@ -2857,10 +2835,11 @@ static ssize_t
2857chunk_size_show(mddev_t *mddev, char *page) 2835chunk_size_show(mddev_t *mddev, char *page)
2858{ 2836{
2859 if (mddev->reshape_position != MaxSector && 2837 if (mddev->reshape_position != MaxSector &&
2860 mddev->chunk_size != mddev->new_chunk) 2838 mddev->chunk_sectors != mddev->new_chunk_sectors)
2861 return sprintf(page, "%d (%d)\n", mddev->new_chunk, 2839 return sprintf(page, "%d (%d)\n",
2862 mddev->chunk_size); 2840 mddev->new_chunk_sectors << 9,
2863 return sprintf(page, "%d\n", mddev->chunk_size); 2841 mddev->chunk_sectors << 9);
2842 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
2864} 2843}
2865 2844
2866static ssize_t 2845static ssize_t
@@ -2874,15 +2853,18 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2874 2853
2875 if (mddev->pers) { 2854 if (mddev->pers) {
2876 int err; 2855 int err;
2877 if (mddev->pers->reconfig == NULL) 2856 if (mddev->pers->check_reshape == NULL)
2878 return -EBUSY; 2857 return -EBUSY;
2879 err = mddev->pers->reconfig(mddev, -1, n); 2858 mddev->new_chunk_sectors = n >> 9;
2880 if (err) 2859 err = mddev->pers->check_reshape(mddev);
2860 if (err) {
2861 mddev->new_chunk_sectors = mddev->chunk_sectors;
2881 return err; 2862 return err;
2863 }
2882 } else { 2864 } else {
2883 mddev->new_chunk = n; 2865 mddev->new_chunk_sectors = n >> 9;
2884 if (mddev->reshape_position == MaxSector) 2866 if (mddev->reshape_position == MaxSector)
2885 mddev->chunk_size = n; 2867 mddev->chunk_sectors = n >> 9;
2886 } 2868 }
2887 return len; 2869 return len;
2888} 2870}
@@ -3527,8 +3509,9 @@ min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3527 return -EBUSY; 3509 return -EBUSY;
3528 3510
3529 /* Must be a multiple of chunk_size */ 3511 /* Must be a multiple of chunk_size */
3530 if (mddev->chunk_size) { 3512 if (mddev->chunk_sectors) {
3531 if (min & (sector_t)((mddev->chunk_size>>9)-1)) 3513 sector_t temp = min;
3514 if (sector_div(temp, mddev->chunk_sectors))
3532 return -EINVAL; 3515 return -EINVAL;
3533 } 3516 }
3534 mddev->resync_min = min; 3517 mddev->resync_min = min;
@@ -3564,8 +3547,9 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3564 return -EBUSY; 3547 return -EBUSY;
3565 3548
3566 /* Must be a multiple of chunk_size */ 3549 /* Must be a multiple of chunk_size */
3567 if (mddev->chunk_size) { 3550 if (mddev->chunk_sectors) {
3568 if (max & (sector_t)((mddev->chunk_size>>9)-1)) 3551 sector_t temp = max;
3552 if (sector_div(temp, mddev->chunk_sectors))
3569 return -EINVAL; 3553 return -EINVAL;
3570 } 3554 }
3571 mddev->resync_max = max; 3555 mddev->resync_max = max;
@@ -3656,7 +3640,7 @@ reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3656 mddev->delta_disks = 0; 3640 mddev->delta_disks = 0;
3657 mddev->new_level = mddev->level; 3641 mddev->new_level = mddev->level;
3658 mddev->new_layout = mddev->layout; 3642 mddev->new_layout = mddev->layout;
3659 mddev->new_chunk = mddev->chunk_size; 3643 mddev->new_chunk_sectors = mddev->chunk_sectors;
3660 return len; 3644 return len;
3661} 3645}
3662 3646
@@ -3976,11 +3960,9 @@ static int start_dirty_degraded;
3976static int do_md_run(mddev_t * mddev) 3960static int do_md_run(mddev_t * mddev)
3977{ 3961{
3978 int err; 3962 int err;
3979 int chunk_size;
3980 mdk_rdev_t *rdev; 3963 mdk_rdev_t *rdev;
3981 struct gendisk *disk; 3964 struct gendisk *disk;
3982 struct mdk_personality *pers; 3965 struct mdk_personality *pers;
3983 char b[BDEVNAME_SIZE];
3984 3966
3985 if (list_empty(&mddev->disks)) 3967 if (list_empty(&mddev->disks))
3986 /* cannot run an array with no devices.. */ 3968 /* cannot run an array with no devices.. */
@@ -3998,38 +3980,6 @@ static int do_md_run(mddev_t * mddev)
3998 analyze_sbs(mddev); 3980 analyze_sbs(mddev);
3999 } 3981 }
4000 3982
4001 chunk_size = mddev->chunk_size;
4002
4003 if (chunk_size) {
4004 if (chunk_size > MAX_CHUNK_SIZE) {
4005 printk(KERN_ERR "too big chunk_size: %d > %d\n",
4006 chunk_size, MAX_CHUNK_SIZE);
4007 return -EINVAL;
4008 }
4009 /*
4010 * chunk-size has to be a power of 2
4011 */
4012 if ( (1 << ffz(~chunk_size)) != chunk_size) {
4013 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
4014 return -EINVAL;
4015 }
4016
4017 /* devices must have minimum size of one chunk */
4018 list_for_each_entry(rdev, &mddev->disks, same_set) {
4019 if (test_bit(Faulty, &rdev->flags))
4020 continue;
4021 if (rdev->sectors < chunk_size / 512) {
4022 printk(KERN_WARNING
4023 "md: Dev %s smaller than chunk_size:"
4024 " %llu < %d\n",
4025 bdevname(rdev->bdev,b),
4026 (unsigned long long)rdev->sectors,
4027 chunk_size / 512);
4028 return -EINVAL;
4029 }
4030 }
4031 }
4032
4033 if (mddev->level != LEVEL_NONE) 3983 if (mddev->level != LEVEL_NONE)
4034 request_module("md-level-%d", mddev->level); 3984 request_module("md-level-%d", mddev->level);
4035 else if (mddev->clevel[0]) 3985 else if (mddev->clevel[0])
@@ -4405,7 +4355,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4405 mddev->flags = 0; 4355 mddev->flags = 0;
4406 mddev->ro = 0; 4356 mddev->ro = 0;
4407 mddev->metadata_type[0] = 0; 4357 mddev->metadata_type[0] = 0;
4408 mddev->chunk_size = 0; 4358 mddev->chunk_sectors = 0;
4409 mddev->ctime = mddev->utime = 0; 4359 mddev->ctime = mddev->utime = 0;
4410 mddev->layout = 0; 4360 mddev->layout = 0;
4411 mddev->max_disks = 0; 4361 mddev->max_disks = 0;
@@ -4413,7 +4363,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4413 mddev->delta_disks = 0; 4363 mddev->delta_disks = 0;
4414 mddev->new_level = LEVEL_NONE; 4364 mddev->new_level = LEVEL_NONE;
4415 mddev->new_layout = 0; 4365 mddev->new_layout = 0;
4416 mddev->new_chunk = 0; 4366 mddev->new_chunk_sectors = 0;
4417 mddev->curr_resync = 0; 4367 mddev->curr_resync = 0;
4418 mddev->resync_mismatches = 0; 4368 mddev->resync_mismatches = 0;
4419 mddev->suspend_lo = mddev->suspend_hi = 0; 4369 mddev->suspend_lo = mddev->suspend_hi = 0;
@@ -4618,7 +4568,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
4618 info.spare_disks = spare; 4568 info.spare_disks = spare;
4619 4569
4620 info.layout = mddev->layout; 4570 info.layout = mddev->layout;
4621 info.chunk_size = mddev->chunk_size; 4571 info.chunk_size = mddev->chunk_sectors << 9;
4622 4572
4623 if (copy_to_user(arg, &info, sizeof(info))) 4573 if (copy_to_user(arg, &info, sizeof(info)))
4624 return -EFAULT; 4574 return -EFAULT;
@@ -4843,7 +4793,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4843 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4793 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4844 } else 4794 } else
4845 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 4795 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
4846 rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); 4796 rdev->sectors = rdev->sb_start;
4847 4797
4848 err = bind_rdev_to_array(rdev, mddev); 4798 err = bind_rdev_to_array(rdev, mddev);
4849 if (err) { 4799 if (err) {
@@ -4913,7 +4863,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
4913 else 4863 else
4914 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4864 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
4915 4865
4916 rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); 4866 rdev->sectors = rdev->sb_start;
4917 4867
4918 if (test_bit(Faulty, &rdev->flags)) { 4868 if (test_bit(Faulty, &rdev->flags)) {
4919 printk(KERN_WARNING 4869 printk(KERN_WARNING
@@ -5062,7 +5012,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5062 mddev->external = 0; 5012 mddev->external = 0;
5063 5013
5064 mddev->layout = info->layout; 5014 mddev->layout = info->layout;
5065 mddev->chunk_size = info->chunk_size; 5015 mddev->chunk_sectors = info->chunk_size >> 9;
5066 5016
5067 mddev->max_disks = MD_SB_DISKS; 5017 mddev->max_disks = MD_SB_DISKS;
5068 5018
@@ -5081,7 +5031,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
5081 get_random_bytes(mddev->uuid, 16); 5031 get_random_bytes(mddev->uuid, 16);
5082 5032
5083 mddev->new_level = mddev->level; 5033 mddev->new_level = mddev->level;
5084 mddev->new_chunk = mddev->chunk_size; 5034 mddev->new_chunk_sectors = mddev->chunk_sectors;
5085 mddev->new_layout = mddev->layout; 5035 mddev->new_layout = mddev->layout;
5086 mddev->delta_disks = 0; 5036 mddev->delta_disks = 0;
5087 5037
@@ -5191,7 +5141,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5191 mddev->level != info->level || 5141 mddev->level != info->level ||
5192/* mddev->layout != info->layout || */ 5142/* mddev->layout != info->layout || */
5193 !mddev->persistent != info->not_persistent|| 5143 !mddev->persistent != info->not_persistent||
5194 mddev->chunk_size != info->chunk_size || 5144 mddev->chunk_sectors != info->chunk_size >> 9 ||
5195 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5145 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
5196 ((state^info->state) & 0xfffffe00) 5146 ((state^info->state) & 0xfffffe00)
5197 ) 5147 )
@@ -5215,10 +5165,15 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
5215 * we don't need to do anything at the md level, the 5165 * we don't need to do anything at the md level, the
5216 * personality will take care of it all. 5166 * personality will take care of it all.
5217 */ 5167 */
5218 if (mddev->pers->reconfig == NULL) 5168 if (mddev->pers->check_reshape == NULL)
5219 return -EINVAL; 5169 return -EINVAL;
5220 else 5170 else {
5221 return mddev->pers->reconfig(mddev, info->layout, -1); 5171 mddev->new_layout = info->layout;
5172 rv = mddev->pers->check_reshape(mddev);
5173 if (rv)
5174 mddev->new_layout = mddev->layout;
5175 return rv;
5176 }
5222 } 5177 }
5223 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5178 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
5224 rv = update_size(mddev, (sector_t)info->size * 2); 5179 rv = update_size(mddev, (sector_t)info->size * 2);
@@ -6717,7 +6672,8 @@ void md_check_recovery(mddev_t *mddev)
6717 */ 6672 */
6718 6673
6719 if (mddev->reshape_position != MaxSector) { 6674 if (mddev->reshape_position != MaxSector) {
6720 if (mddev->pers->check_reshape(mddev) != 0) 6675 if (mddev->pers->check_reshape == NULL ||
6676 mddev->pers->check_reshape(mddev) != 0)
6721 /* Cannot proceed */ 6677 /* Cannot proceed */
6722 goto unlock; 6678 goto unlock;
6723 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6679 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8227ab909d44..9430a110db93 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -30,13 +30,6 @@ typedef struct mddev_s mddev_t;
30typedef struct mdk_rdev_s mdk_rdev_t; 30typedef struct mdk_rdev_s mdk_rdev_t;
31 31
32/* 32/*
33 * options passed in raidrun:
34 */
35
36/* Currently this must fit in an 'int' */
37#define MAX_CHUNK_SIZE (1<<30)
38
39/*
40 * MD's 'extended' device 33 * MD's 'extended' device
41 */ 34 */
42struct mdk_rdev_s 35struct mdk_rdev_s
@@ -145,7 +138,7 @@ struct mddev_s
145 int external; /* metadata is 138 int external; /* metadata is
146 * managed externally */ 139 * managed externally */
147 char metadata_type[17]; /* externally set*/ 140 char metadata_type[17]; /* externally set*/
148 int chunk_size; 141 int chunk_sectors;
149 time_t ctime, utime; 142 time_t ctime, utime;
150 int level, layout; 143 int level, layout;
151 char clevel[16]; 144 char clevel[16];
@@ -166,7 +159,8 @@ struct mddev_s
166 * If reshape_position is MaxSector, then no reshape is happening (yet). 159 * If reshape_position is MaxSector, then no reshape is happening (yet).
167 */ 160 */
168 sector_t reshape_position; 161 sector_t reshape_position;
169 int delta_disks, new_level, new_layout, new_chunk; 162 int delta_disks, new_level, new_layout;
163 int new_chunk_sectors;
170 164
171 struct mdk_thread_s *thread; /* management thread */ 165 struct mdk_thread_s *thread; /* management thread */
172 struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ 166 struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
@@ -325,7 +319,6 @@ struct mdk_personality
325 int (*check_reshape) (mddev_t *mddev); 319 int (*check_reshape) (mddev_t *mddev);
326 int (*start_reshape) (mddev_t *mddev); 320 int (*start_reshape) (mddev_t *mddev);
327 void (*finish_reshape) (mddev_t *mddev); 321 void (*finish_reshape) (mddev_t *mddev);
328 int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
329 /* quiesce moves between quiescence states 322 /* quiesce moves between quiescence states
330 * 0 - fully active 323 * 0 - fully active
331 * 1 - no new requests allowed 324 * 1 - no new requests allowed
@@ -437,5 +430,6 @@ extern void md_new_event(mddev_t *mddev);
437extern int md_allow_write(mddev_t *mddev); 430extern int md_allow_write(mddev_t *mddev);
438extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); 431extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
439extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); 432extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
433extern int md_check_no_bitmap(mddev_t *mddev);
440 434
441#endif /* _MD_MD_H */ 435#endif /* _MD_MD_H */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4ee31aa13c40..cbe368fa6598 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -58,7 +58,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
58{ 58{
59 unsigned long flags; 59 unsigned long flags;
60 mddev_t *mddev = mp_bh->mddev; 60 mddev_t *mddev = mp_bh->mddev;
61 multipath_conf_t *conf = mddev_to_conf(mddev); 61 multipath_conf_t *conf = mddev->private;
62 62
63 spin_lock_irqsave(&conf->device_lock, flags); 63 spin_lock_irqsave(&conf->device_lock, flags);
64 list_add(&mp_bh->retry_list, &conf->retry_list); 64 list_add(&mp_bh->retry_list, &conf->retry_list);
@@ -75,7 +75,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
75static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) 75static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
76{ 76{
77 struct bio *bio = mp_bh->master_bio; 77 struct bio *bio = mp_bh->master_bio;
78 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); 78 multipath_conf_t *conf = mp_bh->mddev->private;
79 79
80 bio_endio(bio, err); 80 bio_endio(bio, err);
81 mempool_free(mp_bh, conf->pool); 81 mempool_free(mp_bh, conf->pool);
@@ -85,7 +85,7 @@ static void multipath_end_request(struct bio *bio, int error)
85{ 85{
86 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 86 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
87 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); 87 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
88 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); 88 multipath_conf_t *conf = mp_bh->mddev->private;
89 mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; 89 mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
90 90
91 if (uptodate) 91 if (uptodate)
@@ -107,7 +107,7 @@ static void multipath_end_request(struct bio *bio, int error)
107 107
108static void unplug_slaves(mddev_t *mddev) 108static void unplug_slaves(mddev_t *mddev)
109{ 109{
110 multipath_conf_t *conf = mddev_to_conf(mddev); 110 multipath_conf_t *conf = mddev->private;
111 int i; 111 int i;
112 112
113 rcu_read_lock(); 113 rcu_read_lock();
@@ -138,7 +138,7 @@ static void multipath_unplug(struct request_queue *q)
138static int multipath_make_request (struct request_queue *q, struct bio * bio) 138static int multipath_make_request (struct request_queue *q, struct bio * bio)
139{ 139{
140 mddev_t *mddev = q->queuedata; 140 mddev_t *mddev = q->queuedata;
141 multipath_conf_t *conf = mddev_to_conf(mddev); 141 multipath_conf_t *conf = mddev->private;
142 struct multipath_bh * mp_bh; 142 struct multipath_bh * mp_bh;
143 struct multipath_info *multipath; 143 struct multipath_info *multipath;
144 const int rw = bio_data_dir(bio); 144 const int rw = bio_data_dir(bio);
@@ -180,7 +180,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
180 180
181static void multipath_status (struct seq_file *seq, mddev_t *mddev) 181static void multipath_status (struct seq_file *seq, mddev_t *mddev)
182{ 182{
183 multipath_conf_t *conf = mddev_to_conf(mddev); 183 multipath_conf_t *conf = mddev->private;
184 int i; 184 int i;
185 185
186 seq_printf (seq, " [%d/%d] [", conf->raid_disks, 186 seq_printf (seq, " [%d/%d] [", conf->raid_disks,
@@ -195,7 +195,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
195static int multipath_congested(void *data, int bits) 195static int multipath_congested(void *data, int bits)
196{ 196{
197 mddev_t *mddev = data; 197 mddev_t *mddev = data;
198 multipath_conf_t *conf = mddev_to_conf(mddev); 198 multipath_conf_t *conf = mddev->private;
199 int i, ret = 0; 199 int i, ret = 0;
200 200
201 rcu_read_lock(); 201 rcu_read_lock();
@@ -220,7 +220,7 @@ static int multipath_congested(void *data, int bits)
220 */ 220 */
221static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) 221static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
222{ 222{
223 multipath_conf_t *conf = mddev_to_conf(mddev); 223 multipath_conf_t *conf = mddev->private;
224 224
225 if (conf->working_disks <= 1) { 225 if (conf->working_disks <= 1) {
226 /* 226 /*
@@ -367,7 +367,7 @@ static void multipathd (mddev_t *mddev)
367 struct multipath_bh *mp_bh; 367 struct multipath_bh *mp_bh;
368 struct bio *bio; 368 struct bio *bio;
369 unsigned long flags; 369 unsigned long flags;
370 multipath_conf_t *conf = mddev_to_conf(mddev); 370 multipath_conf_t *conf = mddev->private;
371 struct list_head *head = &conf->retry_list; 371 struct list_head *head = &conf->retry_list;
372 372
373 md_check_recovery(mddev); 373 md_check_recovery(mddev);
@@ -421,6 +421,9 @@ static int multipath_run (mddev_t *mddev)
421 struct multipath_info *disk; 421 struct multipath_info *disk;
422 mdk_rdev_t *rdev; 422 mdk_rdev_t *rdev;
423 423
424 if (md_check_no_bitmap(mddev))
425 return -EINVAL;
426
424 if (mddev->level != LEVEL_MULTIPATH) { 427 if (mddev->level != LEVEL_MULTIPATH) {
425 printk("multipath: %s: raid level not set to multipath IO (%d)\n", 428 printk("multipath: %s: raid level not set to multipath IO (%d)\n",
426 mdname(mddev), mddev->level); 429 mdname(mddev), mddev->level);
@@ -531,7 +534,7 @@ out:
531 534
532static int multipath_stop (mddev_t *mddev) 535static int multipath_stop (mddev_t *mddev)
533{ 536{
534 multipath_conf_t *conf = mddev_to_conf(mddev); 537 multipath_conf_t *conf = mddev->private;
535 538
536 md_unregister_thread(mddev->thread); 539 md_unregister_thread(mddev->thread);
537 mddev->thread = NULL; 540 mddev->thread = NULL;
diff --git a/drivers/md/multipath.h b/drivers/md/multipath.h
index 6fa70b400cda..d1c2a8d78395 100644
--- a/drivers/md/multipath.h
+++ b/drivers/md/multipath.h
@@ -19,12 +19,6 @@ struct multipath_private_data {
19typedef struct multipath_private_data multipath_conf_t; 19typedef struct multipath_private_data multipath_conf_t;
20 20
21/* 21/*
22 * this is the only point in the RAID code where we violate
23 * C type safety. mddev->private is an 'opaque' pointer.
24 */
25#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
26
27/*
28 * this is our 'private' 'collective' MULTIPATH buffer head. 22 * this is our 'private' 'collective' MULTIPATH buffer head.
29 * it contains information about what kind of IO operations were started 23 * it contains information about what kind of IO operations were started
30 * for this MULTIPATH operation, and about their status: 24 * for this MULTIPATH operation, and about their status:
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 925507e7d673..ab4a489d8695 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -26,8 +26,8 @@
26static void raid0_unplug(struct request_queue *q) 26static void raid0_unplug(struct request_queue *q)
27{ 27{
28 mddev_t *mddev = q->queuedata; 28 mddev_t *mddev = q->queuedata;
29 raid0_conf_t *conf = mddev_to_conf(mddev); 29 raid0_conf_t *conf = mddev->private;
30 mdk_rdev_t **devlist = conf->strip_zone[0].dev; 30 mdk_rdev_t **devlist = conf->devlist;
31 int i; 31 int i;
32 32
33 for (i=0; i<mddev->raid_disks; i++) { 33 for (i=0; i<mddev->raid_disks; i++) {
@@ -40,8 +40,8 @@ static void raid0_unplug(struct request_queue *q)
40static int raid0_congested(void *data, int bits) 40static int raid0_congested(void *data, int bits)
41{ 41{
42 mddev_t *mddev = data; 42 mddev_t *mddev = data;
43 raid0_conf_t *conf = mddev_to_conf(mddev); 43 raid0_conf_t *conf = mddev->private;
44 mdk_rdev_t **devlist = conf->strip_zone[0].dev; 44 mdk_rdev_t **devlist = conf->devlist;
45 int i, ret = 0; 45 int i, ret = 0;
46 46
47 for (i = 0; i < mddev->raid_disks && !ret ; i++) { 47 for (i = 0; i < mddev->raid_disks && !ret ; i++) {
@@ -52,27 +52,60 @@ static int raid0_congested(void *data, int bits)
52 return ret; 52 return ret;
53} 53}
54 54
55/*
56 * inform the user of the raid configuration
57*/
58static void dump_zones(mddev_t *mddev)
59{
60 int j, k, h;
61 sector_t zone_size = 0;
62 sector_t zone_start = 0;
63 char b[BDEVNAME_SIZE];
64 raid0_conf_t *conf = mddev->private;
65 printk(KERN_INFO "******* %s configuration *********\n",
66 mdname(mddev));
67 h = 0;
68 for (j = 0; j < conf->nr_strip_zones; j++) {
69 printk(KERN_INFO "zone%d=[", j);
70 for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
71 printk("%s/",
72 bdevname(conf->devlist[j*mddev->raid_disks
73 + k]->bdev, b));
74 printk("]\n");
75
76 zone_size = conf->strip_zone[j].zone_end - zone_start;
77 printk(KERN_INFO " zone offset=%llukb "
78 "device offset=%llukb size=%llukb\n",
79 (unsigned long long)zone_start>>1,
80 (unsigned long long)conf->strip_zone[j].dev_start>>1,
81 (unsigned long long)zone_size>>1);
82 zone_start = conf->strip_zone[j].zone_end;
83 }
84 printk(KERN_INFO "**********************************\n\n");
85}
55 86
56static int create_strip_zones (mddev_t *mddev) 87static int create_strip_zones(mddev_t *mddev)
57{ 88{
58 int i, c, j; 89 int i, c, j, err;
59 sector_t current_start, curr_zone_start; 90 sector_t curr_zone_end, sectors;
60 sector_t min_spacing; 91 mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
61 raid0_conf_t *conf = mddev_to_conf(mddev);
62 mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
63 struct strip_zone *zone; 92 struct strip_zone *zone;
64 int cnt; 93 int cnt;
65 char b[BDEVNAME_SIZE]; 94 char b[BDEVNAME_SIZE];
66 95 raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
67 /* 96
68 * The number of 'same size groups' 97 if (!conf)
69 */ 98 return -ENOMEM;
70 conf->nr_strip_zones = 0;
71
72 list_for_each_entry(rdev1, &mddev->disks, same_set) { 99 list_for_each_entry(rdev1, &mddev->disks, same_set) {
73 printk(KERN_INFO "raid0: looking at %s\n", 100 printk(KERN_INFO "raid0: looking at %s\n",
74 bdevname(rdev1->bdev,b)); 101 bdevname(rdev1->bdev,b));
75 c = 0; 102 c = 0;
103
104 /* round size to chunk_size */
105 sectors = rdev1->sectors;
106 sector_div(sectors, mddev->chunk_sectors);
107 rdev1->sectors = sectors * mddev->chunk_sectors;
108
76 list_for_each_entry(rdev2, &mddev->disks, same_set) { 109 list_for_each_entry(rdev2, &mddev->disks, same_set) {
77 printk(KERN_INFO "raid0: comparing %s(%llu)", 110 printk(KERN_INFO "raid0: comparing %s(%llu)",
78 bdevname(rdev1->bdev,b), 111 bdevname(rdev1->bdev,b),
@@ -103,16 +136,16 @@ static int create_strip_zones (mddev_t *mddev)
103 } 136 }
104 } 137 }
105 printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones); 138 printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
106 139 err = -ENOMEM;
107 conf->strip_zone = kzalloc(sizeof(struct strip_zone)* 140 conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
108 conf->nr_strip_zones, GFP_KERNEL); 141 conf->nr_strip_zones, GFP_KERNEL);
109 if (!conf->strip_zone) 142 if (!conf->strip_zone)
110 return 1; 143 goto abort;
111 conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* 144 conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
112 conf->nr_strip_zones*mddev->raid_disks, 145 conf->nr_strip_zones*mddev->raid_disks,
113 GFP_KERNEL); 146 GFP_KERNEL);
114 if (!conf->devlist) 147 if (!conf->devlist)
115 return 1; 148 goto abort;
116 149
117 /* The first zone must contain all devices, so here we check that 150 /* The first zone must contain all devices, so here we check that
118 * there is a proper alignment of slots to devices and find them all 151 * there is a proper alignment of slots to devices and find them all
@@ -120,7 +153,8 @@ static int create_strip_zones (mddev_t *mddev)
120 zone = &conf->strip_zone[0]; 153 zone = &conf->strip_zone[0];
121 cnt = 0; 154 cnt = 0;
122 smallest = NULL; 155 smallest = NULL;
123 zone->dev = conf->devlist; 156 dev = conf->devlist;
157 err = -EINVAL;
124 list_for_each_entry(rdev1, &mddev->disks, same_set) { 158 list_for_each_entry(rdev1, &mddev->disks, same_set) {
125 int j = rdev1->raid_disk; 159 int j = rdev1->raid_disk;
126 160
@@ -129,12 +163,12 @@ static int create_strip_zones (mddev_t *mddev)
129 "aborting!\n", j); 163 "aborting!\n", j);
130 goto abort; 164 goto abort;
131 } 165 }
132 if (zone->dev[j]) { 166 if (dev[j]) {
133 printk(KERN_ERR "raid0: multiple devices for %d - " 167 printk(KERN_ERR "raid0: multiple devices for %d - "
134 "aborting!\n", j); 168 "aborting!\n", j);
135 goto abort; 169 goto abort;
136 } 170 }
137 zone->dev[j] = rdev1; 171 dev[j] = rdev1;
138 172
139 blk_queue_stack_limits(mddev->queue, 173 blk_queue_stack_limits(mddev->queue,
140 rdev1->bdev->bd_disk->queue); 174 rdev1->bdev->bd_disk->queue);
@@ -157,34 +191,32 @@ static int create_strip_zones (mddev_t *mddev)
157 goto abort; 191 goto abort;
158 } 192 }
159 zone->nb_dev = cnt; 193 zone->nb_dev = cnt;
160 zone->sectors = smallest->sectors * cnt; 194 zone->zone_end = smallest->sectors * cnt;
161 zone->zone_start = 0;
162 195
163 current_start = smallest->sectors; 196 curr_zone_end = zone->zone_end;
164 curr_zone_start = zone->sectors;
165 197
166 /* now do the other zones */ 198 /* now do the other zones */
167 for (i = 1; i < conf->nr_strip_zones; i++) 199 for (i = 1; i < conf->nr_strip_zones; i++)
168 { 200 {
169 zone = conf->strip_zone + i; 201 zone = conf->strip_zone + i;
170 zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks; 202 dev = conf->devlist + i * mddev->raid_disks;
171 203
172 printk(KERN_INFO "raid0: zone %d\n", i); 204 printk(KERN_INFO "raid0: zone %d\n", i);
173 zone->dev_start = current_start; 205 zone->dev_start = smallest->sectors;
174 smallest = NULL; 206 smallest = NULL;
175 c = 0; 207 c = 0;
176 208
177 for (j=0; j<cnt; j++) { 209 for (j=0; j<cnt; j++) {
178 char b[BDEVNAME_SIZE]; 210 char b[BDEVNAME_SIZE];
179 rdev = conf->strip_zone[0].dev[j]; 211 rdev = conf->devlist[j];
180 printk(KERN_INFO "raid0: checking %s ...", 212 printk(KERN_INFO "raid0: checking %s ...",
181 bdevname(rdev->bdev, b)); 213 bdevname(rdev->bdev, b));
182 if (rdev->sectors <= current_start) { 214 if (rdev->sectors <= zone->dev_start) {
183 printk(KERN_INFO " nope.\n"); 215 printk(KERN_INFO " nope.\n");
184 continue; 216 continue;
185 } 217 }
186 printk(KERN_INFO " contained as device %d\n", c); 218 printk(KERN_INFO " contained as device %d\n", c);
187 zone->dev[c] = rdev; 219 dev[c] = rdev;
188 c++; 220 c++;
189 if (!smallest || rdev->sectors < smallest->sectors) { 221 if (!smallest || rdev->sectors < smallest->sectors) {
190 smallest = rdev; 222 smallest = rdev;
@@ -194,47 +226,39 @@ static int create_strip_zones (mddev_t *mddev)
194 } 226 }
195 227
196 zone->nb_dev = c; 228 zone->nb_dev = c;
197 zone->sectors = (smallest->sectors - current_start) * c; 229 sectors = (smallest->sectors - zone->dev_start) * c;
198 printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n", 230 printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
199 zone->nb_dev, (unsigned long long)zone->sectors); 231 zone->nb_dev, (unsigned long long)sectors);
200 232
201 zone->zone_start = curr_zone_start; 233 curr_zone_end += sectors;
202 curr_zone_start += zone->sectors; 234 zone->zone_end = curr_zone_end;
203 235
204 current_start = smallest->sectors;
205 printk(KERN_INFO "raid0: current zone start: %llu\n", 236 printk(KERN_INFO "raid0: current zone start: %llu\n",
206 (unsigned long long)current_start); 237 (unsigned long long)smallest->sectors);
207 }
208
209 /* Now find appropriate hash spacing.
210 * We want a number which causes most hash entries to cover
211 * at most two strips, but the hash table must be at most
212 * 1 PAGE. We choose the smallest strip, or contiguous collection
213 * of strips, that has big enough size. We never consider the last
214 * strip though as it's size has no bearing on the efficacy of the hash
215 * table.
216 */
217 conf->spacing = curr_zone_start;
218 min_spacing = curr_zone_start;
219 sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
220 for (i=0; i < conf->nr_strip_zones-1; i++) {
221 sector_t s = 0;
222 for (j = i; j < conf->nr_strip_zones - 1 &&
223 s < min_spacing; j++)
224 s += conf->strip_zone[j].sectors;
225 if (s >= min_spacing && s < conf->spacing)
226 conf->spacing = s;
227 } 238 }
228
229 mddev->queue->unplug_fn = raid0_unplug; 239 mddev->queue->unplug_fn = raid0_unplug;
230
231 mddev->queue->backing_dev_info.congested_fn = raid0_congested; 240 mddev->queue->backing_dev_info.congested_fn = raid0_congested;
232 mddev->queue->backing_dev_info.congested_data = mddev; 241 mddev->queue->backing_dev_info.congested_data = mddev;
233 242
243 /*
244 * now since we have the hard sector sizes, we can make sure
245 * chunk size is a multiple of that sector size
246 */
247 if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
248 printk(KERN_ERR "%s chunk_size of %d not valid\n",
249 mdname(mddev),
250 mddev->chunk_sectors << 9);
251 goto abort;
252 }
234 printk(KERN_INFO "raid0: done.\n"); 253 printk(KERN_INFO "raid0: done.\n");
254 mddev->private = conf;
235 return 0; 255 return 0;
236 abort: 256abort:
237 return 1; 257 kfree(conf->strip_zone);
258 kfree(conf->devlist);
259 kfree(conf);
260 mddev->private = NULL;
261 return err;
238} 262}
239 263
240/** 264/**
@@ -252,10 +276,15 @@ static int raid0_mergeable_bvec(struct request_queue *q,
252 mddev_t *mddev = q->queuedata; 276 mddev_t *mddev = q->queuedata;
253 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 277 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
254 int max; 278 int max;
255 unsigned int chunk_sectors = mddev->chunk_size >> 9; 279 unsigned int chunk_sectors = mddev->chunk_sectors;
256 unsigned int bio_sectors = bvm->bi_size >> 9; 280 unsigned int bio_sectors = bvm->bi_size >> 9;
257 281
258 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 282 if (is_power_of_2(chunk_sectors))
283 max = (chunk_sectors - ((sector & (chunk_sectors-1))
284 + bio_sectors)) << 9;
285 else
286 max = (chunk_sectors - (sector_div(sector, chunk_sectors)
287 + bio_sectors)) << 9;
259 if (max < 0) max = 0; /* bio_add cannot handle a negative return */ 288 if (max < 0) max = 0; /* bio_add cannot handle a negative return */
260 if (max <= biovec->bv_len && bio_sectors == 0) 289 if (max <= biovec->bv_len && bio_sectors == 0)
261 return biovec->bv_len; 290 return biovec->bv_len;
@@ -277,84 +306,28 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
277 return array_sectors; 306 return array_sectors;
278} 307}
279 308
280static int raid0_run (mddev_t *mddev) 309static int raid0_run(mddev_t *mddev)
281{ 310{
282 unsigned cur=0, i=0, nb_zone; 311 int ret;
283 s64 sectors;
284 raid0_conf_t *conf;
285 312
286 if (mddev->chunk_size == 0) { 313 if (mddev->chunk_sectors == 0) {
287 printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); 314 printk(KERN_ERR "md/raid0: chunk size must be set.\n");
288 return -EINVAL; 315 return -EINVAL;
289 } 316 }
290 printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n", 317 if (md_check_no_bitmap(mddev))
291 mdname(mddev), 318 return -EINVAL;
292 mddev->chunk_size >> 9, 319 blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
293 (mddev->chunk_size>>1)-1);
294 blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
295 blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
296 mddev->queue->queue_lock = &mddev->queue->__queue_lock; 320 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
297 321
298 conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL); 322 ret = create_strip_zones(mddev);
299 if (!conf) 323 if (ret < 0)
300 goto out; 324 return ret;
301 mddev->private = (void *)conf;
302
303 conf->strip_zone = NULL;
304 conf->devlist = NULL;
305 if (create_strip_zones (mddev))
306 goto out_free_conf;
307 325
308 /* calculate array device size */ 326 /* calculate array device size */
309 md_set_array_sectors(mddev, raid0_size(mddev, 0, 0)); 327 md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
310 328
311 printk(KERN_INFO "raid0 : md_size is %llu sectors.\n", 329 printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
312 (unsigned long long)mddev->array_sectors); 330 (unsigned long long)mddev->array_sectors);
313 printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
314 (unsigned long long)conf->spacing);
315 {
316 sector_t s = raid0_size(mddev, 0, 0);
317 sector_t space = conf->spacing;
318 int round;
319 conf->sector_shift = 0;
320 if (sizeof(sector_t) > sizeof(u32)) {
321 /*shift down space and s so that sector_div will work */
322 while (space > (sector_t) (~(u32)0)) {
323 s >>= 1;
324 space >>= 1;
325 s += 1; /* force round-up */
326 conf->sector_shift++;
327 }
328 }
329 round = sector_div(s, (u32)space) ? 1 : 0;
330 nb_zone = s + round;
331 }
332 printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
333
334 printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
335 nb_zone*sizeof(struct strip_zone*));
336 conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
337 if (!conf->hash_table)
338 goto out_free_conf;
339 sectors = conf->strip_zone[cur].sectors;
340
341 conf->hash_table[0] = conf->strip_zone + cur;
342 for (i=1; i< nb_zone; i++) {
343 while (sectors <= conf->spacing) {
344 cur++;
345 sectors += conf->strip_zone[cur].sectors;
346 }
347 sectors -= conf->spacing;
348 conf->hash_table[i] = conf->strip_zone + cur;
349 }
350 if (conf->sector_shift) {
351 conf->spacing >>= conf->sector_shift;
352 /* round spacing up so when we divide by it, we
353 * err on the side of too-low, which is safest
354 */
355 conf->spacing++;
356 }
357
358 /* calculate the max read-ahead size. 331 /* calculate the max read-ahead size.
359 * For read-ahead of large files to be effective, we need to 332 * For read-ahead of large files to be effective, we need to
360 * readahead at least twice a whole stripe. i.e. number of devices 333 * readahead at least twice a whole stripe. i.e. number of devices
@@ -365,48 +338,107 @@ static int raid0_run (mddev_t *mddev)
365 * chunksize should be used in that case. 338 * chunksize should be used in that case.
366 */ 339 */
367 { 340 {
368 int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; 341 int stripe = mddev->raid_disks *
342 (mddev->chunk_sectors << 9) / PAGE_SIZE;
369 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 343 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
370 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 344 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
371 } 345 }
372 346
373
374 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); 347 blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
348 dump_zones(mddev);
375 return 0; 349 return 0;
350}
376 351
377out_free_conf: 352static int raid0_stop(mddev_t *mddev)
353{
354 raid0_conf_t *conf = mddev->private;
355
356 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
378 kfree(conf->strip_zone); 357 kfree(conf->strip_zone);
379 kfree(conf->devlist); 358 kfree(conf->devlist);
380 kfree(conf); 359 kfree(conf);
381 mddev->private = NULL; 360 mddev->private = NULL;
382out: 361 return 0;
383 return -ENOMEM;
384} 362}
385 363
386static int raid0_stop (mddev_t *mddev) 364/* Find the zone which holds a particular offset
365 * Update *sectorp to be an offset in that zone
366 */
367static struct strip_zone *find_zone(struct raid0_private_data *conf,
368 sector_t *sectorp)
387{ 369{
388 raid0_conf_t *conf = mddev_to_conf(mddev); 370 int i;
371 struct strip_zone *z = conf->strip_zone;
372 sector_t sector = *sectorp;
373
374 for (i = 0; i < conf->nr_strip_zones; i++)
375 if (sector < z[i].zone_end) {
376 if (i)
377 *sectorp = sector - z[i-1].zone_end;
378 return z + i;
379 }
380 BUG();
381}
389 382
390 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 383/*
391 kfree(conf->hash_table); 384 * remaps the bio to the target device. we separate two flows.
392 conf->hash_table = NULL; 385 * power 2 flow and a general flow for the sake of perfromance
393 kfree(conf->strip_zone); 386*/
394 conf->strip_zone = NULL; 387static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
395 kfree(conf); 388 sector_t sector, sector_t *sector_offset)
396 mddev->private = NULL; 389{
390 unsigned int sect_in_chunk;
391 sector_t chunk;
392 raid0_conf_t *conf = mddev->private;
393 unsigned int chunk_sects = mddev->chunk_sectors;
394
395 if (is_power_of_2(chunk_sects)) {
396 int chunksect_bits = ffz(~chunk_sects);
397 /* find the sector offset inside the chunk */
398 sect_in_chunk = sector & (chunk_sects - 1);
399 sector >>= chunksect_bits;
400 /* chunk in zone */
401 chunk = *sector_offset;
402 /* quotient is the chunk in real device*/
403 sector_div(chunk, zone->nb_dev << chunksect_bits);
404 } else{
405 sect_in_chunk = sector_div(sector, chunk_sects);
406 chunk = *sector_offset;
407 sector_div(chunk, chunk_sects * zone->nb_dev);
408 }
409 /*
410 * position the bio over the real device
411 * real sector = chunk in device + starting of zone
412 * + the position in the chunk
413 */
414 *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
415 return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
416 + sector_div(sector, zone->nb_dev)];
417}
397 418
398 return 0; 419/*
420 * Is io distribute over 1 or more chunks ?
421*/
422static inline int is_io_in_chunk_boundary(mddev_t *mddev,
423 unsigned int chunk_sects, struct bio *bio)
424{
425 if (likely(is_power_of_2(chunk_sects))) {
426 return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
427 + (bio->bi_size >> 9));
428 } else{
429 sector_t sector = bio->bi_sector;
430 return chunk_sects >= (sector_div(sector, chunk_sects)
431 + (bio->bi_size >> 9));
432 }
399} 433}
400 434
401static int raid0_make_request (struct request_queue *q, struct bio *bio) 435static int raid0_make_request(struct request_queue *q, struct bio *bio)
402{ 436{
403 mddev_t *mddev = q->queuedata; 437 mddev_t *mddev = q->queuedata;
404 unsigned int sect_in_chunk, chunksect_bits, chunk_sects; 438 unsigned int chunk_sects;
405 raid0_conf_t *conf = mddev_to_conf(mddev); 439 sector_t sector_offset;
406 struct strip_zone *zone; 440 struct strip_zone *zone;
407 mdk_rdev_t *tmp_dev; 441 mdk_rdev_t *tmp_dev;
408 sector_t chunk;
409 sector_t sector, rsect;
410 const int rw = bio_data_dir(bio); 442 const int rw = bio_data_dir(bio);
411 int cpu; 443 int cpu;
412 444
@@ -421,11 +453,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
421 bio_sectors(bio)); 453 bio_sectors(bio));
422 part_stat_unlock(); 454 part_stat_unlock();
423 455
424 chunk_sects = mddev->chunk_size >> 9; 456 chunk_sects = mddev->chunk_sectors;
425 chunksect_bits = ffz(~chunk_sects); 457 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
426 sector = bio->bi_sector; 458 sector_t sector = bio->bi_sector;
427
428 if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
429 struct bio_pair *bp; 459 struct bio_pair *bp;
430 /* Sanity check -- queue functions should prevent this happening */ 460 /* Sanity check -- queue functions should prevent this happening */
431 if (bio->bi_vcnt != 1 || 461 if (bio->bi_vcnt != 1 ||
@@ -434,7 +464,12 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
434 /* This is a one page bio that upper layers 464 /* This is a one page bio that upper layers
435 * refuse to split for us, so we need to split it. 465 * refuse to split for us, so we need to split it.
436 */ 466 */
437 bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1))); 467 if (likely(is_power_of_2(chunk_sects)))
468 bp = bio_split(bio, chunk_sects - (sector &
469 (chunk_sects-1)));
470 else
471 bp = bio_split(bio, chunk_sects -
472 sector_div(sector, chunk_sects));
438 if (raid0_make_request(q, &bp->bio1)) 473 if (raid0_make_request(q, &bp->bio1))
439 generic_make_request(&bp->bio1); 474 generic_make_request(&bp->bio1);
440 if (raid0_make_request(q, &bp->bio2)) 475 if (raid0_make_request(q, &bp->bio2))
@@ -443,34 +478,14 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
443 bio_pair_release(bp); 478 bio_pair_release(bp);
444 return 0; 479 return 0;
445 } 480 }
446
447
448 {
449 sector_t x = sector >> conf->sector_shift;
450 sector_div(x, (u32)conf->spacing);
451 zone = conf->hash_table[x];
452 }
453 481
454 while (sector >= zone->zone_start + zone->sectors) 482 sector_offset = bio->bi_sector;
455 zone++; 483 zone = find_zone(mddev->private, &sector_offset);
456 484 tmp_dev = map_sector(mddev, zone, bio->bi_sector,
457 sect_in_chunk = bio->bi_sector & (chunk_sects - 1); 485 &sector_offset);
458
459
460 {
461 sector_t x = (sector - zone->zone_start) >> chunksect_bits;
462
463 sector_div(x, zone->nb_dev);
464 chunk = x;
465
466 x = sector >> chunksect_bits;
467 tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
468 }
469 rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
470
471 bio->bi_bdev = tmp_dev->bdev; 486 bio->bi_bdev = tmp_dev->bdev;
472 bio->bi_sector = rsect + tmp_dev->data_offset; 487 bio->bi_sector = sector_offset + zone->dev_start +
473 488 tmp_dev->data_offset;
474 /* 489 /*
475 * Let the main block layer submit the IO and resolve recursion: 490 * Let the main block layer submit the IO and resolve recursion:
476 */ 491 */
@@ -485,31 +500,35 @@ bad_map:
485 return 0; 500 return 0;
486} 501}
487 502
488static void raid0_status (struct seq_file *seq, mddev_t *mddev) 503static void raid0_status(struct seq_file *seq, mddev_t *mddev)
489{ 504{
490#undef MD_DEBUG 505#undef MD_DEBUG
491#ifdef MD_DEBUG 506#ifdef MD_DEBUG
492 int j, k, h; 507 int j, k, h;
493 char b[BDEVNAME_SIZE]; 508 char b[BDEVNAME_SIZE];
494 raid0_conf_t *conf = mddev_to_conf(mddev); 509 raid0_conf_t *conf = mddev->private;
495 510
511 sector_t zone_size;
512 sector_t zone_start = 0;
496 h = 0; 513 h = 0;
514
497 for (j = 0; j < conf->nr_strip_zones; j++) { 515 for (j = 0; j < conf->nr_strip_zones; j++) {
498 seq_printf(seq, " z%d", j); 516 seq_printf(seq, " z%d", j);
499 if (conf->hash_table[h] == conf->strip_zone+j)
500 seq_printf(seq, "(h%d)", h++);
501 seq_printf(seq, "=["); 517 seq_printf(seq, "=[");
502 for (k = 0; k < conf->strip_zone[j].nb_dev; k++) 518 for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
503 seq_printf(seq, "%s/", bdevname( 519 seq_printf(seq, "%s/", bdevname(
504 conf->strip_zone[j].dev[k]->bdev,b)); 520 conf->devlist[j*mddev->raid_disks + k]
505 521 ->bdev, b));
506 seq_printf(seq, "] zs=%d ds=%d s=%d\n", 522
507 conf->strip_zone[j].zone_start, 523 zone_size = conf->strip_zone[j].zone_end - zone_start;
508 conf->strip_zone[j].dev_start, 524 seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
509 conf->strip_zone[j].sectors); 525 (unsigned long long)zone_start>>1,
526 (unsigned long long)conf->strip_zone[j].dev_start>>1,
527 (unsigned long long)zone_size>>1);
528 zone_start = conf->strip_zone[j].zone_end;
510 } 529 }
511#endif 530#endif
512 seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); 531 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
513 return; 532 return;
514} 533}
515 534
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 824b12eb1d4f..91f8e876ee64 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -3,26 +3,18 @@
3 3
4struct strip_zone 4struct strip_zone
5{ 5{
6 sector_t zone_start; /* Zone offset in md_dev (in sectors) */ 6 sector_t zone_end; /* Start of the next zone (in sectors) */
7 sector_t dev_start; /* Zone offset in real dev (in sectors) */ 7 sector_t dev_start; /* Zone offset in real dev (in sectors) */
8 sector_t sectors; /* Zone size in sectors */
9 int nb_dev; /* # of devices attached to the zone */ 8 int nb_dev; /* # of devices attached to the zone */
10 mdk_rdev_t **dev; /* Devices attached to the zone */
11}; 9};
12 10
13struct raid0_private_data 11struct raid0_private_data
14{ 12{
15 struct strip_zone **hash_table; /* Table of indexes into strip_zone */
16 struct strip_zone *strip_zone; 13 struct strip_zone *strip_zone;
17 mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ 14 mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
18 int nr_strip_zones; 15 int nr_strip_zones;
19
20 sector_t spacing;
21 int sector_shift; /* shift this before divide by spacing */
22}; 16};
23 17
24typedef struct raid0_private_data raid0_conf_t; 18typedef struct raid0_private_data raid0_conf_t;
25 19
26#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
27
28#endif 20#endif
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e23758b4a34e..89939a7aef57 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -182,7 +182,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
182 182
183static void free_r1bio(r1bio_t *r1_bio) 183static void free_r1bio(r1bio_t *r1_bio)
184{ 184{
185 conf_t *conf = mddev_to_conf(r1_bio->mddev); 185 conf_t *conf = r1_bio->mddev->private;
186 186
187 /* 187 /*
188 * Wake up any possible resync thread that waits for the device 188 * Wake up any possible resync thread that waits for the device
@@ -196,7 +196,7 @@ static void free_r1bio(r1bio_t *r1_bio)
196 196
197static void put_buf(r1bio_t *r1_bio) 197static void put_buf(r1bio_t *r1_bio)
198{ 198{
199 conf_t *conf = mddev_to_conf(r1_bio->mddev); 199 conf_t *conf = r1_bio->mddev->private;
200 int i; 200 int i;
201 201
202 for (i=0; i<conf->raid_disks; i++) { 202 for (i=0; i<conf->raid_disks; i++) {
@@ -214,7 +214,7 @@ static void reschedule_retry(r1bio_t *r1_bio)
214{ 214{
215 unsigned long flags; 215 unsigned long flags;
216 mddev_t *mddev = r1_bio->mddev; 216 mddev_t *mddev = r1_bio->mddev;
217 conf_t *conf = mddev_to_conf(mddev); 217 conf_t *conf = mddev->private;
218 218
219 spin_lock_irqsave(&conf->device_lock, flags); 219 spin_lock_irqsave(&conf->device_lock, flags);
220 list_add(&r1_bio->retry_list, &conf->retry_list); 220 list_add(&r1_bio->retry_list, &conf->retry_list);
@@ -253,7 +253,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
253 */ 253 */
254static inline void update_head_pos(int disk, r1bio_t *r1_bio) 254static inline void update_head_pos(int disk, r1bio_t *r1_bio)
255{ 255{
256 conf_t *conf = mddev_to_conf(r1_bio->mddev); 256 conf_t *conf = r1_bio->mddev->private;
257 257
258 conf->mirrors[disk].head_position = 258 conf->mirrors[disk].head_position =
259 r1_bio->sector + (r1_bio->sectors); 259 r1_bio->sector + (r1_bio->sectors);
@@ -264,7 +264,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
264 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 264 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
265 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 265 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
266 int mirror; 266 int mirror;
267 conf_t *conf = mddev_to_conf(r1_bio->mddev); 267 conf_t *conf = r1_bio->mddev->private;
268 268
269 mirror = r1_bio->read_disk; 269 mirror = r1_bio->read_disk;
270 /* 270 /*
@@ -309,7 +309,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
309 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 309 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
310 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 310 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
311 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); 311 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
312 conf_t *conf = mddev_to_conf(r1_bio->mddev); 312 conf_t *conf = r1_bio->mddev->private;
313 struct bio *to_put = NULL; 313 struct bio *to_put = NULL;
314 314
315 315
@@ -541,7 +541,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
541 541
542static void unplug_slaves(mddev_t *mddev) 542static void unplug_slaves(mddev_t *mddev)
543{ 543{
544 conf_t *conf = mddev_to_conf(mddev); 544 conf_t *conf = mddev->private;
545 int i; 545 int i;
546 546
547 rcu_read_lock(); 547 rcu_read_lock();
@@ -573,7 +573,7 @@ static void raid1_unplug(struct request_queue *q)
573static int raid1_congested(void *data, int bits) 573static int raid1_congested(void *data, int bits)
574{ 574{
575 mddev_t *mddev = data; 575 mddev_t *mddev = data;
576 conf_t *conf = mddev_to_conf(mddev); 576 conf_t *conf = mddev->private;
577 int i, ret = 0; 577 int i, ret = 0;
578 578
579 rcu_read_lock(); 579 rcu_read_lock();
@@ -772,7 +772,7 @@ do_sync_io:
772static int make_request(struct request_queue *q, struct bio * bio) 772static int make_request(struct request_queue *q, struct bio * bio)
773{ 773{
774 mddev_t *mddev = q->queuedata; 774 mddev_t *mddev = q->queuedata;
775 conf_t *conf = mddev_to_conf(mddev); 775 conf_t *conf = mddev->private;
776 mirror_info_t *mirror; 776 mirror_info_t *mirror;
777 r1bio_t *r1_bio; 777 r1bio_t *r1_bio;
778 struct bio *read_bio; 778 struct bio *read_bio;
@@ -991,7 +991,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
991 991
992static void status(struct seq_file *seq, mddev_t *mddev) 992static void status(struct seq_file *seq, mddev_t *mddev)
993{ 993{
994 conf_t *conf = mddev_to_conf(mddev); 994 conf_t *conf = mddev->private;
995 int i; 995 int i;
996 996
997 seq_printf(seq, " [%d/%d] [", conf->raid_disks, 997 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
@@ -1010,7 +1010,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
1010static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1010static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1011{ 1011{
1012 char b[BDEVNAME_SIZE]; 1012 char b[BDEVNAME_SIZE];
1013 conf_t *conf = mddev_to_conf(mddev); 1013 conf_t *conf = mddev->private;
1014 1014
1015 /* 1015 /*
1016 * If it is not operational, then we have already marked it as dead 1016 * If it is not operational, then we have already marked it as dead
@@ -1214,7 +1214,7 @@ static void end_sync_write(struct bio *bio, int error)
1214 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1214 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1215 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1215 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
1216 mddev_t *mddev = r1_bio->mddev; 1216 mddev_t *mddev = r1_bio->mddev;
1217 conf_t *conf = mddev_to_conf(mddev); 1217 conf_t *conf = mddev->private;
1218 int i; 1218 int i;
1219 int mirror=0; 1219 int mirror=0;
1220 1220
@@ -1248,7 +1248,7 @@ static void end_sync_write(struct bio *bio, int error)
1248 1248
1249static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1249static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1250{ 1250{
1251 conf_t *conf = mddev_to_conf(mddev); 1251 conf_t *conf = mddev->private;
1252 int i; 1252 int i;
1253 int disks = conf->raid_disks; 1253 int disks = conf->raid_disks;
1254 struct bio *bio, *wbio; 1254 struct bio *bio, *wbio;
@@ -1562,7 +1562,7 @@ static void raid1d(mddev_t *mddev)
1562 r1bio_t *r1_bio; 1562 r1bio_t *r1_bio;
1563 struct bio *bio; 1563 struct bio *bio;
1564 unsigned long flags; 1564 unsigned long flags;
1565 conf_t *conf = mddev_to_conf(mddev); 1565 conf_t *conf = mddev->private;
1566 struct list_head *head = &conf->retry_list; 1566 struct list_head *head = &conf->retry_list;
1567 int unplug=0; 1567 int unplug=0;
1568 mdk_rdev_t *rdev; 1568 mdk_rdev_t *rdev;
@@ -1585,7 +1585,7 @@ static void raid1d(mddev_t *mddev)
1585 spin_unlock_irqrestore(&conf->device_lock, flags); 1585 spin_unlock_irqrestore(&conf->device_lock, flags);
1586 1586
1587 mddev = r1_bio->mddev; 1587 mddev = r1_bio->mddev;
1588 conf = mddev_to_conf(mddev); 1588 conf = mddev->private;
1589 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 1589 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
1590 sync_request_write(mddev, r1_bio); 1590 sync_request_write(mddev, r1_bio);
1591 unplug = 1; 1591 unplug = 1;
@@ -1706,7 +1706,7 @@ static int init_resync(conf_t *conf)
1706 1706
1707static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 1707static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1708{ 1708{
1709 conf_t *conf = mddev_to_conf(mddev); 1709 conf_t *conf = mddev->private;
1710 r1bio_t *r1_bio; 1710 r1bio_t *r1_bio;
1711 struct bio *bio; 1711 struct bio *bio;
1712 sector_t max_sector, nr_sectors; 1712 sector_t max_sector, nr_sectors;
@@ -2052,6 +2052,10 @@ static int run(mddev_t *mddev)
2052 goto out_free_conf; 2052 goto out_free_conf;
2053 } 2053 }
2054 2054
2055 if (mddev->recovery_cp != MaxSector)
2056 printk(KERN_NOTICE "raid1: %s is not clean"
2057 " -- starting background reconstruction\n",
2058 mdname(mddev));
2055 printk(KERN_INFO 2059 printk(KERN_INFO
2056 "raid1: raid set %s active with %d out of %d mirrors\n", 2060 "raid1: raid set %s active with %d out of %d mirrors\n",
2057 mdname(mddev), mddev->raid_disks - mddev->degraded, 2061 mdname(mddev), mddev->raid_disks - mddev->degraded,
@@ -2087,7 +2091,7 @@ out:
2087 2091
2088static int stop(mddev_t *mddev) 2092static int stop(mddev_t *mddev)
2089{ 2093{
2090 conf_t *conf = mddev_to_conf(mddev); 2094 conf_t *conf = mddev->private;
2091 struct bitmap *bitmap = mddev->bitmap; 2095 struct bitmap *bitmap = mddev->bitmap;
2092 int behind_wait = 0; 2096 int behind_wait = 0;
2093 2097
@@ -2155,16 +2159,16 @@ static int raid1_reshape(mddev_t *mddev)
2155 mempool_t *newpool, *oldpool; 2159 mempool_t *newpool, *oldpool;
2156 struct pool_info *newpoolinfo; 2160 struct pool_info *newpoolinfo;
2157 mirror_info_t *newmirrors; 2161 mirror_info_t *newmirrors;
2158 conf_t *conf = mddev_to_conf(mddev); 2162 conf_t *conf = mddev->private;
2159 int cnt, raid_disks; 2163 int cnt, raid_disks;
2160 unsigned long flags; 2164 unsigned long flags;
2161 int d, d2, err; 2165 int d, d2, err;
2162 2166
2163 /* Cannot change chunk_size, layout, or level */ 2167 /* Cannot change chunk_size, layout, or level */
2164 if (mddev->chunk_size != mddev->new_chunk || 2168 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
2165 mddev->layout != mddev->new_layout || 2169 mddev->layout != mddev->new_layout ||
2166 mddev->level != mddev->new_level) { 2170 mddev->level != mddev->new_level) {
2167 mddev->new_chunk = mddev->chunk_size; 2171 mddev->new_chunk_sectors = mddev->chunk_sectors;
2168 mddev->new_layout = mddev->layout; 2172 mddev->new_layout = mddev->layout;
2169 mddev->new_level = mddev->level; 2173 mddev->new_level = mddev->level;
2170 return -EINVAL; 2174 return -EINVAL;
@@ -2252,7 +2256,7 @@ static int raid1_reshape(mddev_t *mddev)
2252 2256
2253static void raid1_quiesce(mddev_t *mddev, int state) 2257static void raid1_quiesce(mddev_t *mddev, int state)
2254{ 2258{
2255 conf_t *conf = mddev_to_conf(mddev); 2259 conf_t *conf = mddev->private;
2256 2260
2257 switch(state) { 2261 switch(state) {
2258 case 1: 2262 case 1:
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 1620eea3d57c..e87b84deff68 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -64,12 +64,6 @@ struct r1_private_data_s {
64typedef struct r1_private_data_s conf_t; 64typedef struct r1_private_data_s conf_t;
65 65
66/* 66/*
67 * this is the only point in the RAID code where we violate
68 * C type safety. mddev->private is an 'opaque' pointer.
69 */
70#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
71
72/*
73 * this is our 'private' RAID1 bio. 67 * this is our 'private' RAID1 bio.
74 * 68 *
75 * it contains information about what kind of IO operations were started 69 * it contains information about what kind of IO operations were started
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 750550c1166f..ae12ceafe10c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -188,7 +188,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
188 188
189static void free_r10bio(r10bio_t *r10_bio) 189static void free_r10bio(r10bio_t *r10_bio)
190{ 190{
191 conf_t *conf = mddev_to_conf(r10_bio->mddev); 191 conf_t *conf = r10_bio->mddev->private;
192 192
193 /* 193 /*
194 * Wake up any possible resync thread that waits for the device 194 * Wake up any possible resync thread that waits for the device
@@ -202,7 +202,7 @@ static void free_r10bio(r10bio_t *r10_bio)
202 202
203static void put_buf(r10bio_t *r10_bio) 203static void put_buf(r10bio_t *r10_bio)
204{ 204{
205 conf_t *conf = mddev_to_conf(r10_bio->mddev); 205 conf_t *conf = r10_bio->mddev->private;
206 206
207 mempool_free(r10_bio, conf->r10buf_pool); 207 mempool_free(r10_bio, conf->r10buf_pool);
208 208
@@ -213,7 +213,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
213{ 213{
214 unsigned long flags; 214 unsigned long flags;
215 mddev_t *mddev = r10_bio->mddev; 215 mddev_t *mddev = r10_bio->mddev;
216 conf_t *conf = mddev_to_conf(mddev); 216 conf_t *conf = mddev->private;
217 217
218 spin_lock_irqsave(&conf->device_lock, flags); 218 spin_lock_irqsave(&conf->device_lock, flags);
219 list_add(&r10_bio->retry_list, &conf->retry_list); 219 list_add(&r10_bio->retry_list, &conf->retry_list);
@@ -245,7 +245,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
245 */ 245 */
246static inline void update_head_pos(int slot, r10bio_t *r10_bio) 246static inline void update_head_pos(int slot, r10bio_t *r10_bio)
247{ 247{
248 conf_t *conf = mddev_to_conf(r10_bio->mddev); 248 conf_t *conf = r10_bio->mddev->private;
249 249
250 conf->mirrors[r10_bio->devs[slot].devnum].head_position = 250 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
251 r10_bio->devs[slot].addr + (r10_bio->sectors); 251 r10_bio->devs[slot].addr + (r10_bio->sectors);
@@ -256,7 +256,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
256 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 256 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
257 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 257 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
258 int slot, dev; 258 int slot, dev;
259 conf_t *conf = mddev_to_conf(r10_bio->mddev); 259 conf_t *conf = r10_bio->mddev->private;
260 260
261 261
262 slot = r10_bio->read_slot; 262 slot = r10_bio->read_slot;
@@ -297,7 +297,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
297 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 297 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
298 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 298 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
299 int slot, dev; 299 int slot, dev;
300 conf_t *conf = mddev_to_conf(r10_bio->mddev); 300 conf_t *conf = r10_bio->mddev->private;
301 301
302 for (slot = 0; slot < conf->copies; slot++) 302 for (slot = 0; slot < conf->copies; slot++)
303 if (r10_bio->devs[slot].bio == bio) 303 if (r10_bio->devs[slot].bio == bio)
@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
461 mddev_t *mddev = q->queuedata; 461 mddev_t *mddev = q->queuedata;
462 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 462 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
463 int max; 463 int max;
464 unsigned int chunk_sectors = mddev->chunk_size >> 9; 464 unsigned int chunk_sectors = mddev->chunk_sectors;
465 unsigned int bio_sectors = bvm->bi_size >> 9; 465 unsigned int bio_sectors = bvm->bi_size >> 9;
466 466
467 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 467 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
@@ -596,7 +596,7 @@ rb_out:
596 596
597static void unplug_slaves(mddev_t *mddev) 597static void unplug_slaves(mddev_t *mddev)
598{ 598{
599 conf_t *conf = mddev_to_conf(mddev); 599 conf_t *conf = mddev->private;
600 int i; 600 int i;
601 601
602 rcu_read_lock(); 602 rcu_read_lock();
@@ -628,7 +628,7 @@ static void raid10_unplug(struct request_queue *q)
628static int raid10_congested(void *data, int bits) 628static int raid10_congested(void *data, int bits)
629{ 629{
630 mddev_t *mddev = data; 630 mddev_t *mddev = data;
631 conf_t *conf = mddev_to_conf(mddev); 631 conf_t *conf = mddev->private;
632 int i, ret = 0; 632 int i, ret = 0;
633 633
634 rcu_read_lock(); 634 rcu_read_lock();
@@ -788,7 +788,7 @@ static void unfreeze_array(conf_t *conf)
788static int make_request(struct request_queue *q, struct bio * bio) 788static int make_request(struct request_queue *q, struct bio * bio)
789{ 789{
790 mddev_t *mddev = q->queuedata; 790 mddev_t *mddev = q->queuedata;
791 conf_t *conf = mddev_to_conf(mddev); 791 conf_t *conf = mddev->private;
792 mirror_info_t *mirror; 792 mirror_info_t *mirror;
793 r10bio_t *r10_bio; 793 r10bio_t *r10_bio;
794 struct bio *read_bio; 794 struct bio *read_bio;
@@ -981,11 +981,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
981 981
982static void status(struct seq_file *seq, mddev_t *mddev) 982static void status(struct seq_file *seq, mddev_t *mddev)
983{ 983{
984 conf_t *conf = mddev_to_conf(mddev); 984 conf_t *conf = mddev->private;
985 int i; 985 int i;
986 986
987 if (conf->near_copies < conf->raid_disks) 987 if (conf->near_copies < conf->raid_disks)
988 seq_printf(seq, " %dK chunks", mddev->chunk_size/1024); 988 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
989 if (conf->near_copies > 1) 989 if (conf->near_copies > 1)
990 seq_printf(seq, " %d near-copies", conf->near_copies); 990 seq_printf(seq, " %d near-copies", conf->near_copies);
991 if (conf->far_copies > 1) { 991 if (conf->far_copies > 1) {
@@ -1006,7 +1006,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
1006static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1006static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1007{ 1007{
1008 char b[BDEVNAME_SIZE]; 1008 char b[BDEVNAME_SIZE];
1009 conf_t *conf = mddev_to_conf(mddev); 1009 conf_t *conf = mddev->private;
1010 1010
1011 /* 1011 /*
1012 * If it is not operational, then we have already marked it as dead 1012 * If it is not operational, then we have already marked it as dead
@@ -1215,7 +1215,7 @@ abort:
1215static void end_sync_read(struct bio *bio, int error) 1215static void end_sync_read(struct bio *bio, int error)
1216{ 1216{
1217 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1217 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1218 conf_t *conf = mddev_to_conf(r10_bio->mddev); 1218 conf_t *conf = r10_bio->mddev->private;
1219 int i,d; 1219 int i,d;
1220 1220
1221 for (i=0; i<conf->copies; i++) 1221 for (i=0; i<conf->copies; i++)
@@ -1253,7 +1253,7 @@ static void end_sync_write(struct bio *bio, int error)
1253 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1253 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1254 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1254 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1255 mddev_t *mddev = r10_bio->mddev; 1255 mddev_t *mddev = r10_bio->mddev;
1256 conf_t *conf = mddev_to_conf(mddev); 1256 conf_t *conf = mddev->private;
1257 int i,d; 1257 int i,d;
1258 1258
1259 for (i = 0; i < conf->copies; i++) 1259 for (i = 0; i < conf->copies; i++)
@@ -1300,7 +1300,7 @@ static void end_sync_write(struct bio *bio, int error)
1300 */ 1300 */
1301static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) 1301static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1302{ 1302{
1303 conf_t *conf = mddev_to_conf(mddev); 1303 conf_t *conf = mddev->private;
1304 int i, first; 1304 int i, first;
1305 struct bio *tbio, *fbio; 1305 struct bio *tbio, *fbio;
1306 1306
@@ -1400,7 +1400,7 @@ done:
1400 1400
1401static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) 1401static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1402{ 1402{
1403 conf_t *conf = mddev_to_conf(mddev); 1403 conf_t *conf = mddev->private;
1404 int i, d; 1404 int i, d;
1405 struct bio *bio, *wbio; 1405 struct bio *bio, *wbio;
1406 1406
@@ -1549,7 +1549,7 @@ static void raid10d(mddev_t *mddev)
1549 r10bio_t *r10_bio; 1549 r10bio_t *r10_bio;
1550 struct bio *bio; 1550 struct bio *bio;
1551 unsigned long flags; 1551 unsigned long flags;
1552 conf_t *conf = mddev_to_conf(mddev); 1552 conf_t *conf = mddev->private;
1553 struct list_head *head = &conf->retry_list; 1553 struct list_head *head = &conf->retry_list;
1554 int unplug=0; 1554 int unplug=0;
1555 mdk_rdev_t *rdev; 1555 mdk_rdev_t *rdev;
@@ -1572,7 +1572,7 @@ static void raid10d(mddev_t *mddev)
1572 spin_unlock_irqrestore(&conf->device_lock, flags); 1572 spin_unlock_irqrestore(&conf->device_lock, flags);
1573 1573
1574 mddev = r10_bio->mddev; 1574 mddev = r10_bio->mddev;
1575 conf = mddev_to_conf(mddev); 1575 conf = mddev->private;
1576 if (test_bit(R10BIO_IsSync, &r10_bio->state)) { 1576 if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
1577 sync_request_write(mddev, r10_bio); 1577 sync_request_write(mddev, r10_bio);
1578 unplug = 1; 1578 unplug = 1;
@@ -1680,7 +1680,7 @@ static int init_resync(conf_t *conf)
1680 1680
1681static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 1681static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1682{ 1682{
1683 conf_t *conf = mddev_to_conf(mddev); 1683 conf_t *conf = mddev->private;
1684 r10bio_t *r10_bio; 1684 r10bio_t *r10_bio;
1685 struct bio *biolist = NULL, *bio; 1685 struct bio *biolist = NULL, *bio;
1686 sector_t max_sector, nr_sectors; 1686 sector_t max_sector, nr_sectors;
@@ -2026,7 +2026,7 @@ static sector_t
2026raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) 2026raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2027{ 2027{
2028 sector_t size; 2028 sector_t size;
2029 conf_t *conf = mddev_to_conf(mddev); 2029 conf_t *conf = mddev->private;
2030 2030
2031 if (!raid_disks) 2031 if (!raid_disks)
2032 raid_disks = mddev->raid_disks; 2032 raid_disks = mddev->raid_disks;
@@ -2050,9 +2050,10 @@ static int run(mddev_t *mddev)
2050 int nc, fc, fo; 2050 int nc, fc, fo;
2051 sector_t stride, size; 2051 sector_t stride, size;
2052 2052
2053 if (mddev->chunk_size < PAGE_SIZE) { 2053 if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
2054 !is_power_of_2(mddev->chunk_sectors)) {
2054 printk(KERN_ERR "md/raid10: chunk size must be " 2055 printk(KERN_ERR "md/raid10: chunk size must be "
2055 "at least PAGE_SIZE(%ld).\n", PAGE_SIZE); 2056 "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
2056 return -EINVAL; 2057 return -EINVAL;
2057 } 2058 }
2058 2059
@@ -2095,8 +2096,8 @@ static int run(mddev_t *mddev)
2095 conf->far_copies = fc; 2096 conf->far_copies = fc;
2096 conf->copies = nc*fc; 2097 conf->copies = nc*fc;
2097 conf->far_offset = fo; 2098 conf->far_offset = fo;
2098 conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1; 2099 conf->chunk_mask = mddev->chunk_sectors - 1;
2099 conf->chunk_shift = ffz(~mddev->chunk_size) - 9; 2100 conf->chunk_shift = ffz(~mddev->chunk_sectors);
2100 size = mddev->dev_sectors >> conf->chunk_shift; 2101 size = mddev->dev_sectors >> conf->chunk_shift;
2101 sector_div(size, fc); 2102 sector_div(size, fc);
2102 size = size * conf->raid_disks; 2103 size = size * conf->raid_disks;
@@ -2185,6 +2186,10 @@ static int run(mddev_t *mddev)
2185 goto out_free_conf; 2186 goto out_free_conf;
2186 } 2187 }
2187 2188
2189 if (mddev->recovery_cp != MaxSector)
2190 printk(KERN_NOTICE "raid10: %s is not clean"
2191 " -- starting background reconstruction\n",
2192 mdname(mddev));
2188 printk(KERN_INFO 2193 printk(KERN_INFO
2189 "raid10: raid set %s active with %d out of %d devices\n", 2194 "raid10: raid set %s active with %d out of %d devices\n",
2190 mdname(mddev), mddev->raid_disks - mddev->degraded, 2195 mdname(mddev), mddev->raid_disks - mddev->degraded,
@@ -2204,7 +2209,8 @@ static int run(mddev_t *mddev)
2204 * maybe... 2209 * maybe...
2205 */ 2210 */
2206 { 2211 {
2207 int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE); 2212 int stripe = conf->raid_disks *
2213 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
2208 stripe /= conf->near_copies; 2214 stripe /= conf->near_copies;
2209 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 2215 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2210 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 2216 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
@@ -2227,7 +2233,7 @@ out:
2227 2233
2228static int stop(mddev_t *mddev) 2234static int stop(mddev_t *mddev)
2229{ 2235{
2230 conf_t *conf = mddev_to_conf(mddev); 2236 conf_t *conf = mddev->private;
2231 2237
2232 raise_barrier(conf, 0); 2238 raise_barrier(conf, 0);
2233 lower_barrier(conf); 2239 lower_barrier(conf);
@@ -2245,7 +2251,7 @@ static int stop(mddev_t *mddev)
2245 2251
2246static void raid10_quiesce(mddev_t *mddev, int state) 2252static void raid10_quiesce(mddev_t *mddev, int state)
2247{ 2253{
2248 conf_t *conf = mddev_to_conf(mddev); 2254 conf_t *conf = mddev->private;
2249 2255
2250 switch(state) { 2256 switch(state) {
2251 case 1: 2257 case 1:
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 244dbe507a54..59cd1efb8d30 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -62,12 +62,6 @@ struct r10_private_data_s {
62typedef struct r10_private_data_s conf_t; 62typedef struct r10_private_data_s conf_t;
63 63
64/* 64/*
65 * this is the only point in the RAID code where we violate
66 * C type safety. mddev->private is an 'opaque' pointer.
67 */
68#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
69
70/*
71 * this is our 'private' RAID10 bio. 65 * this is our 'private' RAID10 bio.
72 * 66 *
73 * it contains information about what kind of IO operations were started 67 * it contains information about what kind of IO operations were started
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index bef876698232..f9f991e6e138 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1274,8 +1274,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1274 sector_t new_sector; 1274 sector_t new_sector;
1275 int algorithm = previous ? conf->prev_algo 1275 int algorithm = previous ? conf->prev_algo
1276 : conf->algorithm; 1276 : conf->algorithm;
1277 int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) 1277 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1278 : (conf->chunk_size >> 9); 1278 : conf->chunk_sectors;
1279 int raid_disks = previous ? conf->previous_raid_disks 1279 int raid_disks = previous ? conf->previous_raid_disks
1280 : conf->raid_disks; 1280 : conf->raid_disks;
1281 int data_disks = raid_disks - conf->max_degraded; 1281 int data_disks = raid_disks - conf->max_degraded;
@@ -1480,8 +1480,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1480 int raid_disks = sh->disks; 1480 int raid_disks = sh->disks;
1481 int data_disks = raid_disks - conf->max_degraded; 1481 int data_disks = raid_disks - conf->max_degraded;
1482 sector_t new_sector = sh->sector, check; 1482 sector_t new_sector = sh->sector, check;
1483 int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) 1483 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1484 : (conf->chunk_size >> 9); 1484 : conf->chunk_sectors;
1485 int algorithm = previous ? conf->prev_algo 1485 int algorithm = previous ? conf->prev_algo
1486 : conf->algorithm; 1486 : conf->algorithm;
1487 sector_t stripe; 1487 sector_t stripe;
@@ -1997,8 +1997,7 @@ static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
1997 struct stripe_head *sh) 1997 struct stripe_head *sh)
1998{ 1998{
1999 int sectors_per_chunk = 1999 int sectors_per_chunk =
2000 previous ? (conf->prev_chunk >> 9) 2000 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2001 : (conf->chunk_size >> 9);
2002 int dd_idx; 2001 int dd_idx;
2003 int chunk_offset = sector_div(stripe, sectors_per_chunk); 2002 int chunk_offset = sector_div(stripe, sectors_per_chunk);
2004 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; 2003 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
@@ -3284,7 +3283,7 @@ static void activate_bit_delay(raid5_conf_t *conf)
3284 3283
3285static void unplug_slaves(mddev_t *mddev) 3284static void unplug_slaves(mddev_t *mddev)
3286{ 3285{
3287 raid5_conf_t *conf = mddev_to_conf(mddev); 3286 raid5_conf_t *conf = mddev->private;
3288 int i; 3287 int i;
3289 3288
3290 rcu_read_lock(); 3289 rcu_read_lock();
@@ -3308,7 +3307,7 @@ static void unplug_slaves(mddev_t *mddev)
3308static void raid5_unplug_device(struct request_queue *q) 3307static void raid5_unplug_device(struct request_queue *q)
3309{ 3308{
3310 mddev_t *mddev = q->queuedata; 3309 mddev_t *mddev = q->queuedata;
3311 raid5_conf_t *conf = mddev_to_conf(mddev); 3310 raid5_conf_t *conf = mddev->private;
3312 unsigned long flags; 3311 unsigned long flags;
3313 3312
3314 spin_lock_irqsave(&conf->device_lock, flags); 3313 spin_lock_irqsave(&conf->device_lock, flags);
@@ -3327,7 +3326,7 @@ static void raid5_unplug_device(struct request_queue *q)
3327static int raid5_congested(void *data, int bits) 3326static int raid5_congested(void *data, int bits)
3328{ 3327{
3329 mddev_t *mddev = data; 3328 mddev_t *mddev = data;
3330 raid5_conf_t *conf = mddev_to_conf(mddev); 3329 raid5_conf_t *conf = mddev->private;
3331 3330
3332 /* No difference between reads and writes. Just check 3331 /* No difference between reads and writes. Just check
3333 * how busy the stripe_cache is 3332 * how busy the stripe_cache is
@@ -3352,14 +3351,14 @@ static int raid5_mergeable_bvec(struct request_queue *q,
3352 mddev_t *mddev = q->queuedata; 3351 mddev_t *mddev = q->queuedata;
3353 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3352 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3354 int max; 3353 int max;
3355 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3354 unsigned int chunk_sectors = mddev->chunk_sectors;
3356 unsigned int bio_sectors = bvm->bi_size >> 9; 3355 unsigned int bio_sectors = bvm->bi_size >> 9;
3357 3356
3358 if ((bvm->bi_rw & 1) == WRITE) 3357 if ((bvm->bi_rw & 1) == WRITE)
3359 return biovec->bv_len; /* always allow writes to be mergeable */ 3358 return biovec->bv_len; /* always allow writes to be mergeable */
3360 3359
3361 if (mddev->new_chunk < mddev->chunk_size) 3360 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3362 chunk_sectors = mddev->new_chunk >> 9; 3361 chunk_sectors = mddev->new_chunk_sectors;
3363 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3362 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3364 if (max < 0) max = 0; 3363 if (max < 0) max = 0;
3365 if (max <= biovec->bv_len && bio_sectors == 0) 3364 if (max <= biovec->bv_len && bio_sectors == 0)
@@ -3372,11 +3371,11 @@ static int raid5_mergeable_bvec(struct request_queue *q,
3372static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3371static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3373{ 3372{
3374 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3373 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3375 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3374 unsigned int chunk_sectors = mddev->chunk_sectors;
3376 unsigned int bio_sectors = bio->bi_size >> 9; 3375 unsigned int bio_sectors = bio->bi_size >> 9;
3377 3376
3378 if (mddev->new_chunk < mddev->chunk_size) 3377 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3379 chunk_sectors = mddev->new_chunk >> 9; 3378 chunk_sectors = mddev->new_chunk_sectors;
3380 return chunk_sectors >= 3379 return chunk_sectors >=
3381 ((sector & (chunk_sectors - 1)) + bio_sectors); 3380 ((sector & (chunk_sectors - 1)) + bio_sectors);
3382} 3381}
@@ -3440,7 +3439,7 @@ static void raid5_align_endio(struct bio *bi, int error)
3440 bio_put(bi); 3439 bio_put(bi);
3441 3440
3442 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3441 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
3443 conf = mddev_to_conf(mddev); 3442 conf = mddev->private;
3444 rdev = (void*)raid_bi->bi_next; 3443 rdev = (void*)raid_bi->bi_next;
3445 raid_bi->bi_next = NULL; 3444 raid_bi->bi_next = NULL;
3446 3445
@@ -3482,7 +3481,7 @@ static int bio_fits_rdev(struct bio *bi)
3482static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3481static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
3483{ 3482{
3484 mddev_t *mddev = q->queuedata; 3483 mddev_t *mddev = q->queuedata;
3485 raid5_conf_t *conf = mddev_to_conf(mddev); 3484 raid5_conf_t *conf = mddev->private;
3486 unsigned int dd_idx; 3485 unsigned int dd_idx;
3487 struct bio* align_bi; 3486 struct bio* align_bi;
3488 mdk_rdev_t *rdev; 3487 mdk_rdev_t *rdev;
@@ -3599,7 +3598,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3599static int make_request(struct request_queue *q, struct bio * bi) 3598static int make_request(struct request_queue *q, struct bio * bi)
3600{ 3599{
3601 mddev_t *mddev = q->queuedata; 3600 mddev_t *mddev = q->queuedata;
3602 raid5_conf_t *conf = mddev_to_conf(mddev); 3601 raid5_conf_t *conf = mddev->private;
3603 int dd_idx; 3602 int dd_idx;
3604 sector_t new_sector; 3603 sector_t new_sector;
3605 sector_t logical_sector, last_sector; 3604 sector_t logical_sector, last_sector;
@@ -3696,6 +3695,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3696 spin_unlock_irq(&conf->device_lock); 3695 spin_unlock_irq(&conf->device_lock);
3697 if (must_retry) { 3696 if (must_retry) {
3698 release_stripe(sh); 3697 release_stripe(sh);
3698 schedule();
3699 goto retry; 3699 goto retry;
3700 } 3700 }
3701 } 3701 }
@@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3791 * If old and new chunk sizes differ, we need to process the 3791 * If old and new chunk sizes differ, we need to process the
3792 * largest of these 3792 * largest of these
3793 */ 3793 */
3794 if (mddev->new_chunk > mddev->chunk_size) 3794 if (mddev->new_chunk_sectors > mddev->chunk_sectors)
3795 reshape_sectors = mddev->new_chunk / 512; 3795 reshape_sectors = mddev->new_chunk_sectors;
3796 else 3796 else
3797 reshape_sectors = mddev->chunk_size / 512; 3797 reshape_sectors = mddev->chunk_sectors;
3798 3798
3799 /* we update the metadata when there is more than 3Meg 3799 /* we update the metadata when there is more than 3Meg
3800 * in the block range (that is rather arbitrary, should 3800 * in the block range (that is rather arbitrary, should
@@ -3917,7 +3917,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3917 1, &dd_idx, NULL); 3917 1, &dd_idx, NULL);
3918 last_sector = 3918 last_sector =
3919 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 3919 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
3920 *(new_data_disks) - 1), 3920 * new_data_disks - 1),
3921 1, &dd_idx, NULL); 3921 1, &dd_idx, NULL);
3922 if (last_sector >= mddev->dev_sectors) 3922 if (last_sector >= mddev->dev_sectors)
3923 last_sector = mddev->dev_sectors - 1; 3923 last_sector = mddev->dev_sectors - 1;
@@ -3946,7 +3946,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
3946 wait_event(conf->wait_for_overlap, 3946 wait_event(conf->wait_for_overlap,
3947 atomic_read(&conf->reshape_stripes) == 0); 3947 atomic_read(&conf->reshape_stripes) == 0);
3948 mddev->reshape_position = conf->reshape_progress; 3948 mddev->reshape_position = conf->reshape_progress;
3949 mddev->curr_resync_completed = mddev->curr_resync; 3949 mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
3950 conf->reshape_checkpoint = jiffies; 3950 conf->reshape_checkpoint = jiffies;
3951 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3951 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3952 md_wakeup_thread(mddev->thread); 3952 md_wakeup_thread(mddev->thread);
@@ -4129,7 +4129,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4129static void raid5d(mddev_t *mddev) 4129static void raid5d(mddev_t *mddev)
4130{ 4130{
4131 struct stripe_head *sh; 4131 struct stripe_head *sh;
4132 raid5_conf_t *conf = mddev_to_conf(mddev); 4132 raid5_conf_t *conf = mddev->private;
4133 int handled; 4133 int handled;
4134 4134
4135 pr_debug("+++ raid5d active\n"); 4135 pr_debug("+++ raid5d active\n");
@@ -4185,7 +4185,7 @@ static void raid5d(mddev_t *mddev)
4185static ssize_t 4185static ssize_t
4186raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4186raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4187{ 4187{
4188 raid5_conf_t *conf = mddev_to_conf(mddev); 4188 raid5_conf_t *conf = mddev->private;
4189 if (conf) 4189 if (conf)
4190 return sprintf(page, "%d\n", conf->max_nr_stripes); 4190 return sprintf(page, "%d\n", conf->max_nr_stripes);
4191 else 4191 else
@@ -4195,7 +4195,7 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4195static ssize_t 4195static ssize_t
4196raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4196raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4197{ 4197{
4198 raid5_conf_t *conf = mddev_to_conf(mddev); 4198 raid5_conf_t *conf = mddev->private;
4199 unsigned long new; 4199 unsigned long new;
4200 int err; 4200 int err;
4201 4201
@@ -4233,7 +4233,7 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4233static ssize_t 4233static ssize_t
4234raid5_show_preread_threshold(mddev_t *mddev, char *page) 4234raid5_show_preread_threshold(mddev_t *mddev, char *page)
4235{ 4235{
4236 raid5_conf_t *conf = mddev_to_conf(mddev); 4236 raid5_conf_t *conf = mddev->private;
4237 if (conf) 4237 if (conf)
4238 return sprintf(page, "%d\n", conf->bypass_threshold); 4238 return sprintf(page, "%d\n", conf->bypass_threshold);
4239 else 4239 else
@@ -4243,7 +4243,7 @@ raid5_show_preread_threshold(mddev_t *mddev, char *page)
4243static ssize_t 4243static ssize_t
4244raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4244raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4245{ 4245{
4246 raid5_conf_t *conf = mddev_to_conf(mddev); 4246 raid5_conf_t *conf = mddev->private;
4247 unsigned long new; 4247 unsigned long new;
4248 if (len >= PAGE_SIZE) 4248 if (len >= PAGE_SIZE)
4249 return -EINVAL; 4249 return -EINVAL;
@@ -4267,7 +4267,7 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4267static ssize_t 4267static ssize_t
4268stripe_cache_active_show(mddev_t *mddev, char *page) 4268stripe_cache_active_show(mddev_t *mddev, char *page)
4269{ 4269{
4270 raid5_conf_t *conf = mddev_to_conf(mddev); 4270 raid5_conf_t *conf = mddev->private;
4271 if (conf) 4271 if (conf)
4272 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 4272 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4273 else 4273 else
@@ -4291,7 +4291,7 @@ static struct attribute_group raid5_attrs_group = {
4291static sector_t 4291static sector_t
4292raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) 4292raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4293{ 4293{
4294 raid5_conf_t *conf = mddev_to_conf(mddev); 4294 raid5_conf_t *conf = mddev->private;
4295 4295
4296 if (!sectors) 4296 if (!sectors)
4297 sectors = mddev->dev_sectors; 4297 sectors = mddev->dev_sectors;
@@ -4303,8 +4303,8 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4303 raid_disks = conf->previous_raid_disks; 4303 raid_disks = conf->previous_raid_disks;
4304 } 4304 }
4305 4305
4306 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4306 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4307 sectors &= ~((sector_t)mddev->new_chunk/512 - 1); 4307 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4308 return sectors * (raid_disks - conf->max_degraded); 4308 return sectors * (raid_disks - conf->max_degraded);
4309} 4309}
4310 4310
@@ -4336,9 +4336,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4336 return ERR_PTR(-EINVAL); 4336 return ERR_PTR(-EINVAL);
4337 } 4337 }
4338 4338
4339 if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) { 4339 if (!mddev->new_chunk_sectors ||
4340 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4341 !is_power_of_2(mddev->new_chunk_sectors)) {
4340 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4342 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
4341 mddev->new_chunk, mdname(mddev)); 4343 mddev->new_chunk_sectors << 9, mdname(mddev));
4342 return ERR_PTR(-EINVAL); 4344 return ERR_PTR(-EINVAL);
4343 } 4345 }
4344 4346
@@ -4401,7 +4403,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4401 conf->fullsync = 1; 4403 conf->fullsync = 1;
4402 } 4404 }
4403 4405
4404 conf->chunk_size = mddev->new_chunk; 4406 conf->chunk_sectors = mddev->new_chunk_sectors;
4405 conf->level = mddev->new_level; 4407 conf->level = mddev->new_level;
4406 if (conf->level == 6) 4408 if (conf->level == 6)
4407 conf->max_degraded = 2; 4409 conf->max_degraded = 2;
@@ -4411,7 +4413,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4411 conf->max_nr_stripes = NR_STRIPES; 4413 conf->max_nr_stripes = NR_STRIPES;
4412 conf->reshape_progress = mddev->reshape_position; 4414 conf->reshape_progress = mddev->reshape_position;
4413 if (conf->reshape_progress != MaxSector) { 4415 if (conf->reshape_progress != MaxSector) {
4414 conf->prev_chunk = mddev->chunk_size; 4416 conf->prev_chunk_sectors = mddev->chunk_sectors;
4415 conf->prev_algo = mddev->layout; 4417 conf->prev_algo = mddev->layout;
4416 } 4418 }
4417 4419
@@ -4453,6 +4455,10 @@ static int run(mddev_t *mddev)
4453 int working_disks = 0; 4455 int working_disks = 0;
4454 mdk_rdev_t *rdev; 4456 mdk_rdev_t *rdev;
4455 4457
4458 if (mddev->recovery_cp != MaxSector)
4459 printk(KERN_NOTICE "raid5: %s is not clean"
4460 " -- starting background reconstruction\n",
4461 mdname(mddev));
4456 if (mddev->reshape_position != MaxSector) { 4462 if (mddev->reshape_position != MaxSector) {
4457 /* Check that we can continue the reshape. 4463 /* Check that we can continue the reshape.
4458 * Currently only disks can change, it must 4464 * Currently only disks can change, it must
@@ -4475,7 +4481,7 @@ static int run(mddev_t *mddev)
4475 * geometry. 4481 * geometry.
4476 */ 4482 */
4477 here_new = mddev->reshape_position; 4483 here_new = mddev->reshape_position;
4478 if (sector_div(here_new, (mddev->new_chunk>>9)* 4484 if (sector_div(here_new, mddev->new_chunk_sectors *
4479 (mddev->raid_disks - max_degraded))) { 4485 (mddev->raid_disks - max_degraded))) {
4480 printk(KERN_ERR "raid5: reshape_position not " 4486 printk(KERN_ERR "raid5: reshape_position not "
4481 "on a stripe boundary\n"); 4487 "on a stripe boundary\n");
@@ -4483,7 +4489,7 @@ static int run(mddev_t *mddev)
4483 } 4489 }
4484 /* here_new is the stripe we will write to */ 4490 /* here_new is the stripe we will write to */
4485 here_old = mddev->reshape_position; 4491 here_old = mddev->reshape_position;
4486 sector_div(here_old, (mddev->chunk_size>>9)* 4492 sector_div(here_old, mddev->chunk_sectors *
4487 (old_disks-max_degraded)); 4493 (old_disks-max_degraded));
4488 /* here_old is the first stripe that we might need to read 4494 /* here_old is the first stripe that we might need to read
4489 * from */ 4495 * from */
@@ -4498,7 +4504,7 @@ static int run(mddev_t *mddev)
4498 } else { 4504 } else {
4499 BUG_ON(mddev->level != mddev->new_level); 4505 BUG_ON(mddev->level != mddev->new_level);
4500 BUG_ON(mddev->layout != mddev->new_layout); 4506 BUG_ON(mddev->layout != mddev->new_layout);
4501 BUG_ON(mddev->chunk_size != mddev->new_chunk); 4507 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
4502 BUG_ON(mddev->delta_disks != 0); 4508 BUG_ON(mddev->delta_disks != 0);
4503 } 4509 }
4504 4510
@@ -4532,7 +4538,7 @@ static int run(mddev_t *mddev)
4532 } 4538 }
4533 4539
4534 /* device size must be a multiple of chunk size */ 4540 /* device size must be a multiple of chunk size */
4535 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); 4541 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4536 mddev->resync_max_sectors = mddev->dev_sectors; 4542 mddev->resync_max_sectors = mddev->dev_sectors;
4537 4543
4538 if (mddev->degraded > 0 && 4544 if (mddev->degraded > 0 &&
@@ -4581,7 +4587,7 @@ static int run(mddev_t *mddev)
4581 { 4587 {
4582 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4588 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4583 int stripe = data_disks * 4589 int stripe = data_disks *
4584 (mddev->chunk_size / PAGE_SIZE); 4590 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
4585 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4591 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4586 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4592 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4587 } 4593 }
@@ -4678,7 +4684,8 @@ static void status(struct seq_file *seq, mddev_t *mddev)
4678 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4684 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4679 int i; 4685 int i;
4680 4686
4681 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4687 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
4688 mddev->chunk_sectors / 2, mddev->layout);
4682 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4689 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
4683 for (i = 0; i < conf->raid_disks; i++) 4690 for (i = 0; i < conf->raid_disks; i++)
4684 seq_printf (seq, "%s", 4691 seq_printf (seq, "%s",
@@ -4826,7 +4833,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
4826 * any io in the removed space completes, but it hardly seems 4833 * any io in the removed space completes, but it hardly seems
4827 * worth it. 4834 * worth it.
4828 */ 4835 */
4829 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4836 sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4830 md_set_array_sectors(mddev, raid5_size(mddev, sectors, 4837 md_set_array_sectors(mddev, raid5_size(mddev, sectors,
4831 mddev->raid_disks)); 4838 mddev->raid_disks));
4832 if (mddev->array_sectors > 4839 if (mddev->array_sectors >
@@ -4843,14 +4850,37 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
4843 return 0; 4850 return 0;
4844} 4851}
4845 4852
4846static int raid5_check_reshape(mddev_t *mddev) 4853static int check_stripe_cache(mddev_t *mddev)
4847{ 4854{
4848 raid5_conf_t *conf = mddev_to_conf(mddev); 4855 /* Can only proceed if there are plenty of stripe_heads.
4856 * We need a minimum of one full stripe,, and for sensible progress
4857 * it is best to have about 4 times that.
4858 * If we require 4 times, then the default 256 4K stripe_heads will
4859 * allow for chunk sizes up to 256K, which is probably OK.
4860 * If the chunk size is greater, user-space should request more
4861 * stripe_heads first.
4862 */
4863 raid5_conf_t *conf = mddev->private;
4864 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
4865 > conf->max_nr_stripes ||
4866 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
4867 > conf->max_nr_stripes) {
4868 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
4869 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
4870 / STRIPE_SIZE)*4);
4871 return 0;
4872 }
4873 return 1;
4874}
4875
4876static int check_reshape(mddev_t *mddev)
4877{
4878 raid5_conf_t *conf = mddev->private;
4849 4879
4850 if (mddev->delta_disks == 0 && 4880 if (mddev->delta_disks == 0 &&
4851 mddev->new_layout == mddev->layout && 4881 mddev->new_layout == mddev->layout &&
4852 mddev->new_chunk == mddev->chunk_size) 4882 mddev->new_chunk_sectors == mddev->chunk_sectors)
4853 return -EINVAL; /* nothing to do */ 4883 return 0; /* nothing to do */
4854 if (mddev->bitmap) 4884 if (mddev->bitmap)
4855 /* Cannot grow a bitmap yet */ 4885 /* Cannot grow a bitmap yet */
4856 return -EBUSY; 4886 return -EBUSY;
@@ -4869,28 +4899,15 @@ static int raid5_check_reshape(mddev_t *mddev)
4869 return -EINVAL; 4899 return -EINVAL;
4870 } 4900 }
4871 4901
4872 /* Can only proceed if there are plenty of stripe_heads. 4902 if (!check_stripe_cache(mddev))
4873 * We need a minimum of one full stripe,, and for sensible progress
4874 * it is best to have about 4 times that.
4875 * If we require 4 times, then the default 256 4K stripe_heads will
4876 * allow for chunk sizes up to 256K, which is probably OK.
4877 * If the chunk size is greater, user-space should request more
4878 * stripe_heads first.
4879 */
4880 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
4881 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
4882 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
4883 (max(mddev->chunk_size, mddev->new_chunk)
4884 / STRIPE_SIZE)*4);
4885 return -ENOSPC; 4903 return -ENOSPC;
4886 }
4887 4904
4888 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 4905 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
4889} 4906}
4890 4907
4891static int raid5_start_reshape(mddev_t *mddev) 4908static int raid5_start_reshape(mddev_t *mddev)
4892{ 4909{
4893 raid5_conf_t *conf = mddev_to_conf(mddev); 4910 raid5_conf_t *conf = mddev->private;
4894 mdk_rdev_t *rdev; 4911 mdk_rdev_t *rdev;
4895 int spares = 0; 4912 int spares = 0;
4896 int added_devices = 0; 4913 int added_devices = 0;
@@ -4899,6 +4916,9 @@ static int raid5_start_reshape(mddev_t *mddev)
4899 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4916 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4900 return -EBUSY; 4917 return -EBUSY;
4901 4918
4919 if (!check_stripe_cache(mddev))
4920 return -ENOSPC;
4921
4902 list_for_each_entry(rdev, &mddev->disks, same_set) 4922 list_for_each_entry(rdev, &mddev->disks, same_set)
4903 if (rdev->raid_disk < 0 && 4923 if (rdev->raid_disk < 0 &&
4904 !test_bit(Faulty, &rdev->flags)) 4924 !test_bit(Faulty, &rdev->flags))
@@ -4925,8 +4945,8 @@ static int raid5_start_reshape(mddev_t *mddev)
4925 spin_lock_irq(&conf->device_lock); 4945 spin_lock_irq(&conf->device_lock);
4926 conf->previous_raid_disks = conf->raid_disks; 4946 conf->previous_raid_disks = conf->raid_disks;
4927 conf->raid_disks += mddev->delta_disks; 4947 conf->raid_disks += mddev->delta_disks;
4928 conf->prev_chunk = conf->chunk_size; 4948 conf->prev_chunk_sectors = conf->chunk_sectors;
4929 conf->chunk_size = mddev->new_chunk; 4949 conf->chunk_sectors = mddev->new_chunk_sectors;
4930 conf->prev_algo = conf->algorithm; 4950 conf->prev_algo = conf->algorithm;
4931 conf->algorithm = mddev->new_layout; 4951 conf->algorithm = mddev->new_layout;
4932 if (mddev->delta_disks < 0) 4952 if (mddev->delta_disks < 0)
@@ -5008,7 +5028,7 @@ static void end_reshape(raid5_conf_t *conf)
5008 */ 5028 */
5009 { 5029 {
5010 int data_disks = conf->raid_disks - conf->max_degraded; 5030 int data_disks = conf->raid_disks - conf->max_degraded;
5011 int stripe = data_disks * (conf->chunk_size 5031 int stripe = data_disks * ((conf->chunk_sectors << 9)
5012 / PAGE_SIZE); 5032 / PAGE_SIZE);
5013 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 5033 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5014 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 5034 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
@@ -5022,7 +5042,7 @@ static void end_reshape(raid5_conf_t *conf)
5022static void raid5_finish_reshape(mddev_t *mddev) 5042static void raid5_finish_reshape(mddev_t *mddev)
5023{ 5043{
5024 struct block_device *bdev; 5044 struct block_device *bdev;
5025 raid5_conf_t *conf = mddev_to_conf(mddev); 5045 raid5_conf_t *conf = mddev->private;
5026 5046
5027 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5047 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5028 5048
@@ -5053,7 +5073,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
5053 raid5_remove_disk(mddev, d); 5073 raid5_remove_disk(mddev, d);
5054 } 5074 }
5055 mddev->layout = conf->algorithm; 5075 mddev->layout = conf->algorithm;
5056 mddev->chunk_size = conf->chunk_size; 5076 mddev->chunk_sectors = conf->chunk_sectors;
5057 mddev->reshape_position = MaxSector; 5077 mddev->reshape_position = MaxSector;
5058 mddev->delta_disks = 0; 5078 mddev->delta_disks = 0;
5059 } 5079 }
@@ -5061,7 +5081,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
5061 5081
5062static void raid5_quiesce(mddev_t *mddev, int state) 5082static void raid5_quiesce(mddev_t *mddev, int state)
5063{ 5083{
5064 raid5_conf_t *conf = mddev_to_conf(mddev); 5084 raid5_conf_t *conf = mddev->private;
5065 5085
5066 switch(state) { 5086 switch(state) {
5067 case 2: /* resume for a suspend */ 5087 case 2: /* resume for a suspend */
@@ -5111,7 +5131,7 @@ static void *raid5_takeover_raid1(mddev_t *mddev)
5111 5131
5112 mddev->new_level = 5; 5132 mddev->new_level = 5;
5113 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 5133 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5114 mddev->new_chunk = chunksect << 9; 5134 mddev->new_chunk_sectors = chunksect;
5115 5135
5116 return setup_conf(mddev); 5136 return setup_conf(mddev);
5117} 5137}
@@ -5150,24 +5170,24 @@ static void *raid5_takeover_raid6(mddev_t *mddev)
5150} 5170}
5151 5171
5152 5172
5153static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) 5173static int raid5_check_reshape(mddev_t *mddev)
5154{ 5174{
5155 /* For a 2-drive array, the layout and chunk size can be changed 5175 /* For a 2-drive array, the layout and chunk size can be changed
5156 * immediately as not restriping is needed. 5176 * immediately as not restriping is needed.
5157 * For larger arrays we record the new value - after validation 5177 * For larger arrays we record the new value - after validation
5158 * to be used by a reshape pass. 5178 * to be used by a reshape pass.
5159 */ 5179 */
5160 raid5_conf_t *conf = mddev_to_conf(mddev); 5180 raid5_conf_t *conf = mddev->private;
5181 int new_chunk = mddev->new_chunk_sectors;
5161 5182
5162 if (new_layout >= 0 && !algorithm_valid_raid5(new_layout)) 5183 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5163 return -EINVAL; 5184 return -EINVAL;
5164 if (new_chunk > 0) { 5185 if (new_chunk > 0) {
5165 if (new_chunk & (new_chunk-1)) 5186 if (!is_power_of_2(new_chunk))
5166 /* not a power of 2 */
5167 return -EINVAL; 5187 return -EINVAL;
5168 if (new_chunk < PAGE_SIZE) 5188 if (new_chunk < (PAGE_SIZE>>9))
5169 return -EINVAL; 5189 return -EINVAL;
5170 if (mddev->array_sectors & ((new_chunk>>9)-1)) 5190 if (mddev->array_sectors & (new_chunk-1))
5171 /* not factor of array size */ 5191 /* not factor of array size */
5172 return -EINVAL; 5192 return -EINVAL;
5173 } 5193 }
@@ -5175,49 +5195,39 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5175 /* They look valid */ 5195 /* They look valid */
5176 5196
5177 if (mddev->raid_disks == 2) { 5197 if (mddev->raid_disks == 2) {
5178 5198 /* can make the change immediately */
5179 if (new_layout >= 0) { 5199 if (mddev->new_layout >= 0) {
5180 conf->algorithm = new_layout; 5200 conf->algorithm = mddev->new_layout;
5181 mddev->layout = mddev->new_layout = new_layout; 5201 mddev->layout = mddev->new_layout;
5182 } 5202 }
5183 if (new_chunk > 0) { 5203 if (new_chunk > 0) {
5184 conf->chunk_size = new_chunk; 5204 conf->chunk_sectors = new_chunk ;
5185 mddev->chunk_size = mddev->new_chunk = new_chunk; 5205 mddev->chunk_sectors = new_chunk;
5186 } 5206 }
5187 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5207 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5188 md_wakeup_thread(mddev->thread); 5208 md_wakeup_thread(mddev->thread);
5189 } else {
5190 if (new_layout >= 0)
5191 mddev->new_layout = new_layout;
5192 if (new_chunk > 0)
5193 mddev->new_chunk = new_chunk;
5194 } 5209 }
5195 return 0; 5210 return check_reshape(mddev);
5196} 5211}
5197 5212
5198static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk) 5213static int raid6_check_reshape(mddev_t *mddev)
5199{ 5214{
5200 if (new_layout >= 0 && !algorithm_valid_raid6(new_layout)) 5215 int new_chunk = mddev->new_chunk_sectors;
5216
5217 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5201 return -EINVAL; 5218 return -EINVAL;
5202 if (new_chunk > 0) { 5219 if (new_chunk > 0) {
5203 if (new_chunk & (new_chunk-1)) 5220 if (!is_power_of_2(new_chunk))
5204 /* not a power of 2 */
5205 return -EINVAL; 5221 return -EINVAL;
5206 if (new_chunk < PAGE_SIZE) 5222 if (new_chunk < (PAGE_SIZE >> 9))
5207 return -EINVAL; 5223 return -EINVAL;
5208 if (mddev->array_sectors & ((new_chunk>>9)-1)) 5224 if (mddev->array_sectors & (new_chunk-1))
5209 /* not factor of array size */ 5225 /* not factor of array size */
5210 return -EINVAL; 5226 return -EINVAL;
5211 } 5227 }
5212 5228
5213 /* They look valid */ 5229 /* They look valid */
5214 5230 return check_reshape(mddev);
5215 if (new_layout >= 0)
5216 mddev->new_layout = new_layout;
5217 if (new_chunk > 0)
5218 mddev->new_chunk = new_chunk;
5219
5220 return 0;
5221} 5231}
5222 5232
5223static void *raid5_takeover(mddev_t *mddev) 5233static void *raid5_takeover(mddev_t *mddev)
@@ -5227,8 +5237,6 @@ static void *raid5_takeover(mddev_t *mddev)
5227 * raid1 - if there are two drives. We need to know the chunk size 5237 * raid1 - if there are two drives. We need to know the chunk size
5228 * raid4 - trivial - just use a raid4 layout. 5238 * raid4 - trivial - just use a raid4 layout.
5229 * raid6 - Providing it is a *_6 layout 5239 * raid6 - Providing it is a *_6 layout
5230 *
5231 * For now, just do raid1
5232 */ 5240 */
5233 5241
5234 if (mddev->level == 1) 5242 if (mddev->level == 1)
@@ -5310,12 +5318,11 @@ static struct mdk_personality raid6_personality =
5310 .sync_request = sync_request, 5318 .sync_request = sync_request,
5311 .resize = raid5_resize, 5319 .resize = raid5_resize,
5312 .size = raid5_size, 5320 .size = raid5_size,
5313 .check_reshape = raid5_check_reshape, 5321 .check_reshape = raid6_check_reshape,
5314 .start_reshape = raid5_start_reshape, 5322 .start_reshape = raid5_start_reshape,
5315 .finish_reshape = raid5_finish_reshape, 5323 .finish_reshape = raid5_finish_reshape,
5316 .quiesce = raid5_quiesce, 5324 .quiesce = raid5_quiesce,
5317 .takeover = raid6_takeover, 5325 .takeover = raid6_takeover,
5318 .reconfig = raid6_reconfig,
5319}; 5326};
5320static struct mdk_personality raid5_personality = 5327static struct mdk_personality raid5_personality =
5321{ 5328{
@@ -5338,7 +5345,6 @@ static struct mdk_personality raid5_personality =
5338 .finish_reshape = raid5_finish_reshape, 5345 .finish_reshape = raid5_finish_reshape,
5339 .quiesce = raid5_quiesce, 5346 .quiesce = raid5_quiesce,
5340 .takeover = raid5_takeover, 5347 .takeover = raid5_takeover,
5341 .reconfig = raid5_reconfig,
5342}; 5348};
5343 5349
5344static struct mdk_personality raid4_personality = 5350static struct mdk_personality raid4_personality =
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 52ba99954dec..9459689c4ea0 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -334,7 +334,8 @@ struct raid5_private_data {
334 struct hlist_head *stripe_hashtbl; 334 struct hlist_head *stripe_hashtbl;
335 mddev_t *mddev; 335 mddev_t *mddev;
336 struct disk_info *spare; 336 struct disk_info *spare;
337 int chunk_size, level, algorithm; 337 int chunk_sectors;
338 int level, algorithm;
338 int max_degraded; 339 int max_degraded;
339 int raid_disks; 340 int raid_disks;
340 int max_nr_stripes; 341 int max_nr_stripes;
@@ -350,7 +351,8 @@ struct raid5_private_data {
350 */ 351 */
351 sector_t reshape_safe; 352 sector_t reshape_safe;
352 int previous_raid_disks; 353 int previous_raid_disks;
353 int prev_chunk, prev_algo; 354 int prev_chunk_sectors;
355 int prev_algo;
354 short generation; /* increments with every reshape */ 356 short generation; /* increments with every reshape */
355 unsigned long reshape_checkpoint; /* Time we last updated 357 unsigned long reshape_checkpoint; /* Time we last updated
356 * metadata */ 358 * metadata */
@@ -408,8 +410,6 @@ struct raid5_private_data {
408 410
409typedef struct raid5_private_data raid5_conf_t; 411typedef struct raid5_private_data raid5_conf_t;
410 412
411#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
412
413/* 413/*
414 * Our supported algorithms 414 * Our supported algorithms
415 */ 415 */
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index c0d911252862..0bce255168bd 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -1067,10 +1067,12 @@ static int ov772x_probe(struct i2c_client *client,
1067 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 1067 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
1068 int ret; 1068 int ret;
1069 1069
1070 info = client->dev.platform_data; 1070 if (!client->dev.platform_data)
1071 if (!info)
1072 return -EINVAL; 1071 return -EINVAL;
1073 1072
1073 info = container_of(client->dev.platform_data,
1074 struct ov772x_camera_info, link);
1075
1074 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 1076 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
1075 dev_err(&adapter->dev, 1077 dev_err(&adapter->dev,
1076 "I2C-Adapter doesn't support " 1078 "I2C-Adapter doesn't support "
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index a39947643992..aa5065ea09ed 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -875,10 +875,12 @@ static int tw9910_probe(struct i2c_client *client,
875 const struct tw9910_scale_ctrl *scale; 875 const struct tw9910_scale_ctrl *scale;
876 int i, ret; 876 int i, ret;
877 877
878 info = client->dev.platform_data; 878 if (!client->dev.platform_data)
879 if (!info)
880 return -EINVAL; 879 return -EINVAL;
881 880
881 info = container_of(client->dev.platform_data,
882 struct tw9910_video_info, link);
883
882 if (!i2c_check_functionality(to_i2c_adapter(client->dev.parent), 884 if (!i2c_check_functionality(to_i2c_adapter(client->dev.parent),
883 I2C_FUNC_SMBUS_BYTE_DATA)) { 885 I2C_FUNC_SMBUS_BYTE_DATA)) {
884 dev_err(&client->dev, 886 dev_err(&client->dev,
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 0df065275cd3..5d0ba4f5924c 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -4414,11 +4414,11 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4414 * 1078 errata workaround for the 36GB limitation 4414 * 1078 errata workaround for the 36GB limitation
4415 */ 4415 */
4416 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 && 4416 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
4417 ioc->dma_mask > DMA_35BIT_MASK) { 4417 ioc->dma_mask > DMA_BIT_MASK(35)) {
4418 if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32)) 4418 if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
4419 && !pci_set_consistent_dma_mask(ioc->pcidev, 4419 && !pci_set_consistent_dma_mask(ioc->pcidev,
4420 DMA_BIT_MASK(32))) { 4420 DMA_BIT_MASK(32))) {
4421 dma_mask = DMA_35BIT_MASK; 4421 dma_mask = DMA_BIT_MASK(35);
4422 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4422 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
4423 "setting 35 bit addressing for " 4423 "setting 35 bit addressing for "
4424 "Request/Reply/Chain and Sense Buffers\n", 4424 "Request/Reply/Chain and Sense Buffers\n",
@@ -4575,7 +4575,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
4575 alloc_dma += ioc->reply_sz; 4575 alloc_dma += ioc->reply_sz;
4576 } 4576 }
4577 4577
4578 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev, 4578 if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
4579 ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev, 4579 ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
4580 ioc->dma_mask)) 4580 ioc->dma_mask))
4581 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4581 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
@@ -4602,7 +4602,7 @@ out_fail:
4602 ioc->sense_buf_pool = NULL; 4602 ioc->sense_buf_pool = NULL;
4603 } 4603 }
4604 4604
4605 if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev, 4605 if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
4606 DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev, 4606 DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
4607 DMA_BIT_MASK(64))) 4607 DMA_BIT_MASK(64)))
4608 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT 4608 d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ee3927ab11e0..491ac0f800d2 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -30,6 +30,7 @@ config MFD_SM501_GPIO
30config MFD_ASIC3 30config MFD_ASIC3
31 bool "Support for Compaq ASIC3" 31 bool "Support for Compaq ASIC3"
32 depends on GENERIC_HARDIRQS && GPIOLIB && ARM 32 depends on GENERIC_HARDIRQS && GPIOLIB && ARM
33 select MFD_CORE
33 ---help--- 34 ---help---
34 This driver supports the ASIC3 multifunction chip found on many 35 This driver supports the ASIC3 multifunction chip found on many
35 PDAs (mainly iPAQ and HTC based ones) 36 PDAs (mainly iPAQ and HTC based ones)
@@ -152,7 +153,7 @@ config MFD_WM8400
152 depends on I2C 153 depends on I2C
153 help 154 help
154 Support for the Wolfson Microelectronics WM8400 PMIC and audio 155 Support for the Wolfson Microelectronics WM8400 PMIC and audio
155 CODEC. This driver adds provides common support for accessing 156 CODEC. This driver provides common support for accessing
156 the device, additional drivers must be enabled in order to use 157 the device, additional drivers must be enabled in order to use
157 the functionality of the device. 158 the functionality of the device.
158 159
@@ -241,6 +242,27 @@ config PCF50633_GPIO
241 Say yes here if you want to include support GPIO for pins on 242 Say yes here if you want to include support GPIO for pins on
242 the PCF50633 chip. 243 the PCF50633 chip.
243 244
245config AB3100_CORE
246 tristate "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
247 depends on I2C
248 default y if ARCH_U300
249 help
250 Select this to enable the AB3100 Mixed Signal IC core
 251 functionality. This connects to an AB3100 on the I2C bus
 252 and exposes a number of symbols needed for dependent devices
253 to read and write registers and subscribe to events from
254 this multi-functional IC. This is needed to use other features
255 of the AB3100 such as battery-backed RTC, charging control,
256 LEDs, vibrator, system power and temperature, power management
257 and ALSA sound.
258
259config EZX_PCAP
260 bool "PCAP Support"
261 depends on GENERIC_HARDIRQS && SPI_MASTER
262 help
263 This enables the PCAP ASIC present on EZX Phones. This is
264 needed for MMC, TouchScreen, Sound, USB, etc..
265
244endmenu 266endmenu
245 267
246menu "Multimedia Capabilities Port drivers" 268menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 3afb5192e4da..6f8a9a1af20b 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o
26 26
27obj-$(CONFIG_MFD_CORE) += mfd-core.o 27obj-$(CONFIG_MFD_CORE) += mfd-core.o
28 28
29obj-$(CONFIG_EZX_PCAP) += ezx-pcap.o
30
29obj-$(CONFIG_MCP) += mcp-core.o 31obj-$(CONFIG_MCP) += mcp-core.o
30obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o 32obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
31obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o 33obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o
@@ -40,4 +42,5 @@ obj-$(CONFIG_PMIC_DA903X) += da903x.o
40 42
41obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o 43obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
42obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o 44obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
43obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o \ No newline at end of file 45obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
46obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
new file mode 100644
index 000000000000..13e7d7bfe85f
--- /dev/null
+++ b/drivers/mfd/ab3100-core.c
@@ -0,0 +1,991 @@
1/*
2 * Copyright (C) 2007-2009 ST-Ericsson
3 * License terms: GNU General Public License (GPL) version 2
4 * Low-level core for exclusive access to the AB3100 IC on the I2C bus
5 * and some basic chip-configuration.
6 * Author: Linus Walleij <linus.walleij@stericsson.com>
7 */
8
9#include <linux/i2c.h>
10#include <linux/mutex.h>
11#include <linux/list.h>
12#include <linux/notifier.h>
13#include <linux/err.h>
14#include <linux/platform_device.h>
15#include <linux/device.h>
16#include <linux/interrupt.h>
17#include <linux/workqueue.h>
18#include <linux/debugfs.h>
19#include <linux/seq_file.h>
20#include <linux/uaccess.h>
21#include <linux/mfd/ab3100.h>
22
23/* These are the only registers inside AB3100 used in this main file */
24
25/* Interrupt event registers */
26#define AB3100_EVENTA1 0x21
27#define AB3100_EVENTA2 0x22
28#define AB3100_EVENTA3 0x23
29
30/* AB3100 DAC converter registers */
31#define AB3100_DIS 0x00
32#define AB3100_D0C 0x01
33#define AB3100_D1C 0x02
34#define AB3100_D2C 0x03
35#define AB3100_D3C 0x04
36
37/* Chip ID register */
38#define AB3100_CID 0x20
39
40/* AB3100 interrupt registers */
41#define AB3100_IMRA1 0x24
42#define AB3100_IMRA2 0x25
43#define AB3100_IMRA3 0x26
44#define AB3100_IMRB1 0x2B
45#define AB3100_IMRB2 0x2C
46#define AB3100_IMRB3 0x2D
47
48/* System Power Monitoring and control registers */
49#define AB3100_MCA 0x2E
50#define AB3100_MCB 0x2F
51
52/* SIM power up */
53#define AB3100_SUP 0x50
54
55/*
56 * I2C communication
57 *
58 * The AB3100 is usually assigned address 0x48 (7-bit)
59 * The chip is defined in the platform i2c_board_data section.
60 */
61static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END };
62I2C_CLIENT_INSMOD_1(ab3100);
63
64u8 ab3100_get_chip_type(struct ab3100 *ab3100)
65{
66 u8 chip = ABUNKNOWN;
67
68 switch (ab3100->chip_id & 0xf0) {
69 case 0xa0:
70 chip = AB3000;
71 break;
72 case 0xc0:
73 chip = AB3100;
74 break;
75 }
76 return chip;
77}
78EXPORT_SYMBOL(ab3100_get_chip_type);
79
80int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval)
81{
82	u8 regandval[2] = {reg, regval};
83	int err;
84
85	err = mutex_lock_interruptible(&ab3100->access_mutex);
86	if (err)
87		return err;
88
89	/*
90	 * A two-byte write message with the first byte containing the register
91	 * number and the second byte containing the value to be written
92	 * effectively sets a register in the AB3100.
93	 */
94	err = i2c_master_send(ab3100->i2c_client, regandval, 2);
95	if (err < 0) {
96		dev_err(ab3100->dev,
97			"write error (write register): %d\n",
98			err);
99	} else if (err != 2) {
100		dev_err(ab3100->dev,
101			"write error (write register) "
102			"%d bytes transferred (expected 2)\n",
103			err);
104		err = -EIO;
105	} else {
106		/* All is well */
107		err = 0;
108	}
109	mutex_unlock(&ab3100->access_mutex);
110	return err;
111}
112EXPORT_SYMBOL(ab3100_set_register);
113
114/*
115 * The test registers exist at an I2C bus address up one
116 * from the ordinary base. They are not supposed to be used
117 * in production code, but sometimes you have to do that
118 * anyway. It's currently only used from this file so declare
119 * it static and do not export.
120 */
121static int ab3100_set_test_register(struct ab3100 *ab3100,
122 u8 reg, u8 regval)
123{
124 u8 regandval[2] = {reg, regval};
125 int err;
126
127 err = mutex_lock_interruptible(&ab3100->access_mutex);
128 if (err)
129 return err;
130
131 err = i2c_master_send(ab3100->testreg_client, regandval, 2);
132 if (err < 0) {
133 dev_err(ab3100->dev,
134 "write error (write test register): %d\n",
135 err);
136 } else if (err != 2) {
137 dev_err(ab3100->dev,
138 "write error (write test register) "
139 "%d bytes transferred (expected 2)\n",
140 err);
141 err = -EIO;
142 } else {
143 /* All is well */
144 err = 0;
145 }
146 mutex_unlock(&ab3100->access_mutex);
147
148 return err;
149}
150
151int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval)
152{
153 int err;
154
155 err = mutex_lock_interruptible(&ab3100->access_mutex);
156 if (err)
157 return err;
158
159 /*
160 * AB3100 require an I2C "stop" command between each message, else
161 * it will not work. The only way of achieving this with the
162 * message transport layer is to send the read and write messages
163 * separately.
164 */
165 err = i2c_master_send(ab3100->i2c_client, &reg, 1);
166 if (err < 0) {
167 dev_err(ab3100->dev,
168 "write error (send register address): %d\n",
169 err);
170 goto get_reg_out_unlock;
171 } else if (err != 1) {
172 dev_err(ab3100->dev,
173 "write error (send register address) "
174 "%d bytes transferred (expected 1)\n",
175 err);
176 err = -EIO;
177 goto get_reg_out_unlock;
178 } else {
179 /* All is well */
180 err = 0;
181 }
182
183 err = i2c_master_recv(ab3100->i2c_client, regval, 1);
184 if (err < 0) {
185 dev_err(ab3100->dev,
186 "write error (read register): %d\n",
187 err);
188 goto get_reg_out_unlock;
189 } else if (err != 1) {
190 dev_err(ab3100->dev,
191 "write error (read register) "
192 "%d bytes transferred (expected 1)\n",
193 err);
194 err = -EIO;
195 goto get_reg_out_unlock;
196 } else {
197 /* All is well */
198 err = 0;
199 }
200
201 get_reg_out_unlock:
202 mutex_unlock(&ab3100->access_mutex);
203 return err;
204}
205EXPORT_SYMBOL(ab3100_get_register);
206
207int ab3100_get_register_page(struct ab3100 *ab3100,
208 u8 first_reg, u8 *regvals, u8 numregs)
209{
210 int err;
211
212 if (ab3100->chip_id == 0xa0 ||
213 ab3100->chip_id == 0xa1)
214 /* These don't support paged reads */
215 return -EIO;
216
217 err = mutex_lock_interruptible(&ab3100->access_mutex);
218 if (err)
219 return err;
220
221 /*
222 * Paged read also require an I2C "stop" command.
223 */
224 err = i2c_master_send(ab3100->i2c_client, &first_reg, 1);
225 if (err < 0) {
226 dev_err(ab3100->dev,
227 "write error (send first register address): %d\n",
228 err);
229 goto get_reg_page_out_unlock;
230 } else if (err != 1) {
231 dev_err(ab3100->dev,
232 "write error (send first register address) "
233 "%d bytes transferred (expected 1)\n",
234 err);
235 err = -EIO;
236 goto get_reg_page_out_unlock;
237 }
238
239 err = i2c_master_recv(ab3100->i2c_client, regvals, numregs);
240 if (err < 0) {
241 dev_err(ab3100->dev,
242 "write error (read register page): %d\n",
243 err);
244 goto get_reg_page_out_unlock;
245 } else if (err != numregs) {
246 dev_err(ab3100->dev,
247 "write error (read register page) "
248 "%d bytes transferred (expected %d)\n",
249 err, numregs);
250 err = -EIO;
251 goto get_reg_page_out_unlock;
252 }
253
254 /* All is well */
255 err = 0;
256
257 get_reg_page_out_unlock:
258 mutex_unlock(&ab3100->access_mutex);
259 return err;
260}
261EXPORT_SYMBOL(ab3100_get_register_page);
262
263int ab3100_mask_and_set_register(struct ab3100 *ab3100,
264 u8 reg, u8 andmask, u8 ormask)
265{
266 u8 regandval[2] = {reg, 0};
267 int err;
268
269 err = mutex_lock_interruptible(&ab3100->access_mutex);
270 if (err)
271 return err;
272
273 /* First read out the target register */
274 err = i2c_master_send(ab3100->i2c_client, &reg, 1);
275 if (err < 0) {
276 dev_err(ab3100->dev,
277 "write error (maskset send address): %d\n",
278 err);
279 goto get_maskset_unlock;
280 } else if (err != 1) {
281 dev_err(ab3100->dev,
282 "write error (maskset send address) "
283 "%d bytes transferred (expected 1)\n",
284 err);
285 err = -EIO;
286 goto get_maskset_unlock;
287 }
288
289 err = i2c_master_recv(ab3100->i2c_client, &regandval[1], 1);
290 if (err < 0) {
291 dev_err(ab3100->dev,
292 "write error (maskset read register): %d\n",
293 err);
294 goto get_maskset_unlock;
295 } else if (err != 1) {
296 dev_err(ab3100->dev,
297 "write error (maskset read register) "
298 "%d bytes transferred (expected 1)\n",
299 err);
300 err = -EIO;
301 goto get_maskset_unlock;
302 }
303
304 /* Modify the register */
305 regandval[1] &= andmask;
306 regandval[1] |= ormask;
307
308 /* Write the register */
309 err = i2c_master_send(ab3100->i2c_client, regandval, 2);
310 if (err < 0) {
311 dev_err(ab3100->dev,
312 "write error (write register): %d\n",
313 err);
314 goto get_maskset_unlock;
315 } else if (err != 2) {
316 dev_err(ab3100->dev,
317 "write error (write register) "
318 "%d bytes transferred (expected 2)\n",
319 err);
320 err = -EIO;
321 goto get_maskset_unlock;
322 }
323
324 /* All is well */
325 err = 0;
326
327 get_maskset_unlock:
328 mutex_unlock(&ab3100->access_mutex);
329 return err;
330}
331EXPORT_SYMBOL(ab3100_mask_and_set_register);
332
333/*
334 * Register a simple callback for handling any AB3100 events.
335 */
336int ab3100_event_register(struct ab3100 *ab3100,
337 struct notifier_block *nb)
338{
339 return blocking_notifier_chain_register(&ab3100->event_subscribers,
340 nb);
341}
342EXPORT_SYMBOL(ab3100_event_register);
343
344/*
345 * Remove a previously registered callback.
346 */
347int ab3100_event_unregister(struct ab3100 *ab3100,
348 struct notifier_block *nb)
349{
350 return blocking_notifier_chain_unregister(&ab3100->event_subscribers,
351 nb);
352}
353EXPORT_SYMBOL(ab3100_event_unregister);
354
355
356int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
357 u32 *fatevent)
358{
359 if (!ab3100->startup_events_read)
360 return -EAGAIN; /* Try again later */
361 *fatevent = ab3100->startup_events;
362 return 0;
363}
364EXPORT_SYMBOL(ab3100_event_registers_startup_state_get);
365
366/* Interrupt handling worker */
367static void ab3100_work(struct work_struct *work)
368{
369 struct ab3100 *ab3100 = container_of(work, struct ab3100, work);
370 u8 event_regs[3];
371 u32 fatevent;
372 int err;
373
374 err = ab3100_get_register_page(ab3100, AB3100_EVENTA1,
375 event_regs, 3);
376 if (err)
377 goto err_event_wq;
378
379 fatevent = (event_regs[0] << 16) |
380 (event_regs[1] << 8) |
381 event_regs[2];
382
383 if (!ab3100->startup_events_read) {
384 ab3100->startup_events = fatevent;
385 ab3100->startup_events_read = true;
386 }
387 /*
388 * The notified parties will have to mask out the events
389 * they're interested in and react to them. They will be
390 * notified on all events, then they use the fatevent value
391 * to determine if they're interested.
392 */
393 blocking_notifier_call_chain(&ab3100->event_subscribers,
394 fatevent, NULL);
395
396 dev_dbg(ab3100->dev,
397 "IRQ Event: 0x%08x\n", fatevent);
398
399 /* By now the IRQ should be acked and deasserted so enable it again */
400 enable_irq(ab3100->i2c_client->irq);
401 return;
402
403 err_event_wq:
404 dev_dbg(ab3100->dev,
405 "error in event workqueue\n");
406 /* Enable the IRQ anyway, what choice do we have? */
407 enable_irq(ab3100->i2c_client->irq);
408 return;
409}
410
411static irqreturn_t ab3100_irq_handler(int irq, void *data)
412{
413 struct ab3100 *ab3100 = data;
414 /*
415 * Disable the IRQ and dispatch a worker to handle the
416 * event. Since the chip resides on I2C this is slow
417 * stuff and we will re-enable the interrupts once the
418 * worker has finished.
419 */
420 disable_irq(ab3100->i2c_client->irq);
421 schedule_work(&ab3100->work);
422 return IRQ_HANDLED;
423}
424
425#ifdef CONFIG_DEBUG_FS
426/*
427 * Some debugfs entries only exposed if we're using debug
428 */
429static int ab3100_registers_print(struct seq_file *s, void *p)
430{
431 struct ab3100 *ab3100 = s->private;
432 u8 value;
433 u8 reg;
434
435 seq_printf(s, "AB3100 registers:\n");
436
437 for (reg = 0; reg < 0xff; reg++) {
438 ab3100_get_register(ab3100, reg, &value);
439 seq_printf(s, "[0x%x]: 0x%x\n", reg, value);
440 }
441 return 0;
442}
443
444static int ab3100_registers_open(struct inode *inode, struct file *file)
445{
446 return single_open(file, ab3100_registers_print, inode->i_private);
447}
448
449static const struct file_operations ab3100_registers_fops = {
450 .open = ab3100_registers_open,
451 .read = seq_read,
452 .llseek = seq_lseek,
453 .release = single_release,
454 .owner = THIS_MODULE,
455};
456
457struct ab3100_get_set_reg_priv {
458 struct ab3100 *ab3100;
459 bool mode;
460};
461
462static int ab3100_get_set_reg_open_file(struct inode *inode, struct file *file)
463{
464 file->private_data = inode->i_private;
465 return 0;
466}
467
468static int ab3100_get_set_reg(struct file *file,
469				  const char __user *user_buf,
470				  size_t count, loff_t *ppos)
471{
472	struct ab3100_get_set_reg_priv *priv = file->private_data;
473	struct ab3100 *ab3100 = priv->ab3100;
474	char buf[32];
475	int buf_size;
476	int regp;
477	unsigned long user_reg;
478	int err;
479	int i = 0;
480
481	/* Get userspace string and assure termination */
482	buf_size = min(count, (sizeof(buf)-1));
483	if (copy_from_user(buf, user_buf, buf_size))
484		return -EFAULT;
485	buf[buf_size] = 0;
486
487	/*
488	 * The idea is here to parse a string which is either
489	 * "0xnn" for reading a register, or "0xaa 0xbb" for
490	 * writing 0xbb to the register 0xaa. First move past
491	 * whitespace and then begin to parse the register.
492	 */
493	while ((i < buf_size) && (buf[i] == ' '))
494		i++;
495	regp = i;
496
497	/*
498	 * Advance pointer to end of string then terminate
499	 * the register string. This is needed to satisfy
500	 * the strict_strtoul() function.
501	 */
502	while ((i < buf_size) && (buf[i] != ' '))
503		i++;
504	buf[i] = '\0';
505
506	err = strict_strtoul(&buf[regp], 16, &user_reg);
507	if (err)
508		return err;
509	if (user_reg > 0xff)
510		return -EINVAL;
511
512	/* Either we read or we write a register here */
513	if (!priv->mode) {
514		/* Reading */
515		u8 reg = (u8) user_reg;
516		u8 regvalue;
517
518		ab3100_get_register(ab3100, reg, &regvalue);
519
520		dev_info(ab3100->dev,
521			 "debug read AB3100 reg[0x%02x]: 0x%02x\n",
522			 reg, regvalue);
523	} else {
524		int valp;
525		unsigned long user_value;
526		u8 reg = (u8) user_reg;
527		u8 value;
528		u8 regvalue;
529
530		/*
531		 * Writing, we need some value to write to
532		 * the register so keep parsing the string
533		 * from userspace.
534		 */
535		i++;
536		while ((i < buf_size) && (buf[i] == ' '))
537			i++;
538		valp = i;
539		while ((i < buf_size) && (buf[i] != ' '))
540			i++;
541		buf[i] = '\0';
542
543		err = strict_strtoul(&buf[valp], 16, &user_value);
544		if (err)
545			return err;
546		if (user_value > 0xff)
547			return -EINVAL;
548
549		value = (u8) user_value;
550		ab3100_set_register(ab3100, reg, value);
551		ab3100_get_register(ab3100, reg, &regvalue);
552
553		dev_info(ab3100->dev,
554			 "debug write reg[0x%02x] with 0x%02x, "
555			 "after readback: 0x%02x\n",
556			 reg, value, regvalue);
557	}
558	return buf_size;
559}
560
561static const struct file_operations ab3100_get_set_reg_fops = {
562 .open = ab3100_get_set_reg_open_file,
563 .write = ab3100_get_set_reg,
564};
565
566static struct dentry *ab3100_dir;
567static struct dentry *ab3100_reg_file;
568static struct ab3100_get_set_reg_priv ab3100_get_priv;
569static struct dentry *ab3100_get_reg_file;
570static struct ab3100_get_set_reg_priv ab3100_set_priv;
571static struct dentry *ab3100_set_reg_file;
572
573static void ab3100_setup_debugfs(struct ab3100 *ab3100)
574{
575 int err;
576
577 ab3100_dir = debugfs_create_dir("ab3100", NULL);
578 if (!ab3100_dir)
579 goto exit_no_debugfs;
580
581 ab3100_reg_file = debugfs_create_file("registers",
582 S_IRUGO, ab3100_dir, ab3100,
583 &ab3100_registers_fops);
584 if (!ab3100_reg_file) {
585 err = -ENOMEM;
586 goto exit_destroy_dir;
587 }
588
589 ab3100_get_priv.ab3100 = ab3100;
590 ab3100_get_priv.mode = false;
591 ab3100_get_reg_file = debugfs_create_file("get_reg",
592 S_IWUGO, ab3100_dir, &ab3100_get_priv,
593 &ab3100_get_set_reg_fops);
594 if (!ab3100_get_reg_file) {
595 err = -ENOMEM;
596 goto exit_destroy_reg;
597 }
598
599 ab3100_set_priv.ab3100 = ab3100;
600 ab3100_set_priv.mode = true;
601 ab3100_set_reg_file = debugfs_create_file("set_reg",
602 S_IWUGO, ab3100_dir, &ab3100_set_priv,
603 &ab3100_get_set_reg_fops);
604 if (!ab3100_set_reg_file) {
605 err = -ENOMEM;
606 goto exit_destroy_get_reg;
607 }
608 return;
609
610 exit_destroy_get_reg:
611 debugfs_remove(ab3100_get_reg_file);
612 exit_destroy_reg:
613 debugfs_remove(ab3100_reg_file);
614 exit_destroy_dir:
615 debugfs_remove(ab3100_dir);
616 exit_no_debugfs:
617 return;
618}
619static inline void ab3100_remove_debugfs(void)
620{
621 debugfs_remove(ab3100_set_reg_file);
622 debugfs_remove(ab3100_get_reg_file);
623 debugfs_remove(ab3100_reg_file);
624 debugfs_remove(ab3100_dir);
625}
626#else
627static inline void ab3100_setup_debugfs(struct ab3100 *ab3100)
628{
629}
630static inline void ab3100_remove_debugfs(void)
631{
632}
633#endif
634
635/*
636 * Basic set-up, datastructure creation/destruction and I2C interface.
637 * This sets up a default config in the AB3100 chip so that it
638 * will work as expected.
639 */
640
641struct ab3100_init_setting {
642 u8 abreg;
643 u8 setting;
644};
645
646static const struct ab3100_init_setting __initdata
647ab3100_init_settings[] = {
648 {
649 .abreg = AB3100_MCA,
650 .setting = 0x01
651 }, {
652 .abreg = AB3100_MCB,
653 .setting = 0x30
654 }, {
655 .abreg = AB3100_IMRA1,
656 .setting = 0x00
657 }, {
658 .abreg = AB3100_IMRA2,
659 .setting = 0xFF
660 }, {
661 .abreg = AB3100_IMRA3,
662 .setting = 0x01
663 }, {
664 .abreg = AB3100_IMRB1,
665 .setting = 0xFF
666 }, {
667 .abreg = AB3100_IMRB2,
668 .setting = 0xFF
669 }, {
670 .abreg = AB3100_IMRB3,
671 .setting = 0xFF
672 }, {
673 .abreg = AB3100_SUP,
674 .setting = 0x00
675 }, {
676 .abreg = AB3100_DIS,
677 .setting = 0xF0
678 }, {
679 .abreg = AB3100_D0C,
680 .setting = 0x00
681 }, {
682 .abreg = AB3100_D1C,
683 .setting = 0x00
684 }, {
685 .abreg = AB3100_D2C,
686 .setting = 0x00
687 }, {
688 .abreg = AB3100_D3C,
689 .setting = 0x00
690 },
691};
692
693static int __init ab3100_setup(struct ab3100 *ab3100)
694{
695 int err = 0;
696 int i;
697
698 for (i = 0; i < ARRAY_SIZE(ab3100_init_settings); i++) {
699 err = ab3100_set_register(ab3100,
700 ab3100_init_settings[i].abreg,
701 ab3100_init_settings[i].setting);
702 if (err)
703 goto exit_no_setup;
704 }
705
706 /*
707 * Special trick to make the AB3100 use the 32kHz clock (RTC)
708 * bit 3 in test register 0x02 is a special, undocumented test
709 * register bit that only exists in AB3100 P1E
710 */
711 if (ab3100->chip_id == 0xc4) {
712 dev_warn(ab3100->dev,
713 "AB3100 P1E variant detected, "
714 "forcing chip to 32KHz\n");
715 err = ab3100_set_test_register(ab3100, 0x02, 0x08);
716 }
717
718 exit_no_setup:
719 return err;
720}
721
722/*
723 * Here we define all the platform devices that appear
724 * as children of the AB3100. These are regular platform
725 * devices with the IORESOURCE_IO .start and .end set
726 * to correspond to the internal AB3100 register range
727 * mapping to the corresponding subdevice.
728 */
729
730#define AB3100_DEVICE(devname, devid) \
731static struct platform_device ab3100_##devname##_device = { \
732 .name = devid, \
733 .id = -1, \
734}
735
736/*
737 * This lists all the subdevices and corresponding register
738 * ranges.
739 */
740AB3100_DEVICE(dac, "ab3100-dac");
741AB3100_DEVICE(leds, "ab3100-leds");
742AB3100_DEVICE(power, "ab3100-power");
743AB3100_DEVICE(regulators, "ab3100-regulators");
744AB3100_DEVICE(sim, "ab3100-sim");
745AB3100_DEVICE(uart, "ab3100-uart");
746AB3100_DEVICE(rtc, "ab3100-rtc");
747AB3100_DEVICE(charger, "ab3100-charger");
748AB3100_DEVICE(boost, "ab3100-boost");
749AB3100_DEVICE(adc, "ab3100-adc");
750AB3100_DEVICE(fuelgauge, "ab3100-fuelgauge");
751AB3100_DEVICE(vibrator, "ab3100-vibrator");
752AB3100_DEVICE(otp, "ab3100-otp");
753AB3100_DEVICE(codec, "ab3100-codec");
754
755static struct platform_device *
756ab3100_platform_devs[] = {
757 &ab3100_dac_device,
758 &ab3100_leds_device,
759 &ab3100_power_device,
760 &ab3100_regulators_device,
761 &ab3100_sim_device,
762 &ab3100_uart_device,
763 &ab3100_rtc_device,
764 &ab3100_charger_device,
765 &ab3100_boost_device,
766 &ab3100_adc_device,
767 &ab3100_fuelgauge_device,
768 &ab3100_vibrator_device,
769 &ab3100_otp_device,
770 &ab3100_codec_device,
771};
772
773struct ab_family_id {
774 u8 id;
775 char *name;
776};
777
778static const struct ab_family_id ids[] __initdata = {
779 /* AB3100 */
780 {
781 .id = 0xc0,
782 .name = "P1A"
783 }, {
784 .id = 0xc1,
785 .name = "P1B"
786 }, {
787 .id = 0xc2,
788 .name = "P1C"
789 }, {
790 .id = 0xc3,
791 .name = "P1D"
792 }, {
793 .id = 0xc4,
794 .name = "P1E"
795 }, {
796 .id = 0xc5,
797 .name = "P1F/R1A"
798 }, {
799 .id = 0xc6,
800 .name = "P1G/R1A"
801 }, {
802 .id = 0xc7,
803 .name = "P2A/R2A"
804 }, {
805 .id = 0xc8,
806 .name = "P2B/R2B"
807 },
808 /* AB3000 variants, not supported */
809 {
810 .id = 0xa0
811 }, {
812 .id = 0xa1
813 }, {
814 .id = 0xa2
815 }, {
816 .id = 0xa3
817 }, {
818 .id = 0xa4
819 }, {
820 .id = 0xa5
821 }, {
822 .id = 0xa6
823 }, {
824 .id = 0xa7
825 },
826 /* Terminator */
827 {
828 .id = 0x00,
829 },
830};
831
832static int __init ab3100_probe(struct i2c_client *client,
833 const struct i2c_device_id *id)
834{
835 struct ab3100 *ab3100;
836 int err;
837 int i;
838
839 ab3100 = kzalloc(sizeof(struct ab3100), GFP_KERNEL);
840 if (!ab3100) {
841 dev_err(&client->dev, "could not allocate AB3100 device\n");
842 return -ENOMEM;
843 }
844
845 /* Initialize data structure */
846 mutex_init(&ab3100->access_mutex);
847 BLOCKING_INIT_NOTIFIER_HEAD(&ab3100->event_subscribers);
848
849 ab3100->i2c_client = client;
850 ab3100->dev = &ab3100->i2c_client->dev;
851
852 i2c_set_clientdata(client, ab3100);
853
854 /* Read chip ID register */
855 err = ab3100_get_register(ab3100, AB3100_CID,
856 &ab3100->chip_id);
857 if (err) {
858 dev_err(&client->dev,
859 "could not communicate with the AB3100 analog "
860 "baseband chip\n");
861 goto exit_no_detect;
862 }
863
864 for (i = 0; ids[i].id != 0x0; i++) {
865 if (ids[i].id == ab3100->chip_id) {
866 if (ids[i].name != NULL) {
867 snprintf(&ab3100->chip_name[0],
868 sizeof(ab3100->chip_name) - 1,
869 "AB3100 %s",
870 ids[i].name);
871 break;
872 } else {
873 dev_err(&client->dev,
874 "AB3000 is not supported\n");
875 goto exit_no_detect;
876 }
877 }
878 }
879
880 if (ids[i].id == 0x0) {
881 dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n",
882 ab3100->chip_id);
883 dev_err(&client->dev, "accepting it anyway. Please update "
884 "the driver.\n");
885 goto exit_no_detect;
886 }
887
888 dev_info(&client->dev, "Detected chip: %s\n",
889 &ab3100->chip_name[0]);
890
891 /* Attach a second dummy i2c_client to the test register address */
892 ab3100->testreg_client = i2c_new_dummy(client->adapter,
893 client->addr + 1);
894 if (!ab3100->testreg_client) {
895 err = -ENOMEM;
896 goto exit_no_testreg_client;
897 }
898
899 strlcpy(ab3100->testreg_client->name, id->name,
900 sizeof(ab3100->testreg_client->name));
901
902 err = ab3100_setup(ab3100);
903 if (err)
904 goto exit_no_setup;
905
906 INIT_WORK(&ab3100->work, ab3100_work);
907
908 /* This real unpredictable IRQ is of course sampled for entropy */
909 err = request_irq(client->irq, ab3100_irq_handler,
910 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
911 "AB3100 IRQ", ab3100);
912 if (err)
913 goto exit_no_irq;
914
915 /* Set parent and a pointer back to the container in device data */
916 for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) {
917 ab3100_platform_devs[i]->dev.parent =
918 &client->dev;
919 platform_set_drvdata(ab3100_platform_devs[i], ab3100);
920 }
921
922 /* Register the platform devices */
923 platform_add_devices(ab3100_platform_devs,
924 ARRAY_SIZE(ab3100_platform_devs));
925
926 ab3100_setup_debugfs(ab3100);
927
928 return 0;
929
930 exit_no_irq:
931 exit_no_setup:
932 i2c_unregister_device(ab3100->testreg_client);
933 exit_no_testreg_client:
934 exit_no_detect:
935 kfree(ab3100);
936 return err;
937}
938
939static int __exit ab3100_remove(struct i2c_client *client)
940{
941 struct ab3100 *ab3100 = i2c_get_clientdata(client);
942 int i;
943
944 /* Unregister subdevices */
945 for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++)
946 platform_device_unregister(ab3100_platform_devs[i]);
947
948 ab3100_remove_debugfs();
949 i2c_unregister_device(ab3100->testreg_client);
950
951 /*
952 * At this point, all subscribers should have unregistered
953 * their notifiers so deactivate IRQ
954 */
955 free_irq(client->irq, ab3100);
956 kfree(ab3100);
957 return 0;
958}
959
960static const struct i2c_device_id ab3100_id[] = {
961 { "ab3100", ab3100 },
962 { }
963};
964MODULE_DEVICE_TABLE(i2c, ab3100_id);
965
966static struct i2c_driver ab3100_driver = {
967 .driver = {
968 .name = "ab3100",
969 .owner = THIS_MODULE,
970 },
971 .id_table = ab3100_id,
972 .probe = ab3100_probe,
973 .remove = __exit_p(ab3100_remove),
974};
975
976static int __init ab3100_i2c_init(void)
977{
978 return i2c_add_driver(&ab3100_driver);
979}
980
981static void __exit ab3100_i2c_exit(void)
982{
983 i2c_del_driver(&ab3100_driver);
984}
985
986subsys_initcall(ab3100_i2c_init);
987module_exit(ab3100_i2c_exit);
988
989MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
990MODULE_DESCRIPTION("AB3100 core driver");
991MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 9e485459f63b..63a2a6632106 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/delay.h>
20#include <linux/irq.h> 21#include <linux/irq.h>
21#include <linux/gpio.h> 22#include <linux/gpio.h>
22#include <linux/io.h> 23#include <linux/io.h>
@@ -24,6 +25,51 @@
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
25 26
26#include <linux/mfd/asic3.h> 27#include <linux/mfd/asic3.h>
28#include <linux/mfd/core.h>
29#include <linux/mfd/ds1wm.h>
30#include <linux/mfd/tmio.h>
31
32enum {
33 ASIC3_CLOCK_SPI,
34 ASIC3_CLOCK_OWM,
35 ASIC3_CLOCK_PWM0,
36 ASIC3_CLOCK_PWM1,
37 ASIC3_CLOCK_LED0,
38 ASIC3_CLOCK_LED1,
39 ASIC3_CLOCK_LED2,
40 ASIC3_CLOCK_SD_HOST,
41 ASIC3_CLOCK_SD_BUS,
42 ASIC3_CLOCK_SMBUS,
43 ASIC3_CLOCK_EX0,
44 ASIC3_CLOCK_EX1,
45};
46
47struct asic3_clk {
48 int enabled;
49 unsigned int cdex;
50 unsigned long rate;
51};
52
53#define INIT_CDEX(_name, _rate) \
54 [ASIC3_CLOCK_##_name] = { \
55 .cdex = CLOCK_CDEX_##_name, \
56 .rate = _rate, \
57 }
58
59struct asic3_clk asic3_clk_init[] __initdata = {
60 INIT_CDEX(SPI, 0),
61 INIT_CDEX(OWM, 5000000),
62 INIT_CDEX(PWM0, 0),
63 INIT_CDEX(PWM1, 0),
64 INIT_CDEX(LED0, 0),
65 INIT_CDEX(LED1, 0),
66 INIT_CDEX(LED2, 0),
67 INIT_CDEX(SD_HOST, 24576000),
68 INIT_CDEX(SD_BUS, 12288000),
69 INIT_CDEX(SMBUS, 0),
70 INIT_CDEX(EX0, 32768),
71 INIT_CDEX(EX1, 24576000),
72};
27 73
28struct asic3 { 74struct asic3 {
29 void __iomem *mapping; 75 void __iomem *mapping;
@@ -34,6 +80,8 @@ struct asic3 {
34 u16 irq_bothedge[4]; 80 u16 irq_bothedge[4];
35 struct gpio_chip gpio; 81 struct gpio_chip gpio;
36 struct device *dev; 82 struct device *dev;
83
84 struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)];
37}; 85};
38 86
39static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset); 87static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset);
@@ -52,6 +100,21 @@ static inline u32 asic3_read_register(struct asic3 *asic,
52 (reg >> asic->bus_shift)); 100 (reg >> asic->bus_shift));
53} 101}
54 102
103void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
104{
105 unsigned long flags;
106 u32 val;
107
108 spin_lock_irqsave(&asic->lock, flags);
109 val = asic3_read_register(asic, reg);
110 if (set)
111 val |= bits;
112 else
113 val &= ~bits;
114 asic3_write_register(asic, reg, val);
115 spin_unlock_irqrestore(&asic->lock, flags);
116}
117
55/* IRQs */ 118/* IRQs */
56#define MAX_ASIC_ISR_LOOPS 20 119#define MAX_ASIC_ISR_LOOPS 20
57#define ASIC3_GPIO_BASE_INCR \ 120#define ASIC3_GPIO_BASE_INCR \
@@ -525,6 +588,240 @@ static int asic3_gpio_remove(struct platform_device *pdev)
525 return gpiochip_remove(&asic->gpio); 588 return gpiochip_remove(&asic->gpio);
526} 589}
527 590
591static int asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
592{
593 unsigned long flags;
594 u32 cdex;
595
596 spin_lock_irqsave(&asic->lock, flags);
597 if (clk->enabled++ == 0) {
598 cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX));
599 cdex |= clk->cdex;
600 asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
601 }
602 spin_unlock_irqrestore(&asic->lock, flags);
603
604 return 0;
605}
606
607static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
608{
609 unsigned long flags;
610 u32 cdex;
611
612 WARN_ON(clk->enabled == 0);
613
614 spin_lock_irqsave(&asic->lock, flags);
615 if (--clk->enabled == 0) {
616 cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX));
617 cdex &= ~clk->cdex;
618 asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
619 }
620 spin_unlock_irqrestore(&asic->lock, flags);
621}
622
623/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
624static struct ds1wm_driver_data ds1wm_pdata = {
625 .active_high = 1,
626};
627
628static struct resource ds1wm_resources[] = {
629 {
630 .start = ASIC3_OWM_BASE,
631 .end = ASIC3_OWM_BASE + 0x13,
632 .flags = IORESOURCE_MEM,
633 },
634 {
635 .start = ASIC3_IRQ_OWM,
 636 .end = ASIC3_IRQ_OWM,
637 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
638 },
639};
640
641static int ds1wm_enable(struct platform_device *pdev)
642{
643 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
644
645 /* Turn on external clocks and the OWM clock */
646 asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
647 asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
648 asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_OWM]);
649 msleep(1);
650
651 /* Reset and enable DS1WM */
652 asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET),
653 ASIC3_EXTCF_OWM_RESET, 1);
654 msleep(1);
655 asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET),
656 ASIC3_EXTCF_OWM_RESET, 0);
657 msleep(1);
658 asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
659 ASIC3_EXTCF_OWM_EN, 1);
660 msleep(1);
661
662 return 0;
663}
664
665static int ds1wm_disable(struct platform_device *pdev)
666{
667 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
668
669 asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
670 ASIC3_EXTCF_OWM_EN, 0);
671
672 asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_OWM]);
673 asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
674 asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
675
676 return 0;
677}
678
679static struct mfd_cell asic3_cell_ds1wm = {
680 .name = "ds1wm",
681 .enable = ds1wm_enable,
682 .disable = ds1wm_disable,
683 .driver_data = &ds1wm_pdata,
684 .num_resources = ARRAY_SIZE(ds1wm_resources),
685 .resources = ds1wm_resources,
686};
687
688static struct tmio_mmc_data asic3_mmc_data = {
689 .hclk = 24576000,
690};
691
692static struct resource asic3_mmc_resources[] = {
693 {
694 .start = ASIC3_SD_CTRL_BASE,
695 .end = ASIC3_SD_CTRL_BASE + 0x3ff,
696 .flags = IORESOURCE_MEM,
697 },
698 {
699 .start = ASIC3_SD_CONFIG_BASE,
700 .end = ASIC3_SD_CONFIG_BASE + 0x1ff,
701 .flags = IORESOURCE_MEM,
702 },
703 {
704 .start = 0,
705 .end = 0,
706 .flags = IORESOURCE_IRQ,
707 },
708};
709
710static int asic3_mmc_enable(struct platform_device *pdev)
711{
712 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
713
714 /* Not sure if it must be done bit by bit, but leaving as-is */
715 asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
716 ASIC3_SDHWCTRL_LEVCD, 1);
717 asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
718 ASIC3_SDHWCTRL_LEVWP, 1);
719 asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
720 ASIC3_SDHWCTRL_SUSPEND, 0);
721 asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
722 ASIC3_SDHWCTRL_PCLR, 0);
723
724 asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
725 /* CLK32 used for card detection and for interruption detection
726 * when HCLK is stopped.
727 */
728 asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
729 msleep(1);
730
731 /* HCLK 24.576 MHz, BCLK 12.288 MHz: */
732 asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL),
733 CLOCK_SEL_CX | CLOCK_SEL_SD_HCLK_SEL);
734
735 asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]);
736 asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]);
737 msleep(1);
738
739 asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
740 ASIC3_EXTCF_SD_MEM_ENABLE, 1);
741
742 /* Enable SD card slot 3.3V power supply */
743 asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
744 ASIC3_SDHWCTRL_SDPWR, 1);
745
746 return 0;
747}
748
749static int asic3_mmc_disable(struct platform_device *pdev)
750{
751 struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
752
753 /* Put in suspend mode */
754 asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
755 ASIC3_SDHWCTRL_SUSPEND, 1);
756
757 /* Disable clocks */
758 asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]);
759 asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]);
760 asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
761 asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
762 return 0;
763}
764
765static struct mfd_cell asic3_cell_mmc = {
766 .name = "tmio-mmc",
767 .enable = asic3_mmc_enable,
768 .disable = asic3_mmc_disable,
769 .driver_data = &asic3_mmc_data,
770 .num_resources = ARRAY_SIZE(asic3_mmc_resources),
771 .resources = asic3_mmc_resources,
772};
773
774static int __init asic3_mfd_probe(struct platform_device *pdev,
775 struct resource *mem)
776{
777 struct asic3 *asic = platform_get_drvdata(pdev);
778 struct resource *mem_sdio;
779 int irq, ret;
780
781 mem_sdio = platform_get_resource(pdev, IORESOURCE_MEM, 1);
782 if (!mem_sdio)
783 dev_dbg(asic->dev, "no SDIO MEM resource\n");
784
785 irq = platform_get_irq(pdev, 1);
786 if (irq < 0)
787 dev_dbg(asic->dev, "no SDIO IRQ resource\n");
788
789 /* DS1WM */
790 asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
791 ASIC3_EXTCF_OWM_SMB, 0);
792
793 ds1wm_resources[0].start >>= asic->bus_shift;
794 ds1wm_resources[0].end >>= asic->bus_shift;
795
796 asic3_cell_ds1wm.platform_data = &asic3_cell_ds1wm;
797 asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm);
798
799 /* MMC */
800 asic3_mmc_resources[0].start >>= asic->bus_shift;
801 asic3_mmc_resources[0].end >>= asic->bus_shift;
802 asic3_mmc_resources[1].start >>= asic->bus_shift;
803 asic3_mmc_resources[1].end >>= asic->bus_shift;
804
805 asic3_cell_mmc.platform_data = &asic3_cell_mmc;
806 asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc);
807
808 ret = mfd_add_devices(&pdev->dev, pdev->id,
809 &asic3_cell_ds1wm, 1, mem, asic->irq_base);
810 if (ret < 0)
811 goto out;
812
813 if (mem_sdio && (irq >= 0))
814 ret = mfd_add_devices(&pdev->dev, pdev->id,
815 &asic3_cell_mmc, 1, mem_sdio, irq);
816
817 out:
818 return ret;
819}
820
821static void asic3_mfd_remove(struct platform_device *pdev)
822{
823 mfd_remove_devices(&pdev->dev);
824}
528 825
529/* Core */ 826/* Core */
530static int __init asic3_probe(struct platform_device *pdev) 827static int __init asic3_probe(struct platform_device *pdev)
@@ -533,7 +830,6 @@ static int __init asic3_probe(struct platform_device *pdev)
533 struct asic3 *asic; 830 struct asic3 *asic;
534 struct resource *mem; 831 struct resource *mem;
535 unsigned long clksel; 832 unsigned long clksel;
536 int map_size;
537 int ret = 0; 833 int ret = 0;
538 834
539 asic = kzalloc(sizeof(struct asic3), GFP_KERNEL); 835 asic = kzalloc(sizeof(struct asic3), GFP_KERNEL);
@@ -553,8 +849,7 @@ static int __init asic3_probe(struct platform_device *pdev)
553 goto out_free; 849 goto out_free;
554 } 850 }
555 851
556 map_size = mem->end - mem->start + 1; 852 asic->mapping = ioremap(mem->start, resource_size(mem));
557 asic->mapping = ioremap(mem->start, map_size);
558 if (!asic->mapping) { 853 if (!asic->mapping) {
559 ret = -ENOMEM; 854 ret = -ENOMEM;
560 dev_err(asic->dev, "Couldn't ioremap\n"); 855 dev_err(asic->dev, "Couldn't ioremap\n");
@@ -564,7 +859,7 @@ static int __init asic3_probe(struct platform_device *pdev)
564 asic->irq_base = pdata->irq_base; 859 asic->irq_base = pdata->irq_base;
565 860
566 /* calculate bus shift from mem resource */ 861 /* calculate bus shift from mem resource */
567 asic->bus_shift = 2 - (map_size >> 12); 862 asic->bus_shift = 2 - (resource_size(mem) >> 12);
568 863
569 clksel = 0; 864 clksel = 0;
570 asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), clksel); 865 asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), clksel);
@@ -590,6 +885,13 @@ static int __init asic3_probe(struct platform_device *pdev)
590 goto out_irq; 885 goto out_irq;
591 } 886 }
592 887
888 /* Making a per-device copy is only needed for the
889 * theoretical case of multiple ASIC3s on one board:
890 */
891 memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init));
892
893 asic3_mfd_probe(pdev, mem);
894
593 dev_info(asic->dev, "ASIC3 Core driver\n"); 895 dev_info(asic->dev, "ASIC3 Core driver\n");
594 896
595 return 0; 897 return 0;
@@ -611,6 +913,8 @@ static int asic3_remove(struct platform_device *pdev)
611 int ret; 913 int ret;
612 struct asic3 *asic = platform_get_drvdata(pdev); 914 struct asic3 *asic = platform_get_drvdata(pdev);
613 915
916 asic3_mfd_remove(pdev);
917
614 ret = asic3_gpio_remove(pdev); 918 ret = asic3_gpio_remove(pdev);
615 if (ret < 0) 919 if (ret < 0)
616 return ret; 920 return ret;
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 7283d88656af..e5ffe5617393 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -561,7 +561,7 @@ static int __init da903x_init(void)
561{ 561{
562 return i2c_add_driver(&da903x_driver); 562 return i2c_add_driver(&da903x_driver);
563} 563}
564module_init(da903x_init); 564subsys_initcall(da903x_init);
565 565
566static void __exit da903x_exit(void) 566static void __exit da903x_exit(void)
567{ 567{
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
new file mode 100644
index 000000000000..671a7efe86a8
--- /dev/null
+++ b/drivers/mfd/ezx-pcap.c
@@ -0,0 +1,505 @@
1/*
2 * Driver for Motorola PCAP2 as present in EZX phones
3 *
4 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
5 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/platform_device.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/mfd/ezx-pcap.h>
19#include <linux/spi/spi.h>
20
21#define PCAP_ADC_MAXQ 8
22struct pcap_adc_request {
23 u8 bank;
24 u8 ch[2];
25 u32 flags;
26 void (*callback)(void *, u16[]);
27 void *data;
28};
29
30struct pcap_adc_sync_request {
31 u16 res[2];
32 struct completion completion;
33};
34
35struct pcap_chip {
36 struct spi_device *spi;
37
38 /* IO */
39 u32 buf;
40 struct mutex io_mutex;
41
42 /* IRQ */
43 unsigned int irq_base;
44 u32 msr;
45 struct work_struct isr_work;
46 struct work_struct msr_work;
47 struct workqueue_struct *workqueue;
48
49 /* ADC */
50 struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
51 u8 adc_head;
52 u8 adc_tail;
53 struct mutex adc_mutex;
54};
55
56/* IO */
57static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
58{
59 struct spi_transfer t;
60 struct spi_message m;
61 int status;
62
63 memset(&t, 0, sizeof t);
64 spi_message_init(&m);
65 t.len = sizeof(u32);
66 spi_message_add_tail(&t, &m);
67
68 pcap->buf = *data;
69 t.tx_buf = (u8 *) &pcap->buf;
70 t.rx_buf = (u8 *) &pcap->buf;
71 status = spi_sync(pcap->spi, &m);
72
73 if (status == 0)
74 *data = pcap->buf;
75
76 return status;
77}
78
79int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
80{
81 int ret;
82
83 mutex_lock(&pcap->io_mutex);
84 value &= PCAP_REGISTER_VALUE_MASK;
85 value |= PCAP_REGISTER_WRITE_OP_BIT
86 | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
87 ret = ezx_pcap_putget(pcap, &value);
88 mutex_unlock(&pcap->io_mutex);
89
90 return ret;
91}
92EXPORT_SYMBOL_GPL(ezx_pcap_write);
93
94int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
95{
96 int ret;
97
98 mutex_lock(&pcap->io_mutex);
99 *value = PCAP_REGISTER_READ_OP_BIT
100 | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
101
102 ret = ezx_pcap_putget(pcap, value);
103 mutex_unlock(&pcap->io_mutex);
104
105 return ret;
106}
107EXPORT_SYMBOL_GPL(ezx_pcap_read);
108
109/* IRQ */
110static inline unsigned int irq2pcap(struct pcap_chip *pcap, int irq)
111{
112 return 1 << (irq - pcap->irq_base);
113}
114
115int pcap_to_irq(struct pcap_chip *pcap, int irq)
116{
117 return pcap->irq_base + irq;
118}
119EXPORT_SYMBOL_GPL(pcap_to_irq);
120
121static void pcap_mask_irq(unsigned int irq)
122{
123 struct pcap_chip *pcap = get_irq_chip_data(irq);
124
125 pcap->msr |= irq2pcap(pcap, irq);
126 queue_work(pcap->workqueue, &pcap->msr_work);
127}
128
129static void pcap_unmask_irq(unsigned int irq)
130{
131 struct pcap_chip *pcap = get_irq_chip_data(irq);
132
133 pcap->msr &= ~irq2pcap(pcap, irq);
134 queue_work(pcap->workqueue, &pcap->msr_work);
135}
136
137static struct irq_chip pcap_irq_chip = {
138 .name = "pcap",
139 .mask = pcap_mask_irq,
140 .unmask = pcap_unmask_irq,
141};
142
143static void pcap_msr_work(struct work_struct *work)
144{
145 struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);
146
147 ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
148}
149
150static void pcap_isr_work(struct work_struct *work)
151{
152 struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
153 struct pcap_platform_data *pdata = pcap->spi->dev.platform_data;
154 u32 msr, isr, int_sel, service;
155 int irq;
156
157 ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
158 ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);
159
 160 /* We can't service/ack irqs that are assigned to port 2 */
161 if (!(pdata->config & PCAP_SECOND_PORT)) {
162 ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
163 isr &= ~int_sel;
164 }
165 ezx_pcap_write(pcap, PCAP_REG_ISR, isr);
166
167 local_irq_disable();
168 service = isr & ~msr;
169
170 for (irq = pcap->irq_base; service; service >>= 1, irq++) {
171 if (service & 1) {
172 struct irq_desc *desc = irq_to_desc(irq);
173
174 if (WARN(!desc, KERN_WARNING
175 "Invalid PCAP IRQ %d\n", irq))
176 break;
177
178 if (desc->status & IRQ_DISABLED)
179 note_interrupt(irq, desc, IRQ_NONE);
180 else
181 desc->handle_irq(irq, desc);
182 }
183 }
184 local_irq_enable();
185}
186
187static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
188{
189 struct pcap_chip *pcap = get_irq_data(irq);
190
191 desc->chip->ack(irq);
192 queue_work(pcap->workqueue, &pcap->isr_work);
193 return;
194}
195
196/* ADC */
197static void pcap_disable_adc(struct pcap_chip *pcap)
198{
199 u32 tmp;
200
201 ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
202 tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
203 ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
204}
205
206static void pcap_adc_trigger(struct pcap_chip *pcap)
207{
208 u32 tmp;
209 u8 head;
210
211 mutex_lock(&pcap->adc_mutex);
212 head = pcap->adc_head;
213 if (!pcap->adc_queue[head]) {
214 /* queue is empty, save power */
215 pcap_disable_adc(pcap);
216 mutex_unlock(&pcap->adc_mutex);
217 return;
218 }
219 mutex_unlock(&pcap->adc_mutex);
220
221 /* start conversion on requested bank */
222 tmp = pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;
223
224 if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
225 tmp |= PCAP_ADC_AD_SEL1;
226
227 ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
228 ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
229}
230
231static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
232{
233 struct pcap_chip *pcap = _pcap;
234 struct pcap_adc_request *req;
235 u16 res[2];
236 u32 tmp;
237
238 mutex_lock(&pcap->adc_mutex);
239 req = pcap->adc_queue[pcap->adc_head];
240
 241 if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
 242 mutex_unlock(&pcap->adc_mutex);
 243 return IRQ_HANDLED;
 244 }
244 /* read requested channels results */
245 ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
246 tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
247 tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
248 tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
249 ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
250 ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
251 res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
252 res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;
253
254 pcap->adc_queue[pcap->adc_head] = NULL;
255 pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
256 mutex_unlock(&pcap->adc_mutex);
257
258 /* pass the results and release memory */
259 req->callback(req->data, res);
260 kfree(req);
261
262 /* trigger next conversion (if any) on queue */
263 pcap_adc_trigger(pcap);
264
265 return IRQ_HANDLED;
266}
267
268int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
269 void *callback, void *data)
270{
271 struct pcap_adc_request *req;
272
273 /* This will be freed after we have a result */
274 req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
275 if (!req)
276 return -ENOMEM;
277
278 req->bank = bank;
279 req->flags = flags;
280 req->ch[0] = ch[0];
281 req->ch[1] = ch[1];
282 req->callback = callback;
283 req->data = data;
284
285 mutex_lock(&pcap->adc_mutex);
286 if (pcap->adc_queue[pcap->adc_tail]) {
287 mutex_unlock(&pcap->adc_mutex);
288 kfree(req);
289 return -EBUSY;
290 }
291 pcap->adc_queue[pcap->adc_tail] = req;
292 pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
293 mutex_unlock(&pcap->adc_mutex);
294
295 /* start conversion */
296 pcap_adc_trigger(pcap);
297
298 return 0;
299}
300EXPORT_SYMBOL_GPL(pcap_adc_async);
301
302static void pcap_adc_sync_cb(void *param, u16 res[])
303{
304 struct pcap_adc_sync_request *req = param;
305
306 req->res[0] = res[0];
307 req->res[1] = res[1];
308 complete(&req->completion);
309}
310
311int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
312 u16 res[])
313{
314 struct pcap_adc_sync_request sync_data;
315 int ret;
316
317 init_completion(&sync_data.completion);
318 ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
319 &sync_data);
320 if (ret)
321 return ret;
322 wait_for_completion(&sync_data.completion);
323 res[0] = sync_data.res[0];
324 res[1] = sync_data.res[1];
325
326 return 0;
327}
328EXPORT_SYMBOL_GPL(pcap_adc_sync);
329
330/* subdevs */
331static int pcap_remove_subdev(struct device *dev, void *unused)
332{
333 platform_device_unregister(to_platform_device(dev));
334 return 0;
335}
336
337static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
338 struct pcap_subdev *subdev)
339{
340 struct platform_device *pdev;
341
342 pdev = platform_device_alloc(subdev->name, subdev->id);
343 pdev->dev.parent = &pcap->spi->dev;
344 pdev->dev.platform_data = subdev->platform_data;
345 platform_set_drvdata(pdev, pcap);
346
347 return platform_device_add(pdev);
348}
349
350static int __devexit ezx_pcap_remove(struct spi_device *spi)
351{
352 struct pcap_chip *pcap = dev_get_drvdata(&spi->dev);
353 struct pcap_platform_data *pdata = spi->dev.platform_data;
354 int i, adc_irq;
355
356 /* remove all registered subdevs */
357 device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
358
359 /* cleanup ADC */
360 adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
361 PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
362 free_irq(adc_irq, pcap);
363 mutex_lock(&pcap->adc_mutex);
364 for (i = 0; i < PCAP_ADC_MAXQ; i++)
365 kfree(pcap->adc_queue[i]);
366 mutex_unlock(&pcap->adc_mutex);
367
368 /* cleanup irqchip */
369 for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
370 set_irq_chip_and_handler(i, NULL, NULL);
371
372 destroy_workqueue(pcap->workqueue);
373
374 kfree(pcap);
375
376 return 0;
377}
378
379static int __devinit ezx_pcap_probe(struct spi_device *spi)
380{
381 struct pcap_platform_data *pdata = spi->dev.platform_data;
382 struct pcap_chip *pcap;
383 int i, adc_irq;
384 int ret = -ENODEV;
385
386 /* platform data is required */
387 if (!pdata)
388 goto ret;
389
390 pcap = kzalloc(sizeof(*pcap), GFP_KERNEL);
391 if (!pcap) {
392 ret = -ENOMEM;
393 goto ret;
394 }
395
396 mutex_init(&pcap->io_mutex);
397 mutex_init(&pcap->adc_mutex);
398 INIT_WORK(&pcap->isr_work, pcap_isr_work);
399 INIT_WORK(&pcap->msr_work, pcap_msr_work);
400 dev_set_drvdata(&spi->dev, pcap);
401
402 /* setup spi */
403 spi->bits_per_word = 32;
404 spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
405 ret = spi_setup(spi);
406 if (ret)
407 goto free_pcap;
408
409 pcap->spi = spi;
410
411 /* setup irq */
412 pcap->irq_base = pdata->irq_base;
413 pcap->workqueue = create_singlethread_workqueue("pcapd");
414 if (!pcap->workqueue) {
415 dev_err(&spi->dev, "cant create pcap thread\n");
416 goto free_pcap;
417 }
418
419 /* redirect interrupts to AP, except adcdone2 */
420 if (!(pdata->config & PCAP_SECOND_PORT))
421 ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
422 (1 << PCAP_IRQ_ADCDONE2));
423
424 /* setup irq chip */
425 for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
426 set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
427 set_irq_chip_data(i, pcap);
428#ifdef CONFIG_ARM
429 set_irq_flags(i, IRQF_VALID);
430#else
431 set_irq_noprobe(i);
432#endif
433 }
434
435 /* mask/ack all PCAP interrupts */
436 ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
437 ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
438 pcap->msr = PCAP_MASK_ALL_INTERRUPT;
439
440 set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
441 set_irq_data(spi->irq, pcap);
442 set_irq_chained_handler(spi->irq, pcap_irq_handler);
443 set_irq_wake(spi->irq, 1);
444
445 /* ADC */
446 adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
447 PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
448
449 ret = request_irq(adc_irq, pcap_adc_irq, 0, "ADC", pcap);
450 if (ret)
451 goto free_irqchip;
452
453 /* setup subdevs */
454 for (i = 0; i < pdata->num_subdevs; i++) {
455 ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
456 if (ret)
457 goto remove_subdevs;
458 }
459
460 /* board specific quirks */
461 if (pdata->init)
462 pdata->init(pcap);
463
464 return 0;
465
466remove_subdevs:
467 device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
468/* free_adc: */
469 free_irq(adc_irq, pcap);
470free_irqchip:
471 for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
472 set_irq_chip_and_handler(i, NULL, NULL);
473/* destroy_workqueue: */
474 destroy_workqueue(pcap->workqueue);
475free_pcap:
476 kfree(pcap);
477ret:
478 return ret;
479}
480
481static struct spi_driver ezxpcap_driver = {
482 .probe = ezx_pcap_probe,
483 .remove = __devexit_p(ezx_pcap_remove),
484 .driver = {
485 .name = "ezx-pcap",
486 .owner = THIS_MODULE,
487 },
488};
489
490static int __init ezx_pcap_init(void)
491{
492 return spi_register_driver(&ezxpcap_driver);
493}
494
495static void __exit ezx_pcap_exit(void)
496{
497 spi_unregister_driver(&ezxpcap_driver);
498}
499
500module_init(ezx_pcap_init);
501module_exit(ezx_pcap_exit);
502
503MODULE_LICENSE("GPL");
504MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
505MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 082c197ab9b8..8d3c38bf9714 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -705,5 +705,5 @@ MODULE_DESCRIPTION("I2C chip driver for NXP PCF50633 PMU");
705MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>"); 705MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
706MODULE_LICENSE("GPL"); 706MODULE_LICENSE("GPL");
707 707
708module_init(pcf50633_init); 708subsys_initcall(pcf50633_init);
709module_exit(pcf50633_exit); 709module_exit(pcf50633_exit);
diff --git a/drivers/mfd/pcf50633-gpio.c b/drivers/mfd/pcf50633-gpio.c
index 2fa2eca5c9cc..9ab19a8f669d 100644
--- a/drivers/mfd/pcf50633-gpio.c
+++ b/drivers/mfd/pcf50633-gpio.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
18 19
19#include <linux/mfd/pcf50633/core.h> 20#include <linux/mfd/pcf50633/core.h>
20#include <linux/mfd/pcf50633/gpio.h> 21#include <linux/mfd/pcf50633/gpio.h>
@@ -116,3 +117,5 @@ int pcf50633_gpio_power_supply_set(struct pcf50633 *pcf,
116 return pcf50633_reg_set_bit_mask(pcf, reg, mask, val); 117 return pcf50633_reg_set_bit_mask(pcf, reg, mask, val);
117} 118}
118EXPORT_SYMBOL_GPL(pcf50633_gpio_power_supply_set); 119EXPORT_SYMBOL_GPL(pcf50633_gpio_power_supply_set);
120
121MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 875f7a875734..0a255c1f1ce7 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -108,7 +108,7 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
108 108
109/*--------------------------------------------------------------------------*/ 109/*--------------------------------------------------------------------------*/
110 110
111static const struct tmio_mmc_data t7166xb_mmc_data = { 111static struct tmio_mmc_data t7166xb_mmc_data = {
112 .hclk = 24000000, 112 .hclk = 24000000,
113}; 113};
114 114
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index c3993ac20542..3280ab33f88a 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -75,7 +75,7 @@ static int tc6387xb_mmc_disable(struct platform_device *mmc)
75 75
76/*--------------------------------------------------------------------------*/ 76/*--------------------------------------------------------------------------*/
77 77
78const static struct tmio_mmc_data tc6387xb_mmc_data = { 78static struct tmio_mmc_data tc6387xb_mmc_data = {
79 .hclk = 24000000, 79 .hclk = 24000000,
80}; 80};
81 81
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 9d2abb5d6e2c..1429a7341a9a 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -136,7 +136,7 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
136 return 0; 136 return 0;
137} 137}
138 138
139const static struct tmio_mmc_data tc6393xb_mmc_data = { 139static struct tmio_mmc_data tc6393xb_mmc_data = {
140 .hclk = 24000000, 140 .hclk = 24000000,
141}; 141};
142 142
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index ec90e953adce..cd1008c19cd7 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -647,7 +647,7 @@ static inline int __init unprotect_pm_master(void)
647 return e; 647 return e;
648} 648}
649 649
650static void __init clocks_init(struct device *dev) 650static void clocks_init(struct device *dev)
651{ 651{
652 int e = 0; 652 int e = 0;
653 struct clk *osc; 653 struct clk *osc;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index aca2670afd78..bae61b22501c 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -255,7 +255,7 @@ static int twl4030_irq_thread(void *data)
255 * thread. All we do here is acknowledge and mask the interrupt and wakeup 255 * thread. All we do here is acknowledge and mask the interrupt and wakeup
256 * the kernel thread. 256 * the kernel thread.
257 */ 257 */
258static void handle_twl4030_pih(unsigned int irq, irq_desc_t *desc) 258static void handle_twl4030_pih(unsigned int irq, struct irq_desc *desc)
259{ 259{
260 /* Acknowledge, clear *AND* mask the interrupt... */ 260 /* Acknowledge, clear *AND* mask the interrupt... */
261 desc->chip->ack(irq); 261 desc->chip->ack(irq);
diff --git a/drivers/mfd/wm8350-regmap.c b/drivers/mfd/wm8350-regmap.c
index 9a4cc954cb7c..7ccc1eab98ab 100644
--- a/drivers/mfd/wm8350-regmap.c
+++ b/drivers/mfd/wm8350-regmap.c
@@ -3186,7 +3186,7 @@ const struct wm8350_reg_access wm8350_reg_io_map[] = {
3186 /* read write volatile */ 3186 /* read write volatile */
3187 { 0xFFFF, 0xFFFF, 0xFFFF }, /* R0 - Reset/ID */ 3187 { 0xFFFF, 0xFFFF, 0xFFFF }, /* R0 - Reset/ID */
3188 { 0x7CFF, 0x0C00, 0x7FFF }, /* R1 - ID */ 3188 { 0x7CFF, 0x0C00, 0x7FFF }, /* R1 - ID */
3189 { 0x0000, 0x0000, 0x0000 }, /* R2 */ 3189 { 0x007F, 0x0000, 0x0000 }, /* R2 - ROM Mask ID */
3190 { 0xBE3B, 0xBE3B, 0x8000 }, /* R3 - System Control 1 */ 3190 { 0xBE3B, 0xBE3B, 0x8000 }, /* R3 - System Control 1 */
3191 { 0xFEF7, 0xFEF7, 0xF800 }, /* R4 - System Control 2 */ 3191 { 0xFEF7, 0xFEF7, 0xF800 }, /* R4 - System Control 2 */
3192 { 0x80FF, 0x80FF, 0x8000 }, /* R5 - System Hibernate */ 3192 { 0x80FF, 0x80FF, 0x8000 }, /* R5 - System Hibernate */
@@ -3411,7 +3411,7 @@ const struct wm8350_reg_access wm8350_reg_io_map[] = {
3411 { 0x0000, 0x0000, 0x0000 }, /* R224 */ 3411 { 0x0000, 0x0000, 0x0000 }, /* R224 */
3412 { 0x8F3F, 0x0000, 0xFFFF }, /* R225 - DCDC/LDO status */ 3412 { 0x8F3F, 0x0000, 0xFFFF }, /* R225 - DCDC/LDO status */
3413 { 0x0000, 0x0000, 0xFFFF }, /* R226 - Charger status */ 3413 { 0x0000, 0x0000, 0xFFFF }, /* R226 - Charger status */
3414 { 0x0000, 0x0000, 0xFFFF }, /* R227 */ 3414 { 0x34FE, 0x0000, 0xFFFF }, /* R227 */
3415 { 0x0000, 0x0000, 0x0000 }, /* R228 */ 3415 { 0x0000, 0x0000, 0x0000 }, /* R228 */
3416 { 0x0000, 0x0000, 0x0000 }, /* R229 */ 3416 { 0x0000, 0x0000, 0x0000 }, /* R229 */
3417 { 0xFFFF, 0x1FFF, 0xFFFF }, /* R230 - GPIO Pin Status */ 3417 { 0xFFFF, 0x1FFF, 0xFFFF }, /* R230 - GPIO Pin Status */
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 7c21bf791569..ecfc8bbe89b9 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -460,7 +460,7 @@ static int __init wm8400_module_init(void)
460 460
461 return ret; 461 return ret;
462} 462}
463module_init(wm8400_module_init); 463subsys_initcall(wm8400_module_init);
464 464
465static void __exit wm8400_module_exit(void) 465static void __exit wm8400_module_exit(void)
466{ 466{
diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile
index bcd8136d2f98..7c4c306dfa8a 100644
--- a/drivers/misc/sgi-gru/Makefile
+++ b/drivers/misc/sgi-gru/Makefile
@@ -3,5 +3,5 @@ ifdef CONFIG_SGI_GRU_DEBUG
3endif 3endif
4 4
5obj-$(CONFIG_SGI_GRU) := gru.o 5obj-$(CONFIG_SGI_GRU) := gru.o
6gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o 6gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o grukdump.o
7 7
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
index 3fde33c1e8f3..3c9c06618e6a 100644
--- a/drivers/misc/sgi-gru/gru_instructions.h
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -81,6 +81,8 @@ struct control_block_extended_exc_detail {
81 int exopc; 81 int exopc;
82 long exceptdet0; 82 long exceptdet0;
83 int exceptdet1; 83 int exceptdet1;
84 int cbrstate;
85 int cbrexecstatus;
84}; 86};
85 87
86/* 88/*
@@ -107,7 +109,8 @@ struct gru_instruction_bits {
107 unsigned char reserved2: 2; 109 unsigned char reserved2: 2;
108 unsigned char istatus: 2; 110 unsigned char istatus: 2;
109 unsigned char isubstatus:4; 111 unsigned char isubstatus:4;
110 unsigned char reserved3: 2; 112 unsigned char reserved3: 1;
113 unsigned char tlb_fault_color: 1;
111 /* DW 1 */ 114 /* DW 1 */
112 unsigned long idef4; /* 42 bits: TRi1, BufSize */ 115 unsigned long idef4; /* 42 bits: TRi1, BufSize */
113 /* DW 2-6 */ 116 /* DW 2-6 */
@@ -250,17 +253,37 @@ struct gru_instruction {
250#define CBE_CAUSE_HA_RESPONSE_FATAL (1 << 13) 253#define CBE_CAUSE_HA_RESPONSE_FATAL (1 << 13)
251#define CBE_CAUSE_HA_RESPONSE_NON_FATAL (1 << 14) 254#define CBE_CAUSE_HA_RESPONSE_NON_FATAL (1 << 14)
252#define CBE_CAUSE_ADDRESS_SPACE_DECODE_ERROR (1 << 15) 255#define CBE_CAUSE_ADDRESS_SPACE_DECODE_ERROR (1 << 15)
253#define CBE_CAUSE_RESPONSE_DATA_ERROR (1 << 16) 256#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
254#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 17) 257#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
258#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
259
260/* CBE cbrexecstatus bits */
261#define CBR_EXS_ABORT_OCC_BIT 0
262#define CBR_EXS_INT_OCC_BIT 1
263#define CBR_EXS_PENDING_BIT 2
264#define CBR_EXS_QUEUED_BIT 3
265#define CBR_EXS_TLB_INVAL_BIT 4
266#define CBR_EXS_EXCEPTION_BIT 5
267
268#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
269#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
270#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
271#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
272#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
273#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
255 274
256/* 275/*
257 * Exceptions are retried for the following cases. If any OTHER bits are set 276 * Exceptions are retried for the following cases. If any OTHER bits are set
258 * in ecause, the exception is not retryable. 277 * in ecause, the exception is not retryable.
259 */ 278 */
260#define EXCEPTION_RETRY_BITS (CBE_CAUSE_RESPONSE_DATA_ERROR | \ 279#define EXCEPTION_RETRY_BITS (CBE_CAUSE_EXECUTION_HW_ERROR | \
261 CBE_CAUSE_RA_REQUEST_TIMEOUT | \
262 CBE_CAUSE_TLBHW_ERROR | \ 280 CBE_CAUSE_TLBHW_ERROR | \
263 CBE_CAUSE_HA_REQUEST_TIMEOUT) 281 CBE_CAUSE_RA_REQUEST_TIMEOUT | \
282 CBE_CAUSE_RA_RESPONSE_NON_FATAL | \
283 CBE_CAUSE_HA_RESPONSE_NON_FATAL | \
284 CBE_CAUSE_RA_RESPONSE_DATA_ERROR | \
285 CBE_CAUSE_HA_RESPONSE_DATA_ERROR \
286 )
264 287
265/* Message queue head structure */ 288/* Message queue head structure */
266union gru_mesqhead { 289union gru_mesqhead {
@@ -600,9 +623,11 @@ static inline int gru_get_cb_substatus(void *cb)
600 return cbs->isubstatus; 623 return cbs->isubstatus;
601} 624}
602 625
603/* Check the status of a CB. If the CB is in UPM mode, call the 626/*
604 * OS to handle the UPM status. 627 * User interface to check an instruction status. UPM and exceptions
605 * Returns the CB status field value (0 for normal completion) 628 * are handled automatically. However, this function does NOT wait
629 * for an active instruction to complete.
630 *
606 */ 631 */
607static inline int gru_check_status(void *cb) 632static inline int gru_check_status(void *cb)
608{ 633{
@@ -610,34 +635,31 @@ static inline int gru_check_status(void *cb)
610 int ret; 635 int ret;
611 636
612 ret = cbs->istatus; 637 ret = cbs->istatus;
613 if (ret == CBS_CALL_OS) 638 if (ret != CBS_ACTIVE)
614 ret = gru_check_status_proc(cb); 639 ret = gru_check_status_proc(cb);
615 return ret; 640 return ret;
616} 641}
617 642
618/* Wait for CB to complete. 643/*
619 * Returns the CB status field value (0 for normal completion) 644 * User interface (via inline function) to wait for an instruction
645 * to complete. Completion status (IDLE or EXCEPTION is returned
646 * to the user. Exception due to hardware errors are automatically
647 * retried before returning an exception.
648 *
620 */ 649 */
621static inline int gru_wait(void *cb) 650static inline int gru_wait(void *cb)
622{ 651{
623 struct gru_control_block_status *cbs = (void *)cb; 652 return gru_wait_proc(cb);
624 int ret = cbs->istatus;
625
626 if (ret != CBS_IDLE)
627 ret = gru_wait_proc(cb);
628 return ret;
629} 653}
630 654
631/* Wait for CB to complete. Aborts program if error. (Note: error does NOT 655/*
656 * Wait for CB to complete. Aborts program if error. (Note: error does NOT
632 * mean TLB mis - only fatal errors such as memory parity error or user 657 * mean TLB mis - only fatal errors such as memory parity error or user
633 * bugs will cause termination. 658 * bugs will cause termination.
634 */ 659 */
635static inline void gru_wait_abort(void *cb) 660static inline void gru_wait_abort(void *cb)
636{ 661{
637 struct gru_control_block_status *cbs = (void *)cb; 662 gru_wait_abort_proc(cb);
638
639 if (cbs->istatus != CBS_IDLE)
640 gru_wait_abort_proc(cb);
641} 663}
642 664
643 665
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index ab118558552e..679e01778286 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -166,7 +166,8 @@ static inline struct gru_state *irq_to_gru(int irq)
166 * the GRU, atomic operations must be used to clear bits. 166 * the GRU, atomic operations must be used to clear bits.
167 */ 167 */
168static void get_clear_fault_map(struct gru_state *gru, 168static void get_clear_fault_map(struct gru_state *gru,
169 struct gru_tlb_fault_map *map) 169 struct gru_tlb_fault_map *imap,
170 struct gru_tlb_fault_map *dmap)
170{ 171{
171 unsigned long i, k; 172 unsigned long i, k;
172 struct gru_tlb_fault_map *tfm; 173 struct gru_tlb_fault_map *tfm;
@@ -177,7 +178,11 @@ static void get_clear_fault_map(struct gru_state *gru,
177 k = tfm->fault_bits[i]; 178 k = tfm->fault_bits[i];
178 if (k) 179 if (k)
179 k = xchg(&tfm->fault_bits[i], 0UL); 180 k = xchg(&tfm->fault_bits[i], 0UL);
180 map->fault_bits[i] = k; 181 imap->fault_bits[i] = k;
182 k = tfm->done_bits[i];
183 if (k)
184 k = xchg(&tfm->done_bits[i], 0UL);
185 dmap->fault_bits[i] = k;
181 } 186 }
182 187
183 /* 188 /*
@@ -334,6 +339,12 @@ static int gru_try_dropin(struct gru_thread_state *gts,
334 * Might be a hardware race OR a stupid user. Ignore FMM because FMM 339 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
335 * is a transient state. 340 * is a transient state.
336 */ 341 */
342 if (tfh->status != TFHSTATUS_EXCEPTION) {
343 gru_flush_cache(tfh);
344 if (tfh->status != TFHSTATUS_EXCEPTION)
345 goto failnoexception;
346 STAT(tfh_stale_on_fault);
347 }
337 if (tfh->state == TFHSTATE_IDLE) 348 if (tfh->state == TFHSTATE_IDLE)
338 goto failidle; 349 goto failidle;
339 if (tfh->state == TFHSTATE_MISS_FMM && cb) 350 if (tfh->state == TFHSTATE_MISS_FMM && cb)
@@ -401,8 +412,17 @@ failfmm:
401 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); 412 gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
402 return 0; 413 return 0;
403 414
415failnoexception:
416 /* TFH status did not show exception pending */
417 gru_flush_cache(tfh);
418 if (cb)
419 gru_flush_cache(cb);
420 STAT(tlb_dropin_fail_no_exception);
421 gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state);
422 return 0;
423
404failidle: 424failidle:
405 /* TFH was idle - no miss pending */ 425 /* TFH state was idle - no miss pending */
406 gru_flush_cache(tfh); 426 gru_flush_cache(tfh);
407 if (cb) 427 if (cb)
408 gru_flush_cache(cb); 428 gru_flush_cache(cb);
@@ -438,7 +458,7 @@ failactive:
438irqreturn_t gru_intr(int irq, void *dev_id) 458irqreturn_t gru_intr(int irq, void *dev_id)
439{ 459{
440 struct gru_state *gru; 460 struct gru_state *gru;
441 struct gru_tlb_fault_map map; 461 struct gru_tlb_fault_map imap, dmap;
442 struct gru_thread_state *gts; 462 struct gru_thread_state *gts;
443 struct gru_tlb_fault_handle *tfh = NULL; 463 struct gru_tlb_fault_handle *tfh = NULL;
444 int cbrnum, ctxnum; 464 int cbrnum, ctxnum;
@@ -451,11 +471,15 @@ irqreturn_t gru_intr(int irq, void *dev_id)
451 raw_smp_processor_id(), irq); 471 raw_smp_processor_id(), irq);
452 return IRQ_NONE; 472 return IRQ_NONE;
453 } 473 }
454 get_clear_fault_map(gru, &map); 474 get_clear_fault_map(gru, &imap, &dmap);
455 gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid, 475
456 map.fault_bits[0]); 476 for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
477 complete(gru->gs_blade->bs_async_wq);
478 gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
479 gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
480 }
457 481
458 for_each_cbr_in_tfm(cbrnum, map.fault_bits) { 482 for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
459 tfh = get_tfh_by_index(gru, cbrnum); 483 tfh = get_tfh_by_index(gru, cbrnum);
460 prefetchw(tfh); /* Helps on hdw, required for emulator */ 484 prefetchw(tfh); /* Helps on hdw, required for emulator */
461 485
@@ -472,7 +496,9 @@ irqreturn_t gru_intr(int irq, void *dev_id)
472 * This is running in interrupt context. Trylock the mmap_sem. 496 * This is running in interrupt context. Trylock the mmap_sem.
473 * If it fails, retry the fault in user context. 497 * If it fails, retry the fault in user context.
474 */ 498 */
475 if (down_read_trylock(&gts->ts_mm->mmap_sem)) { 499 if (!gts->ts_force_cch_reload &&
500 down_read_trylock(&gts->ts_mm->mmap_sem)) {
501 gts->ustats.fmm_tlbdropin++;
476 gru_try_dropin(gts, tfh, NULL); 502 gru_try_dropin(gts, tfh, NULL);
477 up_read(&gts->ts_mm->mmap_sem); 503 up_read(&gts->ts_mm->mmap_sem);
478 } else { 504 } else {
@@ -491,6 +517,7 @@ static int gru_user_dropin(struct gru_thread_state *gts,
491 struct gru_mm_struct *gms = gts->ts_gms; 517 struct gru_mm_struct *gms = gts->ts_gms;
492 int ret; 518 int ret;
493 519
520 gts->ustats.upm_tlbdropin++;
494 while (1) { 521 while (1) {
495 wait_event(gms->ms_wait_queue, 522 wait_event(gms->ms_wait_queue,
496 atomic_read(&gms->ms_range_active) == 0); 523 atomic_read(&gms->ms_range_active) == 0);
@@ -546,8 +573,8 @@ int gru_handle_user_call_os(unsigned long cb)
546 * CCH may contain stale data if ts_force_cch_reload is set. 573 * CCH may contain stale data if ts_force_cch_reload is set.
547 */ 574 */
548 if (gts->ts_gru && gts->ts_force_cch_reload) { 575 if (gts->ts_gru && gts->ts_force_cch_reload) {
549 gru_update_cch(gts, 0);
550 gts->ts_force_cch_reload = 0; 576 gts->ts_force_cch_reload = 0;
577 gru_update_cch(gts, 0);
551 } 578 }
552 579
553 ret = -EAGAIN; 580 ret = -EAGAIN;
@@ -589,20 +616,26 @@ int gru_get_exception_detail(unsigned long arg)
589 } else if (gts->ts_gru) { 616 } else if (gts->ts_gru) {
590 cbrnum = thread_cbr_number(gts, ucbnum); 617 cbrnum = thread_cbr_number(gts, ucbnum);
591 cbe = get_cbe_by_index(gts->ts_gru, cbrnum); 618 cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
592 prefetchw(cbe);/* Harmless on hardware, required for emulator */ 619 gru_flush_cache(cbe); /* CBE not coherent */
593 excdet.opc = cbe->opccpy; 620 excdet.opc = cbe->opccpy;
594 excdet.exopc = cbe->exopccpy; 621 excdet.exopc = cbe->exopccpy;
595 excdet.ecause = cbe->ecause; 622 excdet.ecause = cbe->ecause;
596 excdet.exceptdet0 = cbe->idef1upd; 623 excdet.exceptdet0 = cbe->idef1upd;
597 excdet.exceptdet1 = cbe->idef3upd; 624 excdet.exceptdet1 = cbe->idef3upd;
625 excdet.cbrstate = cbe->cbrstate;
626 excdet.cbrexecstatus = cbe->cbrexecstatus;
627 gru_flush_cache(cbe);
598 ret = 0; 628 ret = 0;
599 } else { 629 } else {
600 ret = -EAGAIN; 630 ret = -EAGAIN;
601 } 631 }
602 gru_unlock_gts(gts); 632 gru_unlock_gts(gts);
603 633
604 gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb, 634 gru_dbg(grudev,
605 excdet.ecause); 635 "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
636 "exdet0 0x%lx, exdet1 0x%x\n",
637 excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
638 excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
606 if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet))) 639 if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
607 ret = -EFAULT; 640 ret = -EFAULT;
608 return ret; 641 return ret;
@@ -627,7 +660,7 @@ static int gru_unload_all_contexts(void)
627 if (gts && mutex_trylock(&gts->ts_ctxlock)) { 660 if (gts && mutex_trylock(&gts->ts_ctxlock)) {
628 spin_unlock(&gru->gs_lock); 661 spin_unlock(&gru->gs_lock);
629 gru_unload_context(gts, 1); 662 gru_unload_context(gts, 1);
630 gru_unlock_gts(gts); 663 mutex_unlock(&gts->ts_ctxlock);
631 spin_lock(&gru->gs_lock); 664 spin_lock(&gru->gs_lock);
632 } 665 }
633 } 666 }
@@ -669,6 +702,7 @@ int gru_user_flush_tlb(unsigned long arg)
669{ 702{
670 struct gru_thread_state *gts; 703 struct gru_thread_state *gts;
671 struct gru_flush_tlb_req req; 704 struct gru_flush_tlb_req req;
705 struct gru_mm_struct *gms;
672 706
673 STAT(user_flush_tlb); 707 STAT(user_flush_tlb);
674 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) 708 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
@@ -681,8 +715,34 @@ int gru_user_flush_tlb(unsigned long arg)
681 if (!gts) 715 if (!gts)
682 return -EINVAL; 716 return -EINVAL;
683 717
684 gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len); 718 gms = gts->ts_gms;
685 gru_unlock_gts(gts); 719 gru_unlock_gts(gts);
720 gru_flush_tlb_range(gms, req.vaddr, req.len);
721
722 return 0;
723}
724
725/*
726 * Fetch GSEG statisticss
727 */
728long gru_get_gseg_statistics(unsigned long arg)
729{
730 struct gru_thread_state *gts;
731 struct gru_get_gseg_statistics_req req;
732
733 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
734 return -EFAULT;
735
736 gts = gru_find_lock_gts(req.gseg);
737 if (gts) {
738 memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
739 gru_unlock_gts(gts);
740 } else {
741 memset(&req.stats, 0, sizeof(gts->ustats));
742 }
743
744 if (copy_to_user((void __user *)arg, &req, sizeof(req)))
745 return -EFAULT;
686 746
687 return 0; 747 return 0;
688} 748}
@@ -691,18 +751,34 @@ int gru_user_flush_tlb(unsigned long arg)
691 * Register the current task as the user of the GSEG slice. 751 * Register the current task as the user of the GSEG slice.
692 * Needed for TLB fault interrupt targeting. 752 * Needed for TLB fault interrupt targeting.
693 */ 753 */
694int gru_set_task_slice(long address) 754int gru_set_context_option(unsigned long arg)
695{ 755{
696 struct gru_thread_state *gts; 756 struct gru_thread_state *gts;
757 struct gru_set_context_option_req req;
758 int ret = 0;
759
760 STAT(set_context_option);
761 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
762 return -EFAULT;
763 gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
697 764
698 STAT(set_task_slice); 765 gts = gru_alloc_locked_gts(req.gseg);
699 gru_dbg(grudev, "address 0x%lx\n", address);
700 gts = gru_alloc_locked_gts(address);
701 if (!gts) 766 if (!gts)
702 return -EINVAL; 767 return -EINVAL;
703 768
704 gts->ts_tgid_owner = current->tgid; 769 switch (req.op) {
770 case sco_gseg_owner:
771 /* Register the current task as the GSEG owner */
772 gts->ts_tgid_owner = current->tgid;
773 break;
774 case sco_cch_req_slice:
775 /* Set the CCH slice option */
776 gts->ts_cch_req_slice = req.val1 & 3;
777 break;
778 default:
779 ret = -EINVAL;
780 }
705 gru_unlock_gts(gts); 781 gru_unlock_gts(gts);
706 782
707 return 0; 783 return ret;
708} 784}
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 3ce2920e2bf3..fa2d93a9fb8d 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -46,6 +46,7 @@
46 46
47struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; 47struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
48unsigned long gru_start_paddr __read_mostly; 48unsigned long gru_start_paddr __read_mostly;
49void *gru_start_vaddr __read_mostly;
49unsigned long gru_end_paddr __read_mostly; 50unsigned long gru_end_paddr __read_mostly;
50unsigned int gru_max_gids __read_mostly; 51unsigned int gru_max_gids __read_mostly;
51struct gru_stats_s gru_stats; 52struct gru_stats_s gru_stats;
@@ -135,11 +136,9 @@ static int gru_create_new_context(unsigned long arg)
135 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) 136 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
136 return -EFAULT; 137 return -EFAULT;
137 138
138 if (req.data_segment_bytes == 0 || 139 if (req.data_segment_bytes > max_user_dsr_bytes)
139 req.data_segment_bytes > max_user_dsr_bytes)
140 return -EINVAL; 140 return -EINVAL;
141 if (!req.control_blocks || !req.maximum_thread_count || 141 if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
142 req.control_blocks > max_user_cbrs)
143 return -EINVAL; 142 return -EINVAL;
144 143
145 if (!(req.options & GRU_OPT_MISS_MASK)) 144 if (!(req.options & GRU_OPT_MISS_MASK))
@@ -184,41 +183,6 @@ static long gru_get_config_info(unsigned long arg)
184} 183}
185 184
186/* 185/*
187 * Get GRU chiplet status
188 */
189static long gru_get_chiplet_status(unsigned long arg)
190{
191 struct gru_state *gru;
192 struct gru_chiplet_info info;
193
194 if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
195 return -EFAULT;
196
197 if (info.node == -1)
198 info.node = numa_node_id();
199 if (info.node >= num_possible_nodes() ||
200 info.chiplet >= GRU_CHIPLETS_PER_HUB ||
201 info.node < 0 || info.chiplet < 0)
202 return -EINVAL;
203
204 info.blade = uv_node_to_blade_id(info.node);
205 gru = get_gru(info.blade, info.chiplet);
206
207 info.total_dsr_bytes = GRU_NUM_DSR_BYTES;
208 info.total_cbr = GRU_NUM_CB;
209 info.total_user_dsr_bytes = GRU_NUM_DSR_BYTES -
210 gru->gs_reserved_dsr_bytes;
211 info.total_user_cbr = GRU_NUM_CB - gru->gs_reserved_cbrs;
212 info.free_user_dsr_bytes = hweight64(gru->gs_dsr_map) *
213 GRU_DSR_AU_BYTES;
214 info.free_user_cbr = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
215
216 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
217 return -EFAULT;
218 return 0;
219}
220
221/*
222 * gru_file_unlocked_ioctl 186 * gru_file_unlocked_ioctl
223 * 187 *
224 * Called to update file attributes via IOCTL calls. 188 * Called to update file attributes via IOCTL calls.
@@ -234,8 +198,8 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
234 case GRU_CREATE_CONTEXT: 198 case GRU_CREATE_CONTEXT:
235 err = gru_create_new_context(arg); 199 err = gru_create_new_context(arg);
236 break; 200 break;
237 case GRU_SET_TASK_SLICE: 201 case GRU_SET_CONTEXT_OPTION:
238 err = gru_set_task_slice(arg); 202 err = gru_set_context_option(arg);
239 break; 203 break;
240 case GRU_USER_GET_EXCEPTION_DETAIL: 204 case GRU_USER_GET_EXCEPTION_DETAIL:
241 err = gru_get_exception_detail(arg); 205 err = gru_get_exception_detail(arg);
@@ -243,18 +207,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
243 case GRU_USER_UNLOAD_CONTEXT: 207 case GRU_USER_UNLOAD_CONTEXT:
244 err = gru_user_unload_context(arg); 208 err = gru_user_unload_context(arg);
245 break; 209 break;
246 case GRU_GET_CHIPLET_STATUS:
247 err = gru_get_chiplet_status(arg);
248 break;
249 case GRU_USER_FLUSH_TLB: 210 case GRU_USER_FLUSH_TLB:
250 err = gru_user_flush_tlb(arg); 211 err = gru_user_flush_tlb(arg);
251 break; 212 break;
252 case GRU_USER_CALL_OS: 213 case GRU_USER_CALL_OS:
253 err = gru_handle_user_call_os(arg); 214 err = gru_handle_user_call_os(arg);
254 break; 215 break;
216 case GRU_GET_GSEG_STATISTICS:
217 err = gru_get_gseg_statistics(arg);
218 break;
219 case GRU_KTEST:
220 err = gru_ktest(arg);
221 break;
255 case GRU_GET_CONFIG_INFO: 222 case GRU_GET_CONFIG_INFO:
256 err = gru_get_config_info(arg); 223 err = gru_get_config_info(arg);
257 break; 224 break;
225 case GRU_DUMP_CHIPLET_STATE:
226 err = gru_dump_chiplet_request(arg);
227 break;
258 } 228 }
259 return err; 229 return err;
260} 230}
@@ -282,7 +252,6 @@ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
282 gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n", 252 gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
283 bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, 253 bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
284 gru->gs_gru_base_paddr); 254 gru->gs_gru_base_paddr);
285 gru_kservices_init(gru);
286} 255}
287 256
288static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) 257static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
@@ -309,6 +278,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
309 memset(gru_base[bid], 0, sizeof(struct gru_blade_state)); 278 memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
310 gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0]; 279 gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
311 spin_lock_init(&gru_base[bid]->bs_lock); 280 spin_lock_init(&gru_base[bid]->bs_lock);
281 init_rwsem(&gru_base[bid]->bs_kgts_sema);
312 282
313 dsrbytes = 0; 283 dsrbytes = 0;
314 cbrs = 0; 284 cbrs = 0;
@@ -372,7 +342,6 @@ static int __init gru_init(void)
372{ 342{
373 int ret, irq, chip; 343 int ret, irq, chip;
374 char id[10]; 344 char id[10];
375 void *gru_start_vaddr;
376 345
377 if (!is_uv_system()) 346 if (!is_uv_system())
378 return 0; 347 return 0;
@@ -422,6 +391,7 @@ static int __init gru_init(void)
422 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); 391 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
423 goto exit3; 392 goto exit3;
424 } 393 }
394 gru_kservices_init();
425 395
426 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, 396 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
427 GRU_DRIVER_VERSION_STR); 397 GRU_DRIVER_VERSION_STR);
@@ -440,7 +410,7 @@ exit1:
440 410
441static void __exit gru_exit(void) 411static void __exit gru_exit(void)
442{ 412{
443 int i, bid, gid; 413 int i, bid;
444 int order = get_order(sizeof(struct gru_state) * 414 int order = get_order(sizeof(struct gru_state) *
445 GRU_CHIPLETS_PER_BLADE); 415 GRU_CHIPLETS_PER_BLADE);
446 416
@@ -449,10 +419,7 @@ static void __exit gru_exit(void)
449 419
450 for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) 420 for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
451 free_irq(IRQ_GRU + i, NULL); 421 free_irq(IRQ_GRU + i, NULL);
452 422 gru_kservices_exit();
453 foreach_gid(gid)
454 gru_kservices_exit(GID_TO_GRU(gid));
455
456 for (bid = 0; bid < GRU_MAX_BLADES; bid++) 423 for (bid = 0; bid < GRU_MAX_BLADES; bid++)
457 free_pages((unsigned long)gru_base[bid], order); 424 free_pages((unsigned long)gru_base[bid], order);
458 425
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 9b7ccb328697..37e7cfc53b9c 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -57,7 +57,7 @@ static void start_instruction(void *h)
57static int wait_instruction_complete(void *h, enum mcs_op opc) 57static int wait_instruction_complete(void *h, enum mcs_op opc)
58{ 58{
59 int status; 59 int status;
60 cycles_t start_time = get_cycles(); 60 unsigned long start_time = get_cycles();
61 61
62 while (1) { 62 while (1) {
63 cpu_relax(); 63 cpu_relax();
@@ -65,25 +65,16 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
65 if (status != CCHSTATUS_ACTIVE) 65 if (status != CCHSTATUS_ACTIVE)
66 break; 66 break;
67 if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) 67 if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time))
68 panic("GRU %p is malfunctioning\n", h); 68 panic("GRU %p is malfunctioning: start %ld, end %ld\n",
69 h, start_time, (unsigned long)get_cycles());
69 } 70 }
70 if (gru_options & OPT_STATS) 71 if (gru_options & OPT_STATS)
71 update_mcs_stats(opc, get_cycles() - start_time); 72 update_mcs_stats(opc, get_cycles() - start_time);
72 return status; 73 return status;
73} 74}
74 75
75int cch_allocate(struct gru_context_configuration_handle *cch, 76int cch_allocate(struct gru_context_configuration_handle *cch)
76 int asidval, int sizeavail, unsigned long cbrmap,
77 unsigned long dsrmap)
78{ 77{
79 int i;
80
81 for (i = 0; i < 8; i++) {
82 cch->asid[i] = (asidval++);
83 cch->sizeavail[i] = sizeavail;
84 }
85 cch->dsr_allocation_map = dsrmap;
86 cch->cbr_allocation_map = cbrmap;
87 cch->opc = CCHOP_ALLOCATE; 78 cch->opc = CCHOP_ALLOCATE;
88 start_instruction(cch); 79 start_instruction(cch);
89 return wait_instruction_complete(cch, cchop_allocate); 80 return wait_instruction_complete(cch, cchop_allocate);
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index 1ed74d7508c8..f44112242d00 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -39,7 +39,6 @@
39#define GRU_NUM_CBE 128 39#define GRU_NUM_CBE 128
40#define GRU_NUM_TFH 128 40#define GRU_NUM_TFH 128
41#define GRU_NUM_CCH 16 41#define GRU_NUM_CCH 16
42#define GRU_NUM_GSH 1
43 42
44/* Maximum resource counts that can be reserved by user programs */ 43/* Maximum resource counts that can be reserved by user programs */
45#define GRU_NUM_USER_CBR GRU_NUM_CBE 44#define GRU_NUM_USER_CBR GRU_NUM_CBE
@@ -56,7 +55,6 @@
56#define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000) 55#define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000)
57#define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000) 56#define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000)
58#define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000) 57#define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000)
59#define GRU_GSH_BASE (GRU_MCS_BASE + 0x30000)
60 58
61/* User gseg constants */ 59/* User gseg constants */
62#define GRU_GSEG_STRIDE (4 * 1024 * 1024) 60#define GRU_GSEG_STRIDE (4 * 1024 * 1024)
@@ -251,15 +249,15 @@ struct gru_tlb_fault_handle {
251 unsigned int fill1:9; 249 unsigned int fill1:9;
252 250
253 unsigned int status:2; 251 unsigned int status:2;
254 unsigned int fill2:1; 252 unsigned int fill2:2;
255 unsigned int color:1;
256 unsigned int state:3; 253 unsigned int state:3;
257 unsigned int fill3:1; 254 unsigned int fill3:1;
258 255
259 unsigned int cause:7; /* DW 0 - high 32 */ 256 unsigned int cause:6;
257 unsigned int cb_int:1;
260 unsigned int fill4:1; 258 unsigned int fill4:1;
261 259
262 unsigned int indexway:12; 260 unsigned int indexway:12; /* DW 0 - high 32 */
263 unsigned int fill5:4; 261 unsigned int fill5:4;
264 262
265 unsigned int ctxnum:4; 263 unsigned int ctxnum:4;
@@ -457,21 +455,7 @@ enum gru_cbr_state {
457 CBRSTATE_BUSY_INTERRUPT, 455 CBRSTATE_BUSY_INTERRUPT,
458}; 456};
459 457
460/* CBE cbrexecstatus bits */ 458/* CBE cbrexecstatus bits - defined in gru_instructions.h*/
461#define CBR_EXS_ABORT_OCC_BIT 0
462#define CBR_EXS_INT_OCC_BIT 1
463#define CBR_EXS_PENDING_BIT 2
464#define CBR_EXS_QUEUED_BIT 3
465#define CBR_EXS_TLBHW_BIT 4
466#define CBR_EXS_EXCEPTION_BIT 5
467
468#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
469#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
470#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
471#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
472#define CBR_EXS_TLBHW (1 << CBR_EXS_TLBHW_BIT)
473#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
474
475/* CBE ecause bits - defined in gru_instructions.h */ 459/* CBE ecause bits - defined in gru_instructions.h */
476 460
477/* 461/*
@@ -495,9 +479,7 @@ enum gru_cbr_state {
495/* minimum TLB purge count to ensure a full purge */ 479/* minimum TLB purge count to ensure a full purge */
496#define GRUMAXINVAL 1024UL 480#define GRUMAXINVAL 1024UL
497 481
498int cch_allocate(struct gru_context_configuration_handle *cch, 482int cch_allocate(struct gru_context_configuration_handle *cch);
499 int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap);
500
501int cch_start(struct gru_context_configuration_handle *cch); 483int cch_start(struct gru_context_configuration_handle *cch);
502int cch_interrupt(struct gru_context_configuration_handle *cch); 484int cch_interrupt(struct gru_context_configuration_handle *cch);
503int cch_deallocate(struct gru_context_configuration_handle *cch); 485int cch_deallocate(struct gru_context_configuration_handle *cch);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
new file mode 100644
index 000000000000..55eabfa85585
--- /dev/null
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -0,0 +1,232 @@
1/*
2 * SN Platform GRU Driver
3 *
4 * Dump GRU State
5 *
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/spinlock.h>
26#include <linux/uaccess.h>
27#include <linux/delay.h>
28#include <linux/bitops.h>
29#include <asm/uv/uv_hub.h>
30#include "gru.h"
31#include "grutables.h"
32#include "gruhandles.h"
33#include "grulib.h"
34
35#define CCH_LOCK_ATTEMPTS 10
36
37static int gru_user_copy_handle(void __user **dp, void *s)
38{
39 if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
40 return -1;
41 *dp += GRU_HANDLE_BYTES;
42 return 0;
43}
44
45static int gru_dump_context_data(void *grubase,
46 struct gru_context_configuration_handle *cch,
47 void __user *ubuf, int ctxnum, int dsrcnt)
48{
49 void *cb, *cbe, *tfh, *gseg;
50 int i, scr;
51
52 gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
53 cb = gseg + GRU_CB_BASE;
54 cbe = grubase + GRU_CBE_BASE;
55 tfh = grubase + GRU_TFH_BASE;
56
57 for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
58 if (gru_user_copy_handle(&ubuf, cb))
59 goto fail;
60 if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
61 goto fail;
62 if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
63 goto fail;
64 cb += GRU_HANDLE_STRIDE;
65 }
66 if (dsrcnt)
67 memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
68 return 0;
69
70fail:
71 return -EFAULT;
72}
73
74static int gru_dump_tfm(struct gru_state *gru,
75 void __user *ubuf, void __user *ubufend)
76{
77 struct gru_tlb_fault_map *tfm;
78 int i, ret, bytes;
79
80 bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
81 if (bytes > ubufend - ubuf)
82 ret = -EFBIG;
83
84 for (i = 0; i < GRU_NUM_TFM; i++) {
85 tfm = get_tfm(gru->gs_gru_base_vaddr, i);
86 if (gru_user_copy_handle(&ubuf, tfm))
87 goto fail;
88 }
89 return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
90
91fail:
92 return -EFAULT;
93}
94
95static int gru_dump_tgh(struct gru_state *gru,
96 void __user *ubuf, void __user *ubufend)
97{
98 struct gru_tlb_global_handle *tgh;
99 int i, ret, bytes;
100
101 bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
102 if (bytes > ubufend - ubuf)
103 ret = -EFBIG;
104
105 for (i = 0; i < GRU_NUM_TGH; i++) {
106 tgh = get_tgh(gru->gs_gru_base_vaddr, i);
107 if (gru_user_copy_handle(&ubuf, tgh))
108 goto fail;
109 }
110 return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
111
112fail:
113 return -EFAULT;
114}
115
116static int gru_dump_context(struct gru_state *gru, int ctxnum,
117 void __user *ubuf, void __user *ubufend, char data_opt,
118 char lock_cch)
119{
120 struct gru_dump_context_header hdr;
121 struct gru_dump_context_header __user *uhdr = ubuf;
122 struct gru_context_configuration_handle *cch, *ubufcch;
123 struct gru_thread_state *gts;
124 int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
125 void *grubase;
126
127 memset(&hdr, 0, sizeof(hdr));
128 grubase = gru->gs_gru_base_vaddr;
129 cch = get_cch(grubase, ctxnum);
130 for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
131 cch_locked = trylock_cch_handle(cch);
132 if (cch_locked)
133 break;
134 msleep(1);
135 }
136
137 ubuf += sizeof(hdr);
138 ubufcch = ubuf;
139 if (gru_user_copy_handle(&ubuf, cch))
140 goto fail;
141 if (cch_locked)
142 ubufcch->delresp = 0;
143 bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
144
145 if (cch_locked || !lock_cch) {
146 gts = gru->gs_gts[ctxnum];
147 if (gts && gts->ts_vma) {
148 hdr.pid = gts->ts_tgid_owner;
149 hdr.vaddr = gts->ts_vma->vm_start;
150 }
151 if (cch->state != CCHSTATE_INACTIVE) {
152 cbrcnt = hweight64(cch->cbr_allocation_map) *
153 GRU_CBR_AU_SIZE;
154 dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
155 GRU_DSR_AU_CL : 0;
156 }
157 bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
158 if (bytes > ubufend - ubuf)
159 ret = -EFBIG;
160 else
161 ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
162 dsrcnt);
163
164 }
165 if (cch_locked)
166 unlock_cch_handle(cch);
167 if (ret)
168 return ret;
169
170 hdr.magic = GRU_DUMP_MAGIC;
171 hdr.gid = gru->gs_gid;
172 hdr.ctxnum = ctxnum;
173 hdr.cbrcnt = cbrcnt;
174 hdr.dsrcnt = dsrcnt;
175 hdr.cch_locked = cch_locked;
176 if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
177 ret = -EFAULT;
178
179 return ret ? ret : bytes;
180
181fail:
182 unlock_cch_handle(cch);
183 return -EFAULT;
184}
185
186int gru_dump_chiplet_request(unsigned long arg)
187{
188 struct gru_state *gru;
189 struct gru_dump_chiplet_state_req req;
190 void __user *ubuf;
191 void __user *ubufend;
192 int ctxnum, ret, cnt = 0;
193
194 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
195 return -EFAULT;
196
197 /* Currently, only dump by gid is implemented */
198 if (req.gid >= gru_max_gids || req.gid < 0)
199 return -EINVAL;
200
201 gru = GID_TO_GRU(req.gid);
202 ubuf = req.buf;
203 ubufend = req.buf + req.buflen;
204
205 ret = gru_dump_tfm(gru, ubuf, ubufend);
206 if (ret < 0)
207 goto fail;
208 ubuf += ret;
209
210 ret = gru_dump_tgh(gru, ubuf, ubufend);
211 if (ret < 0)
212 goto fail;
213 ubuf += ret;
214
215 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
216 if (req.ctxnum == ctxnum || req.ctxnum < 0) {
217 ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
218 req.data_opt, req.lock_cch);
219 if (ret < 0)
220 goto fail;
221 ubuf += ret;
222 cnt++;
223 }
224 }
225
226 if (copy_to_user((void __user *)arg, &req, sizeof(req)))
227 return -EFAULT;
228 return cnt;
229
230fail:
231 return ret;
232}
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index d8bd7d84a7cf..eedbf9c32760 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
31#include <linux/proc_fs.h> 31#include <linux/proc_fs.h>
32#include <linux/interrupt.h> 32#include <linux/interrupt.h>
33#include <linux/uaccess.h> 33#include <linux/uaccess.h>
34#include <linux/delay.h>
34#include "gru.h" 35#include "gru.h"
35#include "grulib.h" 36#include "grulib.h"
36#include "grutables.h" 37#include "grutables.h"
@@ -45,18 +46,66 @@
45 * resources. This will likely be replaced when we better understand the 46 * resources. This will likely be replaced when we better understand the
46 * kernel/user requirements. 47 * kernel/user requirements.
47 * 48 *
48 * At boot time, the kernel permanently reserves a fixed number of 49 * Blade percpu resources reserved for kernel use. These resources are
 49 * CBRs/DSRs for each cpu to use. The resources are all taken from 50 * reserved whenever the kernel context for the blade is loaded. Note
50 * the GRU chiplet 1 on the blade. This leaves the full set of resources 51 * that the kernel context is not guaranteed to be always available. It is
51 * of chiplet 0 available to be allocated to a single user. 52 * loaded on demand & can be stolen by a user if the user demand exceeds the
53 * kernel demand. The kernel can always reload the kernel context but
54 * a SLEEP may be required!!!.
55 *
56 * Async Overview:
57 *
58 * Each blade has one "kernel context" that owns GRU kernel resources
59 * located on the blade. Kernel drivers use GRU resources in this context
60 * for sending messages, zeroing memory, etc.
61 *
62 * The kernel context is dynamically loaded on demand. If it is not in
63 * use by the kernel, the kernel context can be unloaded & given to a user.
64 * The kernel context will be reloaded when needed. This may require that
65 * a context be stolen from a user.
66 * NOTE: frequent unloading/reloading of the kernel context is
67 * expensive. We are depending on batch schedulers, cpusets, sane
68 * drivers or some other mechanism to prevent the need for frequent
69 * stealing/reloading.
70 *
71 * The kernel context consists of two parts:
72 * - 1 CB & a few DSRs that are reserved for each cpu on the blade.
73 * Each cpu has it's own private resources & does not share them
74 * with other cpus. These resources are used serially, ie,
75 * locked, used & unlocked on each call to a function in
76 * grukservices.
77 * (Now that we have dynamic loading of kernel contexts, I
78 * may rethink this & allow sharing between cpus....)
79 *
80 * - Additional resources can be reserved long term & used directly
81 * by UV drivers located in the kernel. Drivers using these GRU
82 * resources can use asynchronous GRU instructions that send
83 * interrupts on completion.
84 * - these resources must be explicitly locked/unlocked
85 * - locked resources prevent (obviously) the kernel
86 * context from being unloaded.
87 * - drivers using these resource directly issue their own
88 * GRU instruction and must wait/check completion.
89 *
90 * When these resources are reserved, the caller can optionally
91 * associate a wait_queue with the resources and use asynchronous
92 * GRU instructions. When an async GRU instruction completes, the
93 * driver will do a wakeup on the event.
94 *
52 */ 95 */
53 96
54/* Blade percpu resources PERMANENTLY reserved for kernel use */ 97
98#define ASYNC_HAN_TO_BID(h) ((h) - 1)
99#define ASYNC_BID_TO_HAN(b) ((b) + 1)
100#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
101#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \
102 (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
103#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)]
104
55#define GRU_NUM_KERNEL_CBR 1 105#define GRU_NUM_KERNEL_CBR 1
56#define GRU_NUM_KERNEL_DSR_BYTES 256 106#define GRU_NUM_KERNEL_DSR_BYTES 256
57#define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \ 107#define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \
58 GRU_CACHE_LINE_BYTES) 108 GRU_CACHE_LINE_BYTES)
59#define KERNEL_CTXNUM 15
60 109
61/* GRU instruction attributes for all instructions */ 110/* GRU instruction attributes for all instructions */
62#define IMA IMA_CB_DELAY 111#define IMA IMA_CB_DELAY
@@ -98,6 +147,108 @@ struct message_header {
98 147
99#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h])) 148#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
100 149
150/*
151 * Reload the blade's kernel context into a GRU chiplet. Called holding
152 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
153 */
154static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
155{
156 struct gru_state *gru;
157 struct gru_thread_state *kgts;
158 void *vaddr;
159 int ctxnum, ncpus;
160
161 up_read(&bs->bs_kgts_sema);
162 down_write(&bs->bs_kgts_sema);
163
164 if (!bs->bs_kgts)
165 bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
166 kgts = bs->bs_kgts;
167
168 if (!kgts->ts_gru) {
169 STAT(load_kernel_context);
170 ncpus = uv_blade_nr_possible_cpus(blade_id);
171 kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
172 GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
173 kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
174 GRU_NUM_KERNEL_DSR_BYTES * ncpus +
175 bs->bs_async_dsr_bytes);
176 while (!gru_assign_gru_context(kgts, blade_id)) {
177 msleep(1);
178 gru_steal_context(kgts, blade_id);
179 }
180 gru_load_context(kgts);
181 gru = bs->bs_kgts->ts_gru;
182 vaddr = gru->gs_gru_base_vaddr;
183 ctxnum = kgts->ts_ctxnum;
184 bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
185 bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
186 }
187 downgrade_write(&bs->bs_kgts_sema);
188}
189
190/*
191 * Free all kernel contexts that are not currently in use.
192 * Returns 0 if all freed, else number of inuse context.
193 */
194static int gru_free_kernel_contexts(void)
195{
196 struct gru_blade_state *bs;
197 struct gru_thread_state *kgts;
198 int bid, ret = 0;
199
200 for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
201 bs = gru_base[bid];
202 if (!bs)
203 continue;
204 if (down_write_trylock(&bs->bs_kgts_sema)) {
205 kgts = bs->bs_kgts;
206 if (kgts && kgts->ts_gru)
207 gru_unload_context(kgts, 0);
208 kfree(kgts);
209 bs->bs_kgts = NULL;
210 up_write(&bs->bs_kgts_sema);
211 } else {
212 ret++;
213 }
214 }
215 return ret;
216}
217
218/*
219 * Lock & load the kernel context for the specified blade.
220 */
221static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
222{
223 struct gru_blade_state *bs;
224
225 STAT(lock_kernel_context);
226 bs = gru_base[blade_id];
227
228 down_read(&bs->bs_kgts_sema);
229 if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
230 gru_load_kernel_context(bs, blade_id);
231 return bs;
232
233}
234
235/*
236 * Unlock the kernel context for the specified blade. Context is not
237 * unloaded but may be stolen before next use.
238 */
239static void gru_unlock_kernel_context(int blade_id)
240{
241 struct gru_blade_state *bs;
242
243 bs = gru_base[blade_id];
244 up_read(&bs->bs_kgts_sema);
245 STAT(unlock_kernel_context);
246}
247
248/*
249 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
250 * - returns with preemption disabled
251 */
101static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr) 252static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
102{ 253{
103 struct gru_blade_state *bs; 254 struct gru_blade_state *bs;
@@ -105,30 +256,148 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
105 256
106 BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES); 257 BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
107 preempt_disable(); 258 preempt_disable();
108 bs = gru_base[uv_numa_blade_id()]; 259 bs = gru_lock_kernel_context(uv_numa_blade_id());
109 lcpu = uv_blade_processor_id(); 260 lcpu = uv_blade_processor_id();
110 *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE; 261 *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
111 *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES; 262 *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
112 return 0; 263 return 0;
113} 264}
114 265
266/*
267 * Free the current cpus reserved DSR/CBR resources.
268 */
115static void gru_free_cpu_resources(void *cb, void *dsr) 269static void gru_free_cpu_resources(void *cb, void *dsr)
116{ 270{
271 gru_unlock_kernel_context(uv_numa_blade_id());
117 preempt_enable(); 272 preempt_enable();
118} 273}
119 274
275/*
276 * Reserve GRU resources to be used asynchronously.
277 * Note: currently supports only 1 reservation per blade.
278 *
279 * input:
280 * blade_id - blade on which resources should be reserved
281 * cbrs - number of CBRs
282 * dsr_bytes - number of DSR bytes needed
283 * output:
284 * handle to identify resource
285 * (0 = async resources already reserved)
286 */
287unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
288 struct completion *cmp)
289{
290 struct gru_blade_state *bs;
291 struct gru_thread_state *kgts;
292 int ret = 0;
293
294 bs = gru_base[blade_id];
295
296 down_write(&bs->bs_kgts_sema);
297
298 /* Verify no resources already reserved */
299 if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
300 goto done;
301 bs->bs_async_dsr_bytes = dsr_bytes;
302 bs->bs_async_cbrs = cbrs;
303 bs->bs_async_wq = cmp;
304 kgts = bs->bs_kgts;
305
306 /* Resources changed. Unload context if already loaded */
307 if (kgts && kgts->ts_gru)
308 gru_unload_context(kgts, 0);
309 ret = ASYNC_BID_TO_HAN(blade_id);
310
311done:
312 up_write(&bs->bs_kgts_sema);
313 return ret;
314}
315
316/*
317 * Release async resources previously reserved.
318 *
319 * input:
320 * han - handle to identify resources
321 */
322void gru_release_async_resources(unsigned long han)
323{
324 struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
325
326 down_write(&bs->bs_kgts_sema);
327 bs->bs_async_dsr_bytes = 0;
328 bs->bs_async_cbrs = 0;
329 bs->bs_async_wq = NULL;
330 up_write(&bs->bs_kgts_sema);
331}
332
333/*
334 * Wait for async GRU instructions to complete.
335 *
336 * input:
337 * han - handle to identify resources
338 */
339void gru_wait_async_cbr(unsigned long han)
340{
341 struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
342
343 wait_for_completion(bs->bs_async_wq);
344 mb();
345}
346
347/*
348 * Lock previous reserved async GRU resources
349 *
350 * input:
351 * han - handle to identify resources
352 * output:
353 * cb - pointer to first CBR
354 * dsr - pointer to first DSR
355 */
356void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
357{
358 struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
359 int blade_id = ASYNC_HAN_TO_BID(han);
360 int ncpus;
361
362 gru_lock_kernel_context(blade_id);
363 ncpus = uv_blade_nr_possible_cpus(blade_id);
364 if (cb)
365 *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
366 if (dsr)
367 *dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
368}
369
370/*
371 * Unlock previous reserved async GRU resources
372 *
373 * input:
374 * han - handle to identify resources
375 */
376void gru_unlock_async_resource(unsigned long han)
377{
378 int blade_id = ASYNC_HAN_TO_BID(han);
379
380 gru_unlock_kernel_context(blade_id);
381}
382
383/*----------------------------------------------------------------------*/
120int gru_get_cb_exception_detail(void *cb, 384int gru_get_cb_exception_detail(void *cb,
121 struct control_block_extended_exc_detail *excdet) 385 struct control_block_extended_exc_detail *excdet)
122{ 386{
123 struct gru_control_block_extended *cbe; 387 struct gru_control_block_extended *cbe;
388 struct gru_blade_state *bs;
389 int cbrnum;
124 390
125 cbe = get_cbe(GRUBASE(cb), get_cb_number(cb)); 391 bs = KCB_TO_BS(cb);
126 prefetchw(cbe); /* Harmless on hardware, required for emulator */ 392 cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
393 cbe = get_cbe(GRUBASE(cb), cbrnum);
394 gru_flush_cache(cbe); /* CBE not coherent */
127 excdet->opc = cbe->opccpy; 395 excdet->opc = cbe->opccpy;
128 excdet->exopc = cbe->exopccpy; 396 excdet->exopc = cbe->exopccpy;
129 excdet->ecause = cbe->ecause; 397 excdet->ecause = cbe->ecause;
130 excdet->exceptdet0 = cbe->idef1upd; 398 excdet->exceptdet0 = cbe->idef1upd;
131 excdet->exceptdet1 = cbe->idef3upd; 399 excdet->exceptdet1 = cbe->idef3upd;
400 gru_flush_cache(cbe);
132 return 0; 401 return 0;
133} 402}
134 403
@@ -167,13 +436,13 @@ static int gru_retry_exception(void *cb)
167 int retry = EXCEPTION_RETRY_LIMIT; 436 int retry = EXCEPTION_RETRY_LIMIT;
168 437
169 while (1) { 438 while (1) {
170 if (gru_get_cb_message_queue_substatus(cb))
171 break;
172 if (gru_wait_idle_or_exception(gen) == CBS_IDLE) 439 if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
173 return CBS_IDLE; 440 return CBS_IDLE;
174 441 if (gru_get_cb_message_queue_substatus(cb))
442 return CBS_EXCEPTION;
175 gru_get_cb_exception_detail(cb, &excdet); 443 gru_get_cb_exception_detail(cb, &excdet);
176 if (excdet.ecause & ~EXCEPTION_RETRY_BITS) 444 if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
445 (excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
177 break; 446 break;
178 if (retry-- == 0) 447 if (retry-- == 0)
179 break; 448 break;
@@ -416,6 +685,29 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
416 mqd->interrupt_vector); 685 mqd->interrupt_vector);
417} 686}
418 687
688/*
689 * Handle a PUT failure. Note: if message was a 2-line message, one of the
690 * lines might have successfully have been written. Before sending the
691 * message, "present" must be cleared in BOTH lines to prevent the receiver
692 * from prematurely seeing the full message.
693 */
694static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
695 void *mesg, int lines)
696{
697 unsigned long m;
698
699 m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
700 if (lines == 2) {
701 gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
702 if (gru_wait(cb) != CBS_IDLE)
703 return MQE_UNEXPECTED_CB_ERR;
704 }
705 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
706 if (gru_wait(cb) != CBS_IDLE)
707 return MQE_UNEXPECTED_CB_ERR;
708 send_message_queue_interrupt(mqd);
709 return MQE_OK;
710}
419 711
420/* 712/*
421 * Handle a gru_mesq failure. Some of these failures are software recoverable 713 * Handle a gru_mesq failure. Some of these failures are software recoverable
@@ -425,7 +717,6 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
425 void *mesg, int lines) 717 void *mesg, int lines)
426{ 718{
427 int substatus, ret = 0; 719 int substatus, ret = 0;
428 unsigned long m;
429 720
430 substatus = gru_get_cb_message_queue_substatus(cb); 721 substatus = gru_get_cb_message_queue_substatus(cb);
431 switch (substatus) { 722 switch (substatus) {
@@ -447,14 +738,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
447 break; 738 break;
448 case CBSS_PUT_NACKED: 739 case CBSS_PUT_NACKED:
449 STAT(mesq_send_put_nacked); 740 STAT(mesq_send_put_nacked);
450 m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6); 741 ret = send_message_put_nacked(cb, mqd, mesg, lines);
451 gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
452 if (gru_wait(cb) == CBS_IDLE) {
453 ret = MQE_OK;
454 send_message_queue_interrupt(mqd);
455 } else {
456 ret = MQE_UNEXPECTED_CB_ERR;
457 }
458 break; 742 break;
459 default: 743 default:
460 BUG(); 744 BUG();
@@ -597,115 +881,177 @@ EXPORT_SYMBOL_GPL(gru_copy_gpa);
597 881
598/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/ 882/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
599/* Temp - will delete after we gain confidence in the GRU */ 883/* Temp - will delete after we gain confidence in the GRU */
600static __cacheline_aligned unsigned long word0;
601static __cacheline_aligned unsigned long word1;
602 884
603static int quicktest(struct gru_state *gru) 885static int quicktest0(unsigned long arg)
604{ 886{
887 unsigned long word0;
888 unsigned long word1;
605 void *cb; 889 void *cb;
606 void *ds; 890 void *dsr;
607 unsigned long *p; 891 unsigned long *p;
892 int ret = -EIO;
608 893
609 cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0); 894 if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
610 ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0); 895 return MQE_BUG_NO_RESOURCES;
611 p = ds; 896 p = dsr;
612 word0 = MAGIC; 897 word0 = MAGIC;
898 word1 = 0;
613 899
614 gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA); 900 gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
615 if (gru_wait(cb) != CBS_IDLE) 901 if (gru_wait(cb) != CBS_IDLE) {
616 BUG(); 902 printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
903 goto done;
904 }
617 905
618 if (*(unsigned long *)ds != MAGIC) 906 if (*p != MAGIC) {
619 BUG(); 907 printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
620 gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA); 908 goto done;
621 if (gru_wait(cb) != CBS_IDLE) 909 }
622 BUG(); 910 gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
911 if (gru_wait(cb) != CBS_IDLE) {
912 printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
913 goto done;
914 }
623 915
624 if (word0 != word1 || word0 != MAGIC) { 916 if (word0 != word1 || word1 != MAGIC) {
625 printk 917 printk(KERN_DEBUG
626 ("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n", 918 "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
627 gru->gs_gid, word1, MAGIC); 919 word1, MAGIC);
628 BUG(); /* ZZZ should not be fatal */ 920 goto done;
629 } 921 }
922 ret = 0;
630 923
631 return 0; 924done:
925 gru_free_cpu_resources(cb, dsr);
926 return ret;
632} 927}
633 928
929#define ALIGNUP(p, q) ((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))
634 930
635int gru_kservices_init(struct gru_state *gru) 931static int quicktest1(unsigned long arg)
636{ 932{
637 struct gru_blade_state *bs; 933 struct gru_message_queue_desc mqd;
638 struct gru_context_configuration_handle *cch; 934 void *p, *mq;
639 unsigned long cbr_map, dsr_map; 935 unsigned long *dw;
640 int err, num, cpus_possible; 936 int i, ret = -EIO;
641 937 char mes[GRU_CACHE_LINE_BYTES], *m;
642 /* 938
643 * Currently, resources are reserved ONLY on the second chiplet 939 /* Need 1K cacheline aligned that does not cross page boundary */
644 * on each blade. This leaves ALL resources on chiplet 0 available 940 p = kmalloc(4096, 0);
645 * for user code. 941 mq = ALIGNUP(p, 1024);
646 */ 942 memset(mes, 0xee, sizeof(mes));
647 bs = gru->gs_blade; 943 dw = mq;
648 if (gru != &bs->bs_grus[1]) 944
649 return 0; 945 gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
650 946 for (i = 0; i < 6; i++) {
651 cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id); 947 mes[8] = i;
652 948 do {
653 num = GRU_NUM_KERNEL_CBR * cpus_possible; 949 ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
654 cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL); 950 } while (ret == MQE_CONGESTION);
655 gru->gs_reserved_cbrs += num; 951 if (ret)
656 952 break;
657 num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
658 dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
659 gru->gs_reserved_dsr_bytes += num;
660
661 gru->gs_active_contexts++;
662 __set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
663 cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
664
665 bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
666 KERNEL_CTXNUM, 0);
667 bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
668 KERNEL_CTXNUM, 0);
669
670 lock_cch_handle(cch);
671 cch->tfm_fault_bit_enable = 0;
672 cch->tlb_int_enable = 0;
673 cch->tfm_done_bit_enable = 0;
674 cch->unmap_enable = 1;
675 err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
676 if (err) {
677 gru_dbg(grudev,
678 "Unable to allocate kernel CCH: gid %d, err %d\n",
679 gru->gs_gid, err);
680 BUG();
681 } 953 }
682 if (cch_start(cch)) { 954 if (ret != MQE_QUEUE_FULL || i != 4)
683 gru_dbg(grudev, "Unable to start kernel CCH: gid %d, err %d\n", 955 goto done;
684 gru->gs_gid, err); 956
685 BUG(); 957 for (i = 0; i < 6; i++) {
958 m = gru_get_next_message(&mqd);
959 if (!m || m[8] != i)
960 break;
961 gru_free_message(&mqd, m);
686 } 962 }
687 unlock_cch_handle(cch); 963 ret = (i == 4) ? 0 : -EIO;
688 964
689 if (gru_options & GRU_QUICKLOOK) 965done:
690 quicktest(gru); 966 kfree(p);
691 return 0; 967 return ret;
692} 968}
693 969
694void gru_kservices_exit(struct gru_state *gru) 970static int quicktest2(unsigned long arg)
695{ 971{
696 struct gru_context_configuration_handle *cch; 972 static DECLARE_COMPLETION(cmp);
697 struct gru_blade_state *bs; 973 unsigned long han;
974 int blade_id = 0;
975 int numcb = 4;
976 int ret = 0;
977 unsigned long *buf;
978 void *cb0, *cb;
979 int i, k, istatus, bytes;
980
981 bytes = numcb * 4 * 8;
982 buf = kmalloc(bytes, GFP_KERNEL);
983 if (!buf)
984 return -ENOMEM;
985
986 ret = -EBUSY;
987 han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
988 if (!han)
989 goto done;
990
991 gru_lock_async_resource(han, &cb0, NULL);
992 memset(buf, 0xee, bytes);
993 for (i = 0; i < numcb; i++)
994 gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
995 XTYPE_DW, 4, 1, IMA_INTERRUPT);
996
997 ret = 0;
998 for (k = 0; k < numcb; k++) {
999 gru_wait_async_cbr(han);
1000 for (i = 0; i < numcb; i++) {
1001 cb = cb0 + i * GRU_HANDLE_STRIDE;
1002 istatus = gru_check_status(cb);
1003 if (istatus == CBS_ACTIVE)
1004 continue;
1005 if (istatus == CBS_EXCEPTION)
1006 ret = -EFAULT;
1007 else if (buf[i] || buf[i + 1] || buf[i + 2] ||
1008 buf[i + 3])
1009 ret = -EIO;
1010 }
1011 }
1012 BUG_ON(cmp.done);
698 1013
699 bs = gru->gs_blade; 1014 gru_unlock_async_resource(han);
700 if (gru != &bs->bs_grus[1]) 1015 gru_release_async_resources(han);
701 return; 1016done:
1017 kfree(buf);
1018 return ret;
1019}
702 1020
703 cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM); 1021/*
704 lock_cch_handle(cch); 1022 * Debugging only. User hook for various kernel tests
705 if (cch_interrupt_sync(cch)) 1023 * of driver & gru.
706 BUG(); 1024 */
707 if (cch_deallocate(cch)) 1025int gru_ktest(unsigned long arg)
1026{
1027 int ret = -EINVAL;
1028
1029 switch (arg & 0xff) {
1030 case 0:
1031 ret = quicktest0(arg);
1032 break;
1033 case 1:
1034 ret = quicktest1(arg);
1035 break;
1036 case 2:
1037 ret = quicktest2(arg);
1038 break;
1039 case 99:
1040 ret = gru_free_kernel_contexts();
1041 break;
1042 }
1043 return ret;
1044
1045}
1046
1047int gru_kservices_init(void)
1048{
1049 return 0;
1050}
1051
1052void gru_kservices_exit(void)
1053{
1054 if (gru_free_kernel_contexts())
708 BUG(); 1055 BUG();
709 unlock_cch_handle(cch);
710} 1056}
711 1057
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
index 747ed315d56f..d60d34bca44d 100644
--- a/drivers/misc/sgi-gru/grukservices.h
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -146,4 +146,55 @@ extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
146extern int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa, 146extern int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
147 unsigned int bytes); 147 unsigned int bytes);
148 148
149/*
150 * Reserve GRU resources to be used asynchronously.
151 *
152 * input:
153 * blade_id - blade on which resources should be reserved
154 * cbrs - number of CBRs
155 * dsr_bytes - number of DSR bytes needed
156 * cmp - completion structure for waiting for
157 * async completions
158 * output:
159 * handle to identify resource
160 * (0 = no resources)
161 */
162extern unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
163 struct completion *cmp);
164
165/*
166 * Release async resources previously reserved.
167 *
168 * input:
169 * han - handle to identify resources
170 */
171extern void gru_release_async_resources(unsigned long han);
172
173/*
174 * Wait for async GRU instructions to complete.
175 *
176 * input:
177 * han - handle to identify resources
178 */
179extern void gru_wait_async_cbr(unsigned long han);
180
181/*
182 * Lock previous reserved async GRU resources
183 *
184 * input:
185 * han - handle to identify resources
186 * output:
187 * cb - pointer to first CBR
188 * dsr - pointer to first DSR
189 */
190extern void gru_lock_async_resource(unsigned long han, void **cb, void **dsr);
191
192/*
193 * Unlock previous reserved async GRU resources
194 *
195 * input:
196 * han - handle to identify resources
197 */
198extern void gru_unlock_async_resource(unsigned long han);
199
149#endif /* __GRU_KSERVICES_H_ */ 200#endif /* __GRU_KSERVICES_H_ */
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index e56e196a6998..889bc442a3e8 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -32,8 +32,8 @@
32/* Set Number of Request Blocks */ 32/* Set Number of Request Blocks */
33#define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *) 33#define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *)
34 34
35/* Register task as using the slice */ 35/* Set Context Options */
36#define GRU_SET_TASK_SLICE _IOWR(GRU_IOCTL_NUM, 5, void *) 36#define GRU_SET_CONTEXT_OPTION _IOWR(GRU_IOCTL_NUM, 4, void *)
37 37
38/* Fetch exception detail */ 38/* Fetch exception detail */
39#define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *) 39#define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *)
@@ -44,8 +44,11 @@
44/* For user unload context */ 44/* For user unload context */
45#define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *) 45#define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *)
46 46
 47/* For fetching GRU chiplet status */ 47/* For dumping GRU chiplet state */
48#define GRU_GET_CHIPLET_STATUS _IOWR(GRU_IOCTL_NUM, 10, void *) 48#define GRU_DUMP_CHIPLET_STATE _IOWR(GRU_IOCTL_NUM, 11, void *)
49
50/* For getting gseg statistics */
51#define GRU_GET_GSEG_STATISTICS _IOWR(GRU_IOCTL_NUM, 12, void *)
49 52
50/* For user TLB flushing (primarily for tests) */ 53/* For user TLB flushing (primarily for tests) */
51#define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *) 54#define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *)
@@ -53,8 +56,26 @@
53/* Get some config options (primarily for tests & emulator) */ 56/* Get some config options (primarily for tests & emulator) */
54#define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *) 57#define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *)
55 58
59/* Various kernel self-tests */
60#define GRU_KTEST _IOWR(GRU_IOCTL_NUM, 52, void *)
61
56#define CONTEXT_WINDOW_BYTES(th) (GRU_GSEG_PAGESIZE * (th)) 62#define CONTEXT_WINDOW_BYTES(th) (GRU_GSEG_PAGESIZE * (th))
57#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) 63#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
64#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
65
66/*
 67 * Statistics kept on a per-GTS basis.
68 */
69struct gts_statistics {
70 unsigned long fmm_tlbdropin;
71 unsigned long upm_tlbdropin;
72 unsigned long context_stolen;
73};
74
75struct gru_get_gseg_statistics_req {
76 unsigned long gseg;
77 struct gts_statistics stats;
78};
58 79
59/* 80/*
60 * Structure used to pass TLB flush parameters to the driver 81 * Structure used to pass TLB flush parameters to the driver
@@ -75,6 +96,16 @@ struct gru_unload_context_req {
75}; 96};
76 97
77/* 98/*
99 * Structure used to set context options
100 */
101enum {sco_gseg_owner, sco_cch_req_slice};
102struct gru_set_context_option_req {
103 unsigned long gseg;
104 int op;
105 unsigned long val1;
106};
107
108/*
78 * Structure used to pass TLB flush parameters to the driver 109 * Structure used to pass TLB flush parameters to the driver
79 */ 110 */
80struct gru_flush_tlb_req { 111struct gru_flush_tlb_req {
@@ -84,6 +115,36 @@ struct gru_flush_tlb_req {
84}; 115};
85 116
86/* 117/*
118 * Structure used to pass TLB flush parameters to the driver
119 */
120enum {dcs_pid, dcs_gid};
121struct gru_dump_chiplet_state_req {
122 unsigned int op;
123 unsigned int gid;
124 int ctxnum;
125 char data_opt;
126 char lock_cch;
127 pid_t pid;
128 void *buf;
129 size_t buflen;
130 /* ---- output --- */
131 unsigned int num_contexts;
132};
133
134#define GRU_DUMP_MAGIC 0x3474ab6c
135struct gru_dump_context_header {
136 unsigned int magic;
137 unsigned int gid;
138 unsigned char ctxnum;
139 unsigned char cbrcnt;
140 unsigned char dsrcnt;
141 pid_t pid;
142 unsigned long vaddr;
143 int cch_locked;
144 unsigned long data[0];
145};
146
147/*
87 * GRU configuration info (temp - for testing) 148 * GRU configuration info (temp - for testing)
88 */ 149 */
89struct gru_config_info { 150struct gru_config_info {
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index ec3f7a17d221..3bc643dad606 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -3,11 +3,21 @@
3 * 3 *
4 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD 4 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
5 * 5 *
6 * This file is subject to the terms and conditions of the GNU General Public 6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 * 7 *
10 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
11 */ 21 */
12 22
13#include <linux/kernel.h> 23#include <linux/kernel.h>
@@ -96,7 +106,7 @@ static int gru_reset_asid_limit(struct gru_state *gru, int asid)
96 gid = gru->gs_gid; 106 gid = gru->gs_gid;
97again: 107again:
98 for (i = 0; i < GRU_NUM_CCH; i++) { 108 for (i = 0; i < GRU_NUM_CCH; i++) {
99 if (!gru->gs_gts[i]) 109 if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
100 continue; 110 continue;
101 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; 111 inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
102 gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n", 112 gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
@@ -150,7 +160,7 @@ static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
150 unsigned long bits = 0; 160 unsigned long bits = 0;
151 int i; 161 int i;
152 162
153 do { 163 while (n--) {
154 i = find_first_bit(p, mmax); 164 i = find_first_bit(p, mmax);
155 if (i == mmax) 165 if (i == mmax)
156 BUG(); 166 BUG();
@@ -158,7 +168,7 @@ static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
158 __set_bit(i, &bits); 168 __set_bit(i, &bits);
159 if (idx) 169 if (idx)
160 *idx++ = i; 170 *idx++ = i;
161 } while (--n); 171 }
162 return bits; 172 return bits;
163} 173}
164 174
@@ -299,38 +309,39 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
299/* 309/*
300 * Allocate a thread state structure. 310 * Allocate a thread state structure.
301 */ 311 */
302static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, 312struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
303 struct gru_vma_data *vdata, 313 int cbr_au_count, int dsr_au_count, int options, int tsid)
304 int tsid)
305{ 314{
306 struct gru_thread_state *gts; 315 struct gru_thread_state *gts;
307 int bytes; 316 int bytes;
308 317
309 bytes = DSR_BYTES(vdata->vd_dsr_au_count) + 318 bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
310 CBR_BYTES(vdata->vd_cbr_au_count);
311 bytes += sizeof(struct gru_thread_state); 319 bytes += sizeof(struct gru_thread_state);
312 gts = kzalloc(bytes, GFP_KERNEL); 320 gts = kmalloc(bytes, GFP_KERNEL);
313 if (!gts) 321 if (!gts)
314 return NULL; 322 return NULL;
315 323
316 STAT(gts_alloc); 324 STAT(gts_alloc);
325 memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
317 atomic_set(&gts->ts_refcnt, 1); 326 atomic_set(&gts->ts_refcnt, 1);
318 mutex_init(&gts->ts_ctxlock); 327 mutex_init(&gts->ts_ctxlock);
319 gts->ts_cbr_au_count = vdata->vd_cbr_au_count; 328 gts->ts_cbr_au_count = cbr_au_count;
320 gts->ts_dsr_au_count = vdata->vd_dsr_au_count; 329 gts->ts_dsr_au_count = dsr_au_count;
321 gts->ts_user_options = vdata->vd_user_options; 330 gts->ts_user_options = options;
322 gts->ts_tsid = tsid; 331 gts->ts_tsid = tsid;
323 gts->ts_user_options = vdata->vd_user_options;
324 gts->ts_ctxnum = NULLCTX; 332 gts->ts_ctxnum = NULLCTX;
325 gts->ts_mm = current->mm;
326 gts->ts_vma = vma;
327 gts->ts_tlb_int_select = -1; 333 gts->ts_tlb_int_select = -1;
328 gts->ts_gms = gru_register_mmu_notifier(); 334 gts->ts_cch_req_slice = -1;
329 gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT); 335 gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
330 if (!gts->ts_gms) 336 if (vma) {
331 goto err; 337 gts->ts_mm = current->mm;
338 gts->ts_vma = vma;
339 gts->ts_gms = gru_register_mmu_notifier();
340 if (!gts->ts_gms)
341 goto err;
342 }
332 343
333 gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts); 344 gru_dbg(grudev, "alloc gts %p\n", gts);
334 return gts; 345 return gts;
335 346
336err: 347err:
@@ -381,7 +392,8 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
381 struct gru_vma_data *vdata = vma->vm_private_data; 392 struct gru_vma_data *vdata = vma->vm_private_data;
382 struct gru_thread_state *gts, *ngts; 393 struct gru_thread_state *gts, *ngts;
383 394
384 gts = gru_alloc_gts(vma, vdata, tsid); 395 gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
396 vdata->vd_user_options, tsid);
385 if (!gts) 397 if (!gts)
386 return NULL; 398 return NULL;
387 399
@@ -458,7 +470,8 @@ static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
458} 470}
459 471
460static void gru_load_context_data(void *save, void *grubase, int ctxnum, 472static void gru_load_context_data(void *save, void *grubase, int ctxnum,
461 unsigned long cbrmap, unsigned long dsrmap) 473 unsigned long cbrmap, unsigned long dsrmap,
474 int data_valid)
462{ 475{
463 void *gseg, *cb, *cbe; 476 void *gseg, *cb, *cbe;
464 unsigned long length; 477 unsigned long length;
@@ -471,12 +484,22 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
471 gru_prefetch_context(gseg, cb, cbe, cbrmap, length); 484 gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
472 485
473 for_each_cbr_in_allocation_map(i, &cbrmap, scr) { 486 for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
474 save += gru_copy_handle(cb, save); 487 if (data_valid) {
475 save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save); 488 save += gru_copy_handle(cb, save);
489 save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
490 save);
491 } else {
492 memset(cb, 0, GRU_CACHE_LINE_BYTES);
493 memset(cbe + i * GRU_HANDLE_STRIDE, 0,
494 GRU_CACHE_LINE_BYTES);
495 }
476 cb += GRU_HANDLE_STRIDE; 496 cb += GRU_HANDLE_STRIDE;
477 } 497 }
478 498
479 memcpy(gseg + GRU_DS_BASE, save, length); 499 if (data_valid)
500 memcpy(gseg + GRU_DS_BASE, save, length);
501 else
502 memset(gseg + GRU_DS_BASE, 0, length);
480} 503}
481 504
482static void gru_unload_context_data(void *save, void *grubase, int ctxnum, 505static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
@@ -506,7 +529,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
506 struct gru_context_configuration_handle *cch; 529 struct gru_context_configuration_handle *cch;
507 int ctxnum = gts->ts_ctxnum; 530 int ctxnum = gts->ts_ctxnum;
508 531
509 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); 532 if (!is_kernel_context(gts))
533 zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
510 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); 534 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
511 535
512 gru_dbg(grudev, "gts %p\n", gts); 536 gru_dbg(grudev, "gts %p\n", gts);
@@ -514,11 +538,14 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
514 if (cch_interrupt_sync(cch)) 538 if (cch_interrupt_sync(cch))
515 BUG(); 539 BUG();
516 540
517 gru_unload_mm_tracker(gru, gts); 541 if (!is_kernel_context(gts))
518 if (savestate) 542 gru_unload_mm_tracker(gru, gts);
543 if (savestate) {
519 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, 544 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
520 ctxnum, gts->ts_cbr_map, 545 ctxnum, gts->ts_cbr_map,
521 gts->ts_dsr_map); 546 gts->ts_dsr_map);
547 gts->ts_data_valid = 1;
548 }
522 549
523 if (cch_deallocate(cch)) 550 if (cch_deallocate(cch))
524 BUG(); 551 BUG();
@@ -526,24 +553,22 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
526 unlock_cch_handle(cch); 553 unlock_cch_handle(cch);
527 554
528 gru_free_gru_context(gts); 555 gru_free_gru_context(gts);
529 STAT(unload_context);
530} 556}
531 557
532/* 558/*
533 * Load a GRU context by copying it from the thread data structure in memory 559 * Load a GRU context by copying it from the thread data structure in memory
534 * to the GRU. 560 * to the GRU.
535 */ 561 */
536static void gru_load_context(struct gru_thread_state *gts) 562void gru_load_context(struct gru_thread_state *gts)
537{ 563{
538 struct gru_state *gru = gts->ts_gru; 564 struct gru_state *gru = gts->ts_gru;
539 struct gru_context_configuration_handle *cch; 565 struct gru_context_configuration_handle *cch;
540 int err, asid, ctxnum = gts->ts_ctxnum; 566 int i, err, asid, ctxnum = gts->ts_ctxnum;
541 567
542 gru_dbg(grudev, "gts %p\n", gts); 568 gru_dbg(grudev, "gts %p\n", gts);
543 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); 569 cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
544 570
545 lock_cch_handle(cch); 571 lock_cch_handle(cch);
546 asid = gru_load_mm_tracker(gru, gts);
547 cch->tfm_fault_bit_enable = 572 cch->tfm_fault_bit_enable =
548 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL 573 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
549 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); 574 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
@@ -552,9 +577,32 @@ static void gru_load_context(struct gru_thread_state *gts)
552 gts->ts_tlb_int_select = gru_cpu_fault_map_id(); 577 gts->ts_tlb_int_select = gru_cpu_fault_map_id();
553 cch->tlb_int_select = gts->ts_tlb_int_select; 578 cch->tlb_int_select = gts->ts_tlb_int_select;
554 } 579 }
580 if (gts->ts_cch_req_slice >= 0) {
581 cch->req_slice_set_enable = 1;
582 cch->req_slice = gts->ts_cch_req_slice;
583 } else {
584 cch->req_slice_set_enable =0;
585 }
555 cch->tfm_done_bit_enable = 0; 586 cch->tfm_done_bit_enable = 0;
556 err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map, 587 cch->dsr_allocation_map = gts->ts_dsr_map;
557 gts->ts_dsr_map); 588 cch->cbr_allocation_map = gts->ts_cbr_map;
589
590 if (is_kernel_context(gts)) {
591 cch->unmap_enable = 1;
592 cch->tfm_done_bit_enable = 1;
593 cch->cb_int_enable = 1;
594 } else {
595 cch->unmap_enable = 0;
596 cch->tfm_done_bit_enable = 0;
597 cch->cb_int_enable = 0;
598 asid = gru_load_mm_tracker(gru, gts);
599 for (i = 0; i < 8; i++) {
600 cch->asid[i] = asid + i;
601 cch->sizeavail[i] = gts->ts_sizeavail;
602 }
603 }
604
605 err = cch_allocate(cch);
558 if (err) { 606 if (err) {
559 gru_dbg(grudev, 607 gru_dbg(grudev,
560 "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n", 608 "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
@@ -563,13 +611,11 @@ static void gru_load_context(struct gru_thread_state *gts)
563 } 611 }
564 612
565 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, 613 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
566 gts->ts_cbr_map, gts->ts_dsr_map); 614 gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);
567 615
568 if (cch_start(cch)) 616 if (cch_start(cch))
569 BUG(); 617 BUG();
570 unlock_cch_handle(cch); 618 unlock_cch_handle(cch);
571
572 STAT(load_context);
573} 619}
574 620
575/* 621/*
@@ -599,6 +645,9 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
599 cch->sizeavail[i] = gts->ts_sizeavail; 645 cch->sizeavail[i] = gts->ts_sizeavail;
600 gts->ts_tlb_int_select = gru_cpu_fault_map_id(); 646 gts->ts_tlb_int_select = gru_cpu_fault_map_id();
601 cch->tlb_int_select = gru_cpu_fault_map_id(); 647 cch->tlb_int_select = gru_cpu_fault_map_id();
648 cch->tfm_fault_bit_enable =
649 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
650 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
602 } else { 651 } else {
603 for (i = 0; i < 8; i++) 652 for (i = 0; i < 8; i++)
604 cch->asid[i] = 0; 653 cch->asid[i] = 0;
@@ -642,7 +691,28 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
642#define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \ 691#define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
643 ((g)+1) : &(b)->bs_grus[0]) 692 ((g)+1) : &(b)->bs_grus[0])
644 693
645static void gru_steal_context(struct gru_thread_state *gts) 694static int is_gts_stealable(struct gru_thread_state *gts,
695 struct gru_blade_state *bs)
696{
697 if (is_kernel_context(gts))
698 return down_write_trylock(&bs->bs_kgts_sema);
699 else
700 return mutex_trylock(&gts->ts_ctxlock);
701}
702
703static void gts_stolen(struct gru_thread_state *gts,
704 struct gru_blade_state *bs)
705{
706 if (is_kernel_context(gts)) {
707 up_write(&bs->bs_kgts_sema);
708 STAT(steal_kernel_context);
709 } else {
710 mutex_unlock(&gts->ts_ctxlock);
711 STAT(steal_user_context);
712 }
713}
714
715void gru_steal_context(struct gru_thread_state *gts, int blade_id)
646{ 716{
647 struct gru_blade_state *blade; 717 struct gru_blade_state *blade;
648 struct gru_state *gru, *gru0; 718 struct gru_state *gru, *gru0;
@@ -652,8 +722,7 @@ static void gru_steal_context(struct gru_thread_state *gts)
652 cbr = gts->ts_cbr_au_count; 722 cbr = gts->ts_cbr_au_count;
653 dsr = gts->ts_dsr_au_count; 723 dsr = gts->ts_dsr_au_count;
654 724
655 preempt_disable(); 725 blade = gru_base[blade_id];
656 blade = gru_base[uv_numa_blade_id()];
657 spin_lock(&blade->bs_lock); 726 spin_lock(&blade->bs_lock);
658 727
659 ctxnum = next_ctxnum(blade->bs_lru_ctxnum); 728 ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
@@ -676,7 +745,7 @@ static void gru_steal_context(struct gru_thread_state *gts)
676 * success are high. If trylock fails, try to steal a 745 * success are high. If trylock fails, try to steal a
677 * different GSEG. 746 * different GSEG.
678 */ 747 */
679 if (ngts && mutex_trylock(&ngts->ts_ctxlock)) 748 if (ngts && is_gts_stealable(ngts, blade))
680 break; 749 break;
681 ngts = NULL; 750 ngts = NULL;
682 flag = 1; 751 flag = 1;
@@ -690,13 +759,12 @@ static void gru_steal_context(struct gru_thread_state *gts)
690 blade->bs_lru_gru = gru; 759 blade->bs_lru_gru = gru;
691 blade->bs_lru_ctxnum = ctxnum; 760 blade->bs_lru_ctxnum = ctxnum;
692 spin_unlock(&blade->bs_lock); 761 spin_unlock(&blade->bs_lock);
693 preempt_enable();
694 762
695 if (ngts) { 763 if (ngts) {
696 STAT(steal_context); 764 gts->ustats.context_stolen++;
697 ngts->ts_steal_jiffies = jiffies; 765 ngts->ts_steal_jiffies = jiffies;
698 gru_unload_context(ngts, 1); 766 gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
699 mutex_unlock(&ngts->ts_ctxlock); 767 gts_stolen(ngts, blade);
700 } else { 768 } else {
701 STAT(steal_context_failed); 769 STAT(steal_context_failed);
702 } 770 }
@@ -710,17 +778,17 @@ static void gru_steal_context(struct gru_thread_state *gts)
710/* 778/*
711 * Scan the GRUs on the local blade & assign a GRU context. 779 * Scan the GRUs on the local blade & assign a GRU context.
712 */ 780 */
713static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts) 781struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
782 int blade)
714{ 783{
715 struct gru_state *gru, *grux; 784 struct gru_state *gru, *grux;
716 int i, max_active_contexts; 785 int i, max_active_contexts;
717 786
718 preempt_disable();
719 787
720again: 788again:
721 gru = NULL; 789 gru = NULL;
722 max_active_contexts = GRU_NUM_CCH; 790 max_active_contexts = GRU_NUM_CCH;
723 for_each_gru_on_blade(grux, uv_numa_blade_id(), i) { 791 for_each_gru_on_blade(grux, blade, i) {
724 if (check_gru_resources(grux, gts->ts_cbr_au_count, 792 if (check_gru_resources(grux, gts->ts_cbr_au_count,
725 gts->ts_dsr_au_count, 793 gts->ts_dsr_au_count,
726 max_active_contexts)) { 794 max_active_contexts)) {
@@ -760,7 +828,6 @@ again:
760 STAT(assign_context_failed); 828 STAT(assign_context_failed);
761 } 829 }
762 830
763 preempt_enable();
764 return gru; 831 return gru;
765} 832}
766 833
@@ -775,6 +842,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
775{ 842{
776 struct gru_thread_state *gts; 843 struct gru_thread_state *gts;
777 unsigned long paddr, vaddr; 844 unsigned long paddr, vaddr;
845 int blade_id;
778 846
779 vaddr = (unsigned long)vmf->virtual_address; 847 vaddr = (unsigned long)vmf->virtual_address;
780 gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", 848 gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@@ -789,8 +857,10 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
789again: 857again:
790 mutex_lock(&gts->ts_ctxlock); 858 mutex_lock(&gts->ts_ctxlock);
791 preempt_disable(); 859 preempt_disable();
860 blade_id = uv_numa_blade_id();
861
792 if (gts->ts_gru) { 862 if (gts->ts_gru) {
793 if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) { 863 if (gts->ts_gru->gs_blade_id != blade_id) {
794 STAT(migrated_nopfn_unload); 864 STAT(migrated_nopfn_unload);
795 gru_unload_context(gts, 1); 865 gru_unload_context(gts, 1);
796 } else { 866 } else {
@@ -800,12 +870,15 @@ again:
800 } 870 }
801 871
802 if (!gts->ts_gru) { 872 if (!gts->ts_gru) {
803 if (!gru_assign_gru_context(gts)) { 873 STAT(load_user_context);
804 mutex_unlock(&gts->ts_ctxlock); 874 if (!gru_assign_gru_context(gts, blade_id)) {
805 preempt_enable(); 875 preempt_enable();
876 mutex_unlock(&gts->ts_ctxlock);
877 set_current_state(TASK_INTERRUPTIBLE);
806 schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ 878 schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
879 blade_id = uv_numa_blade_id();
807 if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) 880 if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
808 gru_steal_context(gts); 881 gru_steal_context(gts, blade_id);
809 goto again; 882 goto again;
810 } 883 }
811 gru_load_context(gts); 884 gru_load_context(gts);
@@ -815,8 +888,8 @@ again:
815 vma->vm_page_prot); 888 vma->vm_page_prot);
816 } 889 }
817 890
818 mutex_unlock(&gts->ts_ctxlock);
819 preempt_enable(); 891 preempt_enable();
892 mutex_unlock(&gts->ts_ctxlock);
820 893
821 return VM_FAULT_NOPAGE; 894 return VM_FAULT_NOPAGE;
822} 895}
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index ee74821b171c..9cbf95bedce6 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -51,9 +51,12 @@ static int statistics_show(struct seq_file *s, void *p)
51 printstat(s, assign_context); 51 printstat(s, assign_context);
52 printstat(s, assign_context_failed); 52 printstat(s, assign_context_failed);
53 printstat(s, free_context); 53 printstat(s, free_context);
54 printstat(s, load_context); 54 printstat(s, load_user_context);
55 printstat(s, unload_context); 55 printstat(s, load_kernel_context);
56 printstat(s, steal_context); 56 printstat(s, lock_kernel_context);
57 printstat(s, unlock_kernel_context);
58 printstat(s, steal_user_context);
59 printstat(s, steal_kernel_context);
57 printstat(s, steal_context_failed); 60 printstat(s, steal_context_failed);
58 printstat(s, nopfn); 61 printstat(s, nopfn);
59 printstat(s, break_cow); 62 printstat(s, break_cow);
@@ -70,7 +73,7 @@ static int statistics_show(struct seq_file *s, void *p)
70 printstat(s, user_flush_tlb); 73 printstat(s, user_flush_tlb);
71 printstat(s, user_unload_context); 74 printstat(s, user_unload_context);
72 printstat(s, user_exception); 75 printstat(s, user_exception);
73 printstat(s, set_task_slice); 76 printstat(s, set_context_option);
74 printstat(s, migrate_check); 77 printstat(s, migrate_check);
75 printstat(s, migrated_retarget); 78 printstat(s, migrated_retarget);
76 printstat(s, migrated_unload); 79 printstat(s, migrated_unload);
@@ -84,6 +87,9 @@ static int statistics_show(struct seq_file *s, void *p)
84 printstat(s, tlb_dropin_fail_range_active); 87 printstat(s, tlb_dropin_fail_range_active);
85 printstat(s, tlb_dropin_fail_idle); 88 printstat(s, tlb_dropin_fail_idle);
86 printstat(s, tlb_dropin_fail_fmm); 89 printstat(s, tlb_dropin_fail_fmm);
90 printstat(s, tlb_dropin_fail_no_exception);
91 printstat(s, tlb_dropin_fail_no_exception_war);
92 printstat(s, tfh_stale_on_fault);
87 printstat(s, mmu_invalidate_range); 93 printstat(s, mmu_invalidate_range);
88 printstat(s, mmu_invalidate_page); 94 printstat(s, mmu_invalidate_page);
89 printstat(s, mmu_clear_flush_young); 95 printstat(s, mmu_clear_flush_young);
@@ -158,8 +164,7 @@ static ssize_t options_write(struct file *file, const char __user *userbuf,
158 unsigned long val; 164 unsigned long val;
159 char buf[80]; 165 char buf[80];
160 166
161 if (copy_from_user 167 if (strncpy_from_user(buf, userbuf, sizeof(buf) - 1) < 0)
162 (buf, userbuf, count < sizeof(buf) ? count : sizeof(buf)))
163 return -EFAULT; 168 return -EFAULT;
164 buf[count - 1] = '\0'; 169 buf[count - 1] = '\0';
165 if (!strict_strtoul(buf, 10, &val)) 170 if (!strict_strtoul(buf, 10, &val))
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index bf1eeb7553ed..34ab3d453919 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -148,11 +148,13 @@
148#include <linux/wait.h> 148#include <linux/wait.h>
149#include <linux/mmu_notifier.h> 149#include <linux/mmu_notifier.h>
150#include "gru.h" 150#include "gru.h"
151#include "grulib.h"
151#include "gruhandles.h" 152#include "gruhandles.h"
152 153
153extern struct gru_stats_s gru_stats; 154extern struct gru_stats_s gru_stats;
154extern struct gru_blade_state *gru_base[]; 155extern struct gru_blade_state *gru_base[];
155extern unsigned long gru_start_paddr, gru_end_paddr; 156extern unsigned long gru_start_paddr, gru_end_paddr;
157extern void *gru_start_vaddr;
156extern unsigned int gru_max_gids; 158extern unsigned int gru_max_gids;
157 159
158#define GRU_MAX_BLADES MAX_NUMNODES 160#define GRU_MAX_BLADES MAX_NUMNODES
@@ -174,9 +176,12 @@ struct gru_stats_s {
174 atomic_long_t assign_context; 176 atomic_long_t assign_context;
175 atomic_long_t assign_context_failed; 177 atomic_long_t assign_context_failed;
176 atomic_long_t free_context; 178 atomic_long_t free_context;
177 atomic_long_t load_context; 179 atomic_long_t load_user_context;
178 atomic_long_t unload_context; 180 atomic_long_t load_kernel_context;
179 atomic_long_t steal_context; 181 atomic_long_t lock_kernel_context;
182 atomic_long_t unlock_kernel_context;
183 atomic_long_t steal_user_context;
184 atomic_long_t steal_kernel_context;
180 atomic_long_t steal_context_failed; 185 atomic_long_t steal_context_failed;
181 atomic_long_t nopfn; 186 atomic_long_t nopfn;
182 atomic_long_t break_cow; 187 atomic_long_t break_cow;
@@ -193,7 +198,7 @@ struct gru_stats_s {
193 atomic_long_t user_flush_tlb; 198 atomic_long_t user_flush_tlb;
194 atomic_long_t user_unload_context; 199 atomic_long_t user_unload_context;
195 atomic_long_t user_exception; 200 atomic_long_t user_exception;
196 atomic_long_t set_task_slice; 201 atomic_long_t set_context_option;
197 atomic_long_t migrate_check; 202 atomic_long_t migrate_check;
198 atomic_long_t migrated_retarget; 203 atomic_long_t migrated_retarget;
199 atomic_long_t migrated_unload; 204 atomic_long_t migrated_unload;
@@ -207,6 +212,9 @@ struct gru_stats_s {
207 atomic_long_t tlb_dropin_fail_range_active; 212 atomic_long_t tlb_dropin_fail_range_active;
208 atomic_long_t tlb_dropin_fail_idle; 213 atomic_long_t tlb_dropin_fail_idle;
209 atomic_long_t tlb_dropin_fail_fmm; 214 atomic_long_t tlb_dropin_fail_fmm;
215 atomic_long_t tlb_dropin_fail_no_exception;
216 atomic_long_t tlb_dropin_fail_no_exception_war;
217 atomic_long_t tfh_stale_on_fault;
210 atomic_long_t mmu_invalidate_range; 218 atomic_long_t mmu_invalidate_range;
211 atomic_long_t mmu_invalidate_page; 219 atomic_long_t mmu_invalidate_page;
212 atomic_long_t mmu_clear_flush_young; 220 atomic_long_t mmu_clear_flush_young;
@@ -253,7 +261,6 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
253 261
254#define OPT_DPRINT 1 262#define OPT_DPRINT 1
255#define OPT_STATS 2 263#define OPT_STATS 2
256#define GRU_QUICKLOOK 4
257 264
258 265
259#define IRQ_GRU 110 /* Starting IRQ number for interrupts */ 266#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
@@ -373,6 +380,7 @@ struct gru_thread_state {
373 required for contest */ 380 required for contest */
374 unsigned char ts_cbr_au_count;/* Number of CBR resources 381 unsigned char ts_cbr_au_count;/* Number of CBR resources
375 required for contest */ 382 required for contest */
383 char ts_cch_req_slice;/* CCH packet slice */
376 char ts_blade; /* If >= 0, migrate context if 384 char ts_blade; /* If >= 0, migrate context if
377 ref from diferent blade */ 385 ref from diferent blade */
378 char ts_force_cch_reload; 386 char ts_force_cch_reload;
@@ -380,6 +388,9 @@ struct gru_thread_state {
380 after migration */ 388 after migration */
381 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each 389 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
382 allocated CB */ 390 allocated CB */
391 int ts_data_valid; /* Indicates if ts_gdata has
392 valid data */
393 struct gts_statistics ustats; /* User statistics */
383 unsigned long ts_gdata[0]; /* save area for GRU data (CB, 394 unsigned long ts_gdata[0]; /* save area for GRU data (CB,
384 DS, CBE) */ 395 DS, CBE) */
385}; 396};
@@ -452,6 +463,14 @@ struct gru_blade_state {
452 reserved cb */ 463 reserved cb */
453 void *kernel_dsr; /* First kernel 464 void *kernel_dsr; /* First kernel
454 reserved DSR */ 465 reserved DSR */
466 struct rw_semaphore bs_kgts_sema; /* lock for kgts */
467 struct gru_thread_state *bs_kgts; /* GTS for kernel use */
468
469 /* ---- the following are used for managing kernel async GRU CBRs --- */
470 int bs_async_dsr_bytes; /* DSRs for async */
471 int bs_async_cbrs; /* CBRs AU for async */
472 struct completion *bs_async_wq;
473
455 /* ---- the following are protected by the bs_lock spinlock ---- */ 474 /* ---- the following are protected by the bs_lock spinlock ---- */
456 spinlock_t bs_lock; /* lock used for 475 spinlock_t bs_lock; /* lock used for
457 stealing contexts */ 476 stealing contexts */
@@ -552,6 +571,12 @@ struct gru_blade_state {
552 571
553/* Lock hierarchy checking enabled only in emulator */ 572/* Lock hierarchy checking enabled only in emulator */
554 573
574/* 0 = lock failed, 1 = locked */
575static inline int __trylock_handle(void *h)
576{
577 return !test_and_set_bit(1, h);
578}
579
555static inline void __lock_handle(void *h) 580static inline void __lock_handle(void *h)
556{ 581{
557 while (test_and_set_bit(1, h)) 582 while (test_and_set_bit(1, h))
@@ -563,6 +588,11 @@ static inline void __unlock_handle(void *h)
563 clear_bit(1, h); 588 clear_bit(1, h);
564} 589}
565 590
591static inline int trylock_cch_handle(struct gru_context_configuration_handle *cch)
592{
593 return __trylock_handle(cch);
594}
595
566static inline void lock_cch_handle(struct gru_context_configuration_handle *cch) 596static inline void lock_cch_handle(struct gru_context_configuration_handle *cch)
567{ 597{
568 __lock_handle(cch); 598 __lock_handle(cch);
@@ -584,6 +614,11 @@ static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
584 __unlock_handle(tgh); 614 __unlock_handle(tgh);
585} 615}
586 616
617static inline int is_kernel_context(struct gru_thread_state *gts)
618{
619 return !gts->ts_mm;
620}
621
587/*----------------------------------------------------------------------------- 622/*-----------------------------------------------------------------------------
588 * Function prototypes & externs 623 * Function prototypes & externs
589 */ 624 */
@@ -598,24 +633,32 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
598 *vma, int tsid); 633 *vma, int tsid);
599extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct 634extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
600 *vma, int tsid); 635 *vma, int tsid);
636extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
637 int blade);
638extern void gru_load_context(struct gru_thread_state *gts);
639extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
601extern void gru_unload_context(struct gru_thread_state *gts, int savestate); 640extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
602extern int gru_update_cch(struct gru_thread_state *gts, int force_unload); 641extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
603extern void gts_drop(struct gru_thread_state *gts); 642extern void gts_drop(struct gru_thread_state *gts);
604extern void gru_tgh_flush_init(struct gru_state *gru); 643extern void gru_tgh_flush_init(struct gru_state *gru);
605extern int gru_kservices_init(struct gru_state *gru); 644extern int gru_kservices_init(void);
606extern void gru_kservices_exit(struct gru_state *gru); 645extern void gru_kservices_exit(void);
646extern int gru_dump_chiplet_request(unsigned long arg);
647extern long gru_get_gseg_statistics(unsigned long arg);
607extern irqreturn_t gru_intr(int irq, void *dev_id); 648extern irqreturn_t gru_intr(int irq, void *dev_id);
608extern int gru_handle_user_call_os(unsigned long address); 649extern int gru_handle_user_call_os(unsigned long address);
609extern int gru_user_flush_tlb(unsigned long arg); 650extern int gru_user_flush_tlb(unsigned long arg);
610extern int gru_user_unload_context(unsigned long arg); 651extern int gru_user_unload_context(unsigned long arg);
611extern int gru_get_exception_detail(unsigned long arg); 652extern int gru_get_exception_detail(unsigned long arg);
612extern int gru_set_task_slice(long address); 653extern int gru_set_context_option(unsigned long address);
613extern int gru_cpu_fault_map_id(void); 654extern int gru_cpu_fault_map_id(void);
614extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); 655extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
615extern void gru_flush_all_tlb(struct gru_state *gru); 656extern void gru_flush_all_tlb(struct gru_state *gru);
616extern int gru_proc_init(void); 657extern int gru_proc_init(void);
617extern void gru_proc_exit(void); 658extern void gru_proc_exit(void);
618 659
660extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
661 int cbr_au_count, int dsr_au_count, int options, int tsid);
619extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, 662extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
620 int cbr_au_count, char *cbmap); 663 int cbr_au_count, char *cbmap);
621extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, 664extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
@@ -624,6 +667,7 @@ extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf);
624extern struct gru_mm_struct *gru_register_mmu_notifier(void); 667extern struct gru_mm_struct *gru_register_mmu_notifier(void);
625extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms); 668extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
626 669
670extern int gru_ktest(unsigned long arg);
627extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, 671extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
628 unsigned long len); 672 unsigned long len);
629 673
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 7e3738112c4e..38f1c3375d7f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3552,14 +3552,14 @@ bnx2_set_rx_mode(struct net_device *dev)
3552 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; 3552 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3553 } 3553 }
3554 3554
3555 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) { 3555 if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
3556 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; 3556 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3557 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | 3557 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3558 BNX2_RPM_SORT_USER0_PROM_VLAN; 3558 BNX2_RPM_SORT_USER0_PROM_VLAN;
3559 } else if (!(dev->flags & IFF_PROMISC)) { 3559 } else if (!(dev->flags & IFF_PROMISC)) {
3560 /* Add all entries into to the match filter list */ 3560 /* Add all entries into to the match filter list */
3561 i = 0; 3561 i = 0;
3562 list_for_each_entry(ha, &dev->uc_list, list) { 3562 list_for_each_entry(ha, &dev->uc.list, list) {
3563 bnx2_set_mac_addr(bp, ha->addr, 3563 bnx2_set_mac_addr(bp, ha->addr,
3564 i + BNX2_START_UNICAST_ADDRESS_INDEX); 3564 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3565 sort_mode |= (1 << 3565 sort_mode |= (1 <<
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 0e9b9f9632c1..2df8fb0af701 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2767,7 +2767,6 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
2767 2767
2768 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); 2768 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
2769 2769
2770 clk_disable(emac_clk);
2771 platform_set_drvdata(pdev, NULL); 2770 platform_set_drvdata(pdev, NULL);
2772 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2771 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2773 mdiobus_unregister(priv->mii_bus); 2772 mdiobus_unregister(priv->mii_bus);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index f7929e89eb03..efa680f4b8dd 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2895,12 +2895,13 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
2895 2895
2896static int __e100_power_off(struct pci_dev *pdev, bool wake) 2896static int __e100_power_off(struct pci_dev *pdev, bool wake)
2897{ 2897{
2898 if (wake) { 2898 if (wake)
2899 return pci_prepare_to_sleep(pdev); 2899 return pci_prepare_to_sleep(pdev);
2900 } else { 2900
2901 pci_wake_from_d3(pdev, false); 2901 pci_wake_from_d3(pdev, false);
2902 return pci_set_power_state(pdev, PCI_D3hot); 2902 pci_set_power_state(pdev, PCI_D3hot);
2903 } 2903
2904 return 0;
2904} 2905}
2905 2906
2906#ifdef CONFIG_PM 2907#ifdef CONFIG_PM
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8d36743c8140..5e3356f8eb5a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2370,7 +2370,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2370 rctl |= E1000_RCTL_VFE; 2370 rctl |= E1000_RCTL_VFE;
2371 } 2371 }
2372 2372
2373 if (netdev->uc_count > rar_entries - 1) { 2373 if (netdev->uc.count > rar_entries - 1) {
2374 rctl |= E1000_RCTL_UPE; 2374 rctl |= E1000_RCTL_UPE;
2375 } else if (!(netdev->flags & IFF_PROMISC)) { 2375 } else if (!(netdev->flags & IFF_PROMISC)) {
2376 rctl &= ~E1000_RCTL_UPE; 2376 rctl &= ~E1000_RCTL_UPE;
@@ -2394,7 +2394,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2394 */ 2394 */
2395 i = 1; 2395 i = 1;
2396 if (use_uc) 2396 if (use_uc)
2397 list_for_each_entry(ha, &netdev->uc_list, list) { 2397 list_for_each_entry(ha, &netdev->uc.list, list) {
2398 if (i == rar_entries) 2398 if (i == rar_entries)
2399 break; 2399 break;
2400 e1000_rar_set(hw, ha->addr, i++); 2400 e1000_rar_set(hw, ha->addr, i++);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index b60a3041b64c..1094d292630f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -719,7 +719,8 @@ static const struct register_test nv_registers_test[] = {
719struct nv_skb_map { 719struct nv_skb_map {
720 struct sk_buff *skb; 720 struct sk_buff *skb;
721 dma_addr_t dma; 721 dma_addr_t dma;
722 unsigned int dma_len; 722 unsigned int dma_len:31;
723 unsigned int dma_single:1;
723 struct ring_desc_ex *first_tx_desc; 724 struct ring_desc_ex *first_tx_desc;
724 struct nv_skb_map *next_tx_ctx; 725 struct nv_skb_map *next_tx_ctx;
725}; 726};
@@ -1912,6 +1913,7 @@ static void nv_init_tx(struct net_device *dev)
1912 np->tx_skb[i].skb = NULL; 1913 np->tx_skb[i].skb = NULL;
1913 np->tx_skb[i].dma = 0; 1914 np->tx_skb[i].dma = 0;
1914 np->tx_skb[i].dma_len = 0; 1915 np->tx_skb[i].dma_len = 0;
1916 np->tx_skb[i].dma_single = 0;
1915 np->tx_skb[i].first_tx_desc = NULL; 1917 np->tx_skb[i].first_tx_desc = NULL;
1916 np->tx_skb[i].next_tx_ctx = NULL; 1918 np->tx_skb[i].next_tx_ctx = NULL;
1917 } 1919 }
@@ -1930,23 +1932,30 @@ static int nv_init_ring(struct net_device *dev)
1930 return nv_alloc_rx_optimized(dev); 1932 return nv_alloc_rx_optimized(dev);
1931} 1933}
1932 1934
1933static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb) 1935static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1934{ 1936{
1935 struct fe_priv *np = netdev_priv(dev);
1936
1937 if (tx_skb->dma) { 1937 if (tx_skb->dma) {
1938 pci_unmap_page(np->pci_dev, tx_skb->dma, 1938 if (tx_skb->dma_single)
1939 tx_skb->dma_len, 1939 pci_unmap_single(np->pci_dev, tx_skb->dma,
1940 PCI_DMA_TODEVICE); 1940 tx_skb->dma_len,
1941 PCI_DMA_TODEVICE);
1942 else
1943 pci_unmap_page(np->pci_dev, tx_skb->dma,
1944 tx_skb->dma_len,
1945 PCI_DMA_TODEVICE);
1941 tx_skb->dma = 0; 1946 tx_skb->dma = 0;
1942 } 1947 }
1948}
1949
1950static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1951{
1952 nv_unmap_txskb(np, tx_skb);
1943 if (tx_skb->skb) { 1953 if (tx_skb->skb) {
1944 dev_kfree_skb_any(tx_skb->skb); 1954 dev_kfree_skb_any(tx_skb->skb);
1945 tx_skb->skb = NULL; 1955 tx_skb->skb = NULL;
1946 return 1; 1956 return 1;
1947 } else {
1948 return 0;
1949 } 1957 }
1958 return 0;
1950} 1959}
1951 1960
1952static void nv_drain_tx(struct net_device *dev) 1961static void nv_drain_tx(struct net_device *dev)
@@ -1964,10 +1973,11 @@ static void nv_drain_tx(struct net_device *dev)
1964 np->tx_ring.ex[i].bufhigh = 0; 1973 np->tx_ring.ex[i].bufhigh = 0;
1965 np->tx_ring.ex[i].buflow = 0; 1974 np->tx_ring.ex[i].buflow = 0;
1966 } 1975 }
1967 if (nv_release_txskb(dev, &np->tx_skb[i])) 1976 if (nv_release_txskb(np, &np->tx_skb[i]))
1968 dev->stats.tx_dropped++; 1977 dev->stats.tx_dropped++;
1969 np->tx_skb[i].dma = 0; 1978 np->tx_skb[i].dma = 0;
1970 np->tx_skb[i].dma_len = 0; 1979 np->tx_skb[i].dma_len = 0;
1980 np->tx_skb[i].dma_single = 0;
1971 np->tx_skb[i].first_tx_desc = NULL; 1981 np->tx_skb[i].first_tx_desc = NULL;
1972 np->tx_skb[i].next_tx_ctx = NULL; 1982 np->tx_skb[i].next_tx_ctx = NULL;
1973 } 1983 }
@@ -2171,6 +2181,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2171 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2181 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2172 PCI_DMA_TODEVICE); 2182 PCI_DMA_TODEVICE);
2173 np->put_tx_ctx->dma_len = bcnt; 2183 np->put_tx_ctx->dma_len = bcnt;
2184 np->put_tx_ctx->dma_single = 1;
2174 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2185 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2175 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2186 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2176 2187
@@ -2196,6 +2207,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2196 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2207 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2197 PCI_DMA_TODEVICE); 2208 PCI_DMA_TODEVICE);
2198 np->put_tx_ctx->dma_len = bcnt; 2209 np->put_tx_ctx->dma_len = bcnt;
2210 np->put_tx_ctx->dma_single = 0;
2199 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); 2211 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2200 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2212 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2201 2213
@@ -2291,6 +2303,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
2291 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, 2303 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2292 PCI_DMA_TODEVICE); 2304 PCI_DMA_TODEVICE);
2293 np->put_tx_ctx->dma_len = bcnt; 2305 np->put_tx_ctx->dma_len = bcnt;
2306 np->put_tx_ctx->dma_single = 1;
2294 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2307 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2295 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2308 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2296 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2309 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
@@ -2317,6 +2330,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
2317 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 2330 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2318 PCI_DMA_TODEVICE); 2331 PCI_DMA_TODEVICE);
2319 np->put_tx_ctx->dma_len = bcnt; 2332 np->put_tx_ctx->dma_len = bcnt;
2333 np->put_tx_ctx->dma_single = 0;
2320 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); 2334 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2321 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); 2335 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2322 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2336 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
@@ -2434,10 +2448,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
2434 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n", 2448 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2435 dev->name, flags); 2449 dev->name, flags);
2436 2450
2437 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2451 nv_unmap_txskb(np, np->get_tx_ctx);
2438 np->get_tx_ctx->dma_len,
2439 PCI_DMA_TODEVICE);
2440 np->get_tx_ctx->dma = 0;
2441 2452
2442 if (np->desc_ver == DESC_VER_1) { 2453 if (np->desc_ver == DESC_VER_1) {
2443 if (flags & NV_TX_LASTPACKET) { 2454 if (flags & NV_TX_LASTPACKET) {
@@ -2502,10 +2513,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2502 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n", 2513 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2503 dev->name, flags); 2514 dev->name, flags);
2504 2515
2505 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma, 2516 nv_unmap_txskb(np, np->get_tx_ctx);
2506 np->get_tx_ctx->dma_len,
2507 PCI_DMA_TODEVICE);
2508 np->get_tx_ctx->dma = 0;
2509 2517
2510 if (flags & NV_TX2_LASTPACKET) { 2518 if (flags & NV_TX2_LASTPACKET) {
2511 if (!(flags & NV_TX2_ERROR)) 2519 if (!(flags & NV_TX2_ERROR))
@@ -5091,7 +5099,7 @@ static int nv_loopback_test(struct net_device *dev)
5091 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); 5099 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
5092 } 5100 }
5093 5101
5094 pci_unmap_page(np->pci_dev, test_dma_addr, 5102 pci_unmap_single(np->pci_dev, test_dma_addr,
5095 (skb_end_pointer(tx_skb) - tx_skb->data), 5103 (skb_end_pointer(tx_skb) - tx_skb->data),
5096 PCI_DMA_TODEVICE); 5104 PCI_DMA_TODEVICE);
5097 dev_kfree_skb_any(tx_skb); 5105 dev_kfree_skb_any(tx_skb);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 5105548ad50c..abcd19a8bff9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -260,7 +260,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
260 */ 260 */
261 if (!netif_running(dev)) { 261 if (!netif_running(dev)) {
262 kfree_skb(skb); 262 kfree_skb(skb);
263 return -ENODEV; 263 return NETDEV_TX_OK;
264 } 264 }
265 265
266 skb_pull(skb, 1); 266 skb_pull(skb, 1);
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8feda9fe8297..1d3429a415e6 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1495,13 +1495,8 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
1495 hp100_outw(0x4210, TRACE); 1495 hp100_outw(0x4210, TRACE);
1496 printk("hp100: %s: start_xmit_bm\n", dev->name); 1496 printk("hp100: %s: start_xmit_bm\n", dev->name);
1497#endif 1497#endif
1498
1499 if (skb == NULL) {
1500 return 0;
1501 }
1502
1503 if (skb->len <= 0) 1498 if (skb->len <= 0)
1504 return 0; 1499 goto drop;
1505 1500
1506 if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN)) 1501 if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
1507 return 0; 1502 return 0;
@@ -1514,10 +1509,10 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
1514#endif 1509#endif
1515 /* not waited long enough since last tx? */ 1510 /* not waited long enough since last tx? */
1516 if (time_before(jiffies, dev->trans_start + HZ)) 1511 if (time_before(jiffies, dev->trans_start + HZ))
1517 return -EAGAIN; 1512 goto drop;
1518 1513
1519 if (hp100_check_lan(dev)) 1514 if (hp100_check_lan(dev))
1520 return -EIO; 1515 goto drop;
1521 1516
1522 if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) { 1517 if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
1523 /* we have a 100Mb/s adapter but it isn't connected to hub */ 1518 /* we have a 100Mb/s adapter but it isn't connected to hub */
@@ -1551,7 +1546,7 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
1551 } 1546 }
1552 1547
1553 dev->trans_start = jiffies; 1548 dev->trans_start = jiffies;
1554 return -EAGAIN; 1549 goto drop;
1555 } 1550 }
1556 1551
1557 /* 1552 /*
@@ -1591,6 +1586,10 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
1591 dev->trans_start = jiffies; 1586 dev->trans_start = jiffies;
1592 1587
1593 return 0; 1588 return 0;
1589
1590drop:
1591 dev_kfree_skb(skb);
1592 return NETDEV_TX_OK;
1594} 1593}
1595 1594
1596 1595
@@ -1648,16 +1647,11 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
1648 hp100_outw(0x4212, TRACE); 1647 hp100_outw(0x4212, TRACE);
1649 printk("hp100: %s: start_xmit\n", dev->name); 1648 printk("hp100: %s: start_xmit\n", dev->name);
1650#endif 1649#endif
1651
1652 if (skb == NULL) {
1653 return 0;
1654 }
1655
1656 if (skb->len <= 0) 1650 if (skb->len <= 0)
1657 return 0; 1651 goto drop;
1658 1652
1659 if (hp100_check_lan(dev)) 1653 if (hp100_check_lan(dev))
1660 return -EIO; 1654 goto drop;
1661 1655
1662 /* If there is not enough free memory on the card... */ 1656 /* If there is not enough free memory on the card... */
1663 i = hp100_inl(TX_MEM_FREE) & 0x7fffffff; 1657 i = hp100_inl(TX_MEM_FREE) & 0x7fffffff;
@@ -1671,7 +1665,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
1671 printk("hp100: %s: trans_start timing problem\n", 1665 printk("hp100: %s: trans_start timing problem\n",
1672 dev->name); 1666 dev->name);
1673#endif 1667#endif
1674 return -EAGAIN; 1668 goto drop;
1675 } 1669 }
1676 if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) { 1670 if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
1677 /* we have a 100Mb/s adapter but it isn't connected to hub */ 1671 /* we have a 100Mb/s adapter but it isn't connected to hub */
@@ -1705,7 +1699,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
1705 } 1699 }
1706 } 1700 }
1707 dev->trans_start = jiffies; 1701 dev->trans_start = jiffies;
1708 return -EAGAIN; 1702 goto drop;
1709 } 1703 }
1710 1704
1711 for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) { 1705 for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) {
@@ -1759,6 +1753,11 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
1759#endif 1753#endif
1760 1754
1761 return 0; 1755 return 0;
1756
1757drop:
1758 dev_kfree_skb(skb);
1759 return NETDEV_TX_OK;
1760
1762} 1761}
1763 1762
1764 1763
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 22aadb7884fa..2bc9d63027db 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1281,7 +1281,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1281 /* Setup the HW Tx Head and Tail descriptor pointers */ 1281 /* Setup the HW Tx Head and Tail descriptor pointers */
1282 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); 1282 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
1283 tdba = tx_ring->dma; 1283 tdba = tx_ring->dma;
1284 ew32(TDBAL(0), (tdba & DMA_32BIT_MASK)); 1284 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
1285 ew32(TDBAH(0), (tdba >> 32)); 1285 ew32(TDBAH(0), (tdba >> 32));
1286 ew32(TDH(0), 0); 1286 ew32(TDH(0), 0);
1287 ew32(TDT(0), 0); 1287 ew32(TDT(0), 0);
@@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1367 * the Base and Length of the Rx Descriptor Ring 1367 * the Base and Length of the Rx Descriptor Ring
1368 */ 1368 */
1369 rdba = rx_ring->dma; 1369 rdba = rx_ring->dma;
1370 ew32(RDBAL(0), (rdba & DMA_32BIT_MASK)); 1370 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
1371 ew32(RDBAH(0), (rdba >> 32)); 1371 ew32(RDBAH(0), (rdba >> 32));
1372 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); 1372 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1373 rx_ring->head = E1000_RDH(0); 1373 rx_ring->head = E1000_RDH(0);
@@ -2628,15 +2628,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2628 return err; 2628 return err;
2629 2629
2630 pci_using_dac = 0; 2630 pci_using_dac = 0;
2631 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 2631 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2632 if (!err) { 2632 if (!err) {
2633 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 2633 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2634 if (!err) 2634 if (!err)
2635 pci_using_dac = 1; 2635 pci_using_dac = 1;
2636 } else { 2636 } else {
2637 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 2637 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2638 if (err) { 2638 if (err) {
2639 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 2639 err = pci_set_consistent_dma_mask(pdev,
2640 DMA_BIT_MASK(32));
2640 if (err) { 2641 if (err) {
2641 dev_err(&pdev->dev, "No usable DMA " 2642 dev_err(&pdev->dev, "No usable DMA "
2642 "configuration, aborting\n"); 2643 "configuration, aborting\n");
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 3c3bf1f07b81..fa9f24e23683 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -251,7 +251,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
251 /* program DMA context */ 251 /* program DMA context */
252 hw = &adapter->hw; 252 hw = &adapter->hw;
253 spin_lock_bh(&fcoe->lock); 253 spin_lock_bh(&fcoe->lock);
254 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_32BIT_MASK); 254 IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
255 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); 255 IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
256 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); 256 IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
257 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); 257 IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a551a96ce676..e756e220db32 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2321,7 +2321,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
2321 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 2321 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2322 2322
2323 /* reprogram secondary unicast list */ 2323 /* reprogram secondary unicast list */
2324 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list); 2324 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
2325 2325
2326 /* reprogram multicast list */ 2326 /* reprogram multicast list */
2327 addr_count = netdev->mc_count; 2327 addr_count = netdev->mc_count;
@@ -5261,7 +5261,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
5261 5261
5262/** 5262/**
5263 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding 5263 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
5264 * netdev->dev_addr_list 5264 * netdev->dev_addrs
5265 * @netdev: network interface device structure 5265 * @netdev: network interface device structure
5266 * 5266 *
5267 * Returns non-zero on failure 5267 * Returns non-zero on failure
@@ -5282,7 +5282,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
5282 5282
5283/** 5283/**
5284 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding 5284 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
5285 * netdev->dev_addr_list 5285 * netdev->dev_addrs
5286 * @netdev: network interface device structure 5286 * @netdev: network interface device structure
5287 * 5287 *
5288 * Returns non-zero on failure 5288 * Returns non-zero on failure
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b4e18a58cb1b..745ae8b4a2e8 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1729,7 +1729,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
1729 return 0; 1729 return 0;
1730 1730
1731 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1731 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1732 list_for_each_entry(ha, &dev->uc_list, list) { 1732 list_for_each_entry(ha, &dev->uc.list, list) {
1733 if (memcmp(dev->dev_addr, ha->addr, 5)) 1733 if (memcmp(dev->dev_addr, ha->addr, 5))
1734 return 0; 1734 return 0;
1735 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1735 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index ab11c2b3f0fe..970cedeb5f37 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -169,6 +169,7 @@
169#define MAX_NUM_CARDS 4 169#define MAX_NUM_CARDS 4
170 170
171#define MAX_BUFFERS_PER_CMD 32 171#define MAX_BUFFERS_PER_CMD 32
172#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
172 173
173/* 174/*
174 * Following are the states of the Phantom. Phantom will set them and 175 * Following are the states of the Phantom. Phantom will set them and
@@ -1436,7 +1437,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p);
1436struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev); 1437struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
1437 1438
1438void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, 1439void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
1439 struct nx_host_tx_ring *tx_ring, uint32_t crb_producer); 1440 struct nx_host_tx_ring *tx_ring);
1440 1441
1441/* 1442/*
1442 * NetXen Board information 1443 * NetXen Board information
@@ -1538,6 +1539,14 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
1538} 1539}
1539 1540
1540 1541
1542static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
1543{
1544 smp_mb();
1545 return find_diff_among(tx_ring->producer,
1546 tx_ring->sw_consumer, tx_ring->num_desc);
1547
1548}
1549
1541int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1550int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
1542int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac); 1551int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
1543extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1552extern void netxen_change_ringparam(struct netxen_adapter *adapter);
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 7f0ddbfa7b28..3cc047844af3 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -355,6 +355,7 @@ enum {
355#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \ 355#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \
356 ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR) 356 ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
357 357
358#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c)
358#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034) 359#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034)
359#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014) 360#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014)
360#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000) 361#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000)
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 42ffb825ebf1..ce3b89d2cbb6 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -488,7 +488,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
488 488
489 tx_ring->producer = producer; 489 tx_ring->producer = producer;
490 490
491 netxen_nic_update_cmd_producer(adapter, tx_ring, producer); 491 netxen_nic_update_cmd_producer(adapter, tx_ring);
492 492
493 netif_tx_unlock_bh(adapter->netdev); 493 netif_tx_unlock_bh(adapter->netdev);
494 494
@@ -2041,8 +2041,8 @@ void netxen_nic_get_firmware_info(struct netxen_adapter *adapter)
2041 fw_major, fw_minor, fw_build); 2041 fw_major, fw_minor, fw_build);
2042 2042
2043 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 2043 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
2044 i = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); 2044 i = NXRD32(adapter, NETXEN_SRE_MISC);
2045 adapter->ahw.cut_through = (i & 0x4) ? 1 : 0; 2045 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
2046 dev_info(&pdev->dev, "firmware running in %s mode\n", 2046 dev_info(&pdev->dev, "firmware running in %s mode\n",
2047 adapter->ahw.cut_through ? "cut-through" : "legacy"); 2047 adapter->ahw.cut_through ? "cut-through" : "legacy");
2048 } 2048 }
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 6f77ad58e3b3..bdb143d2b5c7 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1292,7 +1292,6 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1292 return 1; 1292 return 1;
1293 1293
1294 sw_consumer = tx_ring->sw_consumer; 1294 sw_consumer = tx_ring->sw_consumer;
1295 barrier(); /* hw_consumer can change underneath */
1296 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 1295 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1297 1296
1298 while (sw_consumer != hw_consumer) { 1297 while (sw_consumer != hw_consumer) {
@@ -1319,14 +1318,15 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1319 break; 1318 break;
1320 } 1319 }
1321 1320
1322 tx_ring->sw_consumer = sw_consumer;
1323
1324 if (count && netif_running(netdev)) { 1321 if (count && netif_running(netdev)) {
1322 tx_ring->sw_consumer = sw_consumer;
1323
1325 smp_mb(); 1324 smp_mb();
1325
1326 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { 1326 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1327 netif_tx_lock(netdev); 1327 netif_tx_lock(netdev);
1328 netif_wake_queue(netdev); 1328 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
1329 smp_mb(); 1329 netif_wake_queue(netdev);
1330 netif_tx_unlock(netdev); 1330 netif_tx_unlock(netdev);
1331 } 1331 }
1332 } 1332 }
@@ -1343,7 +1343,6 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1343 * There is still a possible race condition and the host could miss an 1343 * There is still a possible race condition and the host could miss an
1344 * interrupt. The card has to take care of this. 1344 * interrupt. The card has to take care of this.
1345 */ 1345 */
1346 barrier(); /* hw_consumer can change underneath */
1347 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 1346 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1348 done = (sw_consumer == hw_consumer); 1347 done = (sw_consumer == hw_consumer);
1349 spin_unlock(&adapter->tx_clean_lock); 1348 spin_unlock(&adapter->tx_clean_lock);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 98737ef72936..71daa3d5f114 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -107,9 +107,14 @@ static uint32_t crb_cmd_producer[4] = {
107 107
108void 108void
109netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, 109netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
110 struct nx_host_tx_ring *tx_ring, u32 producer) 110 struct nx_host_tx_ring *tx_ring)
111{ 111{
112 NXWR32(adapter, tx_ring->crb_cmd_producer, producer); 112 NXWR32(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
113
114 if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
115 netif_stop_queue(adapter->netdev);
116 smp_mb();
117 }
113} 118}
114 119
115static uint32_t crb_cmd_consumer[4] = { 120static uint32_t crb_cmd_consumer[4] = {
@@ -119,9 +124,9 @@ static uint32_t crb_cmd_consumer[4] = {
119 124
120static inline void 125static inline void
121netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, 126netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
122 struct nx_host_tx_ring *tx_ring, u32 consumer) 127 struct nx_host_tx_ring *tx_ring)
123{ 128{
124 NXWR32(adapter, tx_ring->crb_cmd_consumer, consumer); 129 NXWR32(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
125} 130}
126 131
127static uint32_t msi_tgt_status[8] = { 132static uint32_t msi_tgt_status[8] = {
@@ -900,8 +905,11 @@ netxen_nic_attach(struct netxen_adapter *adapter)
900 tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum]; 905 tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
901 tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum]; 906 tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
902 907
903 netxen_nic_update_cmd_producer(adapter, tx_ring, 0); 908 tx_ring->producer = 0;
904 netxen_nic_update_cmd_consumer(adapter, tx_ring, 0); 909 tx_ring->sw_consumer = 0;
910
911 netxen_nic_update_cmd_producer(adapter, tx_ring);
912 netxen_nic_update_cmd_consumer(adapter, tx_ring);
905 } 913 }
906 914
907 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 915 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -1362,7 +1370,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1362 dma_addr_t temp_dma; 1370 dma_addr_t temp_dma;
1363 int i, k; 1371 int i, k;
1364 1372
1365 u32 producer, consumer; 1373 u32 producer;
1366 int frag_count, no_of_desc; 1374 int frag_count, no_of_desc;
1367 u32 num_txd = tx_ring->num_desc; 1375 u32 num_txd = tx_ring->num_desc;
1368 bool is_tso = false; 1376 bool is_tso = false;
@@ -1372,15 +1380,13 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1372 /* 4 fragments per cmd des */ 1380 /* 4 fragments per cmd des */
1373 no_of_desc = (frag_count + 3) >> 2; 1381 no_of_desc = (frag_count + 3) >> 2;
1374 1382
1375 producer = tx_ring->producer; 1383 if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) {
1376 smp_mb();
1377 consumer = tx_ring->sw_consumer;
1378 if ((no_of_desc+2) >= find_diff_among(producer, consumer, num_txd)) {
1379 netif_stop_queue(netdev); 1384 netif_stop_queue(netdev);
1380 smp_mb();
1381 return NETDEV_TX_BUSY; 1385 return NETDEV_TX_BUSY;
1382 } 1386 }
1383 1387
1388 producer = tx_ring->producer;
1389
1384 hwdesc = &tx_ring->desc_head[producer]; 1390 hwdesc = &tx_ring->desc_head[producer];
1385 netxen_clear_cmddesc((u64 *)hwdesc); 1391 netxen_clear_cmddesc((u64 *)hwdesc);
1386 pbuf = &tx_ring->cmd_buf_arr[producer]; 1392 pbuf = &tx_ring->cmd_buf_arr[producer];
@@ -1493,7 +1499,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1493 tx_ring->producer = producer; 1499 tx_ring->producer = producer;
1494 adapter->stats.txbytes += skb->len; 1500 adapter->stats.txbytes += skb->len;
1495 1501
1496 netxen_nic_update_cmd_producer(adapter, tx_ring, producer); 1502 netxen_nic_update_cmd_producer(adapter, tx_ring);
1497 1503
1498 adapter->stats.xmitcalled++; 1504 adapter->stats.xmitcalled++;
1499 1505
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index fa61a12c5e15..d2146d4a10f3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -6376,7 +6376,7 @@ static void niu_set_rx_mode(struct net_device *dev)
6376 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) 6376 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
6377 np->flags |= NIU_FLAGS_MCAST; 6377 np->flags |= NIU_FLAGS_MCAST;
6378 6378
6379 alt_cnt = dev->uc_count; 6379 alt_cnt = dev->uc.count;
6380 if (alt_cnt > niu_num_alt_addr(np)) { 6380 if (alt_cnt > niu_num_alt_addr(np)) {
6381 alt_cnt = 0; 6381 alt_cnt = 0;
6382 np->flags |= NIU_FLAGS_PROMISC; 6382 np->flags |= NIU_FLAGS_PROMISC;
@@ -6385,7 +6385,7 @@ static void niu_set_rx_mode(struct net_device *dev)
6385 if (alt_cnt) { 6385 if (alt_cnt) {
6386 int index = 0; 6386 int index = 0;
6387 6387
6388 list_for_each_entry(ha, &dev->uc_list, list) { 6388 list_for_each_entry(ha, &dev->uc.list, list) {
6389 err = niu_set_alt_mac(np, index, ha->addr); 6389 err = niu_set_alt_mac(np, index, ha->addr);
6390 if (err) 6390 if (err)
6391 printk(KERN_WARNING PFX "%s: Error %d " 6391 printk(KERN_WARNING PFX "%s: Error %d "
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index a2ece89622d6..eba937c46376 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(get_phy_device);
244 244
245/** 245/**
246 * phy_device_register - Register the phy device on the MDIO bus 246 * phy_device_register - Register the phy device on the MDIO bus
247 * @phy_device: phy_device structure to be added to the MDIO bus 247 * @phydev: phy_device structure to be added to the MDIO bus
248 */ 248 */
249int phy_device_register(struct phy_device *phydev) 249int phy_device_register(struct phy_device *phydev)
250{ 250{
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 35196faa084e..4e22462684c9 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3811,22 +3811,11 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
3811 3811
3812static void rtl8169_net_suspend(struct net_device *dev) 3812static void rtl8169_net_suspend(struct net_device *dev)
3813{ 3813{
3814 struct rtl8169_private *tp = netdev_priv(dev);
3815 void __iomem *ioaddr = tp->mmio_addr;
3816
3817 if (!netif_running(dev)) 3814 if (!netif_running(dev))
3818 return; 3815 return;
3819 3816
3820 netif_device_detach(dev); 3817 netif_device_detach(dev);
3821 netif_stop_queue(dev); 3818 netif_stop_queue(dev);
3822
3823 spin_lock_irq(&tp->lock);
3824
3825 rtl8169_asic_down(ioaddr);
3826
3827 rtl8169_rx_missed(dev, ioaddr);
3828
3829 spin_unlock_irq(&tp->lock);
3830} 3819}
3831 3820
3832#ifdef CONFIG_PM 3821#ifdef CONFIG_PM
@@ -3876,9 +3865,17 @@ static struct dev_pm_ops rtl8169_pm_ops = {
3876static void rtl_shutdown(struct pci_dev *pdev) 3865static void rtl_shutdown(struct pci_dev *pdev)
3877{ 3866{
3878 struct net_device *dev = pci_get_drvdata(pdev); 3867 struct net_device *dev = pci_get_drvdata(pdev);
3868 struct rtl8169_private *tp = netdev_priv(dev);
3869 void __iomem *ioaddr = tp->mmio_addr;
3879 3870
3880 rtl8169_net_suspend(dev); 3871 rtl8169_net_suspend(dev);
3881 3872
3873 spin_lock_irq(&tp->lock);
3874
3875 rtl8169_asic_down(ioaddr);
3876
3877 spin_unlock_irq(&tp->lock);
3878
3882 if (system_state == SYSTEM_POWER_OFF) { 3879 if (system_state == SYSTEM_POWER_OFF) {
3883 pci_wake_from_d3(pdev, true); 3880 pci_wake_from_d3(pdev, true);
3884 pci_set_power_state(pdev, PCI_D3hot); 3881 pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index e2247669a495..1f040e8a000b 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1281,7 +1281,7 @@ static u16 sis190_default_phy(struct net_device *dev)
1281 else if (phy_lan) 1281 else if (phy_lan)
1282 phy_default = phy_lan; 1282 phy_default = phy_lan;
1283 else 1283 else
1284 phy_default = list_entry(&tp->first_phy, 1284 phy_default = list_first_entry(&tp->first_phy,
1285 struct sis190_phy, list); 1285 struct sis190_phy, list);
1286 } 1286 }
1287 1287
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 6b5946fe8ae2..7681d28c53d7 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.22" 53#define DRV_VERSION "1.23"
54#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
55 55
56/* 56/*
@@ -65,9 +65,9 @@
65#define RX_DEF_PENDING RX_MAX_PENDING 65#define RX_DEF_PENDING RX_MAX_PENDING
66 66
67#define TX_RING_SIZE 512 67#define TX_RING_SIZE 512
68#define TX_DEF_PENDING (TX_RING_SIZE - 1) 68#define TX_DEF_PENDING 128
69#define TX_MIN_PENDING 64
70#define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS) 69#define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
70#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
71 71
72#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */ 72#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
73#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le)) 73#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
@@ -1151,7 +1151,14 @@ stopped:
1151 1151
1152 /* reset the Rx prefetch unit */ 1152 /* reset the Rx prefetch unit */
1153 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1153 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1154 mmiowb(); 1154
1155 /* Reset the RAM Buffer receive queue */
1156 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
1157
1158 /* Reset Rx MAC FIFO */
1159 sky2_write8(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), GMF_RST_SET);
1160
1161 sky2_read8(hw, B0_CTST);
1155} 1162}
1156 1163
1157/* Clean out receive buffer area, assumes receiver hardware stopped */ 1164/* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1169,6 +1176,7 @@ static void sky2_rx_clean(struct sky2_port *sky2)
1169 re->skb = NULL; 1176 re->skb = NULL;
1170 } 1177 }
1171 } 1178 }
1179 skb_queue_purge(&sky2->rx_recycle);
1172} 1180}
1173 1181
1174/* Basic MII support */ 1182/* Basic MII support */
@@ -1245,6 +1253,12 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
1245} 1253}
1246#endif 1254#endif
1247 1255
1256/* Amount of required worst case padding in rx buffer */
1257static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
1258{
1259 return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
1260}
1261
1248/* 1262/*
1249 * Allocate an skb for receiving. If the MTU is large enough 1263 * Allocate an skb for receiving. If the MTU is large enough
1250 * make the skb non-linear with a fragment list of pages. 1264 * make the skb non-linear with a fragment list of pages.
@@ -1254,6 +1268,13 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
1254 struct sk_buff *skb; 1268 struct sk_buff *skb;
1255 int i; 1269 int i;
1256 1270
1271 skb = __skb_dequeue(&sky2->rx_recycle);
1272 if (!skb)
1273 skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size
1274 + sky2_rx_pad(sky2->hw));
1275 if (!skb)
1276 goto nomem;
1277
1257 if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) { 1278 if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
1258 unsigned char *start; 1279 unsigned char *start;
1259 /* 1280 /*
@@ -1262,18 +1283,10 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
1262 * The buffer returned from netdev_alloc_skb is 1283 * The buffer returned from netdev_alloc_skb is
1263 * aligned except if slab debugging is enabled. 1284 * aligned except if slab debugging is enabled.
1264 */ 1285 */
1265 skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + 8);
1266 if (!skb)
1267 goto nomem;
1268 start = PTR_ALIGN(skb->data, 8); 1286 start = PTR_ALIGN(skb->data, 8);
1269 skb_reserve(skb, start - skb->data); 1287 skb_reserve(skb, start - skb->data);
1270 } else { 1288 } else
1271 skb = netdev_alloc_skb(sky2->netdev,
1272 sky2->rx_data_size + NET_IP_ALIGN);
1273 if (!skb)
1274 goto nomem;
1275 skb_reserve(skb, NET_IP_ALIGN); 1289 skb_reserve(skb, NET_IP_ALIGN);
1276 }
1277 1290
1278 for (i = 0; i < sky2->rx_nfrags; i++) { 1291 for (i = 0; i < sky2->rx_nfrags; i++) {
1279 struct page *page = alloc_page(GFP_ATOMIC); 1292 struct page *page = alloc_page(GFP_ATOMIC);
@@ -1350,6 +1363,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
1350 1363
1351 sky2->rx_data_size = size; 1364 sky2->rx_data_size = size;
1352 1365
1366 skb_queue_head_init(&sky2->rx_recycle);
1367
1353 /* Fill Rx ring */ 1368 /* Fill Rx ring */
1354 for (i = 0; i < sky2->rx_pending; i++) { 1369 for (i = 0; i < sky2->rx_pending; i++) {
1355 re = sky2->rx_ring + i; 1370 re = sky2->rx_ring + i;
@@ -1488,6 +1503,7 @@ static int sky2_up(struct net_device *dev)
1488 imask = sky2_read32(hw, B0_IMSK); 1503 imask = sky2_read32(hw, B0_IMSK);
1489 imask |= portirq_msk[port]; 1504 imask |= portirq_msk[port];
1490 sky2_write32(hw, B0_IMSK, imask); 1505 sky2_write32(hw, B0_IMSK, imask);
1506 sky2_read32(hw, B0_IMSK);
1491 1507
1492 sky2_set_multicast(dev); 1508 sky2_set_multicast(dev);
1493 1509
@@ -1756,14 +1772,22 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1756 } 1772 }
1757 1773
1758 if (le->ctrl & EOP) { 1774 if (le->ctrl & EOP) {
1775 struct sk_buff *skb = re->skb;
1776
1759 if (unlikely(netif_msg_tx_done(sky2))) 1777 if (unlikely(netif_msg_tx_done(sky2)))
1760 printk(KERN_DEBUG "%s: tx done %u\n", 1778 printk(KERN_DEBUG "%s: tx done %u\n",
1761 dev->name, idx); 1779 dev->name, idx);
1762 1780
1763 dev->stats.tx_packets++; 1781 dev->stats.tx_packets++;
1764 dev->stats.tx_bytes += re->skb->len; 1782 dev->stats.tx_bytes += skb->len;
1783
1784 if (skb_queue_len(&sky2->rx_recycle) < sky2->rx_pending
1785 && skb_recycle_check(skb, sky2->rx_data_size
1786 + sky2_rx_pad(sky2->hw)))
1787 __skb_queue_head(&sky2->rx_recycle, skb);
1788 else
1789 dev_kfree_skb_any(skb);
1765 1790
1766 dev_kfree_skb_any(re->skb);
1767 sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE); 1791 sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE);
1768 } 1792 }
1769 } 1793 }
@@ -1805,10 +1829,10 @@ static int sky2_down(struct net_device *dev)
1805 imask = sky2_read32(hw, B0_IMSK); 1829 imask = sky2_read32(hw, B0_IMSK);
1806 imask &= ~portirq_msk[port]; 1830 imask &= ~portirq_msk[port];
1807 sky2_write32(hw, B0_IMSK, imask); 1831 sky2_write32(hw, B0_IMSK, imask);
1832 sky2_read32(hw, B0_IMSK);
1808 1833
1809 synchronize_irq(hw->pdev->irq); 1834 /* Force flow control off */
1810 1835 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1811 sky2_gmac_reset(hw, port);
1812 1836
1813 /* Stop transmitter */ 1837 /* Stop transmitter */
1814 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP); 1838 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
@@ -1821,9 +1845,6 @@ static int sky2_down(struct net_device *dev)
1821 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); 1845 ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1822 gma_write16(hw, port, GM_GP_CTRL, ctrl); 1846 gma_write16(hw, port, GM_GP_CTRL, ctrl);
1823 1847
1824 /* Make sure no packets are pending */
1825 napi_synchronize(&hw->napi);
1826
1827 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 1848 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1828 1849
1829 /* Workaround shared GMAC reset */ 1850 /* Workaround shared GMAC reset */
@@ -1854,6 +1875,15 @@ static int sky2_down(struct net_device *dev)
1854 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 1875 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1855 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 1876 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1856 1877
1878 /* Force any delayed status interrrupt and NAPI */
1879 sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
1880 sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
1881 sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
1882 sky2_read8(hw, STAT_ISR_TIMER_CTRL);
1883
1884 synchronize_irq(hw->pdev->irq);
1885 napi_synchronize(&hw->napi);
1886
1857 sky2_phy_power_down(hw, port); 1887 sky2_phy_power_down(hw, port);
1858 1888
1859 /* turn off LED's */ 1889 /* turn off LED's */
@@ -2343,11 +2373,45 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2343 } 2373 }
2344} 2374}
2345 2375
2376static inline void sky2_skb_rx(const struct sky2_port *sky2,
2377 u32 status, struct sk_buff *skb)
2378{
2379#ifdef SKY2_VLAN_TAG_USED
2380 u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
2381 if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
2382 if (skb->ip_summed == CHECKSUM_NONE)
2383 vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
2384 else
2385 vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
2386 vlan_tag, skb);
2387 return;
2388 }
2389#endif
2390 if (skb->ip_summed == CHECKSUM_NONE)
2391 netif_receive_skb(skb);
2392 else
2393 napi_gro_receive(&sky2->hw->napi, skb);
2394}
2395
2396static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
2397 unsigned packets, unsigned bytes)
2398{
2399 if (packets) {
2400 struct net_device *dev = hw->dev[port];
2401
2402 dev->stats.rx_packets += packets;
2403 dev->stats.rx_bytes += bytes;
2404 dev->last_rx = jiffies;
2405 sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
2406 }
2407}
2408
2346/* Process status response ring */ 2409/* Process status response ring */
2347static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) 2410static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2348{ 2411{
2349 int work_done = 0; 2412 int work_done = 0;
2350 unsigned rx[2] = { 0, 0 }; 2413 unsigned int total_bytes[2] = { 0 };
2414 unsigned int total_packets[2] = { 0 };
2351 2415
2352 rmb(); 2416 rmb();
2353 do { 2417 do {
@@ -2374,7 +2438,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2374 le->opcode = 0; 2438 le->opcode = 0;
2375 switch (opcode & ~HW_OWNER) { 2439 switch (opcode & ~HW_OWNER) {
2376 case OP_RXSTAT: 2440 case OP_RXSTAT:
2377 ++rx[port]; 2441 total_packets[port]++;
2442 total_bytes[port] += length;
2378 skb = sky2_receive(dev, length, status); 2443 skb = sky2_receive(dev, length, status);
2379 if (unlikely(!skb)) { 2444 if (unlikely(!skb)) {
2380 dev->stats.rx_dropped++; 2445 dev->stats.rx_dropped++;
@@ -2392,18 +2457,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2392 } 2457 }
2393 2458
2394 skb->protocol = eth_type_trans(skb, dev); 2459 skb->protocol = eth_type_trans(skb, dev);
2395 dev->stats.rx_packets++;
2396 dev->stats.rx_bytes += skb->len;
2397 dev->last_rx = jiffies;
2398 2460
2399#ifdef SKY2_VLAN_TAG_USED 2461 sky2_skb_rx(sky2, status, skb);
2400 if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
2401 vlan_hwaccel_receive_skb(skb,
2402 sky2->vlgrp,
2403 be16_to_cpu(sky2->rx_tag));
2404 } else
2405#endif
2406 netif_receive_skb(skb);
2407 2462
2408 /* Stop after net poll weight */ 2463 /* Stop after net poll weight */
2409 if (++work_done >= to_do) 2464 if (++work_done >= to_do)
@@ -2473,11 +2528,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2473 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 2528 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2474 2529
2475exit_loop: 2530exit_loop:
2476 if (rx[0]) 2531 sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
2477 sky2_rx_update(netdev_priv(hw->dev[0]), Q_R1); 2532 sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);
2478
2479 if (rx[1])
2480 sky2_rx_update(netdev_priv(hw->dev[1]), Q_R2);
2481 2533
2482 return work_done; 2534 return work_done;
2483} 2535}
@@ -4364,6 +4416,22 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4364 goto err_out; 4416 goto err_out;
4365 } 4417 }
4366 4418
4419 /* Get configuration information
4420 * Note: only regular PCI config access once to test for HW issues
4421 * other PCI access through shared memory for speed and to
4422 * avoid MMCONFIG problems.
4423 */
4424 err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
4425 if (err) {
4426 dev_err(&pdev->dev, "PCI read config failed\n");
4427 goto err_out;
4428 }
4429
4430 if (~reg == 0) {
4431 dev_err(&pdev->dev, "PCI configuration read error\n");
4432 goto err_out;
4433 }
4434
4367 err = pci_request_regions(pdev, DRV_NAME); 4435 err = pci_request_regions(pdev, DRV_NAME);
4368 if (err) { 4436 if (err) {
4369 dev_err(&pdev->dev, "cannot obtain PCI resources\n"); 4437 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
@@ -4389,21 +4457,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4389 } 4457 }
4390 } 4458 }
4391 4459
4392 /* Get configuration information
4393 * Note: only regular PCI config access once to test for HW issues
4394 * other PCI access through shared memory for speed and to
4395 * avoid MMCONFIG problems.
4396 */
4397 err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
4398 if (err) {
4399 dev_err(&pdev->dev, "PCI read config failed\n");
4400 goto err_out_free_regions;
4401 }
4402
4403 /* size of available VPD, only impact sysfs */
4404 err = pci_vpd_truncate(pdev, 1ul << (((reg & PCI_VPD_ROM_SZ) >> 14) + 8));
4405 if (err)
4406 dev_warn(&pdev->dev, "Can't set VPD size\n");
4407 4460
4408#ifdef __BIG_ENDIAN 4461#ifdef __BIG_ENDIAN
4409 /* The sk98lin vendor driver uses hardware byte swapping but 4462 /* The sk98lin vendor driver uses hardware byte swapping but
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 92fb24b27d45..b5549c9e5107 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2028,6 +2028,7 @@ struct sky2_port {
2028 u16 rx_pending; 2028 u16 rx_pending;
2029 u16 rx_data_size; 2029 u16 rx_data_size;
2030 u16 rx_nfrags; 2030 u16 rx_nfrags;
2031 struct sk_buff_head rx_recycle;
2031 2032
2032#ifdef SKY2_VLAN_TAG_USED 2033#ifdef SKY2_VLAN_TAG_USED
2033 u16 rx_tag; 2034 u16 rx_tag;
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index e4255d829380..753a1fba4609 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -223,7 +223,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
223 if (!laddr) { 223 if (!laddr) {
224 printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name); 224 printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
225 dev_kfree_skb(skb); 225 dev_kfree_skb(skb);
226 return NETDEV_TX_BUSY 226 return NETDEV_TX_BUSY;
227 } 227 }
228 228
229 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ 229 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index e2f2e91cfdd2..40c6eba775ce 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -65,8 +65,6 @@
65 65
66static DEFINE_SPINLOCK(ugeth_lock); 66static DEFINE_SPINLOCK(ugeth_lock);
67 67
68static void uec_configure_serdes(struct net_device *dev);
69
70static struct { 68static struct {
71 u32 msg_enable; 69 u32 msg_enable;
72} debug = { -1 }; 70} debug = { -1 };
@@ -1536,6 +1534,49 @@ static void adjust_link(struct net_device *dev)
1536 spin_unlock_irqrestore(&ugeth->lock, flags); 1534 spin_unlock_irqrestore(&ugeth->lock, flags);
1537} 1535}
1538 1536
1537/* Initialize TBI PHY interface for communicating with the
1538 * SERDES lynx PHY on the chip. We communicate with this PHY
1539 * through the MDIO bus on each controller, treating it as a
1540 * "normal" PHY at the address found in the UTBIPA register. We assume
1541 * that the UTBIPA register is valid. Either the MDIO bus code will set
1542 * it to a value that doesn't conflict with other PHYs on the bus, or the
1543 * value doesn't matter, as there are no other PHYs on the bus.
1544 */
1545static void uec_configure_serdes(struct net_device *dev)
1546{
1547 struct ucc_geth_private *ugeth = netdev_priv(dev);
1548 struct ucc_geth_info *ug_info = ugeth->ug_info;
1549 struct phy_device *tbiphy;
1550
1551 if (!ug_info->tbi_node) {
1552 dev_warn(&dev->dev, "SGMII mode requires that the device "
1553 "tree specify a tbi-handle\n");
1554 return;
1555 }
1556
1557 tbiphy = of_phy_find_device(ug_info->tbi_node);
1558 if (!tbiphy) {
1559 dev_err(&dev->dev, "error: Could not get TBI device\n");
1560 return;
1561 }
1562
1563 /*
1564 * If the link is already up, we must already be ok, and don't need to
1565 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1566 * everything for us? Resetting it takes the link down and requires
1567 * several seconds for it to come back.
1568 */
1569 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
1570 return;
1571
1572 /* Single clk mode, mii mode off(for serdes communication) */
1573 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
1574
1575 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
1576
1577 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
1578}
1579
1539/* Configure the PHY for dev. 1580/* Configure the PHY for dev.
1540 * returns 0 if success. -1 if failure 1581 * returns 0 if success. -1 if failure
1541 */ 1582 */
@@ -1577,41 +1618,7 @@ static int init_phy(struct net_device *dev)
1577 return 0; 1618 return 0;
1578} 1619}
1579 1620
1580/* Initialize TBI PHY interface for communicating with the
1581 * SERDES lynx PHY on the chip. We communicate with this PHY
1582 * through the MDIO bus on each controller, treating it as a
1583 * "normal" PHY at the address found in the UTBIPA register. We assume
1584 * that the UTBIPA register is valid. Either the MDIO bus code will set
1585 * it to a value that doesn't conflict with other PHYs on the bus, or the
1586 * value doesn't matter, as there are no other PHYs on the bus.
1587 */
1588static void uec_configure_serdes(struct net_device *dev)
1589{
1590 struct ucc_geth_private *ugeth = netdev_priv(dev);
1591
1592 if (!ugeth->tbiphy) {
1593 printk(KERN_WARNING "SGMII mode requires that the device "
1594 "tree specify a tbi-handle\n");
1595 return;
1596 }
1597 1621
1598 /*
1599 * If the link is already up, we must already be ok, and don't need to
1600 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1601 * everything for us? Resetting it takes the link down and requires
1602 * several seconds for it to come back.
1603 */
1604 if (phy_read(ugeth->tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
1605 return;
1606
1607 /* Single clk mode, mii mode off(for serdes communication) */
1608 phy_write(ugeth->tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
1609
1610 phy_write(ugeth->tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
1611
1612 phy_write(ugeth->tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
1613
1614}
1615 1622
1616static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) 1623static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1617{ 1624{
@@ -3711,6 +3718,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3711 } 3718 }
3712 ug_info->phy_node = phy; 3719 ug_info->phy_node = phy;
3713 3720
3721 /* Find the TBI PHY node. If it's not there, we don't support SGMII */
3722 ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
3723
3714 /* get the phy interface type, or default to MII */ 3724 /* get the phy interface type, or default to MII */
3715 prop = of_get_property(np, "phy-connection-type", NULL); 3725 prop = of_get_property(np, "phy-connection-type", NULL);
3716 if (!prop) { 3726 if (!prop) {
@@ -3818,37 +3828,6 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3818 ugeth->ndev = dev; 3828 ugeth->ndev = dev;
3819 ugeth->node = np; 3829 ugeth->node = np;
3820 3830
3821 /* Find the TBI PHY. If it's not there, we don't support SGMII */
3822 ph = of_get_property(np, "tbi-handle", NULL);
3823 if (ph) {
3824 struct device_node *tbi = of_find_node_by_phandle(*ph);
3825 struct of_device *ofdev;
3826 struct mii_bus *bus;
3827 const unsigned int *id;
3828
3829 if (!tbi)
3830 return 0;
3831
3832 mdio = of_get_parent(tbi);
3833 if (!mdio)
3834 return 0;
3835
3836 ofdev = of_find_device_by_node(mdio);
3837
3838 of_node_put(mdio);
3839
3840 id = of_get_property(tbi, "reg", NULL);
3841 if (!id)
3842 return 0;
3843 of_node_put(tbi);
3844
3845 bus = dev_get_drvdata(&ofdev->dev);
3846 if (!bus)
3847 return 0;
3848
3849 ugeth->tbiphy = bus->phy_map[*id];
3850 }
3851
3852 return 0; 3831 return 0;
3853} 3832}
3854 3833
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 5beba4c14532..195ab267ead7 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1125,6 +1125,7 @@ struct ucc_geth_info {
1125 u16 pausePeriod; 1125 u16 pausePeriod;
1126 u16 extensionField; 1126 u16 extensionField;
1127 struct device_node *phy_node; 1127 struct device_node *phy_node;
1128 struct device_node *tbi_node;
1128 u8 weightfactor[NUM_TX_QUEUES]; 1129 u8 weightfactor[NUM_TX_QUEUES];
1129 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; 1130 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
1130 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; 1131 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
@@ -1213,7 +1214,6 @@ struct ucc_geth_private {
1213 1214
1214 struct ugeth_mii_info *mii_info; 1215 struct ugeth_mii_info *mii_info;
1215 struct phy_device *phydev; 1216 struct phy_device *phydev;
1216 struct phy_device *tbiphy;
1217 phy_interface_t phy_interface; 1217 phy_interface_t phy_interface;
1218 int max_speed; 1218 int max_speed;
1219 uint32_t msg_enable; 1219 uint32_t msg_enable;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e2a7725e567e..b02f7adff5dc 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -989,8 +989,10 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
989 if (ret < 0) 989 if (ret < 0)
990 goto err_iounmap; 990 goto err_iounmap;
991 991
992 if (velocity_get_link(dev)) 992 if (!velocity_get_link(dev)) {
993 netif_carrier_off(dev); 993 netif_carrier_off(dev);
994 vptr->mii_status |= VELOCITY_LINK_FAIL;
995 }
994 996
995 velocity_print_info(vptr); 997 velocity_print_info(vptr);
996 pci_set_drvdata(pdev, dev); 998 pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 52198f6797a4..2a6e81d5b579 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -709,7 +709,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
709 allmulti ? "en" : "dis"); 709 allmulti ? "en" : "dis");
710 710
711 /* MAC filter - use one buffer for both lists */ 711 /* MAC filter - use one buffer for both lists */
712 mac_data = buf = kzalloc(((dev->uc_count + dev->mc_count) * ETH_ALEN) + 712 mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
713 (2 * sizeof(mac_data->entries)), GFP_ATOMIC); 713 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
714 if (!buf) { 714 if (!buf) {
715 dev_warn(&dev->dev, "No memory for MAC address buffer\n"); 715 dev_warn(&dev->dev, "No memory for MAC address buffer\n");
@@ -719,16 +719,16 @@ static void virtnet_set_rx_mode(struct net_device *dev)
719 sg_init_table(sg, 2); 719 sg_init_table(sg, 2);
720 720
721 /* Store the unicast list and count in the front of the buffer */ 721 /* Store the unicast list and count in the front of the buffer */
722 mac_data->entries = dev->uc_count; 722 mac_data->entries = dev->uc.count;
723 i = 0; 723 i = 0;
724 list_for_each_entry(ha, &dev->uc_list, list) 724 list_for_each_entry(ha, &dev->uc.list, list)
725 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); 725 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
726 726
727 sg_set_buf(&sg[0], mac_data, 727 sg_set_buf(&sg[0], mac_data,
728 sizeof(mac_data->entries) + (dev->uc_count * ETH_ALEN)); 728 sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));
729 729
730 /* multicast list and count fill the end */ 730 /* multicast list and count fill the end */
731 mac_data = (void *)&mac_data->macs[dev->uc_count][0]; 731 mac_data = (void *)&mac_data->macs[dev->uc.count][0];
732 732
733 mac_data->entries = dev->mc_count; 733 mac_data->entries = dev->mc_count;
734 addr = dev->mc_list; 734 addr = dev->mc_list;
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 26cde573af43..58d2551c78ed 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -454,7 +454,7 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
454 return VXGE_HW_OK; 454 return VXGE_HW_OK;
455} 455}
456 456
457static enum vxge_hw_status 457enum vxge_hw_status
458__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev) 458__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
459{ 459{
460 if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION || 460 if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
@@ -676,10 +676,12 @@ enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
676{ 676{
677 enum vxge_hw_status status = VXGE_HW_OK; 677 enum vxge_hw_status status = VXGE_HW_OK;
678 678
679 /* Validate the pci-e link width and speed */ 679 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev)) {
680 status = __vxge_hw_verify_pci_e_info(hldev); 680 /* Validate the pci-e link width and speed */
681 if (status != VXGE_HW_OK) 681 status = __vxge_hw_verify_pci_e_info(hldev);
682 goto exit; 682 if (status != VXGE_HW_OK)
683 goto exit;
684 }
683 685
684 vxge_hw_wrr_rebalance(hldev); 686 vxge_hw_wrr_rebalance(hldev);
685exit: 687exit:
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 6c838b3e063a..6034497536a4 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4203,6 +4203,16 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4203 max_vpath_supported++; 4203 max_vpath_supported++;
4204 } 4204 }
4205 4205
4206 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4207 if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
4208 ll_config.device_hw_info.function_mode) &&
4209 (max_config_dev > 1) && (pdev->is_physfn)) {
4210 ret = pci_enable_sriov(pdev, max_config_dev - 1);
4211 if (ret)
4212 vxge_debug_ll_config(VXGE_ERR,
4213 "Failed to enable SRIOV: %d \n", ret);
4214 }
4215
4206 /* 4216 /*
4207 * Configure vpaths and get driver configured number of vpaths 4217 * Configure vpaths and get driver configured number of vpaths
4208 * which is less than or equal to the maximum vpaths per function. 4218 * which is less than or equal to the maximum vpaths per function.
@@ -4366,6 +4376,7 @@ _exit6:
4366 4376
4367 vxge_device_unregister(hldev); 4377 vxge_device_unregister(hldev);
4368_exit5: 4378_exit5:
4379 pci_disable_sriov(pdev);
4369 vxge_hw_device_terminate(hldev); 4380 vxge_hw_device_terminate(hldev);
4370_exit4: 4381_exit4:
4371 iounmap(attr.bar1); 4382 iounmap(attr.bar1);
@@ -4429,6 +4440,8 @@ vxge_remove(struct pci_dev *pdev)
4429 iounmap(vdev->bar0); 4440 iounmap(vdev->bar0);
4430 iounmap(vdev->bar1); 4441 iounmap(vdev->bar1);
4431 4442
4443 pci_disable_sriov(pdev);
4444
4432 /* we are safe to free it now */ 4445 /* we are safe to free it now */
4433 free_netdev(dev); 4446 free_netdev(dev);
4434 4447
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 7da02c545ed5..82786ffb7dd9 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
17 17
18#define VXGE_VERSION_MAJOR "2" 18#define VXGE_VERSION_MAJOR "2"
19#define VXGE_VERSION_MINOR "0" 19#define VXGE_VERSION_MINOR "0"
20#define VXGE_VERSION_FIX "1" 20#define VXGE_VERSION_FIX "4"
21#define VXGE_VERSION_BUILD "17129" 21#define VXGE_VERSION_BUILD "17795"
22#define VXGE_VERSION_FOR "k" 22#define VXGE_VERSION_FOR "k"
23#endif 23#endif
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 2dd78d20eb05..aff4f6bdf3d5 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -149,46 +149,40 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
149 */ 149 */
150static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev) 150static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev)
151{ 151{
152 int err = -ENODEV; 152 int err;
153 153
154 /* 154 /*
155 * Just to be *really* sure not to send anything if the interface 155 * Just to be *really* sure not to send anything if the interface
156 * is down, the ethernet device may have gone. 156 * is down, the ethernet device may have gone.
157 */ 157 */
158 if (!netif_running(dev)) { 158 if (!netif_running(dev))
159 goto drop; 159 goto drop;
160 }
161 160
162 switch (skb->data[0]) { 161 switch (skb->data[0]) {
163 case 0x00: 162 case 0x00:
164 err = 0;
165 break; 163 break;
166 case 0x01: 164 case 0x01:
167 if ((err = lapb_connect_request(dev)) != LAPB_OK) 165 if ((err = lapb_connect_request(dev)) != LAPB_OK)
168 printk(KERN_ERR "lapbeth: lapb_connect_request " 166 printk(KERN_ERR "lapbeth: lapb_connect_request "
169 "error: %d\n", err); 167 "error: %d\n", err);
170 goto drop_ok; 168 goto drop;
171 case 0x02: 169 case 0x02:
172 if ((err = lapb_disconnect_request(dev)) != LAPB_OK) 170 if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
173 printk(KERN_ERR "lapbeth: lapb_disconnect_request " 171 printk(KERN_ERR "lapbeth: lapb_disconnect_request "
174 "err: %d\n", err); 172 "err: %d\n", err);
175 /* Fall thru */ 173 /* Fall thru */
176 default: 174 default:
177 goto drop_ok; 175 goto drop;
178 } 176 }
179 177
180 skb_pull(skb, 1); 178 skb_pull(skb, 1);
181 179
182 if ((err = lapb_data_request(dev, skb)) != LAPB_OK) { 180 if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
183 printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err); 181 printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err);
184 err = -ENOMEM;
185 goto drop; 182 goto drop;
186 } 183 }
187 err = 0;
188out: 184out:
189 return err; 185 return NETDEV_TX_OK;
190drop_ok:
191 err = 0;
192drop: 186drop:
193 kfree_skb(skb); 187 kfree_skb(skb);
194 goto out; 188 goto out;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index ec35503f6a40..2942f13c9c4a 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -733,8 +733,9 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
733 /* 733 /*
734 * Set the beacon register and enable all timers. 734 * Set the beacon register and enable all timers.
735 */ 735 */
736 /* When in AP mode zero timer0 to start TSF */ 736 /* When in AP or Mesh Point mode zero timer0 to start TSF */
737 if (ah->ah_op_mode == NL80211_IFTYPE_AP) 737 if (ah->ah_op_mode == NL80211_IFTYPE_AP ||
738 ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT)
738 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); 739 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
739 740
740 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0); 741 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 0ed1ac312aa6..2d79610bce12 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -1,7 +1,6 @@
1config ATH9K 1config ATH9K
2 tristate "Atheros 802.11n wireless cards support" 2 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 3 depends on PCI && MAC80211 && WLAN_80211
4 depends on RFKILL || RFKILL=n
5 select ATH_COMMON 4 select ATH_COMMON
6 select MAC80211_LEDS 5 select MAC80211_LEDS
7 select LEDS_CLASS 6 select LEDS_CLASS
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 515880aa2116..5efc9345ca0d 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
21#include <linux/device.h> 21#include <linux/device.h>
22#include <net/mac80211.h> 22#include <net/mac80211.h>
23#include <linux/leds.h> 23#include <linux/leds.h>
24#include <linux/rfkill.h>
25 24
26#include "hw.h" 25#include "hw.h"
27#include "rc.h" 26#include "rc.h"
@@ -460,12 +459,6 @@ struct ath_led {
460 bool registered; 459 bool registered;
461}; 460};
462 461
463struct ath_rfkill {
464 struct rfkill *rfkill;
465 struct rfkill_ops ops;
466 char rfkill_name[32];
467};
468
469/********************/ 462/********************/
470/* Main driver core */ 463/* Main driver core */
471/********************/ 464/********************/
@@ -505,7 +498,6 @@ struct ath_rfkill {
505#define SC_OP_PROTECT_ENABLE BIT(6) 498#define SC_OP_PROTECT_ENABLE BIT(6)
506#define SC_OP_RXFLUSH BIT(7) 499#define SC_OP_RXFLUSH BIT(7)
507#define SC_OP_LED_ASSOCIATED BIT(8) 500#define SC_OP_LED_ASSOCIATED BIT(8)
508#define SC_OP_RFKILL_REGISTERED BIT(9)
509#define SC_OP_WAIT_FOR_BEACON BIT(12) 501#define SC_OP_WAIT_FOR_BEACON BIT(12)
510#define SC_OP_LED_ON BIT(13) 502#define SC_OP_LED_ON BIT(13)
511#define SC_OP_SCANNING BIT(14) 503#define SC_OP_SCANNING BIT(14)
@@ -591,7 +583,6 @@ struct ath_softc {
591 583
592 int beacon_interval; 584 int beacon_interval;
593 585
594 struct ath_rfkill rf_kill;
595 struct ath_ani ani; 586 struct ath_ani ani;
596 struct ath9k_node_stats nodestats; 587 struct ath9k_node_stats nodestats;
597#ifdef CONFIG_ATH9K_DEBUG 588#ifdef CONFIG_ATH9K_DEBUG
@@ -677,6 +668,7 @@ static inline void ath9k_ps_restore(struct ath_softc *sc)
677 if (atomic_dec_and_test(&sc->ps_usecount)) 668 if (atomic_dec_and_test(&sc->ps_usecount))
678 if ((sc->hw->conf.flags & IEEE80211_CONF_PS) && 669 if ((sc->hw->conf.flags & IEEE80211_CONF_PS) &&
679 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 670 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
671 SC_OP_WAIT_FOR_CAB |
680 SC_OP_WAIT_FOR_PSPOLL_DATA | 672 SC_OP_WAIT_FOR_PSPOLL_DATA |
681 SC_OP_WAIT_FOR_TX_ACK))) 673 SC_OP_WAIT_FOR_TX_ACK)))
682 ath9k_hw_setpower(sc->sc_ah, 674 ath9k_hw_setpower(sc->sc_ah,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1579c9407ed5..34935a8ee59d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2186,6 +2186,18 @@ static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan
2186 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); 2186 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
2187} 2187}
2188 2188
2189static void ath9k_enable_rfkill(struct ath_hw *ah)
2190{
2191 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
2192 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
2193
2194 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
2195 AR_GPIO_INPUT_MUX2_RFSILENT);
2196
2197 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
2198 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
2199}
2200
2189int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 2201int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2190 bool bChannelChange) 2202 bool bChannelChange)
2191{ 2203{
@@ -2313,10 +2325,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2313 ath9k_hw_init_interrupt_masks(ah, ah->opmode); 2325 ath9k_hw_init_interrupt_masks(ah, ah->opmode);
2314 ath9k_hw_init_qos(ah); 2326 ath9k_hw_init_qos(ah);
2315 2327
2316#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2317 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2328 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2318 ath9k_enable_rfkill(ah); 2329 ath9k_enable_rfkill(ah);
2319#endif 2330
2320 ath9k_hw_init_user_settings(ah); 2331 ath9k_hw_init_user_settings(ah);
2321 2332
2322 REG_WRITE(ah, AR_STA_ID1, 2333 REG_WRITE(ah, AR_STA_ID1,
@@ -3613,20 +3624,6 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
3613 AR_GPIO_BIT(gpio)); 3624 AR_GPIO_BIT(gpio));
3614} 3625}
3615 3626
3616#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
3617void ath9k_enable_rfkill(struct ath_hw *ah)
3618{
3619 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
3620 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
3621
3622 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
3623 AR_GPIO_INPUT_MUX2_RFSILENT);
3624
3625 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
3626 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
3627}
3628#endif
3629
3630u32 ath9k_hw_getdefantenna(struct ath_hw *ah) 3627u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
3631{ 3628{
3632 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7; 3629 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dd8508ef6e05..9d0b31ad4603 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -565,9 +565,6 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
565void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, 565void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
566 u32 ah_signal_type); 566 u32 ah_signal_type);
567void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); 567void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
568#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
569void ath9k_enable_rfkill(struct ath_hw *ah);
570#endif
571u32 ath9k_hw_getdefantenna(struct ath_hw *ah); 568u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
572void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); 569void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
573bool ath9k_hw_setantennaswitch(struct ath_hw *ah, 570bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f7baa406918b..9f49a3251d4d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -231,6 +231,19 @@ static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
231 } 231 }
232} 232}
233 233
234static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
235 struct ieee80211_hw *hw)
236{
237 struct ieee80211_channel *curchan = hw->conf.channel;
238 struct ath9k_channel *channel;
239 u8 chan_idx;
240
241 chan_idx = curchan->hw_value;
242 channel = &sc->sc_ah->channels[chan_idx];
243 ath9k_update_ichannel(sc, hw, channel);
244 return channel;
245}
246
234/* 247/*
235 * Set/change channels. If the channel is really being changed, it's done 248 * Set/change channels. If the channel is really being changed, it's done
236 * by reseting the chip. To accomplish this we must first cleanup any pending 249 * by reseting the chip. To accomplish this we must first cleanup any pending
@@ -283,7 +296,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
283 "reset status %d\n", 296 "reset status %d\n",
284 channel->center_freq, r); 297 channel->center_freq, r);
285 spin_unlock_bh(&sc->sc_resetlock); 298 spin_unlock_bh(&sc->sc_resetlock);
286 return r; 299 goto ps_restore;
287 } 300 }
288 spin_unlock_bh(&sc->sc_resetlock); 301 spin_unlock_bh(&sc->sc_resetlock);
289 302
@@ -292,14 +305,17 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
292 if (ath_startrecv(sc) != 0) { 305 if (ath_startrecv(sc) != 0) {
293 DPRINTF(sc, ATH_DBG_FATAL, 306 DPRINTF(sc, ATH_DBG_FATAL,
294 "Unable to restart recv logic\n"); 307 "Unable to restart recv logic\n");
295 return -EIO; 308 r = -EIO;
309 goto ps_restore;
296 } 310 }
297 311
298 ath_cache_conf_rate(sc, &hw->conf); 312 ath_cache_conf_rate(sc, &hw->conf);
299 ath_update_txpow(sc); 313 ath_update_txpow(sc);
300 ath9k_hw_set_interrupts(ah, sc->imask); 314 ath9k_hw_set_interrupts(ah, sc->imask);
315
316 ps_restore:
301 ath9k_ps_restore(sc); 317 ath9k_ps_restore(sc);
302 return 0; 318 return r;
303} 319}
304 320
305/* 321/*
@@ -1110,6 +1126,9 @@ void ath_radio_enable(struct ath_softc *sc)
1110 ath9k_ps_wakeup(sc); 1126 ath9k_ps_wakeup(sc);
1111 ath9k_hw_configpcipowersave(ah, 0); 1127 ath9k_hw_configpcipowersave(ah, 0);
1112 1128
1129 if (!ah->curchan)
1130 ah->curchan = ath_get_curchannel(sc, sc->hw);
1131
1113 spin_lock_bh(&sc->sc_resetlock); 1132 spin_lock_bh(&sc->sc_resetlock);
1114 r = ath9k_hw_reset(ah, ah->curchan, false); 1133 r = ath9k_hw_reset(ah, ah->curchan, false);
1115 if (r) { 1134 if (r) {
@@ -1162,6 +1181,9 @@ void ath_radio_disable(struct ath_softc *sc)
1162 ath_stoprecv(sc); /* turn off frame recv */ 1181 ath_stoprecv(sc); /* turn off frame recv */
1163 ath_flushrecv(sc); /* flush recv queue */ 1182 ath_flushrecv(sc); /* flush recv queue */
1164 1183
1184 if (!ah->curchan)
1185 ah->curchan = ath_get_curchannel(sc, sc->hw);
1186
1165 spin_lock_bh(&sc->sc_resetlock); 1187 spin_lock_bh(&sc->sc_resetlock);
1166 r = ath9k_hw_reset(ah, ah->curchan, false); 1188 r = ath9k_hw_reset(ah, ah->curchan, false);
1167 if (r) { 1189 if (r) {
@@ -1178,8 +1200,6 @@ void ath_radio_disable(struct ath_softc *sc)
1178 ath9k_ps_restore(sc); 1200 ath9k_ps_restore(sc);
1179} 1201}
1180 1202
1181#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1182
1183/*******************/ 1203/*******************/
1184/* Rfkill */ 1204/* Rfkill */
1185/*******************/ 1205/*******************/
@@ -1192,81 +1212,27 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
1192 ah->rfkill_polarity; 1212 ah->rfkill_polarity;
1193} 1213}
1194 1214
1195/* s/w rfkill handlers */ 1215static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
1196static int ath_rfkill_set_block(void *data, bool blocked)
1197{ 1216{
1198 struct ath_softc *sc = data; 1217 struct ath_wiphy *aphy = hw->priv;
1199 1218 struct ath_softc *sc = aphy->sc;
1200 if (blocked)
1201 ath_radio_disable(sc);
1202 else
1203 ath_radio_enable(sc);
1204
1205 return 0;
1206}
1207
1208static void ath_rfkill_poll_state(struct rfkill *rfkill, void *data)
1209{
1210 struct ath_softc *sc = data;
1211 bool blocked = !!ath_is_rfkill_set(sc); 1219 bool blocked = !!ath_is_rfkill_set(sc);
1212 1220
1213 if (rfkill_set_hw_state(rfkill, blocked)) 1221 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1222
1223 if (blocked)
1214 ath_radio_disable(sc); 1224 ath_radio_disable(sc);
1215 else 1225 else
1216 ath_radio_enable(sc); 1226 ath_radio_enable(sc);
1217} 1227}
1218 1228
1219/* Init s/w rfkill */ 1229static void ath_start_rfkill_poll(struct ath_softc *sc)
1220static int ath_init_sw_rfkill(struct ath_softc *sc)
1221{
1222 sc->rf_kill.ops.set_block = ath_rfkill_set_block;
1223 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1224 sc->rf_kill.ops.poll = ath_rfkill_poll_state;
1225
1226 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
1227 "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
1228
1229 sc->rf_kill.rfkill = rfkill_alloc(sc->rf_kill.rfkill_name,
1230 wiphy_dev(sc->hw->wiphy),
1231 RFKILL_TYPE_WLAN,
1232 &sc->rf_kill.ops, sc);
1233 if (!sc->rf_kill.rfkill) {
1234 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
1235 return -ENOMEM;
1236 }
1237
1238 return 0;
1239}
1240
1241/* Deinitialize rfkill */
1242static void ath_deinit_rfkill(struct ath_softc *sc)
1243{
1244 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
1245 rfkill_unregister(sc->rf_kill.rfkill);
1246 rfkill_destroy(sc->rf_kill.rfkill);
1247 sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
1248 }
1249}
1250
1251static int ath_start_rfkill_poll(struct ath_softc *sc)
1252{ 1230{
1253 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) { 1231 struct ath_hw *ah = sc->sc_ah;
1254 if (rfkill_register(sc->rf_kill.rfkill)) {
1255 DPRINTF(sc, ATH_DBG_FATAL,
1256 "Unable to register rfkill\n");
1257 rfkill_destroy(sc->rf_kill.rfkill);
1258
1259 /* Deinitialize the device */
1260 ath_cleanup(sc);
1261 return -EIO;
1262 } else {
1263 sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
1264 }
1265 }
1266 1232
1267 return 0; 1233 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1234 wiphy_rfkill_start_polling(sc->hw->wiphy);
1268} 1235}
1269#endif /* CONFIG_RFKILL */
1270 1236
1271void ath_cleanup(struct ath_softc *sc) 1237void ath_cleanup(struct ath_softc *sc)
1272{ 1238{
@@ -1286,9 +1252,6 @@ void ath_detach(struct ath_softc *sc)
1286 1252
1287 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n"); 1253 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
1288 1254
1289#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1290 ath_deinit_rfkill(sc);
1291#endif
1292 ath_deinit_leds(sc); 1255 ath_deinit_leds(sc);
1293 cancel_work_sync(&sc->chan_work); 1256 cancel_work_sync(&sc->chan_work);
1294 cancel_delayed_work_sync(&sc->wiphy_work); 1257 cancel_delayed_work_sync(&sc->wiphy_work);
@@ -1626,13 +1589,6 @@ int ath_attach(u16 devid, struct ath_softc *sc)
1626 if (error != 0) 1589 if (error != 0)
1627 goto error_attach; 1590 goto error_attach;
1628 1591
1629#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1630 /* Initialize s/w rfkill */
1631 error = ath_init_sw_rfkill(sc);
1632 if (error)
1633 goto error_attach;
1634#endif
1635
1636 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); 1592 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
1637 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); 1593 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
1638 sc->wiphy_scheduler_int = msecs_to_jiffies(500); 1594 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
@@ -1648,6 +1604,7 @@ int ath_attach(u16 devid, struct ath_softc *sc)
1648 /* Initialize LED control */ 1604 /* Initialize LED control */
1649 ath_init_leds(sc); 1605 ath_init_leds(sc);
1650 1606
1607 ath_start_rfkill_poll(sc);
1651 1608
1652 return 0; 1609 return 0;
1653 1610
@@ -1920,7 +1877,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
1920 struct ath_softc *sc = aphy->sc; 1877 struct ath_softc *sc = aphy->sc;
1921 struct ieee80211_channel *curchan = hw->conf.channel; 1878 struct ieee80211_channel *curchan = hw->conf.channel;
1922 struct ath9k_channel *init_channel; 1879 struct ath9k_channel *init_channel;
1923 int r, pos; 1880 int r;
1924 1881
1925 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with " 1882 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
1926 "initial channel: %d MHz\n", curchan->center_freq); 1883 "initial channel: %d MHz\n", curchan->center_freq);
@@ -1950,11 +1907,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
1950 1907
1951 /* setup initial channel */ 1908 /* setup initial channel */
1952 1909
1953 pos = curchan->hw_value; 1910 sc->chan_idx = curchan->hw_value;
1954 1911
1955 sc->chan_idx = pos; 1912 init_channel = ath_get_curchannel(sc, hw);
1956 init_channel = &sc->sc_ah->channels[pos];
1957 ath9k_update_ichannel(sc, hw, init_channel);
1958 1913
1959 /* Reset SERDES registers */ 1914 /* Reset SERDES registers */
1960 ath9k_hw_configpcipowersave(sc->sc_ah, 0); 1915 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
@@ -2018,10 +1973,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
2018 1973
2019 ieee80211_wake_queues(hw); 1974 ieee80211_wake_queues(hw);
2020 1975
2021#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2022 r = ath_start_rfkill_poll(sc);
2023#endif
2024
2025mutex_unlock: 1976mutex_unlock:
2026 mutex_unlock(&sc->mutex); 1977 mutex_unlock(&sc->mutex);
2027 1978
@@ -2159,7 +2110,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2159 } else 2110 } else
2160 sc->rx.rxlink = NULL; 2111 sc->rx.rxlink = NULL;
2161 2112
2162 rfkill_pause_polling(sc->rf_kill.rfkill); 2113 wiphy_rfkill_stop_polling(sc->hw->wiphy);
2163 2114
2164 /* disable HAL and put h/w to sleep */ 2115 /* disable HAL and put h/w to sleep */
2165 ath9k_hw_disable(sc->sc_ah); 2116 ath9k_hw_disable(sc->sc_ah);
@@ -2765,6 +2716,7 @@ struct ieee80211_ops ath9k_ops = {
2765 .ampdu_action = ath9k_ampdu_action, 2716 .ampdu_action = ath9k_ampdu_action,
2766 .sw_scan_start = ath9k_sw_scan_start, 2717 .sw_scan_start = ath9k_sw_scan_start,
2767 .sw_scan_complete = ath9k_sw_scan_complete, 2718 .sw_scan_complete = ath9k_sw_scan_complete,
2719 .rfkill_poll = ath9k_rfkill_poll_state,
2768}; 2720};
2769 2721
2770static struct { 2722static struct {
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 5014a19b0f75..f99f3a76df3f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -817,6 +817,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
817 } 817 }
818 818
819 if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 819 if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
820 SC_OP_WAIT_FOR_CAB |
820 SC_OP_WAIT_FOR_PSPOLL_DATA))) 821 SC_OP_WAIT_FOR_PSPOLL_DATA)))
821 ath_rx_ps(sc, skb); 822 ath_rx_ps(sc, skb);
822 823
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index a5637c4aa85d..6d1519e1f011 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2152,7 +2152,6 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
2152 /* we should be verifying the device is ready to be opened */ 2152 /* we should be verifying the device is ready to be opened */
2153 mutex_lock(&priv->mutex); 2153 mutex_lock(&priv->mutex);
2154 2154
2155 memset(&priv->staging_rxon, 0, sizeof(struct iwl_rxon_cmd));
2156 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 2155 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
2157 * ucode filename and max sizes are card-specific. */ 2156 * ucode filename and max sizes are card-specific. */
2158 2157
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index f9d16ca5b3d9..6ab07165ea28 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -629,13 +629,9 @@ u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
629 if (!sta_ht_inf->ht_supported) 629 if (!sta_ht_inf->ht_supported)
630 return 0; 630 return 0;
631 } 631 }
632 632 return iwl_is_channel_extension(priv, priv->band,
633 if (iwl_ht_conf->ht_protection & IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) 633 le16_to_cpu(priv->staging_rxon.channel),
634 return 1; 634 iwl_ht_conf->extension_chan_offset);
635 else
636 return iwl_is_channel_extension(priv, priv->band,
637 le16_to_cpu(priv->staging_rxon.channel),
638 iwl_ht_conf->extension_chan_offset);
639} 635}
640EXPORT_SYMBOL(iwl_is_fat_tx_allowed); 636EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
641 637
@@ -826,9 +822,18 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
826 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 822 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
827 if (iwl_is_fat_tx_allowed(priv, NULL)) { 823 if (iwl_is_fat_tx_allowed(priv, NULL)) {
828 /* pure 40 fat */ 824 /* pure 40 fat */
829 if (rxon->flags & RXON_FLG_FAT_PROT_MSK) 825 if (ht_info->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
830 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; 826 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
831 else { 827 /* Note: control channel is opposite of extension channel */
828 switch (ht_info->extension_chan_offset) {
829 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
830 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
831 break;
832 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
833 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
834 break;
835 }
836 } else {
832 /* Note: control channel is opposite of extension channel */ 837 /* Note: control channel is opposite of extension channel */
833 switch (ht_info->extension_chan_offset) { 838 switch (ht_info->extension_chan_offset) {
834 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 839 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
@@ -2390,39 +2395,46 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2390 priv->ibss_beacon = ieee80211_beacon_get(hw, vif); 2395 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
2391 } 2396 }
2392 2397
2393 if ((changes & BSS_CHANGED_BSSID) && !iwl_is_rfkill(priv)) { 2398 if (changes & BSS_CHANGED_BEACON_INT) {
2394 /* If there is currently a HW scan going on in the background 2399 priv->beacon_int = bss_conf->beacon_int;
2395 * then we need to cancel it else the RXON below will fail. */ 2400 /* TODO: in AP mode, do something to make this take effect */
2401 }
2402
2403 if (changes & BSS_CHANGED_BSSID) {
2404 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
2405
2406 /*
2407 * If there is currently a HW scan going on in the
2408 * background then we need to cancel it else the RXON
2409 * below/in post_associate will fail.
2410 */
2396 if (iwl_scan_cancel_timeout(priv, 100)) { 2411 if (iwl_scan_cancel_timeout(priv, 100)) {
2397 IWL_WARN(priv, "Aborted scan still in progress " 2412 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
2398 "after 100ms\n");
2399 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n"); 2413 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
2400 mutex_unlock(&priv->mutex); 2414 mutex_unlock(&priv->mutex);
2401 return; 2415 return;
2402 } 2416 }
2403 memcpy(priv->staging_rxon.bssid_addr, 2417
2404 bss_conf->bssid, ETH_ALEN); 2418 /* mac80211 only sets assoc when in STATION mode */
2405 2419 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
2406 /* TODO: Audit driver for usage of these members and see 2420 bss_conf->assoc) {
2407 * if mac80211 deprecates them (priv->bssid looks like it 2421 memcpy(priv->staging_rxon.bssid_addr,
2408 * shouldn't be there, but I haven't scanned the IBSS code 2422 bss_conf->bssid, ETH_ALEN);
2409 * to verify) - jpk */ 2423
2410 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); 2424 /* currently needed in a few places */
2411 2425 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2412 if (priv->iw_mode == NL80211_IFTYPE_AP) 2426 } else {
2413 iwlcore_config_ap(priv); 2427 priv->staging_rxon.filter_flags &=
2414 else { 2428 ~RXON_FILTER_ASSOC_MSK;
2415 int rc = iwlcore_commit_rxon(priv);
2416 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
2417 iwl_rxon_add_station(
2418 priv, priv->active_rxon.bssid_addr, 1);
2419 } 2429 }
2420 } else if (!iwl_is_rfkill(priv)) { 2430
2421 iwl_scan_cancel_timeout(priv, 100);
2422 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2423 iwlcore_commit_rxon(priv);
2424 } 2431 }
2425 2432
2433 /*
2434 * This needs to be after setting the BSSID in case
2435 * mac80211 decides to do both changes at once because
2436 * it will invoke post_associate.
2437 */
2426 if (priv->iw_mode == NL80211_IFTYPE_ADHOC && 2438 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2427 changes & BSS_CHANGED_BEACON) { 2439 changes & BSS_CHANGED_BEACON) {
2428 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2440 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
@@ -2431,8 +2443,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2431 iwl_mac_beacon_update(hw, beacon); 2443 iwl_mac_beacon_update(hw, beacon);
2432 } 2444 }
2433 2445
2434 mutex_unlock(&priv->mutex);
2435
2436 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 2446 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2437 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", 2447 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2438 bss_conf->use_short_preamble); 2448 bss_conf->use_short_preamble);
@@ -2450,6 +2460,23 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2450 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 2460 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2451 } 2461 }
2452 2462
2463 if (changes & BSS_CHANGED_BASIC_RATES) {
2464 /* XXX use this information
2465 *
2466 * To do that, remove code from iwl_set_rate() and put something
2467 * like this here:
2468 *
2469 if (A-band)
2470 priv->staging_rxon.ofdm_basic_rates =
2471 bss_conf->basic_rates;
2472 else
2473 priv->staging_rxon.ofdm_basic_rates =
2474 bss_conf->basic_rates >> 4;
2475 priv->staging_rxon.cck_basic_rates =
2476 bss_conf->basic_rates & 0xF;
2477 */
2478 }
2479
2453 if (changes & BSS_CHANGED_HT) { 2480 if (changes & BSS_CHANGED_HT) {
2454 iwl_ht_conf(priv, bss_conf); 2481 iwl_ht_conf(priv, bss_conf);
2455 2482
@@ -2459,10 +2486,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2459 2486
2460 if (changes & BSS_CHANGED_ASSOC) { 2487 if (changes & BSS_CHANGED_ASSOC) {
2461 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); 2488 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
2462 /* This should never happen as this function should
2463 * never be called from interrupt context. */
2464 if (WARN_ON_ONCE(in_interrupt()))
2465 return;
2466 if (bss_conf->assoc) { 2489 if (bss_conf->assoc) {
2467 priv->assoc_id = bss_conf->aid; 2490 priv->assoc_id = bss_conf->aid;
2468 priv->beacon_int = bss_conf->beacon_int; 2491 priv->beacon_int = bss_conf->beacon_int;
@@ -2470,27 +2493,35 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2470 priv->timestamp = bss_conf->timestamp; 2493 priv->timestamp = bss_conf->timestamp;
2471 priv->assoc_capability = bss_conf->assoc_capability; 2494 priv->assoc_capability = bss_conf->assoc_capability;
2472 2495
2473 /* we have just associated, don't start scan too early 2496 /*
2474 * leave time for EAPOL exchange to complete 2497 * We have just associated, don't start scan too early
2498 * leave time for EAPOL exchange to complete.
2499 *
2500 * XXX: do this in mac80211
2475 */ 2501 */
2476 priv->next_scan_jiffies = jiffies + 2502 priv->next_scan_jiffies = jiffies +
2477 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; 2503 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
2478 mutex_lock(&priv->mutex); 2504 if (!iwl_is_rfkill(priv))
2479 priv->cfg->ops->lib->post_associate(priv); 2505 priv->cfg->ops->lib->post_associate(priv);
2480 mutex_unlock(&priv->mutex); 2506 } else
2481 } else {
2482 priv->assoc_id = 0; 2507 priv->assoc_id = 0;
2483 IWL_DEBUG_MAC80211(priv, "DISASSOC %d\n", bss_conf->assoc); 2508
2509 }
2510
2511 if (changes && iwl_is_associated(priv) && priv->assoc_id) {
2512 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2513 changes);
2514 ret = iwl_send_rxon_assoc(priv);
2515 if (!ret) {
2516 /* Sync active_rxon with latest change. */
2517 memcpy((void *)&priv->active_rxon,
2518 &priv->staging_rxon,
2519 sizeof(struct iwl_rxon_cmd));
2484 } 2520 }
2485 } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
2486 IWL_DEBUG_MAC80211(priv, "Associated Changes %d\n", changes);
2487 ret = iwl_send_rxon_assoc(priv);
2488 if (!ret)
2489 /* Sync active_rxon with latest change. */
2490 memcpy((void *)&priv->active_rxon,
2491 &priv->staging_rxon,
2492 sizeof(struct iwl_rxon_cmd));
2493 } 2521 }
2522
2523 mutex_unlock(&priv->mutex);
2524
2494 IWL_DEBUG_MAC80211(priv, "leave\n"); 2525 IWL_DEBUG_MAC80211(priv, "leave\n");
2495} 2526}
2496EXPORT_SYMBOL(iwl_bss_info_changed); 2527EXPORT_SYMBOL(iwl_bss_info_changed);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 83d31606dd00..cb9bd4c8f25e 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -2498,8 +2498,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2498 struct iwl3945_rxon_cmd *active_rxon = 2498 struct iwl3945_rxon_cmd *active_rxon =
2499 (struct iwl3945_rxon_cmd *)(&priv->active_rxon); 2499 (struct iwl3945_rxon_cmd *)(&priv->active_rxon);
2500 2500
2501 memcpy(&priv->staging_rxon, &priv->active_rxon, 2501 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
2502 sizeof(priv->staging_rxon));
2503 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2502 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2504 } else { 2503 } else {
2505 /* Initialize our rx_config data */ 2504 /* Initialize our rx_config data */
@@ -3147,7 +3146,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3147 /* we should be verifying the device is ready to be opened */ 3146 /* we should be verifying the device is ready to be opened */
3148 mutex_lock(&priv->mutex); 3147 mutex_lock(&priv->mutex);
3149 3148
3150 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
3151 /* fetch ucode file from disk, alloc and copy to bus-master buffers ... 3149 /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
3152 * ucode filename and max sizes are card-specific. */ 3150 * ucode filename and max sizes are card-specific. */
3153 3151
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 06a46d7b3d6c..6564282ce476 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -812,7 +812,6 @@ out:
812static void if_spi_e2h(struct if_spi_card *card) 812static void if_spi_e2h(struct if_spi_card *card)
813{ 813{
814 int err = 0; 814 int err = 0;
815 unsigned long flags;
816 u32 cause; 815 u32 cause;
817 struct lbs_private *priv = card->priv; 816 struct lbs_private *priv = card->priv;
818 817
@@ -827,10 +826,7 @@ static void if_spi_e2h(struct if_spi_card *card)
827 /* generate a card interrupt */ 826 /* generate a card interrupt */
828 spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, IF_SPI_CIC_HOST_EVENT); 827 spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, IF_SPI_CIC_HOST_EVENT);
829 828
830 spin_lock_irqsave(&priv->driver_lock, flags);
831 lbs_queue_event(priv, cause & 0xff); 829 lbs_queue_event(priv, cause & 0xff);
832 spin_unlock_irqrestore(&priv->driver_lock, flags);
833
834out: 830out:
835 if (err) 831 if (err)
836 lbs_pr_err("%s: error %d\n", __func__, err); 832 lbs_pr_err("%s: error %d\n", __func__, err);
@@ -875,7 +871,12 @@ static int lbs_spi_thread(void *data)
875 err = if_spi_c2h_data(card); 871 err = if_spi_c2h_data(card);
876 if (err) 872 if (err)
877 goto err; 873 goto err;
878 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY) { 874
875 /* workaround: in PS mode, the card does not set the Command
876 * Download Ready bit, but it sets TX Download Ready. */
877 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
878 (card->priv->psstate != PS_STATE_FULL_POWER &&
879 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
879 /* This means two things. First of all, 880 /* This means two things. First of all,
880 * if there was a previous command sent, the card has 881 * if there was a previous command sent, the card has
881 * successfully received it. 882 * successfully received it.
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 3039fcb86afc..12403516776a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -99,11 +99,11 @@ void pci_update_resource(struct pci_dev *dev, int resno)
99int pci_claim_resource(struct pci_dev *dev, int resource) 99int pci_claim_resource(struct pci_dev *dev, int resource)
100{ 100{
101 struct resource *res = &dev->resource[resource]; 101 struct resource *res = &dev->resource[resource];
102 struct resource *root = NULL; 102 struct resource *root;
103 char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge"; 103 char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
104 int err; 104 int err;
105 105
106 root = pcibios_select_root(dev, res); 106 root = pci_find_parent_resource(dev, res);
107 107
108 err = -EINVAL; 108 err = -EINVAL;
109 if (root != NULL) 109 if (root != NULL)
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2faf0e14f05a..74909c4aaeea 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -177,7 +177,7 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
177static int dell_rfkill_set(void *data, bool blocked) 177static int dell_rfkill_set(void *data, bool blocked)
178{ 178{
179 struct calling_interface_buffer buffer; 179 struct calling_interface_buffer buffer;
180 int disable = blocked ? 0 : 1; 180 int disable = blocked ? 1 : 0;
181 unsigned long radio = (unsigned long)data; 181 unsigned long radio = (unsigned long)data;
182 182
183 memset(&buffer, 0, sizeof(struct calling_interface_buffer)); 183 memset(&buffer, 0, sizeof(struct calling_interface_buffer));
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e48d9a4506ff..dafaa4a92df5 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1133,8 +1133,9 @@ static void sony_nc_rfkill_update()
1133 continue; 1133 continue;
1134 1134
1135 if (hwblock) { 1135 if (hwblock) {
1136 if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) 1136 if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) {
1137 sony_nc_rfkill_set((void *)i, true); 1137 /* we already know we're blocked */
1138 }
1138 continue; 1139 continue;
1139 } 1140 }
1140 1141
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
new file mode 100644
index 000000000000..cc2eb8edb514
--- /dev/null
+++ b/drivers/pps/Kconfig
@@ -0,0 +1,33 @@
1#
2# PPS support configuration
3#
4
5menu "PPS support"
6
7config PPS
8 tristate "PPS support"
9 depends on EXPERIMENTAL
10 ---help---
11 PPS (Pulse Per Second) is a special pulse provided by some GPS
12 antennae. Userland can use it to get a high-precision time
13 reference.
14
15 Some antennae's PPS signals are connected with the CD (Carrier
16 Detect) pin of the serial line they use to communicate with the
17 host. In this case use the SERIAL_LINE client support.
18
19 Some antennae's PPS signals are connected with some special host
20 inputs so you have to enable the corresponding client support.
21
22 To compile this driver as a module, choose M here: the module
23 will be called pps_core.ko.
24
25config PPS_DEBUG
26 bool "PPS debugging messages"
27 depends on PPS
28 help
29 Say Y here if you want the PPS support to produce a bunch of debug
30 messages to the system log. Select this if you are having a
31 problem with PPS support and want to see more of what is going on.
32
33endmenu
diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile
new file mode 100644
index 000000000000..19ea582f431d
--- /dev/null
+++ b/drivers/pps/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the PPS core.
3#
4
5pps_core-y := pps.o kapi.o sysfs.o
6obj-$(CONFIG_PPS) := pps_core.o
7
8ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
new file mode 100644
index 000000000000..35a0b192d768
--- /dev/null
+++ b/drivers/pps/kapi.c
@@ -0,0 +1,329 @@
1/*
2 * kernel API
3 *
4 *
5 * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/sched.h>
27#include <linux/time.h>
28#include <linux/spinlock.h>
29#include <linux/idr.h>
30#include <linux/fs.h>
31#include <linux/pps_kernel.h>
32
33/*
34 * Global variables
35 */
36
37DEFINE_SPINLOCK(pps_idr_lock);
38DEFINE_IDR(pps_idr);
39
40/*
41 * Local functions
42 */
43
44static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset)
45{
46 ts->nsec += offset->nsec;
47 while (ts->nsec >= NSEC_PER_SEC) {
48 ts->nsec -= NSEC_PER_SEC;
49 ts->sec++;
50 }
51 while (ts->nsec < 0) {
52 ts->nsec += NSEC_PER_SEC;
53 ts->sec--;
54 }
55 ts->sec += offset->sec;
56}
57
58/*
59 * Exported functions
60 */
61
62/* pps_get_source - find a PPS source
63 * @source: the PPS source ID.
64 *
65 * This function is used to find an already registered PPS source into the
66 * system.
67 *
68 * The function returns NULL if found nothing, otherwise it returns a pointer
69 * to the PPS source data struct (the refcounter is incremented by 1).
70 */
71
72struct pps_device *pps_get_source(int source)
73{
74 struct pps_device *pps;
75 unsigned long flags;
76
77 spin_lock_irqsave(&pps_idr_lock, flags);
78
79 pps = idr_find(&pps_idr, source);
80 if (pps != NULL)
81 atomic_inc(&pps->usage);
82
83 spin_unlock_irqrestore(&pps_idr_lock, flags);
84
85 return pps;
86}
87
88/* pps_put_source - free the PPS source data
89 * @pps: a pointer to the PPS source.
90 *
91 * This function is used to free a PPS data struct if its refcount is 0.
92 */
93
94void pps_put_source(struct pps_device *pps)
95{
96 unsigned long flags;
97
98 spin_lock_irqsave(&pps_idr_lock, flags);
99 BUG_ON(atomic_read(&pps->usage) == 0);
100
101 if (!atomic_dec_and_test(&pps->usage)) {
102 pps = NULL;
103 goto exit;
104 }
105
106 /* No more reference to the PPS source. We can safely remove the
107 * PPS data struct.
108 */
109 idr_remove(&pps_idr, pps->id);
110
111exit:
112 spin_unlock_irqrestore(&pps_idr_lock, flags);
113 kfree(pps);
114}
115
116/* pps_register_source - add a PPS source in the system
117 * @info: the PPS info struct
118 * @default_params: the default PPS parameters of the new source
119 *
120 * This function is used to add a new PPS source in the system. The new
121 * source is described by info's fields and it will have, as default PPS
122 * parameters, the ones specified into default_params.
123 *
124 * The function returns, in case of success, the PPS source ID.
125 */
126
127int pps_register_source(struct pps_source_info *info, int default_params)
128{
129 struct pps_device *pps;
130 int id;
131 int err;
132
133 /* Sanity checks */
134 if ((info->mode & default_params) != default_params) {
135 printk(KERN_ERR "pps: %s: unsupported default parameters\n",
136 info->name);
137 err = -EINVAL;
138 goto pps_register_source_exit;
139 }
140 if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 &&
141 info->echo == NULL) {
142 printk(KERN_ERR "pps: %s: echo function is not defined\n",
143 info->name);
144 err = -EINVAL;
145 goto pps_register_source_exit;
146 }
147 if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
148 printk(KERN_ERR "pps: %s: unspecified time format\n",
149 info->name);
150 err = -EINVAL;
151 goto pps_register_source_exit;
152 }
153
154 /* Allocate memory for the new PPS source struct */
155 pps = kzalloc(sizeof(struct pps_device), GFP_KERNEL);
156 if (pps == NULL) {
157 err = -ENOMEM;
158 goto pps_register_source_exit;
159 }
160
161 /* These initializations must be done before calling idr_get_new()
162 * in order to avoid reces into pps_event().
163 */
164 pps->params.api_version = PPS_API_VERS;
165 pps->params.mode = default_params;
166 pps->info = *info;
167
168 init_waitqueue_head(&pps->queue);
169 spin_lock_init(&pps->lock);
170 atomic_set(&pps->usage, 1);
171
172 /* Get new ID for the new PPS source */
173 if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
174 err = -ENOMEM;
175 goto kfree_pps;
176 }
177
178 spin_lock_irq(&pps_idr_lock);
179
180 /* Now really allocate the PPS source.
181 * After idr_get_new() calling the new source will be freely available
182 * into the kernel.
183 */
184 err = idr_get_new(&pps_idr, pps, &id);
185 if (err < 0) {
186 spin_unlock_irq(&pps_idr_lock);
187 goto kfree_pps;
188 }
189
190 id = id & MAX_ID_MASK;
191 if (id >= PPS_MAX_SOURCES) {
192 spin_unlock_irq(&pps_idr_lock);
193
194 printk(KERN_ERR "pps: %s: too many PPS sources in the system\n",
195 info->name);
196 err = -EBUSY;
197 goto free_idr;
198 }
199 pps->id = id;
200
201 spin_unlock_irq(&pps_idr_lock);
202
203 /* Create the char device */
204 err = pps_register_cdev(pps);
205 if (err < 0) {
206 printk(KERN_ERR "pps: %s: unable to create char device\n",
207 info->name);
208 goto free_idr;
209 }
210
211 pr_info("new PPS source %s at ID %d\n", info->name, id);
212
213 return id;
214
215free_idr:
216 spin_lock_irq(&pps_idr_lock);
217 idr_remove(&pps_idr, id);
218 spin_unlock_irq(&pps_idr_lock);
219
220kfree_pps:
221 kfree(pps);
222
223pps_register_source_exit:
224 printk(KERN_ERR "pps: %s: unable to register source\n", info->name);
225
226 return err;
227}
228EXPORT_SYMBOL(pps_register_source);
229
230/* pps_unregister_source - remove a PPS source from the system
231 * @source: the PPS source ID
232 *
233 * This function is used to remove a previously registered PPS source from
234 * the system.
235 */
236
237void pps_unregister_source(int source)
238{
239 struct pps_device *pps;
240
241 spin_lock_irq(&pps_idr_lock);
242 pps = idr_find(&pps_idr, source);
243
244 if (!pps) {
245 BUG();
246 spin_unlock_irq(&pps_idr_lock);
247 return;
248 }
249 spin_unlock_irq(&pps_idr_lock);
250
251 pps_unregister_cdev(pps);
252 pps_put_source(pps);
253}
254EXPORT_SYMBOL(pps_unregister_source);
255
256/* pps_event - register a PPS event into the system
257 * @source: the PPS source ID
258 * @ts: the event timestamp
259 * @event: the event type
260 * @data: userdef pointer
261 *
262 * This function is used by each PPS client in order to register a new
263 * PPS event into the system (it's usually called inside an IRQ handler).
264 *
265 * If an echo function is associated with the PPS source it will be called
266 * as:
267 * pps->info.echo(source, event, data);
268 */
269
270void pps_event(int source, struct pps_ktime *ts, int event, void *data)
271{
272 struct pps_device *pps;
273 unsigned long flags;
274
275 if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) {
276 printk(KERN_ERR "pps: unknown event (%x) for source %d\n",
277 event, source);
278 return;
279 }
280
281 pps = pps_get_source(source);
282 if (!pps)
283 return;
284
285 pr_debug("PPS event on source %d at %llu.%06u\n",
286 pps->id, (unsigned long long) ts->sec, ts->nsec);
287
288 spin_lock_irqsave(&pps->lock, flags);
289
290 /* Must call the echo function? */
291 if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)))
292 pps->info.echo(source, event, data);
293
294 /* Check the event */
295 pps->current_mode = pps->params.mode;
296 if (event & PPS_CAPTUREASSERT) {
297 /* We have to add an offset? */
298 if (pps->params.mode & PPS_OFFSETASSERT)
299 pps_add_offset(ts, &pps->params.assert_off_tu);
300
301 /* Save the time stamp */
302 pps->assert_tu = *ts;
303 pps->assert_sequence++;
304 pr_debug("capture assert seq #%u for source %d\n",
305 pps->assert_sequence, source);
306 }
307 if (event & PPS_CAPTURECLEAR) {
308 /* We have to add an offset? */
309 if (pps->params.mode & PPS_OFFSETCLEAR)
310 pps_add_offset(ts, &pps->params.clear_off_tu);
311
312 /* Save the time stamp */
313 pps->clear_tu = *ts;
314 pps->clear_sequence++;
315 pr_debug("capture clear seq #%u for source %d\n",
316 pps->clear_sequence, source);
317 }
318
319 pps->go = ~0;
320 wake_up_interruptible(&pps->queue);
321
322 kill_fasync(&pps->async_queue, SIGIO, POLL_IN);
323
324 spin_unlock_irqrestore(&pps->lock, flags);
325
326 /* Now we can release the PPS source for (possible) deregistration */
327 pps_put_source(pps);
328}
329EXPORT_SYMBOL(pps_event);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
new file mode 100644
index 000000000000..ac8cc8cea1e3
--- /dev/null
+++ b/drivers/pps/pps.c
@@ -0,0 +1,312 @@
1/*
2 * PPS core file
3 *
4 *
5 * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/sched.h>
27#include <linux/uaccess.h>
28#include <linux/idr.h>
29#include <linux/cdev.h>
30#include <linux/poll.h>
31#include <linux/pps_kernel.h>
32
33/*
34 * Local variables
35 */
36
37static dev_t pps_devt;
38static struct class *pps_class;
39
40/*
41 * Char device methods
42 */
43
44static unsigned int pps_cdev_poll(struct file *file, poll_table *wait)
45{
46 struct pps_device *pps = file->private_data;
47
48 poll_wait(file, &pps->queue, wait);
49
50 return POLLIN | POLLRDNORM;
51}
52
53static int pps_cdev_fasync(int fd, struct file *file, int on)
54{
55 struct pps_device *pps = file->private_data;
56 return fasync_helper(fd, file, on, &pps->async_queue);
57}
58
59static long pps_cdev_ioctl(struct file *file,
60 unsigned int cmd, unsigned long arg)
61{
62 struct pps_device *pps = file->private_data;
63 struct pps_kparams params;
64 struct pps_fdata fdata;
65 unsigned long ticks;
66 void __user *uarg = (void __user *) arg;
67 int __user *iuarg = (int __user *) arg;
68 int err;
69
70 switch (cmd) {
71 case PPS_GETPARAMS:
72 pr_debug("PPS_GETPARAMS: source %d\n", pps->id);
73
74 /* Return current parameters */
75 err = copy_to_user(uarg, &pps->params,
76 sizeof(struct pps_kparams));
77 if (err)
78 return -EFAULT;
79
80 break;
81
82 case PPS_SETPARAMS:
83 pr_debug("PPS_SETPARAMS: source %d\n", pps->id);
84
85 /* Check the capabilities */
86 if (!capable(CAP_SYS_TIME))
87 return -EPERM;
88
89 err = copy_from_user(&params, uarg, sizeof(struct pps_kparams));
90 if (err)
91 return -EFAULT;
92 if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
93 pr_debug("capture mode unspecified (%x)\n",
94 params.mode);
95 return -EINVAL;
96 }
97
98 /* Check for supported capabilities */
99 if ((params.mode & ~pps->info.mode) != 0) {
100 pr_debug("unsupported capabilities (%x)\n",
101 params.mode);
102 return -EINVAL;
103 }
104
105 spin_lock_irq(&pps->lock);
106
107 /* Save the new parameters */
108 pps->params = params;
109
110 /* Restore the read only parameters */
111 if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
112 /* section 3.3 of RFC 2783 interpreted */
113 pr_debug("time format unspecified (%x)\n",
114 params.mode);
115 pps->params.mode |= PPS_TSFMT_TSPEC;
116 }
117 if (pps->info.mode & PPS_CANWAIT)
118 pps->params.mode |= PPS_CANWAIT;
119 pps->params.api_version = PPS_API_VERS;
120
121 spin_unlock_irq(&pps->lock);
122
123 break;
124
125 case PPS_GETCAP:
126 pr_debug("PPS_GETCAP: source %d\n", pps->id);
127
128 err = put_user(pps->info.mode, iuarg);
129 if (err)
130 return -EFAULT;
131
132 break;
133
134 case PPS_FETCH:
135 pr_debug("PPS_FETCH: source %d\n", pps->id);
136
137 err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
138 if (err)
139 return -EFAULT;
140
141 pps->go = 0;
142
143 /* Manage the timeout */
144 if (fdata.timeout.flags & PPS_TIME_INVALID)
145 err = wait_event_interruptible(pps->queue, pps->go);
146 else {
147 pr_debug("timeout %lld.%09d\n",
148 (long long) fdata.timeout.sec,
149 fdata.timeout.nsec);
150 ticks = fdata.timeout.sec * HZ;
151 ticks += fdata.timeout.nsec / (NSEC_PER_SEC / HZ);
152
153 if (ticks != 0) {
154 err = wait_event_interruptible_timeout(
155 pps->queue, pps->go, ticks);
156 if (err == 0)
157 return -ETIMEDOUT;
158 }
159 }
160
161 /* Check for pending signals */
162 if (err == -ERESTARTSYS) {
163 pr_debug("pending signal caught\n");
164 return -EINTR;
165 }
166
167 /* Return the fetched timestamp */
168 spin_lock_irq(&pps->lock);
169
170 fdata.info.assert_sequence = pps->assert_sequence;
171 fdata.info.clear_sequence = pps->clear_sequence;
172 fdata.info.assert_tu = pps->assert_tu;
173 fdata.info.clear_tu = pps->clear_tu;
174 fdata.info.current_mode = pps->current_mode;
175
176 spin_unlock_irq(&pps->lock);
177
178 err = copy_to_user(uarg, &fdata, sizeof(struct pps_fdata));
179 if (err)
180 return -EFAULT;
181
182 break;
183
184 default:
185 return -ENOTTY;
186 break;
187 }
188
189 return 0;
190}
191
192static int pps_cdev_open(struct inode *inode, struct file *file)
193{
194 struct pps_device *pps = container_of(inode->i_cdev,
195 struct pps_device, cdev);
196 int found;
197
198 found = pps_get_source(pps->id) != 0;
199 if (!found)
200 return -ENODEV;
201
202 file->private_data = pps;
203
204 return 0;
205}
206
207static int pps_cdev_release(struct inode *inode, struct file *file)
208{
209 struct pps_device *pps = file->private_data;
210
211 /* Free the PPS source and wake up (possible) deregistration */
212 pps_put_source(pps);
213
214 return 0;
215}
216
217/*
218 * Char device stuff
219 */
220
221static const struct file_operations pps_cdev_fops = {
222 .owner = THIS_MODULE,
223 .llseek = no_llseek,
224 .poll = pps_cdev_poll,
225 .fasync = pps_cdev_fasync,
226 .unlocked_ioctl = pps_cdev_ioctl,
227 .open = pps_cdev_open,
228 .release = pps_cdev_release,
229};
230
231int pps_register_cdev(struct pps_device *pps)
232{
233 int err;
234
235 pps->devno = MKDEV(MAJOR(pps_devt), pps->id);
236 cdev_init(&pps->cdev, &pps_cdev_fops);
237 pps->cdev.owner = pps->info.owner;
238
239 err = cdev_add(&pps->cdev, pps->devno, 1);
240 if (err) {
241 printk(KERN_ERR "pps: %s: failed to add char device %d:%d\n",
242 pps->info.name, MAJOR(pps_devt), pps->id);
243 return err;
244 }
245 pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
246 "pps%d", pps->id);
247 if (err)
248 goto del_cdev;
249 dev_set_drvdata(pps->dev, pps);
250
251 pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
252 MAJOR(pps_devt), pps->id);
253
254 return 0;
255
256del_cdev:
257 cdev_del(&pps->cdev);
258
259 return err;
260}
261
262void pps_unregister_cdev(struct pps_device *pps)
263{
264 device_destroy(pps_class, pps->devno);
265 cdev_del(&pps->cdev);
266}
267
268/*
269 * Module stuff
270 */
271
272static void __exit pps_exit(void)
273{
274 class_destroy(pps_class);
275 unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES);
276}
277
278static int __init pps_init(void)
279{
280 int err;
281
282 pps_class = class_create(THIS_MODULE, "pps");
283 if (!pps_class) {
284 printk(KERN_ERR "pps: failed to allocate class\n");
285 return -ENOMEM;
286 }
287 pps_class->dev_attrs = pps_attrs;
288
289 err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
290 if (err < 0) {
291 printk(KERN_ERR "pps: failed to allocate char device region\n");
292 goto remove_class;
293 }
294
295 pr_info("LinuxPPS API ver. %d registered\n", PPS_API_VERS);
296 pr_info("Software ver. %s - Copyright 2005-2007 Rodolfo Giometti "
297 "<giometti@linux.it>\n", PPS_VERSION);
298
299 return 0;
300
301remove_class:
302 class_destroy(pps_class);
303
304 return err;
305}
306
307subsys_initcall(pps_init);
308module_exit(pps_exit);
309
310MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
311MODULE_DESCRIPTION("LinuxPPS support (RFC 2783) - ver. " PPS_VERSION);
312MODULE_LICENSE("GPL");
diff --git a/drivers/pps/sysfs.c b/drivers/pps/sysfs.c
new file mode 100644
index 000000000000..ef0978c71eee
--- /dev/null
+++ b/drivers/pps/sysfs.c
@@ -0,0 +1,98 @@
1/*
2 * PPS sysfs support
3 *
4 *
5 * Copyright (C) 2007-2009 Rodolfo Giometti <giometti@linux.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22
23#include <linux/device.h>
24#include <linux/module.h>
25#include <linux/string.h>
26#include <linux/pps_kernel.h>
27
28/*
29 * Attribute functions
30 */
31
32static ssize_t pps_show_assert(struct device *dev,
33 struct device_attribute *attr, char *buf)
34{
35 struct pps_device *pps = dev_get_drvdata(dev);
36
37 if (!(pps->info.mode & PPS_CAPTUREASSERT))
38 return 0;
39
40 return sprintf(buf, "%lld.%09d#%d\n",
41 (long long) pps->assert_tu.sec, pps->assert_tu.nsec,
42 pps->assert_sequence);
43}
44
45static ssize_t pps_show_clear(struct device *dev,
46 struct device_attribute *attr, char *buf)
47{
48 struct pps_device *pps = dev_get_drvdata(dev);
49
50 if (!(pps->info.mode & PPS_CAPTURECLEAR))
51 return 0;
52
53 return sprintf(buf, "%lld.%09d#%d\n",
54 (long long) pps->clear_tu.sec, pps->clear_tu.nsec,
55 pps->clear_sequence);
56}
57
58static ssize_t pps_show_mode(struct device *dev,
59 struct device_attribute *attr, char *buf)
60{
61 struct pps_device *pps = dev_get_drvdata(dev);
62
63 return sprintf(buf, "%4x\n", pps->info.mode);
64}
65
66static ssize_t pps_show_echo(struct device *dev,
67 struct device_attribute *attr, char *buf)
68{
69 struct pps_device *pps = dev_get_drvdata(dev);
70
71 return sprintf(buf, "%d\n", !!pps->info.echo);
72}
73
74static ssize_t pps_show_name(struct device *dev,
75 struct device_attribute *attr, char *buf)
76{
77 struct pps_device *pps = dev_get_drvdata(dev);
78
79 return sprintf(buf, "%s\n", pps->info.name);
80}
81
82static ssize_t pps_show_path(struct device *dev,
83 struct device_attribute *attr, char *buf)
84{
85 struct pps_device *pps = dev_get_drvdata(dev);
86
87 return sprintf(buf, "%s\n", pps->info.path);
88}
89
90struct device_attribute pps_attrs[] = {
91 __ATTR(assert, S_IRUGO, pps_show_assert, NULL),
92 __ATTR(clear, S_IRUGO, pps_show_clear, NULL),
93 __ATTR(mode, S_IRUGO, pps_show_mode, NULL),
94 __ATTR(echo, S_IRUGO, pps_show_echo, NULL),
95 __ATTR(name, S_IRUGO, pps_show_name, NULL),
96 __ATTR(path, S_IRUGO, pps_show_path, NULL),
97 __ATTR_NULL,
98};
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 277d35d232fa..81adbdbd5042 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -296,6 +296,15 @@ config RTC_DRV_RX8581
296 This driver can also be built as a module. If so the module 296 This driver can also be built as a module. If so the module
297 will be called rtc-rx8581. 297 will be called rtc-rx8581.
298 298
299config RTC_DRV_RX8025
300 tristate "Epson RX-8025SA/NB"
301 help
302 If you say yes here you get support for the Epson
303 RX-8025SA/NB RTC chips.
304
305 This driver can also be built as a module. If so, the module
306 will be called rtc-rx8025.
307
299endif # I2C 308endif # I2C
300 309
301comment "SPI RTC drivers" 310comment "SPI RTC drivers"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 6c0639a14f09..3c0f2b2ac927 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
62obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o 62obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
63obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o 63obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
64obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o 64obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
65obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
65obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o 66obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
66obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o 67obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
67obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o 68obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 4348c4b0d453..4cdb31a362ca 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -371,19 +371,21 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
371 * @rtc: the rtc device 371 * @rtc: the rtc device
372 * @num: how many irqs are being reported (usually one) 372 * @num: how many irqs are being reported (usually one)
373 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF 373 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
374 * Context: in_interrupt(), irqs blocked 374 * Context: any
375 */ 375 */
376void rtc_update_irq(struct rtc_device *rtc, 376void rtc_update_irq(struct rtc_device *rtc,
377 unsigned long num, unsigned long events) 377 unsigned long num, unsigned long events)
378{ 378{
379 spin_lock(&rtc->irq_lock); 379 unsigned long flags;
380
381 spin_lock_irqsave(&rtc->irq_lock, flags);
380 rtc->irq_data = (rtc->irq_data + (num << 8)) | events; 382 rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
381 spin_unlock(&rtc->irq_lock); 383 spin_unlock_irqrestore(&rtc->irq_lock, flags);
382 384
383 spin_lock(&rtc->irq_task_lock); 385 spin_lock_irqsave(&rtc->irq_task_lock, flags);
384 if (rtc->irq_task) 386 if (rtc->irq_task)
385 rtc->irq_task->func(rtc->irq_task->private_data); 387 rtc->irq_task->func(rtc->irq_task->private_data);
386 spin_unlock(&rtc->irq_task_lock); 388 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
387 389
388 wake_up_interruptible(&rtc->irq_queue); 390 wake_up_interruptible(&rtc->irq_queue);
389 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN); 391 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 45152f4952d6..8a11de9552cd 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -60,8 +60,7 @@ static void rtc_uie_task(struct work_struct *work)
60 60
61 err = rtc_read_time(rtc, &tm); 61 err = rtc_read_time(rtc, &tm);
62 62
63 local_irq_disable(); 63 spin_lock_irq(&rtc->irq_lock);
64 spin_lock(&rtc->irq_lock);
65 if (rtc->stop_uie_polling || err) { 64 if (rtc->stop_uie_polling || err) {
66 rtc->uie_task_active = 0; 65 rtc->uie_task_active = 0;
67 } else if (rtc->oldsecs != tm.tm_sec) { 66 } else if (rtc->oldsecs != tm.tm_sec) {
@@ -74,10 +73,9 @@ static void rtc_uie_task(struct work_struct *work)
74 } else if (schedule_work(&rtc->uie_task) == 0) { 73 } else if (schedule_work(&rtc->uie_task) == 0) {
75 rtc->uie_task_active = 0; 74 rtc->uie_task_active = 0;
76 } 75 }
77 spin_unlock(&rtc->irq_lock); 76 spin_unlock_irq(&rtc->irq_lock);
78 if (num) 77 if (num)
79 rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF); 78 rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
80 local_irq_enable();
81} 79}
82static void rtc_uie_timer(unsigned long data) 80static void rtc_uie_timer(unsigned long data)
83{ 81{
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index fc372df6534b..8f410e59d9f5 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -499,10 +499,7 @@ static void ds1305_work(struct work_struct *work)
499 if (!test_bit(FLAG_EXITING, &ds1305->flags)) 499 if (!test_bit(FLAG_EXITING, &ds1305->flags))
500 enable_irq(spi->irq); 500 enable_irq(spi->irq);
501 501
502 /* rtc_update_irq() requires an IRQ-disabled context */
503 local_irq_disable();
504 rtc_update_irq(ds1305->rtc, 1, RTC_AF | RTC_IRQF); 502 rtc_update_irq(ds1305->rtc, 1, RTC_AF | RTC_IRQF);
505 local_irq_enable();
506} 503}
507 504
508/* 505/*
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 2c4a65302a9d..47a93c022d91 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -31,6 +31,8 @@ enum ds_type {
31 ds_1338, 31 ds_1338,
32 ds_1339, 32 ds_1339,
33 ds_1340, 33 ds_1340,
34 ds_1388,
35 ds_3231,
34 m41t00, 36 m41t00,
35 rx_8025, 37 rx_8025,
36 // rs5c372 too? different address... 38 // rs5c372 too? different address...
@@ -66,6 +68,7 @@ enum ds_type {
66#define DS1337_REG_CONTROL 0x0e 68#define DS1337_REG_CONTROL 0x0e
67# define DS1337_BIT_nEOSC 0x80 69# define DS1337_BIT_nEOSC 0x80
68# define DS1339_BIT_BBSQI 0x20 70# define DS1339_BIT_BBSQI 0x20
71# define DS3231_BIT_BBSQW 0x40 /* same as BBSQI */
69# define DS1337_BIT_RS2 0x10 72# define DS1337_BIT_RS2 0x10
70# define DS1337_BIT_RS1 0x08 73# define DS1337_BIT_RS1 0x08
71# define DS1337_BIT_INTCN 0x04 74# define DS1337_BIT_INTCN 0x04
@@ -94,6 +97,7 @@ enum ds_type {
94 97
95 98
96struct ds1307 { 99struct ds1307 {
100 u8 offset; /* register's offset */
97 u8 regs[11]; 101 u8 regs[11];
98 enum ds_type type; 102 enum ds_type type;
99 unsigned long flags; 103 unsigned long flags;
@@ -128,6 +132,9 @@ static const struct chip_desc chips[] = {
128}, 132},
129[ds_1340] = { 133[ds_1340] = {
130}, 134},
135[ds_3231] = {
136 .alarm = 1,
137},
131[m41t00] = { 138[m41t00] = {
132}, 139},
133[rx_8025] = { 140[rx_8025] = {
@@ -138,7 +145,9 @@ static const struct i2c_device_id ds1307_id[] = {
138 { "ds1337", ds_1337 }, 145 { "ds1337", ds_1337 },
139 { "ds1338", ds_1338 }, 146 { "ds1338", ds_1338 },
140 { "ds1339", ds_1339 }, 147 { "ds1339", ds_1339 },
148 { "ds1388", ds_1388 },
141 { "ds1340", ds_1340 }, 149 { "ds1340", ds_1340 },
150 { "ds3231", ds_3231 },
142 { "m41t00", m41t00 }, 151 { "m41t00", m41t00 },
143 { "rx8025", rx_8025 }, 152 { "rx8025", rx_8025 },
144 { } 153 { }
@@ -258,12 +267,7 @@ static void ds1307_work(struct work_struct *work)
258 control &= ~DS1337_BIT_A1IE; 267 control &= ~DS1337_BIT_A1IE;
259 i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control); 268 i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control);
260 269
261 /* rtc_update_irq() assumes that it is called
262 * from IRQ-disabled context.
263 */
264 local_irq_disable();
265 rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); 270 rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF);
266 local_irq_enable();
267 } 271 }
268 272
269out: 273out:
@@ -291,7 +295,7 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
291 295
292 /* read the RTC date and time registers all at once */ 296 /* read the RTC date and time registers all at once */
293 tmp = ds1307->read_block_data(ds1307->client, 297 tmp = ds1307->read_block_data(ds1307->client,
294 DS1307_REG_SECS, 7, ds1307->regs); 298 ds1307->offset, 7, ds1307->regs);
295 if (tmp != 7) { 299 if (tmp != 7) {
296 dev_err(dev, "%s error %d\n", "read", tmp); 300 dev_err(dev, "%s error %d\n", "read", tmp);
297 return -EIO; 301 return -EIO;
@@ -353,6 +357,7 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
353 switch (ds1307->type) { 357 switch (ds1307->type) {
354 case ds_1337: 358 case ds_1337:
355 case ds_1339: 359 case ds_1339:
360 case ds_3231:
356 buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; 361 buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
357 break; 362 break;
358 case ds_1340: 363 case ds_1340:
@@ -367,7 +372,8 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
367 "write", buf[0], buf[1], buf[2], buf[3], 372 "write", buf[0], buf[1], buf[2], buf[3],
368 buf[4], buf[5], buf[6]); 373 buf[4], buf[5], buf[6]);
369 374
370 result = ds1307->write_block_data(ds1307->client, 0, 7, buf); 375 result = ds1307->write_block_data(ds1307->client,
376 ds1307->offset, 7, buf);
371 if (result < 0) { 377 if (result < 0) {
372 dev_err(dev, "%s error %d\n", "write", result); 378 dev_err(dev, "%s error %d\n", "write", result);
373 return result; 379 return result;
@@ -624,6 +630,11 @@ static int __devinit ds1307_probe(struct i2c_client *client,
624 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 630 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
625 int want_irq = false; 631 int want_irq = false;
626 unsigned char *buf; 632 unsigned char *buf;
633 static const int bbsqi_bitpos[] = {
634 [ds_1337] = 0,
635 [ds_1339] = DS1339_BIT_BBSQI,
636 [ds_3231] = DS3231_BIT_BBSQW,
637 };
627 638
628 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA) 639 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)
629 && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) 640 && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
@@ -632,9 +643,12 @@ static int __devinit ds1307_probe(struct i2c_client *client,
632 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) 643 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
633 return -ENOMEM; 644 return -ENOMEM;
634 645
635 ds1307->client = client;
636 i2c_set_clientdata(client, ds1307); 646 i2c_set_clientdata(client, ds1307);
637 ds1307->type = id->driver_data; 647
648 ds1307->client = client;
649 ds1307->type = id->driver_data;
650 ds1307->offset = 0;
651
638 buf = ds1307->regs; 652 buf = ds1307->regs;
639 if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { 653 if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
640 ds1307->read_block_data = i2c_smbus_read_i2c_block_data; 654 ds1307->read_block_data = i2c_smbus_read_i2c_block_data;
@@ -647,6 +661,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
647 switch (ds1307->type) { 661 switch (ds1307->type) {
648 case ds_1337: 662 case ds_1337:
649 case ds_1339: 663 case ds_1339:
664 case ds_3231:
650 /* has IRQ? */ 665 /* has IRQ? */
651 if (ds1307->client->irq > 0 && chip->alarm) { 666 if (ds1307->client->irq > 0 && chip->alarm) {
652 INIT_WORK(&ds1307->work, ds1307_work); 667 INIT_WORK(&ds1307->work, ds1307_work);
@@ -666,12 +681,12 @@ static int __devinit ds1307_probe(struct i2c_client *client,
666 ds1307->regs[0] &= ~DS1337_BIT_nEOSC; 681 ds1307->regs[0] &= ~DS1337_BIT_nEOSC;
667 682
668 /* Using IRQ? Disable the square wave and both alarms. 683 /* Using IRQ? Disable the square wave and both alarms.
669 * For ds1339, be sure alarms can trigger when we're 684 * For some variants, be sure alarms can trigger when we're
670 * running on Vbackup (BBSQI); we assume ds1337 will 685 * running on Vbackup (BBSQI/BBSQW)
671 * ignore that bit
672 */ 686 */
673 if (want_irq) { 687 if (want_irq) {
674 ds1307->regs[0] |= DS1337_BIT_INTCN | DS1339_BIT_BBSQI; 688 ds1307->regs[0] |= DS1337_BIT_INTCN
689 | bbsqi_bitpos[ds1307->type];
675 ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE); 690 ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
676 } 691 }
677 692
@@ -751,6 +766,9 @@ static int __devinit ds1307_probe(struct i2c_client *client,
751 hour); 766 hour);
752 } 767 }
753 break; 768 break;
769 case ds_1388:
770 ds1307->offset = 1; /* Seconds starts at 1 */
771 break;
754 default: 772 default:
755 break; 773 break;
756 } 774 }
@@ -814,6 +832,8 @@ read_rtc:
814 case rx_8025: 832 case rx_8025:
815 case ds_1337: 833 case ds_1337:
816 case ds_1339: 834 case ds_1339:
835 case ds_1388:
836 case ds_3231:
817 break; 837 break;
818 } 838 }
819 839
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 4d32e328f6cd..32b27739ec2a 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -296,12 +296,7 @@ static void ds1374_work(struct work_struct *work)
296 control &= ~(DS1374_REG_CR_WACE | DS1374_REG_CR_AIE); 296 control &= ~(DS1374_REG_CR_WACE | DS1374_REG_CR_AIE);
297 i2c_smbus_write_byte_data(client, DS1374_REG_CR, control); 297 i2c_smbus_write_byte_data(client, DS1374_REG_CR, control);
298 298
299 /* rtc_update_irq() assumes that it is called
300 * from IRQ-disabled context.
301 */
302 local_irq_disable();
303 rtc_update_irq(ds1374->rtc, 1, RTC_AF | RTC_IRQF); 299 rtc_update_irq(ds1374->rtc, 1, RTC_AF | RTC_IRQF);
304 local_irq_enable();
305 } 300 }
306 301
307out: 302out:
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 38d472b63406..717288527c6b 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -329,8 +329,7 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
329 if (pdata->irq > 0) { 329 if (pdata->irq > 0) {
330 writeb(0, ioaddr + RTC_INTERRUPTS); 330 writeb(0, ioaddr + RTC_INTERRUPTS);
331 if (request_irq(pdata->irq, ds1553_rtc_interrupt, 331 if (request_irq(pdata->irq, ds1553_rtc_interrupt,
332 IRQF_DISABLED | IRQF_SHARED, 332 IRQF_DISABLED, pdev->name, pdev) < 0) {
333 pdev->name, pdev) < 0) {
334 dev_warn(&pdev->dev, "interrupt not available.\n"); 333 dev_warn(&pdev->dev, "interrupt not available.\n");
335 pdata->irq = 0; 334 pdata->irq = 0;
336 } 335 }
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 8bc8501bffc8..09249459e9a4 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -57,6 +57,7 @@ struct rtc_plat_data {
57 size_t size; 57 size_t size;
58 resource_size_t baseaddr; 58 resource_size_t baseaddr;
59 unsigned long last_jiffies; 59 unsigned long last_jiffies;
60 struct bin_attribute nvram_attr;
60}; 61};
61 62
62static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm) 63static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -157,18 +158,6 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj,
157 return count; 158 return count;
158} 159}
159 160
160static struct bin_attribute ds1742_nvram_attr = {
161 .attr = {
162 .name = "nvram",
163 .mode = S_IRUGO | S_IWUSR,
164 },
165 .read = ds1742_nvram_read,
166 .write = ds1742_nvram_write,
167 /* REVISIT: size in sysfs won't match actual size... if it's
168 * not a constant, each RTC should have its own attribute.
169 */
170};
171
172static int __devinit ds1742_rtc_probe(struct platform_device *pdev) 161static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
173{ 162{
174 struct rtc_device *rtc; 163 struct rtc_device *rtc;
@@ -199,6 +188,12 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
199 pdata->size_nvram = pdata->size - RTC_SIZE; 188 pdata->size_nvram = pdata->size - RTC_SIZE;
200 pdata->ioaddr_rtc = ioaddr + pdata->size_nvram; 189 pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
201 190
191 pdata->nvram_attr.attr.name = "nvram";
192 pdata->nvram_attr.attr.mode = S_IRUGO | S_IWUSR;
193 pdata->nvram_attr.read = ds1742_nvram_read;
194 pdata->nvram_attr.write = ds1742_nvram_write;
195 pdata->nvram_attr.size = pdata->size_nvram;
196
202 /* turn RTC on if it was not on */ 197 /* turn RTC on if it was not on */
203 ioaddr = pdata->ioaddr_rtc; 198 ioaddr = pdata->ioaddr_rtc;
204 sec = readb(ioaddr + RTC_SECONDS); 199 sec = readb(ioaddr + RTC_SECONDS);
@@ -221,11 +216,13 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
221 pdata->rtc = rtc; 216 pdata->rtc = rtc;
222 pdata->last_jiffies = jiffies; 217 pdata->last_jiffies = jiffies;
223 platform_set_drvdata(pdev, pdata); 218 platform_set_drvdata(pdev, pdata);
224 ds1742_nvram_attr.size = max(ds1742_nvram_attr.size, 219
225 pdata->size_nvram); 220 ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
226 ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr); 221 if (ret) {
227 if (ret) 222 dev_err(&pdev->dev, "creating nvram file in sysfs failed\n");
228 goto out; 223 goto out;
224 }
225
229 return 0; 226 return 0;
230 out: 227 out:
231 if (pdata->rtc) 228 if (pdata->rtc)
@@ -242,7 +239,7 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
242{ 239{
243 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 240 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
244 241
245 sysfs_remove_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr); 242 sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
246 rtc_device_unregister(pdata->rtc); 243 rtc_device_unregister(pdata->rtc);
247 iounmap(pdata->ioaddr_nvram); 244 iounmap(pdata->ioaddr_nvram);
248 release_mem_region(pdata->baseaddr, pdata->size); 245 release_mem_region(pdata->baseaddr, pdata->size);
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
new file mode 100644
index 000000000000..b1a29bcfdf13
--- /dev/null
+++ b/drivers/rtc/rtc-rx8025.c
@@ -0,0 +1,688 @@
1/*
2 * Driver for Epson's RTC module RX-8025 SA/NB
3 *
4 * Copyright (C) 2009 Wolfgang Grandegger <wg@grandegger.com>
5 *
6 * Copyright (C) 2005 by Digi International Inc.
7 * All rights reserved.
8 *
9 * Modified by fengjh at rising.com.cn
10 * <http://lists.lm-sensors.org/mailman/listinfo/lm-sensors>
11 * 2006.11
12 *
13 * Code cleanup by Sergei Poselenov, <sposelenov@emcraft.com>
14 * Converted to new style by Wolfgang Grandegger <wg@grandegger.com>
15 * Alarm and periodic interrupt added by Dmitry Rakhchev <rda@emcraft.com>
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * version 2 as published by the Free Software Foundation.
20 */
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/bcd.h>
25#include <linux/i2c.h>
26#include <linux/list.h>
27#include <linux/rtc.h>
28
29/* Register definitions */
30#define RX8025_REG_SEC 0x00
31#define RX8025_REG_MIN 0x01
32#define RX8025_REG_HOUR 0x02
33#define RX8025_REG_WDAY 0x03
34#define RX8025_REG_MDAY 0x04
35#define RX8025_REG_MONTH 0x05
36#define RX8025_REG_YEAR 0x06
37#define RX8025_REG_DIGOFF 0x07
38#define RX8025_REG_ALWMIN 0x08
39#define RX8025_REG_ALWHOUR 0x09
40#define RX8025_REG_ALWWDAY 0x0a
41#define RX8025_REG_ALDMIN 0x0b
42#define RX8025_REG_ALDHOUR 0x0c
43/* 0x0d is reserved */
44#define RX8025_REG_CTRL1 0x0e
45#define RX8025_REG_CTRL2 0x0f
46
47#define RX8025_BIT_CTRL1_CT (7 << 0)
48/* 1 Hz periodic level irq */
49#define RX8025_BIT_CTRL1_CT_1HZ 4
50#define RX8025_BIT_CTRL1_TEST (1 << 3)
51#define RX8025_BIT_CTRL1_1224 (1 << 5)
52#define RX8025_BIT_CTRL1_DALE (1 << 6)
53#define RX8025_BIT_CTRL1_WALE (1 << 7)
54
55#define RX8025_BIT_CTRL2_DAFG (1 << 0)
56#define RX8025_BIT_CTRL2_WAFG (1 << 1)
57#define RX8025_BIT_CTRL2_CTFG (1 << 2)
58#define RX8025_BIT_CTRL2_PON (1 << 4)
59#define RX8025_BIT_CTRL2_XST (1 << 5)
60#define RX8025_BIT_CTRL2_VDET (1 << 6)
61
62/* Clock precision adjustment */
63#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */
64#define RX8025_ADJ_DATA_MAX 62
65#define RX8025_ADJ_DATA_MIN -62
66
67static const struct i2c_device_id rx8025_id[] = {
68 { "rx8025", 0 },
69 { }
70};
71MODULE_DEVICE_TABLE(i2c, rx8025_id);
72
73struct rx8025_data {
74 struct i2c_client *client;
75 struct rtc_device *rtc;
76 struct work_struct work;
77 u8 ctrl1;
78 unsigned exiting:1;
79};
80
81static int rx8025_read_reg(struct i2c_client *client, int number, u8 *value)
82{
83 int ret = i2c_smbus_read_byte_data(client, (number << 4) | 0x08);
84
85 if (ret < 0) {
86 dev_err(&client->dev, "Unable to read register #%d\n", number);
87 return ret;
88 }
89
90 *value = ret;
91 return 0;
92}
93
94static int rx8025_read_regs(struct i2c_client *client,
95 int number, u8 length, u8 *values)
96{
97 int ret = i2c_smbus_read_i2c_block_data(client, (number << 4) | 0x08,
98 length, values);
99
100 if (ret != length) {
101 dev_err(&client->dev, "Unable to read registers #%d..#%d\n",
102 number, number + length - 1);
103 return ret < 0 ? ret : -EIO;
104 }
105
106 return 0;
107}
108
109static int rx8025_write_reg(struct i2c_client *client, int number, u8 value)
110{
111 int ret = i2c_smbus_write_byte_data(client, number << 4, value);
112
113 if (ret)
114 dev_err(&client->dev, "Unable to write register #%d\n",
115 number);
116
117 return ret;
118}
119
120static int rx8025_write_regs(struct i2c_client *client,
121 int number, u8 length, u8 *values)
122{
123 int ret = i2c_smbus_write_i2c_block_data(client, (number << 4) | 0x08,
124 length, values);
125
126 if (ret)
127 dev_err(&client->dev, "Unable to write registers #%d..#%d\n",
128 number, number + length - 1);
129
130 return ret;
131}
132
133static irqreturn_t rx8025_irq(int irq, void *dev_id)
134{
135 struct i2c_client *client = dev_id;
136 struct rx8025_data *rx8025 = i2c_get_clientdata(client);
137
138 disable_irq_nosync(irq);
139 schedule_work(&rx8025->work);
140 return IRQ_HANDLED;
141}
142
143static void rx8025_work(struct work_struct *work)
144{
145 struct rx8025_data *rx8025 = container_of(work, struct rx8025_data,
146 work);
147 struct i2c_client *client = rx8025->client;
148 struct mutex *lock = &rx8025->rtc->ops_lock;
149 u8 status;
150
151 mutex_lock(lock);
152
153 if (rx8025_read_reg(client, RX8025_REG_CTRL2, &status))
154 goto out;
155
156 if (!(status & RX8025_BIT_CTRL2_XST))
157 dev_warn(&client->dev, "Oscillation stop was detected,"
158 "you may have to readjust the clock\n");
159
160 if (status & RX8025_BIT_CTRL2_CTFG) {
161 /* periodic */
162 status &= ~RX8025_BIT_CTRL2_CTFG;
163 local_irq_disable();
164 rtc_update_irq(rx8025->rtc, 1, RTC_PF | RTC_IRQF);
165 local_irq_enable();
166 }
167
168 if (status & RX8025_BIT_CTRL2_DAFG) {
169 /* alarm */
170 status &= RX8025_BIT_CTRL2_DAFG;
171 if (rx8025_write_reg(client, RX8025_REG_CTRL1,
172 rx8025->ctrl1 & ~RX8025_BIT_CTRL1_DALE))
173 goto out;
174 local_irq_disable();
175 rtc_update_irq(rx8025->rtc, 1, RTC_AF | RTC_IRQF);
176 local_irq_enable();
177 }
178
179 /* acknowledge IRQ */
180 rx8025_write_reg(client, RX8025_REG_CTRL2,
181 status | RX8025_BIT_CTRL2_XST);
182
183out:
184 if (!rx8025->exiting)
185 enable_irq(client->irq);
186
187 mutex_unlock(lock);
188}
189
190static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
191{
192 struct rx8025_data *rx8025 = dev_get_drvdata(dev);
193 u8 date[7];
194 int err;
195
196 err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date);
197 if (err)
198 return err;
199
200 dev_dbg(dev, "%s: read 0x%02x 0x%02x "
201 "0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", __func__,
202 date[0], date[1], date[2], date[3], date[4],
203 date[5], date[6]);
204
205 dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f);
206 dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f);
207 if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
208 dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f);
209 else
210 dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12
211 + (date[RX8025_REG_HOUR] & 0x20 ? 12 : 0);
212
213 dt->tm_mday = bcd2bin(date[RX8025_REG_MDAY] & 0x3f);
214 dt->tm_mon = bcd2bin(date[RX8025_REG_MONTH] & 0x1f) - 1;
215 dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]);
216
217 if (dt->tm_year < 70)
218 dt->tm_year += 100;
219
220 dev_dbg(dev, "%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
221 dt->tm_sec, dt->tm_min, dt->tm_hour,
222 dt->tm_mday, dt->tm_mon, dt->tm_year);
223
224 return rtc_valid_tm(dt);
225}
226
227static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
228{
229 struct rx8025_data *rx8025 = dev_get_drvdata(dev);
230 u8 date[7];
231
232 /*
233 * BUG: The HW assumes every year that is a multiple of 4 to be a leap
234 * year. Next time this is wrong is 2100, which will not be a leap
235 * year.
236 */
237
238 /*
239 * Here the read-only bits are written as "0". I'm not sure if that
240 * is sound.
241 */
242 date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec);
243 date[RX8025_REG_MIN] = bin2bcd(dt->tm_min);
244 if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
245 date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour);
246 else
247 date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0)
248 | bin2bcd((dt->tm_hour + 11) % 12 + 1);
249
250 date[RX8025_REG_WDAY] = bin2bcd(dt->tm_wday);
251 date[RX8025_REG_MDAY] = bin2bcd(dt->tm_mday);
252 date[RX8025_REG_MONTH] = bin2bcd(dt->tm_mon + 1);
253 date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year % 100);
254
255 dev_dbg(dev,
256 "%s: write 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
257 __func__,
258 date[0], date[1], date[2], date[3], date[4], date[5], date[6]);
259
260 return rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
261}
262
263static int rx8025_init_client(struct i2c_client *client, int *need_reset)
264{
265 struct rx8025_data *rx8025 = i2c_get_clientdata(client);
266 u8 ctrl[2], ctrl2;
267 int need_clear = 0;
268 int err;
269
270 err = rx8025_read_regs(rx8025->client, RX8025_REG_CTRL1, 2, ctrl);
271 if (err)
272 goto out;
273
274 /* Keep test bit zero ! */
275 rx8025->ctrl1 = ctrl[0] & ~RX8025_BIT_CTRL1_TEST;
276
277 if (ctrl[1] & RX8025_BIT_CTRL2_PON) {
278 dev_warn(&client->dev, "power-on reset was detected, "
279 "you may have to readjust the clock\n");
280 *need_reset = 1;
281 }
282
283 if (ctrl[1] & RX8025_BIT_CTRL2_VDET) {
284 dev_warn(&client->dev, "a power voltage drop was detected, "
285 "you may have to readjust the clock\n");
286 *need_reset = 1;
287 }
288
289 if (!(ctrl[1] & RX8025_BIT_CTRL2_XST)) {
290 dev_warn(&client->dev, "Oscillation stop was detected,"
291 "you may have to readjust the clock\n");
292 *need_reset = 1;
293 }
294
295 if (ctrl[1] & (RX8025_BIT_CTRL2_DAFG | RX8025_BIT_CTRL2_WAFG)) {
296 dev_warn(&client->dev, "Alarm was detected\n");
297 need_clear = 1;
298 }
299
300 if (!(ctrl[1] & RX8025_BIT_CTRL2_CTFG))
301 need_clear = 1;
302
303 if (*need_reset || need_clear) {
304 ctrl2 = ctrl[0];
305 ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET |
306 RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG |
307 RX8025_BIT_CTRL2_DAFG);
308 ctrl2 |= RX8025_BIT_CTRL2_XST;
309
310 err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
311 }
312out:
313 return err;
314}
315
316/* Alarm support */
317static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
318{
319 struct rx8025_data *rx8025 = dev_get_drvdata(dev);
320 struct i2c_client *client = rx8025->client;
321 u8 ctrl2, ald[2];
322 int err;
323
324 if (client->irq <= 0)
325 return -EINVAL;
326
327 err = rx8025_read_regs(client, RX8025_REG_ALDMIN, 2, ald);
328 if (err)
329 return err;
330
331 err = rx8025_read_reg(client, RX8025_REG_CTRL2, &ctrl2);
332 if (err)
333 return err;
334
335 dev_dbg(dev, "%s: read alarm 0x%02x 0x%02x ctrl2 %02x\n",
336 __func__, ald[0], ald[1], ctrl2);
337
338 /* Hardware alarms precision is 1 minute! */
339 t->time.tm_sec = 0;
340 t->time.tm_min = bcd2bin(ald[0] & 0x7f);
341 if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
342 t->time.tm_hour = bcd2bin(ald[1] & 0x3f);
343 else
344 t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
345 + (ald[1] & 0x20 ? 12 : 0);
346
347 t->time.tm_wday = -1;
348 t->time.tm_mday = -1;
349 t->time.tm_mon = -1;
350 t->time.tm_year = -1;
351
352 dev_dbg(dev, "%s: date: %ds %dm %dh %dmd %dm %dy\n",
353 __func__,
354 t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
355 t->time.tm_mday, t->time.tm_mon, t->time.tm_year);
356 t->enabled = !!(rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE);
357 t->pending = (ctrl2 & RX8025_BIT_CTRL2_DAFG) && t->enabled;
358
359 return err;
360}
361
362static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
363{
364 struct i2c_client *client = to_i2c_client(dev);
365 struct rx8025_data *rx8025 = dev_get_drvdata(dev);
366 u8 ald[2];
367 int err;
368
369 if (client->irq <= 0)
370 return -EINVAL;
371
372 /* Hardware alarm precision is 1 minute! */
373 ald[0] = bin2bcd(t->time.tm_min);
374 if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
375 ald[1] = bin2bcd(t->time.tm_hour);
376 else
377 ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0)
378 | bin2bcd((t->time.tm_hour + 11) % 12 + 1);
379
380 dev_dbg(dev, "%s: write 0x%02x 0x%02x\n", __func__, ald[0], ald[1]);
381
382 if (rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE) {
383 rx8025->ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
384 err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
385 rx8025->ctrl1);
386 if (err)
387 return err;
388 }
389 err = rx8025_write_regs(rx8025->client, RX8025_REG_ALDMIN, 2, ald);
390 if (err)
391 return err;
392
393 if (t->enabled) {
394 rx8025->ctrl1 |= RX8025_BIT_CTRL1_DALE;
395 err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
396 rx8025->ctrl1);
397 if (err)
398 return err;
399 }
400
401 return 0;
402}
403
404static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
405{
406 struct rx8025_data *rx8025 = dev_get_drvdata(dev);
407 u8 ctrl1;
408 int err;
409
410 ctrl1 = rx8025->ctrl1;
411 if (enabled)
412 ctrl1 |= RX8025_BIT_CTRL1_DALE;
413 else
414 ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
415
416 if (ctrl1 != rx8025->ctrl1) {
417 rx8025->ctrl1 = ctrl1;
418 err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
419 rx8025->ctrl1);
420 if (err)
421 return err;
422 }
423 return 0;
424}
425
426static int rx8025_irq_set_state(struct device *dev, int enabled)
427{
428 struct i2c_client *client = to_i2c_client(dev);
429 struct rx8025_data *rx8025 = i2c_get_clientdata(client);
430 int ctrl1;
431 int err;
432
433 if (client->irq <= 0)
434 return -ENXIO;
435
436 ctrl1 = rx8025->ctrl1 & ~RX8025_BIT_CTRL1_CT;
437 if (enabled)
438 ctrl1 |= RX8025_BIT_CTRL1_CT_1HZ;
439 if (ctrl1 != rx8025->ctrl1) {
440 rx8025->ctrl1 = ctrl1;
441 err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
442 rx8025->ctrl1);
443 if (err)
444 return err;
445 }
446
447 return 0;
448}
449
450static struct rtc_class_ops rx8025_rtc_ops = {
451 .read_time = rx8025_get_time,
452 .set_time = rx8025_set_time,
453 .read_alarm = rx8025_read_alarm,
454 .set_alarm = rx8025_set_alarm,
455 .alarm_irq_enable = rx8025_alarm_irq_enable,
456 .irq_set_state = rx8025_irq_set_state,
457};
458
459/*
460 * Clock precision adjustment support
461 *
462 * According to the RX8025 SA/NB application manual the frequency and
463 * temperature charateristics can be approximated using the following
464 * equation:
465 *
466 * df = a * (ut - t)**2
467 *
468 * df: Frequency deviation in any temperature
469 * a : Coefficient = (-35 +-5) * 10**-9
470 * ut: Ultimate temperature in degree = +25 +-5 degree
471 * t : Any temperature in degree
472 *
473 * Note that the clock adjustment in ppb must be entered (which is
474 * the negative value of the deviation).
475 */
476static int rx8025_get_clock_adjust(struct device *dev, int *adj)
477{
478 struct i2c_client *client = to_i2c_client(dev);
479 u8 digoff;
480 int err;
481
482 err = rx8025_read_reg(client, RX8025_REG_DIGOFF, &digoff);
483 if (err)
484 return err;
485
486 *adj = digoff >= 64 ? digoff - 128 : digoff;
487 if (*adj > 0)
488 (*adj)--;
489 *adj *= -RX8025_ADJ_RESOLUTION;
490
491 return 0;
492}
493
494static int rx8025_set_clock_adjust(struct device *dev, int adj)
495{
496 struct i2c_client *client = to_i2c_client(dev);
497 u8 digoff;
498 int err;
499
500 adj /= -RX8025_ADJ_RESOLUTION;
501 if (adj > RX8025_ADJ_DATA_MAX)
502 adj = RX8025_ADJ_DATA_MAX;
503 else if (adj < RX8025_ADJ_DATA_MIN)
504 adj = RX8025_ADJ_DATA_MIN;
505 else if (adj > 0)
506 adj++;
507 else if (adj < 0)
508 adj += 128;
509 digoff = adj;
510
511 err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff);
512 if (err)
513 return err;
514
515 dev_dbg(dev, "%s: write 0x%02x\n", __func__, digoff);
516
517 return 0;
518}
519
520static ssize_t rx8025_sysfs_show_clock_adjust(struct device *dev,
521 struct device_attribute *attr,
522 char *buf)
523{
524 int err, adj;
525
526 err = rx8025_get_clock_adjust(dev, &adj);
527 if (err)
528 return err;
529
530 return sprintf(buf, "%d\n", adj);
531}
532
533static ssize_t rx8025_sysfs_store_clock_adjust(struct device *dev,
534 struct device_attribute *attr,
535 const char *buf, size_t count)
536{
537 int adj, err;
538
539 if (sscanf(buf, "%i", &adj) != 1)
540 return -EINVAL;
541
542 err = rx8025_set_clock_adjust(dev, adj);
543
544 return err ? err : count;
545}
546
547static DEVICE_ATTR(clock_adjust_ppb, S_IRUGO | S_IWUSR,
548 rx8025_sysfs_show_clock_adjust,
549 rx8025_sysfs_store_clock_adjust);
550
551static int rx8025_sysfs_register(struct device *dev)
552{
553 return device_create_file(dev, &dev_attr_clock_adjust_ppb);
554}
555
556static void rx8025_sysfs_unregister(struct device *dev)
557{
558 device_remove_file(dev, &dev_attr_clock_adjust_ppb);
559}
560
561static int __devinit rx8025_probe(struct i2c_client *client,
562 const struct i2c_device_id *id)
563{
564 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
565 struct rx8025_data *rx8025;
566 int err, need_reset = 0;
567
568 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
569 | I2C_FUNC_SMBUS_I2C_BLOCK)) {
570 dev_err(&adapter->dev,
571 "doesn't support required functionality\n");
572 err = -EIO;
573 goto errout;
574 }
575
576 rx8025 = kzalloc(sizeof(*rx8025), GFP_KERNEL);
577 if (!rx8025) {
578 dev_err(&adapter->dev, "failed to alloc memory\n");
579 err = -ENOMEM;
580 goto errout;
581 }
582
583 rx8025->client = client;
584 i2c_set_clientdata(client, rx8025);
585 INIT_WORK(&rx8025->work, rx8025_work);
586
587 err = rx8025_init_client(client, &need_reset);
588 if (err)
589 goto errout_free;
590
591 if (need_reset) {
592 struct rtc_time tm;
593 dev_info(&client->dev,
594 "bad conditions detected, resetting date\n");
595 rtc_time_to_tm(0, &tm); /* 1970/1/1 */
596 rx8025_set_time(&client->dev, &tm);
597 }
598
599 rx8025->rtc = rtc_device_register(client->name, &client->dev,
600 &rx8025_rtc_ops, THIS_MODULE);
601 if (IS_ERR(rx8025->rtc)) {
602 err = PTR_ERR(rx8025->rtc);
603 dev_err(&client->dev, "unable to register the class device\n");
604 goto errout_free;
605 }
606
607 if (client->irq > 0) {
608 dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
609 err = request_irq(client->irq, rx8025_irq,
610 0, "rx8025", client);
611 if (err) {
612 dev_err(&client->dev, "unable to request IRQ\n");
613 goto errout_reg;
614 }
615 }
616
617 rx8025->rtc->irq_freq = 1;
618 rx8025->rtc->max_user_freq = 1;
619
620 err = rx8025_sysfs_register(&client->dev);
621 if (err)
622 goto errout_irq;
623
624 return 0;
625
626errout_irq:
627 if (client->irq > 0)
628 free_irq(client->irq, client);
629
630errout_reg:
631 rtc_device_unregister(rx8025->rtc);
632
633errout_free:
634 i2c_set_clientdata(client, NULL);
635 kfree(rx8025);
636
637errout:
638 dev_err(&adapter->dev, "probing for rx8025 failed\n");
639 return err;
640}
641
642static int __devexit rx8025_remove(struct i2c_client *client)
643{
644 struct rx8025_data *rx8025 = i2c_get_clientdata(client);
645 struct mutex *lock = &rx8025->rtc->ops_lock;
646
647 if (client->irq > 0) {
648 mutex_lock(lock);
649 rx8025->exiting = 1;
650 mutex_unlock(lock);
651
652 free_irq(client->irq, client);
653 flush_scheduled_work();
654 }
655
656 rx8025_sysfs_unregister(&client->dev);
657 rtc_device_unregister(rx8025->rtc);
658 i2c_set_clientdata(client, NULL);
659 kfree(rx8025);
660 return 0;
661}
662
663static struct i2c_driver rx8025_driver = {
664 .driver = {
665 .name = "rtc-rx8025",
666 .owner = THIS_MODULE,
667 },
668 .probe = rx8025_probe,
669 .remove = __devexit_p(rx8025_remove),
670 .id_table = rx8025_id,
671};
672
673static int __init rx8025_init(void)
674{
675 return i2c_add_driver(&rx8025_driver);
676}
677
678static void __exit rx8025_exit(void)
679{
680 i2c_del_driver(&rx8025_driver);
681}
682
683MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
684MODULE_DESCRIPTION("RX-8025 SA/NB RTC driver");
685MODULE_LICENSE("GPL");
686
687module_init(rx8025_init);
688module_exit(rx8025_exit);
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index e478280ff628..51725f7755b0 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -93,7 +93,6 @@ static ssize_t test_irq_store(struct device *dev,
93 struct rtc_device *rtc = platform_get_drvdata(plat_dev); 93 struct rtc_device *rtc = platform_get_drvdata(plat_dev);
94 94
95 retval = count; 95 retval = count;
96 local_irq_disable();
97 if (strncmp(buf, "tick", 4) == 0) 96 if (strncmp(buf, "tick", 4) == 0)
98 rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF); 97 rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF);
99 else if (strncmp(buf, "alarm", 5) == 0) 98 else if (strncmp(buf, "alarm", 5) == 0)
@@ -102,7 +101,6 @@ static ssize_t test_irq_store(struct device *dev,
102 rtc_update_irq(rtc, 1, RTC_UF | RTC_IRQF); 101 rtc_update_irq(rtc, 1, RTC_UF | RTC_IRQF);
103 else 102 else
104 retval = -EINVAL; 103 retval = -EINVAL;
105 local_irq_enable();
106 104
107 return retval; 105 return retval;
108} 106}
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 4ee4857ff207..4a6ed1104fbb 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -261,10 +261,8 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
261 261
262 tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP); 262 tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
263 if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt, 263 if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt,
264 IRQF_DISABLED | IRQF_SHARED, 264 IRQF_DISABLED, pdev->name, &pdev->dev) < 0)
265 pdev->name, &pdev->dev) < 0) {
266 return -EBUSY; 265 return -EBUSY;
267 }
268 rtc = rtc_device_register(pdev->name, &pdev->dev, 266 rtc = rtc_device_register(pdev->name, &pdev->dev,
269 &tx4939_rtc_ops, THIS_MODULE); 267 &tx4939_rtc_ops, THIS_MODULE);
270 if (IS_ERR(rtc)) 268 if (IS_ERR(rtc))
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 81d7f268418a..691cecd03b83 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -655,7 +655,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
655 for (dm = dev->mc_list; dm; dm = dm->next) 655 for (dm = dev->mc_list; dm; dm = dm->next)
656 qeth_l2_add_mc(card, dm->da_addr, 0); 656 qeth_l2_add_mc(card, dm->da_addr, 0);
657 657
658 list_for_each_entry(ha, &dev->uc_list, list) 658 list_for_each_entry(ha, &dev->uc.list, list)
659 qeth_l2_add_mc(card, ha->addr, 1); 659 qeth_l2_add_mc(card, ha->addr, 1);
660 660
661 spin_unlock_bh(&card->mclock); 661 spin_unlock_bh(&card->mclock);
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index a4cf1079b312..66f52674ca0c 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1332,44 +1332,46 @@ err_unreg:
1332 return ret; 1332 return ret;
1333} 1333}
1334 1334
1335static int sci_suspend(struct platform_device *dev, pm_message_t state) 1335static int sci_suspend(struct device *dev)
1336{ 1336{
1337 struct sh_sci_priv *priv = platform_get_drvdata(dev); 1337 struct sh_sci_priv *priv = dev_get_drvdata(dev);
1338 struct sci_port *p; 1338 struct sci_port *p;
1339 unsigned long flags; 1339 unsigned long flags;
1340 1340
1341 spin_lock_irqsave(&priv->lock, flags); 1341 spin_lock_irqsave(&priv->lock, flags);
1342 list_for_each_entry(p, &priv->ports, node) 1342 list_for_each_entry(p, &priv->ports, node)
1343 uart_suspend_port(&sci_uart_driver, &p->port); 1343 uart_suspend_port(&sci_uart_driver, &p->port);
1344
1345 spin_unlock_irqrestore(&priv->lock, flags); 1344 spin_unlock_irqrestore(&priv->lock, flags);
1346 1345
1347 return 0; 1346 return 0;
1348} 1347}
1349 1348
1350static int sci_resume(struct platform_device *dev) 1349static int sci_resume(struct device *dev)
1351{ 1350{
1352 struct sh_sci_priv *priv = platform_get_drvdata(dev); 1351 struct sh_sci_priv *priv = dev_get_drvdata(dev);
1353 struct sci_port *p; 1352 struct sci_port *p;
1354 unsigned long flags; 1353 unsigned long flags;
1355 1354
1356 spin_lock_irqsave(&priv->lock, flags); 1355 spin_lock_irqsave(&priv->lock, flags);
1357 list_for_each_entry(p, &priv->ports, node) 1356 list_for_each_entry(p, &priv->ports, node)
1358 uart_resume_port(&sci_uart_driver, &p->port); 1357 uart_resume_port(&sci_uart_driver, &p->port);
1359
1360 spin_unlock_irqrestore(&priv->lock, flags); 1358 spin_unlock_irqrestore(&priv->lock, flags);
1361 1359
1362 return 0; 1360 return 0;
1363} 1361}
1364 1362
1363static struct dev_pm_ops sci_dev_pm_ops = {
1364 .suspend = sci_suspend,
1365 .resume = sci_resume,
1366};
1367
1365static struct platform_driver sci_driver = { 1368static struct platform_driver sci_driver = {
1366 .probe = sci_probe, 1369 .probe = sci_probe,
1367 .remove = __devexit_p(sci_remove), 1370 .remove = __devexit_p(sci_remove),
1368 .suspend = sci_suspend,
1369 .resume = sci_resume,
1370 .driver = { 1371 .driver = {
1371 .name = "sh-sci", 1372 .name = "sh-sci",
1372 .owner = THIS_MODULE, 1373 .owner = THIS_MODULE,
1374 .pm = &sci_dev_pm_ops,
1373 }, 1375 },
1374}; 1376};
1375 1377
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index d687a9b93d03..3dd231a643b5 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -20,7 +20,6 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/bootmem.h>
24#include <linux/sh_intc.h> 23#include <linux/sh_intc.h>
25#include <linux/sysdev.h> 24#include <linux/sysdev.h>
26#include <linux/list.h> 25#include <linux/list.h>
@@ -675,7 +674,7 @@ void __init register_intc_controller(struct intc_desc *desc)
675 unsigned int i, k, smp; 674 unsigned int i, k, smp;
676 struct intc_desc_int *d; 675 struct intc_desc_int *d;
677 676
678 d = alloc_bootmem(sizeof(*d)); 677 d = kzalloc(sizeof(*d), GFP_NOWAIT);
679 678
680 INIT_LIST_HEAD(&d->list); 679 INIT_LIST_HEAD(&d->list);
681 list_add(&d->list, &intc_list); 680 list_add(&d->list, &intc_list);
@@ -687,9 +686,9 @@ void __init register_intc_controller(struct intc_desc *desc)
687#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) 686#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
688 d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0; 687 d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
689#endif 688#endif
690 d->reg = alloc_bootmem(d->nr_reg * sizeof(*d->reg)); 689 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
691#ifdef CONFIG_SMP 690#ifdef CONFIG_SMP
692 d->smp = alloc_bootmem(d->nr_reg * sizeof(*d->smp)); 691 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
693#endif 692#endif
694 k = 0; 693 k = 0;
695 694
@@ -702,7 +701,7 @@ void __init register_intc_controller(struct intc_desc *desc)
702 } 701 }
703 702
704 if (desc->prio_regs) { 703 if (desc->prio_regs) {
705 d->prio = alloc_bootmem(desc->nr_vectors * sizeof(*d->prio)); 704 d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT);
706 705
707 for (i = 0; i < desc->nr_prio_regs; i++) { 706 for (i = 0; i < desc->nr_prio_regs; i++) {
708 smp = IS_SMP(desc->prio_regs[i]); 707 smp = IS_SMP(desc->prio_regs[i]);
@@ -712,7 +711,7 @@ void __init register_intc_controller(struct intc_desc *desc)
712 } 711 }
713 712
714 if (desc->sense_regs) { 713 if (desc->sense_regs) {
715 d->sense = alloc_bootmem(desc->nr_vectors * sizeof(*d->sense)); 714 d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT);
716 715
717 for (i = 0; i < desc->nr_sense_regs; i++) { 716 for (i = 0; i < desc->nr_sense_regs; i++) {
718 k += save_reg(d, k, desc->sense_regs[i].reg, 0); 717 k += save_reg(d, k, desc->sense_regs[i].reg, 0);
@@ -757,7 +756,7 @@ void __init register_intc_controller(struct intc_desc *desc)
757 vect2->enum_id = 0; 756 vect2->enum_id = 0;
758 757
759 if (!intc_evt2irq_table) 758 if (!intc_evt2irq_table)
760 intc_evt2irq_table = alloc_bootmem(NR_IRQS); 759 intc_evt2irq_table = kzalloc(NR_IRQS, GFP_NOWAIT);
761 760
762 if (!intc_evt2irq_table) { 761 if (!intc_evt2irq_table) {
763 pr_warning("intc: cannot allocate evt2irq!\n"); 762 pr_warning("intc: cannot allocate evt2irq!\n");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index e8aae227b5e0..2c733c27db2f 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -139,17 +139,15 @@ config SPI_MPC52xx_PSC
139 This enables using the Freescale MPC52xx Programmable Serial 139 This enables using the Freescale MPC52xx Programmable Serial
140 Controller in master SPI mode. 140 Controller in master SPI mode.
141 141
142config SPI_MPC83xx 142config SPI_MPC8xxx
143 tristate "Freescale MPC83xx/QUICC Engine SPI controller" 143 tristate "Freescale MPC8xxx SPI controller"
144 depends on (PPC_83xx || QUICC_ENGINE) && EXPERIMENTAL 144 depends on FSL_SOC
145 help 145 help
146 This enables using the Freescale MPC83xx and QUICC Engine SPI 146 This enables using the Freescale MPC8xxx SPI controllers in master
147 controllers in master mode. 147 mode.
148 148
149 Note, this driver uniquely supports the SPI controller on the MPC83xx 149 This driver uses a simple set of shift registers for data (opposed
150 family of PowerPC processors, plus processors with QUICC Engine 150 to the CPM based descriptor model).
151 technology. This driver uses a simple set of shift registers for data
152 (opposed to the CPM based descriptor model).
153 151
154config SPI_OMAP_UWIRE 152config SPI_OMAP_UWIRE
155 tristate "OMAP1 MicroWire" 153 tristate "OMAP1 MicroWire"
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index ecfadb180482..3de408d294ba 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
25obj-$(CONFIG_SPI_ORION) += orion_spi.o 25obj-$(CONFIG_SPI_ORION) += orion_spi.o
26obj-$(CONFIG_SPI_PL022) += amba-pl022.o 26obj-$(CONFIG_SPI_PL022) += amba-pl022.o
27obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 27obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
28obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o 28obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
29obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 29obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
30obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o 30obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
31obj-$(CONFIG_SPI_TXX9) += spi_txx9.o 31obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 12e443cc4ac9..f5b3fdbb1e27 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -530,9 +530,6 @@ atmel_spi_interrupt(int irq, void *dev_id)
530 return ret; 530 return ret;
531} 531}
532 532
533/* the spi->mode bits understood by this driver: */
534#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
535
536static int atmel_spi_setup(struct spi_device *spi) 533static int atmel_spi_setup(struct spi_device *spi)
537{ 534{
538 struct atmel_spi *as; 535 struct atmel_spi *as;
@@ -555,8 +552,6 @@ static int atmel_spi_setup(struct spi_device *spi)
555 return -EINVAL; 552 return -EINVAL;
556 } 553 }
557 554
558 if (bits == 0)
559 bits = 8;
560 if (bits < 8 || bits > 16) { 555 if (bits < 8 || bits > 16) {
561 dev_dbg(&spi->dev, 556 dev_dbg(&spi->dev,
562 "setup: invalid bits_per_word %u (8 to 16)\n", 557 "setup: invalid bits_per_word %u (8 to 16)\n",
@@ -564,12 +559,6 @@ static int atmel_spi_setup(struct spi_device *spi)
564 return -EINVAL; 559 return -EINVAL;
565 } 560 }
566 561
567 if (spi->mode & ~MODEBITS) {
568 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
569 spi->mode & ~MODEBITS);
570 return -EINVAL;
571 }
572
573 /* see notes above re chipselect */ 562 /* see notes above re chipselect */
574 if (!atmel_spi_is_v2() 563 if (!atmel_spi_is_v2()
575 && spi->chip_select == 0 564 && spi->chip_select == 0
@@ -775,6 +764,9 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
775 if (!master) 764 if (!master)
776 goto out_free; 765 goto out_free;
777 766
767 /* the spi->mode bits understood by this driver: */
768 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
769
778 master->bus_num = pdev->id; 770 master->bus_num = pdev->id;
779 master->num_chipselect = 4; 771 master->num_chipselect = 4;
780 master->setup = atmel_spi_setup; 772 master->setup = atmel_spi_setup;
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index b02f25c702fd..76cbc1a66598 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -284,27 +284,16 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
284 return 0; 284 return 0;
285} 285}
286 286
287/* the spi->mode bits understood by this driver: */
288#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
289
290static int au1550_spi_setup(struct spi_device *spi) 287static int au1550_spi_setup(struct spi_device *spi)
291{ 288{
292 struct au1550_spi *hw = spi_master_get_devdata(spi->master); 289 struct au1550_spi *hw = spi_master_get_devdata(spi->master);
293 290
294 if (spi->bits_per_word == 0)
295 spi->bits_per_word = 8;
296 if (spi->bits_per_word < 4 || spi->bits_per_word > 24) { 291 if (spi->bits_per_word < 4 || spi->bits_per_word > 24) {
297 dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n", 292 dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n",
298 spi->bits_per_word); 293 spi->bits_per_word);
299 return -EINVAL; 294 return -EINVAL;
300 } 295 }
301 296
302 if (spi->mode & ~MODEBITS) {
303 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
304 spi->mode & ~MODEBITS);
305 return -EINVAL;
306 }
307
308 if (spi->max_speed_hz == 0) 297 if (spi->max_speed_hz == 0)
309 spi->max_speed_hz = hw->freq_max; 298 spi->max_speed_hz = hw->freq_max;
310 if (spi->max_speed_hz > hw->freq_max 299 if (spi->max_speed_hz > hw->freq_max
@@ -781,6 +770,9 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
781 goto err_nomem; 770 goto err_nomem;
782 } 771 }
783 772
773 /* the spi->mode bits understood by this driver: */
774 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
775
784 hw = spi_master_get_devdata(master); 776 hw = spi_master_get_devdata(master);
785 777
786 hw->master = spi_master_get(master); 778 hw->master = spi_master_get(master);
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index e1901fdce774..1b74d5ca03f3 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -259,9 +259,6 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
259 spin_unlock_irq(&mps->lock); 259 spin_unlock_irq(&mps->lock);
260} 260}
261 261
262/* the spi->mode bits understood by this driver: */
263#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
264
265static int mpc52xx_psc_spi_setup(struct spi_device *spi) 262static int mpc52xx_psc_spi_setup(struct spi_device *spi)
266{ 263{
267 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); 264 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
@@ -271,12 +268,6 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
271 if (spi->bits_per_word%8) 268 if (spi->bits_per_word%8)
272 return -EINVAL; 269 return -EINVAL;
273 270
274 if (spi->mode & ~MODEBITS) {
275 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
276 spi->mode & ~MODEBITS);
277 return -EINVAL;
278 }
279
280 if (!cs) { 271 if (!cs) {
281 cs = kzalloc(sizeof *cs, GFP_KERNEL); 272 cs = kzalloc(sizeof *cs, GFP_KERNEL);
282 if (!cs) 273 if (!cs)
@@ -383,6 +374,9 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
383 dev_set_drvdata(dev, master); 374 dev_set_drvdata(dev, master);
384 mps = spi_master_get_devdata(master); 375 mps = spi_master_get_devdata(master);
385 376
377 /* the spi->mode bits understood by this driver: */
378 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
379
386 mps->irq = irq; 380 mps->irq = irq;
387 if (pdata == NULL) { 381 if (pdata == NULL) {
388 dev_warn(dev, "probe called without platform data, no " 382 dev_warn(dev, "probe called without platform data, no "
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index d6d0c5d241ce..eee4b6e0af2c 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -603,9 +603,6 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
603 return 0; 603 return 0;
604} 604}
605 605
606/* the spi->mode bits understood by this driver: */
607#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
608
609static int omap2_mcspi_setup(struct spi_device *spi) 606static int omap2_mcspi_setup(struct spi_device *spi)
610{ 607{
611 int ret; 608 int ret;
@@ -613,15 +610,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
613 struct omap2_mcspi_dma *mcspi_dma; 610 struct omap2_mcspi_dma *mcspi_dma;
614 struct omap2_mcspi_cs *cs = spi->controller_state; 611 struct omap2_mcspi_cs *cs = spi->controller_state;
615 612
616 if (spi->mode & ~MODEBITS) { 613 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
617 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
618 spi->mode & ~MODEBITS);
619 return -EINVAL;
620 }
621
622 if (spi->bits_per_word == 0)
623 spi->bits_per_word = 8;
624 else if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
625 dev_dbg(&spi->dev, "setup: unsupported %d bit words\n", 614 dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
626 spi->bits_per_word); 615 spi->bits_per_word);
627 return -EINVAL; 616 return -EINVAL;
@@ -984,6 +973,9 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
984 return -ENOMEM; 973 return -ENOMEM;
985 } 974 }
986 975
976 /* the spi->mode bits understood by this driver: */
977 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
978
987 if (pdev->id != -1) 979 if (pdev->id != -1)
988 master->bus_num = pdev->id; 980 master->bus_num = pdev->id;
989 981
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index fe8b9ac0ccef..aa90ddb37066 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -339,8 +339,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
339 bits = spi->bits_per_word; 339 bits = spi->bits_per_word;
340 if (t != NULL && t->bits_per_word) 340 if (t != NULL && t->bits_per_word)
341 bits = t->bits_per_word; 341 bits = t->bits_per_word;
342 if (!bits)
343 bits = 8;
344 342
345 if (bits > 16) { 343 if (bits > 16) {
346 pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits); 344 pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits);
@@ -449,19 +447,10 @@ done:
449 return status; 447 return status;
450} 448}
451 449
452/* the spi->mode bits understood by this driver: */
453#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
454
455static int uwire_setup(struct spi_device *spi) 450static int uwire_setup(struct spi_device *spi)
456{ 451{
457 struct uwire_state *ust = spi->controller_state; 452 struct uwire_state *ust = spi->controller_state;
458 453
459 if (spi->mode & ~MODEBITS) {
460 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
461 spi->mode & ~MODEBITS);
462 return -EINVAL;
463 }
464
465 if (ust == NULL) { 454 if (ust == NULL) {
466 ust = kzalloc(sizeof(*ust), GFP_KERNEL); 455 ust = kzalloc(sizeof(*ust), GFP_KERNEL);
467 if (ust == NULL) 456 if (ust == NULL)
@@ -522,6 +511,9 @@ static int __init uwire_probe(struct platform_device *pdev)
522 511
523 uwire_write_reg(UWIRE_SR3, 1); 512 uwire_write_reg(UWIRE_SR3, 1);
524 513
514 /* the spi->mode bits understood by this driver: */
515 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
516
525 master->bus_num = 2; /* "official" */ 517 master->bus_num = 2; /* "official" */
526 master->num_chipselect = 4; 518 master->num_chipselect = 4;
527 master->setup = uwire_setup; 519 master->setup = uwire_setup;
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c
index c8b0babdc2a6..3aea50da7b29 100644
--- a/drivers/spi/orion_spi.c
+++ b/drivers/spi/orion_spi.c
@@ -358,20 +358,11 @@ static int orion_spi_setup(struct spi_device *spi)
358 358
359 orion_spi = spi_master_get_devdata(spi->master); 359 orion_spi = spi_master_get_devdata(spi->master);
360 360
361 if (spi->mode) {
362 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
363 spi->mode);
364 return -EINVAL;
365 }
366
367 /* Fix ac timing if required. */ 361 /* Fix ac timing if required. */
368 if (orion_spi->spi_info->enable_clock_fix) 362 if (orion_spi->spi_info->enable_clock_fix)
369 orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG, 363 orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
370 (1 << 14)); 364 (1 << 14));
371 365
372 if (spi->bits_per_word == 0)
373 spi->bits_per_word = 8;
374
375 if ((spi->max_speed_hz == 0) 366 if ((spi->max_speed_hz == 0)
376 || (spi->max_speed_hz > orion_spi->max_speed)) 367 || (spi->max_speed_hz > orion_spi->max_speed))
377 spi->max_speed_hz = orion_spi->max_speed; 368 spi->max_speed_hz = orion_spi->max_speed;
@@ -476,6 +467,9 @@ static int __init orion_spi_probe(struct platform_device *pdev)
476 if (pdev->id != -1) 467 if (pdev->id != -1)
477 master->bus_num = pdev->id; 468 master->bus_num = pdev->id;
478 469
470 /* we support only mode 0, and no options */
471 master->mode_bits = 0;
472
479 master->setup = orion_spi_setup; 473 master->setup = orion_spi_setup;
480 master->transfer = orion_spi_transfer; 474 master->transfer = orion_spi_transfer;
481 master->num_chipselect = ORION_NUM_CHIPSELECTS; 475 master->num_chipselect = ORION_NUM_CHIPSELECTS;
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 3f3c08c6ba4e..d949dbf1141f 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1185,9 +1185,6 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
1185 return 0; 1185 return 0;
1186} 1186}
1187 1187
1188/* the spi->mode bits understood by this driver: */
1189#define MODEBITS (SPI_CPOL | SPI_CPHA)
1190
1191static int setup_cs(struct spi_device *spi, struct chip_data *chip, 1188static int setup_cs(struct spi_device *spi, struct chip_data *chip,
1192 struct pxa2xx_spi_chip *chip_info) 1189 struct pxa2xx_spi_chip *chip_info)
1193{ 1190{
@@ -1236,9 +1233,6 @@ static int setup(struct spi_device *spi)
1236 uint tx_thres = TX_THRESH_DFLT; 1233 uint tx_thres = TX_THRESH_DFLT;
1237 uint rx_thres = RX_THRESH_DFLT; 1234 uint rx_thres = RX_THRESH_DFLT;
1238 1235
1239 if (!spi->bits_per_word)
1240 spi->bits_per_word = 8;
1241
1242 if (drv_data->ssp_type != PXA25x_SSP 1236 if (drv_data->ssp_type != PXA25x_SSP
1243 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { 1237 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
1244 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " 1238 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
@@ -1255,12 +1249,6 @@ static int setup(struct spi_device *spi)
1255 return -EINVAL; 1249 return -EINVAL;
1256 } 1250 }
1257 1251
1258 if (spi->mode & ~MODEBITS) {
1259 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
1260 spi->mode & ~MODEBITS);
1261 return -EINVAL;
1262 }
1263
1264 /* Only alloc on first setup */ 1252 /* Only alloc on first setup */
1265 chip = spi_get_ctldata(spi); 1253 chip = spi_get_ctldata(spi);
1266 if (!chip) { 1254 if (!chip) {
@@ -1328,18 +1316,14 @@ static int setup(struct spi_device *spi)
1328 1316
1329 /* NOTE: PXA25x_SSP _could_ use external clocking ... */ 1317 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
1330 if (drv_data->ssp_type != PXA25x_SSP) 1318 if (drv_data->ssp_type != PXA25x_SSP)
1331 dev_dbg(&spi->dev, "%d bits/word, %ld Hz, mode %d, %s\n", 1319 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1332 spi->bits_per_word,
1333 clk_get_rate(ssp->clk) 1320 clk_get_rate(ssp->clk)
1334 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), 1321 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
1335 spi->mode & 0x3,
1336 chip->enable_dma ? "DMA" : "PIO"); 1322 chip->enable_dma ? "DMA" : "PIO");
1337 else 1323 else
1338 dev_dbg(&spi->dev, "%d bits/word, %ld Hz, mode %d, %s\n", 1324 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1339 spi->bits_per_word,
1340 clk_get_rate(ssp->clk) / 2 1325 clk_get_rate(ssp->clk) / 2
1341 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), 1326 / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
1342 spi->mode & 0x3,
1343 chip->enable_dma ? "DMA" : "PIO"); 1327 chip->enable_dma ? "DMA" : "PIO");
1344 1328
1345 if (spi->bits_per_word <= 8) { 1329 if (spi->bits_per_word <= 8) {
@@ -1500,6 +1484,9 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1500 drv_data->pdev = pdev; 1484 drv_data->pdev = pdev;
1501 drv_data->ssp = ssp; 1485 drv_data->ssp = ssp;
1502 1486
1487 /* the spi->mode bits understood by this driver: */
1488 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1489
1503 master->bus_num = pdev->id; 1490 master->bus_num = pdev->id;
1504 master->num_chipselect = platform_info->num_chipselect; 1491 master->num_chipselect = platform_info->num_chipselect;
1505 master->dma_alignment = DMA_ALIGNMENT; 1492 master->dma_alignment = DMA_ALIGNMENT;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8eba98c8ed1e..70845ccd85c3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -265,7 +265,7 @@ int spi_add_device(struct spi_device *spi)
265 * normally rely on the device being setup. Devices 265 * normally rely on the device being setup. Devices
266 * using SPI_CS_HIGH can't coexist well otherwise... 266 * using SPI_CS_HIGH can't coexist well otherwise...
267 */ 267 */
268 status = spi->master->setup(spi); 268 status = spi_setup(spi);
269 if (status < 0) { 269 if (status < 0) {
270 dev_err(dev, "can't %s %s, status %d\n", 270 dev_err(dev, "can't %s %s, status %d\n",
271 "setup", dev_name(&spi->dev), status); 271 "setup", dev_name(&spi->dev), status);
@@ -583,6 +583,70 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
583 583
584/*-------------------------------------------------------------------------*/ 584/*-------------------------------------------------------------------------*/
585 585
586/* Core methods for SPI master protocol drivers. Some of the
587 * other core methods are currently defined as inline functions.
588 */
589
590/**
591 * spi_setup - setup SPI mode and clock rate
592 * @spi: the device whose settings are being modified
593 * Context: can sleep, and no requests are queued to the device
594 *
595 * SPI protocol drivers may need to update the transfer mode if the
596 * device doesn't work with its default. They may likewise need
597 * to update clock rates or word sizes from initial values. This function
598 * changes those settings, and must be called from a context that can sleep.
599 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
600 * effect the next time the device is selected and data is transferred to
601 * or from it. When this function returns, the spi device is deselected.
602 *
603 * Note that this call will fail if the protocol driver specifies an option
604 * that the underlying controller or its driver does not support. For
605 * example, not all hardware supports wire transfers using nine bit words,
606 * LSB-first wire encoding, or active-high chipselects.
607 */
608int spi_setup(struct spi_device *spi)
609{
610 unsigned bad_bits;
611 int status;
612
613 /* help drivers fail *cleanly* when they need options
614 * that aren't supported with their current master
615 */
616 bad_bits = spi->mode & ~spi->master->mode_bits;
617 if (bad_bits) {
618 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
619 bad_bits);
620 return -EINVAL;
621 }
622
623 if (!spi->bits_per_word)
624 spi->bits_per_word = 8;
625
626 status = spi->master->setup(spi);
627
628 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
629 "%u bits/w, %u Hz max --> %d\n",
630 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
631 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
632 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
633 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
634 (spi->mode & SPI_LOOP) ? "loopback, " : "",
635 spi->bits_per_word, spi->max_speed_hz,
636 status);
637
638 return status;
639}
640EXPORT_SYMBOL_GPL(spi_setup);
641
642
643/*-------------------------------------------------------------------------*/
644
645/* Utility methods for SPI master protocol drivers, layered on
646 * top of the core. Some other utility methods are defined as
647 * inline functions.
648 */
649
586static void spi_complete(void *arg) 650static void spi_complete(void *arg)
587{ 651{
588 complete(arg); 652 complete(arg);
@@ -636,8 +700,8 @@ static u8 *buf;
636 * @spi: device with which data will be exchanged 700 * @spi: device with which data will be exchanged
637 * @txbuf: data to be written (need not be dma-safe) 701 * @txbuf: data to be written (need not be dma-safe)
638 * @n_tx: size of txbuf, in bytes 702 * @n_tx: size of txbuf, in bytes
639 * @rxbuf: buffer into which data will be read 703 * @rxbuf: buffer into which data will be read (need not be dma-safe)
640 * @n_rx: size of rxbuf, in bytes (need not be dma-safe) 704 * @n_rx: size of rxbuf, in bytes
641 * Context: can sleep 705 * Context: can sleep
642 * 706 *
643 * This performs a half duplex MicroWire style transaction with the 707 * This performs a half duplex MicroWire style transaction with the
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 011c5bddba6a..73e24ef5a2f9 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -169,7 +169,7 @@ static int bfin_spi_flush(struct driver_data *drv_data)
169 unsigned long limit = loops_per_jiffy << 1; 169 unsigned long limit = loops_per_jiffy << 1;
170 170
171 /* wait for stop and clear stat */ 171 /* wait for stop and clear stat */
172 while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && limit--) 172 while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit)
173 cpu_relax(); 173 cpu_relax();
174 174
175 write_STAT(drv_data, BIT_STAT_CLR); 175 write_STAT(drv_data, BIT_STAT_CLR);
@@ -1010,16 +1010,6 @@ static int bfin_spi_setup(struct spi_device *spi)
1010 struct driver_data *drv_data = spi_master_get_devdata(spi->master); 1010 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1011 int ret; 1011 int ret;
1012 1012
1013 /* Abort device setup if requested features are not supported */
1014 if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
1015 dev_err(&spi->dev, "requested mode not fully supported\n");
1016 return -EINVAL;
1017 }
1018
1019 /* Zero (the default) here means 8 bits */
1020 if (!spi->bits_per_word)
1021 spi->bits_per_word = 8;
1022
1023 if (spi->bits_per_word != 8 && spi->bits_per_word != 16) 1013 if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
1024 return -EINVAL; 1014 return -EINVAL;
1025 1015
@@ -1287,6 +1277,9 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1287 drv_data->pdev = pdev; 1277 drv_data->pdev = pdev;
1288 drv_data->pin_req = platform_info->pin_req; 1278 drv_data->pin_req = platform_info->pin_req;
1289 1279
1280 /* the spi->mode bits supported by this driver: */
1281 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
1282
1290 master->bus_num = pdev->id; 1283 master->bus_num = pdev->id;
1291 master->num_chipselect = platform_info->num_chipselect; 1284 master->num_chipselect = platform_info->num_chipselect;
1292 master->cleanup = bfin_spi_cleanup; 1285 master->cleanup = bfin_spi_cleanup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 85e61f451218..2a5abc08e857 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -188,12 +188,6 @@ int spi_bitbang_setup(struct spi_device *spi)
188 188
189 bitbang = spi_master_get_devdata(spi->master); 189 bitbang = spi_master_get_devdata(spi->master);
190 190
191 /* Bitbangers can support SPI_CS_HIGH, SPI_3WIRE, and so on;
192 * add those to master->flags, and provide the other support.
193 */
194 if ((spi->mode & ~(SPI_CPOL|SPI_CPHA|bitbang->flags)) != 0)
195 return -EINVAL;
196
197 if (!cs) { 191 if (!cs) {
198 cs = kzalloc(sizeof *cs, GFP_KERNEL); 192 cs = kzalloc(sizeof *cs, GFP_KERNEL);
199 if (!cs) 193 if (!cs)
@@ -201,9 +195,6 @@ int spi_bitbang_setup(struct spi_device *spi)
201 spi->controller_state = cs; 195 spi->controller_state = cs;
202 } 196 }
203 197
204 if (!spi->bits_per_word)
205 spi->bits_per_word = 8;
206
207 /* per-word shift register access, in hardware or bitbanging */ 198 /* per-word shift register access, in hardware or bitbanging */
208 cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; 199 cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
209 if (!cs->txrx_word) 200 if (!cs->txrx_word)
@@ -213,9 +204,7 @@ int spi_bitbang_setup(struct spi_device *spi)
213 if (retval < 0) 204 if (retval < 0)
214 return retval; 205 return retval;
215 206
216 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", 207 dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
217 __func__, spi->mode & (SPI_CPOL | SPI_CPHA),
218 spi->bits_per_word, 2 * cs->nsecs);
219 208
220 /* NOTE we _need_ to call chipselect() early, ideally with adapter 209 /* NOTE we _need_ to call chipselect() early, ideally with adapter
221 * setup, unless the hardware defaults cooperate to avoid confusion 210 * setup, unless the hardware defaults cooperate to avoid confusion
@@ -457,6 +446,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
457 spin_lock_init(&bitbang->lock); 446 spin_lock_init(&bitbang->lock);
458 INIT_LIST_HEAD(&bitbang->queue); 447 INIT_LIST_HEAD(&bitbang->queue);
459 448
449 if (!bitbang->master->mode_bits)
450 bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
451
460 if (!bitbang->master->transfer) 452 if (!bitbang->master->transfer)
461 bitbang->master->transfer = spi_bitbang_transfer; 453 bitbang->master->transfer = spi_bitbang_transfer;
462 if (!bitbang->txrx_bufs) { 454 if (!bitbang->txrx_bufs) {
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 0671aeef5792..c195e45f7f35 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1171,9 +1171,6 @@ msg_rejected:
1171 return -EINVAL; 1171 return -EINVAL;
1172} 1172}
1173 1173
1174/* the spi->mode bits understood by this driver: */
1175#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
1176
1177/* On first setup bad values must free chip_data memory since will cause 1174/* On first setup bad values must free chip_data memory since will cause
1178 spi_new_device to fail. Bad value setup from protocol driver are simply not 1175 spi_new_device to fail. Bad value setup from protocol driver are simply not
1179 applied and notified to the calling driver. */ 1176 applied and notified to the calling driver. */
@@ -1186,12 +1183,6 @@ static int setup(struct spi_device *spi)
1186 u32 tmp; 1183 u32 tmp;
1187 int status = 0; 1184 int status = 0;
1188 1185
1189 if (spi->mode & ~MODEBITS) {
1190 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
1191 spi->mode & ~MODEBITS);
1192 return -EINVAL;
1193 }
1194
1195 /* Get controller data */ 1186 /* Get controller data */
1196 chip_info = spi->controller_data; 1187 chip_info = spi->controller_data;
1197 1188
@@ -1286,10 +1277,7 @@ static int setup(struct spi_device *spi)
1286 1277
1287 /* SPI word width */ 1278 /* SPI word width */
1288 tmp = spi->bits_per_word; 1279 tmp = spi->bits_per_word;
1289 if (tmp == 0) { 1280 if (tmp > 16) {
1290 tmp = 8;
1291 spi->bits_per_word = 8;
1292 } else if (tmp > 16) {
1293 status = -EINVAL; 1281 status = -EINVAL;
1294 dev_err(&spi->dev, 1282 dev_err(&spi->dev,
1295 "setup - " 1283 "setup - "
@@ -1481,6 +1469,9 @@ static int __init spi_imx_probe(struct platform_device *pdev)
1481 drv_data->master_info = platform_info; 1469 drv_data->master_info = platform_info;
1482 drv_data->pdev = pdev; 1470 drv_data->pdev = pdev;
1483 1471
1472 /* the spi->mode bits understood by this driver: */
1473 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1474
1484 master->bus_num = pdev->id; 1475 master->bus_num = pdev->id;
1485 master->num_chipselect = platform_info->num_chipselect; 1476 master->num_chipselect = platform_info->num_chipselect;
1486 master->dma_alignment = DMA_ALIGNMENT; 1477 master->dma_alignment = DMA_ALIGNMENT;
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc8xxx.c
index a32ccb44065e..0fd0ec4d3a7d 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * MPC83xx SPI controller driver. 2 * MPC8xxx SPI controller driver.
3 * 3 *
4 * Maintainer: Kumar Gala 4 * Maintainer: Kumar Gala
5 * 5 *
@@ -14,8 +14,10 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/bug.h>
17#include <linux/errno.h> 18#include <linux/errno.h>
18#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/io.h>
19#include <linux/completion.h> 21#include <linux/completion.h>
20#include <linux/interrupt.h> 22#include <linux/interrupt.h>
21#include <linux/delay.h> 23#include <linux/delay.h>
@@ -33,10 +35,9 @@
33 35
34#include <sysdev/fsl_soc.h> 36#include <sysdev/fsl_soc.h>
35#include <asm/irq.h> 37#include <asm/irq.h>
36#include <asm/io.h>
37 38
38/* SPI Controller registers */ 39/* SPI Controller registers */
39struct mpc83xx_spi_reg { 40struct mpc8xxx_spi_reg {
40 u8 res1[0x20]; 41 u8 res1[0x20];
41 __be32 mode; 42 __be32 mode;
42 __be32 event; 43 __be32 event;
@@ -75,16 +76,16 @@ struct mpc83xx_spi_reg {
75#define SPIM_NF 0x00000100 /* Not full */ 76#define SPIM_NF 0x00000100 /* Not full */
76 77
77/* SPI Controller driver's private data. */ 78/* SPI Controller driver's private data. */
78struct mpc83xx_spi { 79struct mpc8xxx_spi {
79 struct mpc83xx_spi_reg __iomem *base; 80 struct mpc8xxx_spi_reg __iomem *base;
80 81
81 /* rx & tx bufs from the spi_transfer */ 82 /* rx & tx bufs from the spi_transfer */
82 const void *tx; 83 const void *tx;
83 void *rx; 84 void *rx;
84 85
85 /* functions to deal with different sized buffers */ 86 /* functions to deal with different sized buffers */
86 void (*get_rx) (u32 rx_data, struct mpc83xx_spi *); 87 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
87 u32(*get_tx) (struct mpc83xx_spi *); 88 u32(*get_tx) (struct mpc8xxx_spi *);
88 89
89 unsigned int count; 90 unsigned int count;
90 unsigned int irq; 91 unsigned int irq;
@@ -97,8 +98,6 @@ struct mpc83xx_spi {
97 98
98 bool qe_mode; 99 bool qe_mode;
99 100
100 u8 busy;
101
102 struct workqueue_struct *workqueue; 101 struct workqueue_struct *workqueue;
103 struct work_struct work; 102 struct work_struct work;
104 103
@@ -108,44 +107,44 @@ struct mpc83xx_spi {
108 struct completion done; 107 struct completion done;
109}; 108};
110 109
111struct spi_mpc83xx_cs { 110struct spi_mpc8xxx_cs {
112 /* functions to deal with different sized buffers */ 111 /* functions to deal with different sized buffers */
113 void (*get_rx) (u32 rx_data, struct mpc83xx_spi *); 112 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
114 u32 (*get_tx) (struct mpc83xx_spi *); 113 u32 (*get_tx) (struct mpc8xxx_spi *);
115 u32 rx_shift; /* RX data reg shift when in qe mode */ 114 u32 rx_shift; /* RX data reg shift when in qe mode */
116 u32 tx_shift; /* TX data reg shift when in qe mode */ 115 u32 tx_shift; /* TX data reg shift when in qe mode */
117 u32 hw_mode; /* Holds HW mode register settings */ 116 u32 hw_mode; /* Holds HW mode register settings */
118}; 117};
119 118
120static inline void mpc83xx_spi_write_reg(__be32 __iomem * reg, u32 val) 119static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
121{ 120{
122 out_be32(reg, val); 121 out_be32(reg, val);
123} 122}
124 123
125static inline u32 mpc83xx_spi_read_reg(__be32 __iomem * reg) 124static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
126{ 125{
127 return in_be32(reg); 126 return in_be32(reg);
128} 127}
129 128
130#define MPC83XX_SPI_RX_BUF(type) \ 129#define MPC83XX_SPI_RX_BUF(type) \
131static \ 130static \
132void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \ 131void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
133{ \ 132{ \
134 type * rx = mpc83xx_spi->rx; \ 133 type *rx = mpc8xxx_spi->rx; \
135 *rx++ = (type)(data >> mpc83xx_spi->rx_shift); \ 134 *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
136 mpc83xx_spi->rx = rx; \ 135 mpc8xxx_spi->rx = rx; \
137} 136}
138 137
139#define MPC83XX_SPI_TX_BUF(type) \ 138#define MPC83XX_SPI_TX_BUF(type) \
140static \ 139static \
141u32 mpc83xx_spi_tx_buf_##type(struct mpc83xx_spi *mpc83xx_spi) \ 140u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
142{ \ 141{ \
143 u32 data; \ 142 u32 data; \
144 const type * tx = mpc83xx_spi->tx; \ 143 const type *tx = mpc8xxx_spi->tx; \
145 if (!tx) \ 144 if (!tx) \
146 return 0; \ 145 return 0; \
147 data = *tx++ << mpc83xx_spi->tx_shift; \ 146 data = *tx++ << mpc8xxx_spi->tx_shift; \
148 mpc83xx_spi->tx = tx; \ 147 mpc8xxx_spi->tx = tx; \
149 return data; \ 148 return data; \
150} 149}
151 150
@@ -156,12 +155,12 @@ MPC83XX_SPI_TX_BUF(u8)
156MPC83XX_SPI_TX_BUF(u16) 155MPC83XX_SPI_TX_BUF(u16)
157MPC83XX_SPI_TX_BUF(u32) 156MPC83XX_SPI_TX_BUF(u32)
158 157
159static void mpc83xx_spi_chipselect(struct spi_device *spi, int value) 158static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
160{ 159{
161 struct mpc83xx_spi *mpc83xx_spi = spi_master_get_devdata(spi->master); 160 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
162 struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; 161 struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
163 bool pol = spi->mode & SPI_CS_HIGH; 162 bool pol = spi->mode & SPI_CS_HIGH;
164 struct spi_mpc83xx_cs *cs = spi->controller_state; 163 struct spi_mpc8xxx_cs *cs = spi->controller_state;
165 164
166 if (value == BITBANG_CS_INACTIVE) { 165 if (value == BITBANG_CS_INACTIVE) {
167 if (pdata->cs_control) 166 if (pdata->cs_control)
@@ -169,16 +168,16 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
169 } 168 }
170 169
171 if (value == BITBANG_CS_ACTIVE) { 170 if (value == BITBANG_CS_ACTIVE) {
172 u32 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); 171 u32 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
173 172
174 mpc83xx_spi->rx_shift = cs->rx_shift; 173 mpc8xxx_spi->rx_shift = cs->rx_shift;
175 mpc83xx_spi->tx_shift = cs->tx_shift; 174 mpc8xxx_spi->tx_shift = cs->tx_shift;
176 mpc83xx_spi->get_rx = cs->get_rx; 175 mpc8xxx_spi->get_rx = cs->get_rx;
177 mpc83xx_spi->get_tx = cs->get_tx; 176 mpc8xxx_spi->get_tx = cs->get_tx;
178 177
179 if (cs->hw_mode != regval) { 178 if (cs->hw_mode != regval) {
180 unsigned long flags; 179 unsigned long flags;
181 __be32 __iomem *mode = &mpc83xx_spi->base->mode; 180 __be32 __iomem *mode = &mpc8xxx_spi->base->mode;
182 181
183 regval = cs->hw_mode; 182 regval = cs->hw_mode;
184 /* Turn off IRQs locally to minimize time that 183 /* Turn off IRQs locally to minimize time that
@@ -186,8 +185,8 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
186 */ 185 */
187 local_irq_save(flags); 186 local_irq_save(flags);
188 /* Turn off SPI unit prior changing mode */ 187 /* Turn off SPI unit prior changing mode */
189 mpc83xx_spi_write_reg(mode, regval & ~SPMODE_ENABLE); 188 mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE);
190 mpc83xx_spi_write_reg(mode, regval); 189 mpc8xxx_spi_write_reg(mode, regval);
191 local_irq_restore(flags); 190 local_irq_restore(flags);
192 } 191 }
193 if (pdata->cs_control) 192 if (pdata->cs_control)
@@ -196,15 +195,15 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
196} 195}
197 196
198static 197static
199int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 198int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
200{ 199{
201 struct mpc83xx_spi *mpc83xx_spi; 200 struct mpc8xxx_spi *mpc8xxx_spi;
202 u32 regval; 201 u32 regval;
203 u8 bits_per_word, pm; 202 u8 bits_per_word, pm;
204 u32 hz; 203 u32 hz;
205 struct spi_mpc83xx_cs *cs = spi->controller_state; 204 struct spi_mpc8xxx_cs *cs = spi->controller_state;
206 205
207 mpc83xx_spi = spi_master_get_devdata(spi->master); 206 mpc8xxx_spi = spi_master_get_devdata(spi->master);
208 207
209 if (t) { 208 if (t) {
210 bits_per_word = t->bits_per_word; 209 bits_per_word = t->bits_per_word;
@@ -229,26 +228,26 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
229 cs->rx_shift = 0; 228 cs->rx_shift = 0;
230 cs->tx_shift = 0; 229 cs->tx_shift = 0;
231 if (bits_per_word <= 8) { 230 if (bits_per_word <= 8) {
232 cs->get_rx = mpc83xx_spi_rx_buf_u8; 231 cs->get_rx = mpc8xxx_spi_rx_buf_u8;
233 cs->get_tx = mpc83xx_spi_tx_buf_u8; 232 cs->get_tx = mpc8xxx_spi_tx_buf_u8;
234 if (mpc83xx_spi->qe_mode) { 233 if (mpc8xxx_spi->qe_mode) {
235 cs->rx_shift = 16; 234 cs->rx_shift = 16;
236 cs->tx_shift = 24; 235 cs->tx_shift = 24;
237 } 236 }
238 } else if (bits_per_word <= 16) { 237 } else if (bits_per_word <= 16) {
239 cs->get_rx = mpc83xx_spi_rx_buf_u16; 238 cs->get_rx = mpc8xxx_spi_rx_buf_u16;
240 cs->get_tx = mpc83xx_spi_tx_buf_u16; 239 cs->get_tx = mpc8xxx_spi_tx_buf_u16;
241 if (mpc83xx_spi->qe_mode) { 240 if (mpc8xxx_spi->qe_mode) {
242 cs->rx_shift = 16; 241 cs->rx_shift = 16;
243 cs->tx_shift = 16; 242 cs->tx_shift = 16;
244 } 243 }
245 } else if (bits_per_word <= 32) { 244 } else if (bits_per_word <= 32) {
246 cs->get_rx = mpc83xx_spi_rx_buf_u32; 245 cs->get_rx = mpc8xxx_spi_rx_buf_u32;
247 cs->get_tx = mpc83xx_spi_tx_buf_u32; 246 cs->get_tx = mpc8xxx_spi_tx_buf_u32;
248 } else 247 } else
249 return -EINVAL; 248 return -EINVAL;
250 249
251 if (mpc83xx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) { 250 if (mpc8xxx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) {
252 cs->tx_shift = 0; 251 cs->tx_shift = 0;
253 if (bits_per_word <= 8) 252 if (bits_per_word <= 8)
254 cs->rx_shift = 8; 253 cs->rx_shift = 8;
@@ -256,10 +255,10 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
256 cs->rx_shift = 0; 255 cs->rx_shift = 0;
257 } 256 }
258 257
259 mpc83xx_spi->rx_shift = cs->rx_shift; 258 mpc8xxx_spi->rx_shift = cs->rx_shift;
260 mpc83xx_spi->tx_shift = cs->tx_shift; 259 mpc8xxx_spi->tx_shift = cs->tx_shift;
261 mpc83xx_spi->get_rx = cs->get_rx; 260 mpc8xxx_spi->get_rx = cs->get_rx;
262 mpc83xx_spi->get_tx = cs->get_tx; 261 mpc8xxx_spi->get_tx = cs->get_tx;
263 262
264 if (bits_per_word == 32) 263 if (bits_per_word == 32)
265 bits_per_word = 0; 264 bits_per_word = 0;
@@ -272,25 +271,25 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
272 271
273 cs->hw_mode |= SPMODE_LEN(bits_per_word); 272 cs->hw_mode |= SPMODE_LEN(bits_per_word);
274 273
275 if ((mpc83xx_spi->spibrg / hz) > 64) { 274 if ((mpc8xxx_spi->spibrg / hz) > 64) {
276 cs->hw_mode |= SPMODE_DIV16; 275 cs->hw_mode |= SPMODE_DIV16;
277 pm = mpc83xx_spi->spibrg / (hz * 64); 276 pm = mpc8xxx_spi->spibrg / (hz * 64);
278 if (pm > 16) { 277
279 dev_err(&spi->dev, "Requested speed is too " 278 WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
280 "low: %d Hz. Will use %d Hz instead.\n", 279 "Will use %d Hz instead.\n", dev_name(&spi->dev),
281 hz, mpc83xx_spi->spibrg / 1024); 280 hz, mpc8xxx_spi->spibrg / 1024);
281 if (pm > 16)
282 pm = 16; 282 pm = 16;
283 }
284 } else 283 } else
285 pm = mpc83xx_spi->spibrg / (hz * 4); 284 pm = mpc8xxx_spi->spibrg / (hz * 4);
286 if (pm) 285 if (pm)
287 pm--; 286 pm--;
288 287
289 cs->hw_mode |= SPMODE_PM(pm); 288 cs->hw_mode |= SPMODE_PM(pm);
290 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); 289 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
291 if (cs->hw_mode != regval) { 290 if (cs->hw_mode != regval) {
292 unsigned long flags; 291 unsigned long flags;
293 __be32 __iomem *mode = &mpc83xx_spi->base->mode; 292 __be32 __iomem *mode = &mpc8xxx_spi->base->mode;
294 293
295 regval = cs->hw_mode; 294 regval = cs->hw_mode;
296 /* Turn off IRQs locally to minimize time 295 /* Turn off IRQs locally to minimize time
@@ -298,22 +297,22 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
298 */ 297 */
299 local_irq_save(flags); 298 local_irq_save(flags);
300 /* Turn off SPI unit prior changing mode */ 299 /* Turn off SPI unit prior changing mode */
301 mpc83xx_spi_write_reg(mode, regval & ~SPMODE_ENABLE); 300 mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE);
302 mpc83xx_spi_write_reg(mode, regval); 301 mpc8xxx_spi_write_reg(mode, regval);
303 local_irq_restore(flags); 302 local_irq_restore(flags);
304 } 303 }
305 return 0; 304 return 0;
306} 305}
307 306
308static int mpc83xx_spi_bufs(struct spi_device *spi, struct spi_transfer *t) 307static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
309{ 308{
310 struct mpc83xx_spi *mpc83xx_spi; 309 struct mpc8xxx_spi *mpc8xxx_spi;
311 u32 word, len, bits_per_word; 310 u32 word, len, bits_per_word;
312 311
313 mpc83xx_spi = spi_master_get_devdata(spi->master); 312 mpc8xxx_spi = spi_master_get_devdata(spi->master);
314 313
315 mpc83xx_spi->tx = t->tx_buf; 314 mpc8xxx_spi->tx = t->tx_buf;
316 mpc83xx_spi->rx = t->rx_buf; 315 mpc8xxx_spi->rx = t->rx_buf;
317 bits_per_word = spi->bits_per_word; 316 bits_per_word = spi->bits_per_word;
318 if (t->bits_per_word) 317 if (t->bits_per_word)
319 bits_per_word = t->bits_per_word; 318 bits_per_word = t->bits_per_word;
@@ -330,111 +329,106 @@ static int mpc83xx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
330 return -EINVAL; 329 return -EINVAL;
331 len /= 2; 330 len /= 2;
332 } 331 }
333 mpc83xx_spi->count = len; 332 mpc8xxx_spi->count = len;
334 333
335 INIT_COMPLETION(mpc83xx_spi->done); 334 INIT_COMPLETION(mpc8xxx_spi->done);
336 335
337 /* enable rx ints */ 336 /* enable rx ints */
338 mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, SPIM_NE); 337 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, SPIM_NE);
339 338
340 /* transmit word */ 339 /* transmit word */
341 word = mpc83xx_spi->get_tx(mpc83xx_spi); 340 word = mpc8xxx_spi->get_tx(mpc8xxx_spi);
342 mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, word); 341 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word);
343 342
344 wait_for_completion(&mpc83xx_spi->done); 343 wait_for_completion(&mpc8xxx_spi->done);
345 344
346 /* disable rx ints */ 345 /* disable rx ints */
347 mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0); 346 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
348 347
349 return mpc83xx_spi->count; 348 return mpc8xxx_spi->count;
350} 349}
351 350
352static void mpc83xx_spi_work(struct work_struct *work) 351static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
353{ 352{
354 struct mpc83xx_spi *mpc83xx_spi = 353 struct spi_device *spi = m->spi;
355 container_of(work, struct mpc83xx_spi, work); 354 struct spi_transfer *t;
356 355 unsigned int cs_change;
357 spin_lock_irq(&mpc83xx_spi->lock); 356 const int nsecs = 50;
358 mpc83xx_spi->busy = 1; 357 int status;
359 while (!list_empty(&mpc83xx_spi->queue)) { 358
360 struct spi_message *m; 359 cs_change = 1;
361 struct spi_device *spi; 360 status = 0;
362 struct spi_transfer *t = NULL; 361 list_for_each_entry(t, &m->transfers, transfer_list) {
363 unsigned cs_change; 362 if (t->bits_per_word || t->speed_hz) {
364 int status, nsecs = 50; 363 /* Don't allow changes if CS is active */
365 364 status = -EINVAL;
366 m = container_of(mpc83xx_spi->queue.next,
367 struct spi_message, queue);
368 list_del_init(&m->queue);
369 spin_unlock_irq(&mpc83xx_spi->lock);
370
371 spi = m->spi;
372 cs_change = 1;
373 status = 0;
374 list_for_each_entry(t, &m->transfers, transfer_list) {
375 if (t->bits_per_word || t->speed_hz) {
376 /* Don't allow changes if CS is active */
377 status = -EINVAL;
378
379 if (cs_change)
380 status = mpc83xx_spi_setup_transfer(spi, t);
381 if (status < 0)
382 break;
383 }
384 365
385 if (cs_change) 366 if (cs_change)
386 mpc83xx_spi_chipselect(spi, BITBANG_CS_ACTIVE); 367 status = mpc8xxx_spi_setup_transfer(spi, t);
387 cs_change = t->cs_change; 368 if (status < 0)
388 if (t->len)
389 status = mpc83xx_spi_bufs(spi, t);
390 if (status) {
391 status = -EMSGSIZE;
392 break; 369 break;
393 } 370 }
394 m->actual_length += t->len;
395
396 if (t->delay_usecs)
397 udelay(t->delay_usecs);
398 371
399 if (cs_change) { 372 if (cs_change) {
400 ndelay(nsecs); 373 mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE);
401 mpc83xx_spi_chipselect(spi, BITBANG_CS_INACTIVE); 374 ndelay(nsecs);
402 ndelay(nsecs); 375 }
403 } 376 cs_change = t->cs_change;
377 if (t->len)
378 status = mpc8xxx_spi_bufs(spi, t);
379 if (status) {
380 status = -EMSGSIZE;
381 break;
404 } 382 }
383 m->actual_length += t->len;
405 384
406 m->status = status; 385 if (t->delay_usecs)
407 m->complete(m->context); 386 udelay(t->delay_usecs);
408 387
409 if (status || !cs_change) { 388 if (cs_change) {
389 ndelay(nsecs);
390 mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
410 ndelay(nsecs); 391 ndelay(nsecs);
411 mpc83xx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
412 } 392 }
393 }
413 394
414 mpc83xx_spi_setup_transfer(spi, NULL); 395 m->status = status;
396 m->complete(m->context);
415 397
416 spin_lock_irq(&mpc83xx_spi->lock); 398 if (status || !cs_change) {
399 ndelay(nsecs);
400 mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
417 } 401 }
418 mpc83xx_spi->busy = 0; 402
419 spin_unlock_irq(&mpc83xx_spi->lock); 403 mpc8xxx_spi_setup_transfer(spi, NULL);
420} 404}
421 405
422/* the spi->mode bits understood by this driver: */ 406static void mpc8xxx_spi_work(struct work_struct *work)
423#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ 407{
424 | SPI_LSB_FIRST | SPI_LOOP) 408 struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi,
409 work);
410
411 spin_lock_irq(&mpc8xxx_spi->lock);
412 while (!list_empty(&mpc8xxx_spi->queue)) {
413 struct spi_message *m = container_of(mpc8xxx_spi->queue.next,
414 struct spi_message, queue);
415
416 list_del_init(&m->queue);
417 spin_unlock_irq(&mpc8xxx_spi->lock);
418
419 mpc8xxx_spi_do_one_msg(m);
420
421 spin_lock_irq(&mpc8xxx_spi->lock);
422 }
423 spin_unlock_irq(&mpc8xxx_spi->lock);
424}
425 425
426static int mpc83xx_spi_setup(struct spi_device *spi) 426static int mpc8xxx_spi_setup(struct spi_device *spi)
427{ 427{
428 struct mpc83xx_spi *mpc83xx_spi; 428 struct mpc8xxx_spi *mpc8xxx_spi;
429 int retval; 429 int retval;
430 u32 hw_mode; 430 u32 hw_mode;
431 struct spi_mpc83xx_cs *cs = spi->controller_state; 431 struct spi_mpc8xxx_cs *cs = spi->controller_state;
432
433 if (spi->mode & ~MODEBITS) {
434 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
435 spi->mode & ~MODEBITS);
436 return -EINVAL;
437 }
438 432
439 if (!spi->max_speed_hz) 433 if (!spi->max_speed_hz)
440 return -EINVAL; 434 return -EINVAL;
@@ -445,13 +439,10 @@ static int mpc83xx_spi_setup(struct spi_device *spi)
445 return -ENOMEM; 439 return -ENOMEM;
446 spi->controller_state = cs; 440 spi->controller_state = cs;
447 } 441 }
448 mpc83xx_spi = spi_master_get_devdata(spi->master); 442 mpc8xxx_spi = spi_master_get_devdata(spi->master);
449
450 if (!spi->bits_per_word)
451 spi->bits_per_word = 8;
452 443
453 hw_mode = cs->hw_mode; /* Save orginal settings */ 444 hw_mode = cs->hw_mode; /* Save orginal settings */
454 cs->hw_mode = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); 445 cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
455 /* mask out bits we are going to set */ 446 /* mask out bits we are going to set */
456 cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH 447 cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
457 | SPMODE_REV | SPMODE_LOOP); 448 | SPMODE_REV | SPMODE_LOOP);
@@ -465,45 +456,29 @@ static int mpc83xx_spi_setup(struct spi_device *spi)
465 if (spi->mode & SPI_LOOP) 456 if (spi->mode & SPI_LOOP)
466 cs->hw_mode |= SPMODE_LOOP; 457 cs->hw_mode |= SPMODE_LOOP;
467 458
468 retval = mpc83xx_spi_setup_transfer(spi, NULL); 459 retval = mpc8xxx_spi_setup_transfer(spi, NULL);
469 if (retval < 0) { 460 if (retval < 0) {
470 cs->hw_mode = hw_mode; /* Restore settings */ 461 cs->hw_mode = hw_mode; /* Restore settings */
471 return retval; 462 return retval;
472 } 463 }
473
474 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u Hz\n",
475 __func__, spi->mode & (SPI_CPOL | SPI_CPHA),
476 spi->bits_per_word, spi->max_speed_hz);
477#if 0 /* Don't think this is needed */
478 /* NOTE we _need_ to call chipselect() early, ideally with adapter
479 * setup, unless the hardware defaults cooperate to avoid confusion
480 * between normal (active low) and inverted chipselects.
481 */
482
483 /* deselect chip (low or high) */
484 spin_lock(&mpc83xx_spi->lock);
485 if (!mpc83xx_spi->busy)
486 mpc83xx_spi_chipselect(spi, BITBANG_CS_INACTIVE);
487 spin_unlock(&mpc83xx_spi->lock);
488#endif
489 return 0; 464 return 0;
490} 465}
491 466
492static irqreturn_t mpc83xx_spi_irq(s32 irq, void *context_data) 467static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
493{ 468{
494 struct mpc83xx_spi *mpc83xx_spi = context_data; 469 struct mpc8xxx_spi *mpc8xxx_spi = context_data;
495 u32 event; 470 u32 event;
496 irqreturn_t ret = IRQ_NONE; 471 irqreturn_t ret = IRQ_NONE;
497 472
498 /* Get interrupt events(tx/rx) */ 473 /* Get interrupt events(tx/rx) */
499 event = mpc83xx_spi_read_reg(&mpc83xx_spi->base->event); 474 event = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event);
500 475
501 /* We need handle RX first */ 476 /* We need handle RX first */
502 if (event & SPIE_NE) { 477 if (event & SPIE_NE) {
503 u32 rx_data = mpc83xx_spi_read_reg(&mpc83xx_spi->base->receive); 478 u32 rx_data = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->receive);
504 479
505 if (mpc83xx_spi->rx) 480 if (mpc8xxx_spi->rx)
506 mpc83xx_spi->get_rx(rx_data, mpc83xx_spi); 481 mpc8xxx_spi->get_rx(rx_data, mpc8xxx_spi);
507 482
508 ret = IRQ_HANDLED; 483 ret = IRQ_HANDLED;
509 } 484 }
@@ -511,56 +486,56 @@ static irqreturn_t mpc83xx_spi_irq(s32 irq, void *context_data)
511 if ((event & SPIE_NF) == 0) 486 if ((event & SPIE_NF) == 0)
512 /* spin until TX is done */ 487 /* spin until TX is done */
513 while (((event = 488 while (((event =
514 mpc83xx_spi_read_reg(&mpc83xx_spi->base->event)) & 489 mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event)) &
515 SPIE_NF) == 0) 490 SPIE_NF) == 0)
516 cpu_relax(); 491 cpu_relax();
517 492
518 mpc83xx_spi->count -= 1; 493 mpc8xxx_spi->count -= 1;
519 if (mpc83xx_spi->count) { 494 if (mpc8xxx_spi->count) {
520 u32 word = mpc83xx_spi->get_tx(mpc83xx_spi); 495 u32 word = mpc8xxx_spi->get_tx(mpc8xxx_spi);
521 mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, word); 496 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word);
522 } else { 497 } else {
523 complete(&mpc83xx_spi->done); 498 complete(&mpc8xxx_spi->done);
524 } 499 }
525 500
526 /* Clear the events */ 501 /* Clear the events */
527 mpc83xx_spi_write_reg(&mpc83xx_spi->base->event, event); 502 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, event);
528 503
529 return ret; 504 return ret;
530} 505}
531static int mpc83xx_spi_transfer(struct spi_device *spi, 506static int mpc8xxx_spi_transfer(struct spi_device *spi,
532 struct spi_message *m) 507 struct spi_message *m)
533{ 508{
534 struct mpc83xx_spi *mpc83xx_spi = spi_master_get_devdata(spi->master); 509 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
535 unsigned long flags; 510 unsigned long flags;
536 511
537 m->actual_length = 0; 512 m->actual_length = 0;
538 m->status = -EINPROGRESS; 513 m->status = -EINPROGRESS;
539 514
540 spin_lock_irqsave(&mpc83xx_spi->lock, flags); 515 spin_lock_irqsave(&mpc8xxx_spi->lock, flags);
541 list_add_tail(&m->queue, &mpc83xx_spi->queue); 516 list_add_tail(&m->queue, &mpc8xxx_spi->queue);
542 queue_work(mpc83xx_spi->workqueue, &mpc83xx_spi->work); 517 queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work);
543 spin_unlock_irqrestore(&mpc83xx_spi->lock, flags); 518 spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags);
544 519
545 return 0; 520 return 0;
546} 521}
547 522
548 523
549static void mpc83xx_spi_cleanup(struct spi_device *spi) 524static void mpc8xxx_spi_cleanup(struct spi_device *spi)
550{ 525{
551 kfree(spi->controller_state); 526 kfree(spi->controller_state);
552} 527}
553 528
554static struct spi_master * __devinit 529static struct spi_master * __devinit
555mpc83xx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) 530mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
556{ 531{
557 struct fsl_spi_platform_data *pdata = dev->platform_data; 532 struct fsl_spi_platform_data *pdata = dev->platform_data;
558 struct spi_master *master; 533 struct spi_master *master;
559 struct mpc83xx_spi *mpc83xx_spi; 534 struct mpc8xxx_spi *mpc8xxx_spi;
560 u32 regval; 535 u32 regval;
561 int ret = 0; 536 int ret = 0;
562 537
563 master = spi_alloc_master(dev, sizeof(struct mpc83xx_spi)); 538 master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
564 if (master == NULL) { 539 if (master == NULL) {
565 ret = -ENOMEM; 540 ret = -ENOMEM;
566 goto err; 541 goto err;
@@ -568,36 +543,40 @@ mpc83xx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
568 543
569 dev_set_drvdata(dev, master); 544 dev_set_drvdata(dev, master);
570 545
571 master->setup = mpc83xx_spi_setup; 546 /* the spi->mode bits understood by this driver: */
572 master->transfer = mpc83xx_spi_transfer; 547 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
573 master->cleanup = mpc83xx_spi_cleanup; 548 | SPI_LSB_FIRST | SPI_LOOP;
574 549
575 mpc83xx_spi = spi_master_get_devdata(master); 550 master->setup = mpc8xxx_spi_setup;
576 mpc83xx_spi->qe_mode = pdata->qe_mode; 551 master->transfer = mpc8xxx_spi_transfer;
577 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8; 552 master->cleanup = mpc8xxx_spi_cleanup;
578 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8; 553
579 mpc83xx_spi->spibrg = pdata->sysclk; 554 mpc8xxx_spi = spi_master_get_devdata(master);
580 555 mpc8xxx_spi->qe_mode = pdata->qe_mode;
581 mpc83xx_spi->rx_shift = 0; 556 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
582 mpc83xx_spi->tx_shift = 0; 557 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
583 if (mpc83xx_spi->qe_mode) { 558 mpc8xxx_spi->spibrg = pdata->sysclk;
584 mpc83xx_spi->rx_shift = 16; 559
585 mpc83xx_spi->tx_shift = 24; 560 mpc8xxx_spi->rx_shift = 0;
561 mpc8xxx_spi->tx_shift = 0;
562 if (mpc8xxx_spi->qe_mode) {
563 mpc8xxx_spi->rx_shift = 16;
564 mpc8xxx_spi->tx_shift = 24;
586 } 565 }
587 566
588 init_completion(&mpc83xx_spi->done); 567 init_completion(&mpc8xxx_spi->done);
589 568
590 mpc83xx_spi->base = ioremap(mem->start, mem->end - mem->start + 1); 569 mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1);
591 if (mpc83xx_spi->base == NULL) { 570 if (mpc8xxx_spi->base == NULL) {
592 ret = -ENOMEM; 571 ret = -ENOMEM;
593 goto put_master; 572 goto put_master;
594 } 573 }
595 574
596 mpc83xx_spi->irq = irq; 575 mpc8xxx_spi->irq = irq;
597 576
598 /* Register for SPI Interrupt */ 577 /* Register for SPI Interrupt */
599 ret = request_irq(mpc83xx_spi->irq, mpc83xx_spi_irq, 578 ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq,
600 0, "mpc83xx_spi", mpc83xx_spi); 579 0, "mpc8xxx_spi", mpc8xxx_spi);
601 580
602 if (ret != 0) 581 if (ret != 0)
603 goto unmap_io; 582 goto unmap_io;
@@ -606,25 +585,25 @@ mpc83xx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
606 master->num_chipselect = pdata->max_chipselect; 585 master->num_chipselect = pdata->max_chipselect;
607 586
608 /* SPI controller initializations */ 587 /* SPI controller initializations */
609 mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, 0); 588 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0);
610 mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0); 589 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
611 mpc83xx_spi_write_reg(&mpc83xx_spi->base->command, 0); 590 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0);
612 mpc83xx_spi_write_reg(&mpc83xx_spi->base->event, 0xffffffff); 591 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff);
613 592
614 /* Enable SPI interface */ 593 /* Enable SPI interface */
615 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 594 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
616 if (pdata->qe_mode) 595 if (pdata->qe_mode)
617 regval |= SPMODE_OP; 596 regval |= SPMODE_OP;
618 597
619 mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval); 598 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval);
620 spin_lock_init(&mpc83xx_spi->lock); 599 spin_lock_init(&mpc8xxx_spi->lock);
621 init_completion(&mpc83xx_spi->done); 600 init_completion(&mpc8xxx_spi->done);
622 INIT_WORK(&mpc83xx_spi->work, mpc83xx_spi_work); 601 INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work);
623 INIT_LIST_HEAD(&mpc83xx_spi->queue); 602 INIT_LIST_HEAD(&mpc8xxx_spi->queue);
624 603
625 mpc83xx_spi->workqueue = create_singlethread_workqueue( 604 mpc8xxx_spi->workqueue = create_singlethread_workqueue(
626 dev_name(master->dev.parent)); 605 dev_name(master->dev.parent));
627 if (mpc83xx_spi->workqueue == NULL) { 606 if (mpc8xxx_spi->workqueue == NULL) {
628 ret = -EBUSY; 607 ret = -EBUSY;
629 goto free_irq; 608 goto free_irq;
630 } 609 }
@@ -634,57 +613,57 @@ mpc83xx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
634 goto unreg_master; 613 goto unreg_master;
635 614
636 printk(KERN_INFO 615 printk(KERN_INFO
637 "%s: MPC83xx SPI Controller driver at 0x%p (irq = %d)\n", 616 "%s: MPC8xxx SPI Controller driver at 0x%p (irq = %d)\n",
638 dev_name(dev), mpc83xx_spi->base, mpc83xx_spi->irq); 617 dev_name(dev), mpc8xxx_spi->base, mpc8xxx_spi->irq);
639 618
640 return master; 619 return master;
641 620
642unreg_master: 621unreg_master:
643 destroy_workqueue(mpc83xx_spi->workqueue); 622 destroy_workqueue(mpc8xxx_spi->workqueue);
644free_irq: 623free_irq:
645 free_irq(mpc83xx_spi->irq, mpc83xx_spi); 624 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
646unmap_io: 625unmap_io:
647 iounmap(mpc83xx_spi->base); 626 iounmap(mpc8xxx_spi->base);
648put_master: 627put_master:
649 spi_master_put(master); 628 spi_master_put(master);
650err: 629err:
651 return ERR_PTR(ret); 630 return ERR_PTR(ret);
652} 631}
653 632
654static int __devexit mpc83xx_spi_remove(struct device *dev) 633static int __devexit mpc8xxx_spi_remove(struct device *dev)
655{ 634{
656 struct mpc83xx_spi *mpc83xx_spi; 635 struct mpc8xxx_spi *mpc8xxx_spi;
657 struct spi_master *master; 636 struct spi_master *master;
658 637
659 master = dev_get_drvdata(dev); 638 master = dev_get_drvdata(dev);
660 mpc83xx_spi = spi_master_get_devdata(master); 639 mpc8xxx_spi = spi_master_get_devdata(master);
661 640
662 flush_workqueue(mpc83xx_spi->workqueue); 641 flush_workqueue(mpc8xxx_spi->workqueue);
663 destroy_workqueue(mpc83xx_spi->workqueue); 642 destroy_workqueue(mpc8xxx_spi->workqueue);
664 spi_unregister_master(master); 643 spi_unregister_master(master);
665 644
666 free_irq(mpc83xx_spi->irq, mpc83xx_spi); 645 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
667 iounmap(mpc83xx_spi->base); 646 iounmap(mpc8xxx_spi->base);
668 647
669 return 0; 648 return 0;
670} 649}
671 650
672struct mpc83xx_spi_probe_info { 651struct mpc8xxx_spi_probe_info {
673 struct fsl_spi_platform_data pdata; 652 struct fsl_spi_platform_data pdata;
674 int *gpios; 653 int *gpios;
675 bool *alow_flags; 654 bool *alow_flags;
676}; 655};
677 656
678static struct mpc83xx_spi_probe_info * 657static struct mpc8xxx_spi_probe_info *
679to_of_pinfo(struct fsl_spi_platform_data *pdata) 658to_of_pinfo(struct fsl_spi_platform_data *pdata)
680{ 659{
681 return container_of(pdata, struct mpc83xx_spi_probe_info, pdata); 660 return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
682} 661}
683 662
684static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on) 663static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
685{ 664{
686 struct device *dev = spi->dev.parent; 665 struct device *dev = spi->dev.parent;
687 struct mpc83xx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); 666 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
688 u16 cs = spi->chip_select; 667 u16 cs = spi->chip_select;
689 int gpio = pinfo->gpios[cs]; 668 int gpio = pinfo->gpios[cs];
690 bool alow = pinfo->alow_flags[cs]; 669 bool alow = pinfo->alow_flags[cs];
@@ -692,11 +671,11 @@ static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on)
692 gpio_set_value(gpio, on ^ alow); 671 gpio_set_value(gpio, on ^ alow);
693} 672}
694 673
695static int of_mpc83xx_spi_get_chipselects(struct device *dev) 674static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
696{ 675{
697 struct device_node *np = dev_archdata_get_node(&dev->archdata); 676 struct device_node *np = dev_archdata_get_node(&dev->archdata);
698 struct fsl_spi_platform_data *pdata = dev->platform_data; 677 struct fsl_spi_platform_data *pdata = dev->platform_data;
699 struct mpc83xx_spi_probe_info *pinfo = to_of_pinfo(pdata); 678 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
700 unsigned int ngpios; 679 unsigned int ngpios;
701 int i = 0; 680 int i = 0;
702 int ret; 681 int ret;
@@ -752,7 +731,7 @@ static int of_mpc83xx_spi_get_chipselects(struct device *dev)
752 } 731 }
753 732
754 pdata->max_chipselect = ngpios; 733 pdata->max_chipselect = ngpios;
755 pdata->cs_control = mpc83xx_spi_cs_control; 734 pdata->cs_control = mpc8xxx_spi_cs_control;
756 735
757 return 0; 736 return 0;
758 737
@@ -771,10 +750,10 @@ err_alloc_flags:
771 return ret; 750 return ret;
772} 751}
773 752
774static int of_mpc83xx_spi_free_chipselects(struct device *dev) 753static int of_mpc8xxx_spi_free_chipselects(struct device *dev)
775{ 754{
776 struct fsl_spi_platform_data *pdata = dev->platform_data; 755 struct fsl_spi_platform_data *pdata = dev->platform_data;
777 struct mpc83xx_spi_probe_info *pinfo = to_of_pinfo(pdata); 756 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
778 int i; 757 int i;
779 758
780 if (!pinfo->gpios) 759 if (!pinfo->gpios)
@@ -790,12 +769,12 @@ static int of_mpc83xx_spi_free_chipselects(struct device *dev)
790 return 0; 769 return 0;
791} 770}
792 771
793static int __devinit of_mpc83xx_spi_probe(struct of_device *ofdev, 772static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
794 const struct of_device_id *ofid) 773 const struct of_device_id *ofid)
795{ 774{
796 struct device *dev = &ofdev->dev; 775 struct device *dev = &ofdev->dev;
797 struct device_node *np = ofdev->node; 776 struct device_node *np = ofdev->node;
798 struct mpc83xx_spi_probe_info *pinfo; 777 struct mpc8xxx_spi_probe_info *pinfo;
799 struct fsl_spi_platform_data *pdata; 778 struct fsl_spi_platform_data *pdata;
800 struct spi_master *master; 779 struct spi_master *master;
801 struct resource mem; 780 struct resource mem;
@@ -827,7 +806,7 @@ static int __devinit of_mpc83xx_spi_probe(struct of_device *ofdev,
827 if (prop && !strcmp(prop, "cpu-qe")) 806 if (prop && !strcmp(prop, "cpu-qe"))
828 pdata->qe_mode = 1; 807 pdata->qe_mode = 1;
829 808
830 ret = of_mpc83xx_spi_get_chipselects(dev); 809 ret = of_mpc8xxx_spi_get_chipselects(dev);
831 if (ret) 810 if (ret)
832 goto err; 811 goto err;
833 812
@@ -841,7 +820,7 @@ static int __devinit of_mpc83xx_spi_probe(struct of_device *ofdev,
841 goto err; 820 goto err;
842 } 821 }
843 822
844 master = mpc83xx_spi_probe(dev, &mem, irq.start); 823 master = mpc8xxx_spi_probe(dev, &mem, irq.start);
845 if (IS_ERR(master)) { 824 if (IS_ERR(master)) {
846 ret = PTR_ERR(master); 825 ret = PTR_ERR(master);
847 goto err; 826 goto err;
@@ -852,34 +831,34 @@ static int __devinit of_mpc83xx_spi_probe(struct of_device *ofdev,
852 return 0; 831 return 0;
853 832
854err: 833err:
855 of_mpc83xx_spi_free_chipselects(dev); 834 of_mpc8xxx_spi_free_chipselects(dev);
856err_clk: 835err_clk:
857 kfree(pinfo); 836 kfree(pinfo);
858 return ret; 837 return ret;
859} 838}
860 839
861static int __devexit of_mpc83xx_spi_remove(struct of_device *ofdev) 840static int __devexit of_mpc8xxx_spi_remove(struct of_device *ofdev)
862{ 841{
863 int ret; 842 int ret;
864 843
865 ret = mpc83xx_spi_remove(&ofdev->dev); 844 ret = mpc8xxx_spi_remove(&ofdev->dev);
866 if (ret) 845 if (ret)
867 return ret; 846 return ret;
868 of_mpc83xx_spi_free_chipselects(&ofdev->dev); 847 of_mpc8xxx_spi_free_chipselects(&ofdev->dev);
869 return 0; 848 return 0;
870} 849}
871 850
872static const struct of_device_id of_mpc83xx_spi_match[] = { 851static const struct of_device_id of_mpc8xxx_spi_match[] = {
873 { .compatible = "fsl,spi" }, 852 { .compatible = "fsl,spi" },
874 {}, 853 {},
875}; 854};
876MODULE_DEVICE_TABLE(of, of_mpc83xx_spi_match); 855MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match);
877 856
878static struct of_platform_driver of_mpc83xx_spi_driver = { 857static struct of_platform_driver of_mpc8xxx_spi_driver = {
879 .name = "mpc83xx_spi", 858 .name = "mpc8xxx_spi",
880 .match_table = of_mpc83xx_spi_match, 859 .match_table = of_mpc8xxx_spi_match,
881 .probe = of_mpc83xx_spi_probe, 860 .probe = of_mpc8xxx_spi_probe,
882 .remove = __devexit_p(of_mpc83xx_spi_remove), 861 .remove = __devexit_p(of_mpc8xxx_spi_remove),
883}; 862};
884 863
885#ifdef CONFIG_MPC832x_RDB 864#ifdef CONFIG_MPC832x_RDB
@@ -890,7 +869,7 @@ static struct of_platform_driver of_mpc83xx_spi_driver = {
890 * tree can work with OpenFirmware driver. But for now we support old trees 869 * tree can work with OpenFirmware driver. But for now we support old trees
891 * as well. 870 * as well.
892 */ 871 */
893static int __devinit plat_mpc83xx_spi_probe(struct platform_device *pdev) 872static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
894{ 873{
895 struct resource *mem; 874 struct resource *mem;
896 unsigned int irq; 875 unsigned int irq;
@@ -907,23 +886,23 @@ static int __devinit plat_mpc83xx_spi_probe(struct platform_device *pdev)
907 if (!irq) 886 if (!irq)
908 return -EINVAL; 887 return -EINVAL;
909 888
910 master = mpc83xx_spi_probe(&pdev->dev, mem, irq); 889 master = mpc8xxx_spi_probe(&pdev->dev, mem, irq);
911 if (IS_ERR(master)) 890 if (IS_ERR(master))
912 return PTR_ERR(master); 891 return PTR_ERR(master);
913 return 0; 892 return 0;
914} 893}
915 894
916static int __devexit plat_mpc83xx_spi_remove(struct platform_device *pdev) 895static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
917{ 896{
918 return mpc83xx_spi_remove(&pdev->dev); 897 return mpc8xxx_spi_remove(&pdev->dev);
919} 898}
920 899
921MODULE_ALIAS("platform:mpc83xx_spi"); 900MODULE_ALIAS("platform:mpc8xxx_spi");
922static struct platform_driver mpc83xx_spi_driver = { 901static struct platform_driver mpc8xxx_spi_driver = {
923 .probe = plat_mpc83xx_spi_probe, 902 .probe = plat_mpc8xxx_spi_probe,
924 .remove = __exit_p(plat_mpc83xx_spi_remove), 903 .remove = __exit_p(plat_mpc8xxx_spi_remove),
925 .driver = { 904 .driver = {
926 .name = "mpc83xx_spi", 905 .name = "mpc8xxx_spi",
927 .owner = THIS_MODULE, 906 .owner = THIS_MODULE,
928 }, 907 },
929}; 908};
@@ -932,35 +911,35 @@ static bool legacy_driver_failed;
932 911
933static void __init legacy_driver_register(void) 912static void __init legacy_driver_register(void)
934{ 913{
935 legacy_driver_failed = platform_driver_register(&mpc83xx_spi_driver); 914 legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver);
936} 915}
937 916
938static void __exit legacy_driver_unregister(void) 917static void __exit legacy_driver_unregister(void)
939{ 918{
940 if (legacy_driver_failed) 919 if (legacy_driver_failed)
941 return; 920 return;
942 platform_driver_unregister(&mpc83xx_spi_driver); 921 platform_driver_unregister(&mpc8xxx_spi_driver);
943} 922}
944#else 923#else
945static void __init legacy_driver_register(void) {} 924static void __init legacy_driver_register(void) {}
946static void __exit legacy_driver_unregister(void) {} 925static void __exit legacy_driver_unregister(void) {}
947#endif /* CONFIG_MPC832x_RDB */ 926#endif /* CONFIG_MPC832x_RDB */
948 927
949static int __init mpc83xx_spi_init(void) 928static int __init mpc8xxx_spi_init(void)
950{ 929{
951 legacy_driver_register(); 930 legacy_driver_register();
952 return of_register_platform_driver(&of_mpc83xx_spi_driver); 931 return of_register_platform_driver(&of_mpc8xxx_spi_driver);
953} 932}
954 933
955static void __exit mpc83xx_spi_exit(void) 934static void __exit mpc8xxx_spi_exit(void)
956{ 935{
957 of_unregister_platform_driver(&of_mpc83xx_spi_driver); 936 of_unregister_platform_driver(&of_mpc8xxx_spi_driver);
958 legacy_driver_unregister(); 937 legacy_driver_unregister();
959} 938}
960 939
961module_init(mpc83xx_spi_init); 940module_init(mpc8xxx_spi_init);
962module_exit(mpc83xx_spi_exit); 941module_exit(mpc8xxx_spi_exit);
963 942
964MODULE_AUTHOR("Kumar Gala"); 943MODULE_AUTHOR("Kumar Gala");
965MODULE_DESCRIPTION("Simple MPC83xx SPI Driver"); 944MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver");
966MODULE_LICENSE("GPL"); 945MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index b3ebc1d0f85f..e0d44af4745a 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -146,32 +146,16 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
146 return 0; 146 return 0;
147} 147}
148 148
149/* the spi->mode bits understood by this driver: */
150#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
151
152static int s3c24xx_spi_setup(struct spi_device *spi) 149static int s3c24xx_spi_setup(struct spi_device *spi)
153{ 150{
154 int ret; 151 int ret;
155 152
156 if (!spi->bits_per_word)
157 spi->bits_per_word = 8;
158
159 if (spi->mode & ~MODEBITS) {
160 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
161 spi->mode & ~MODEBITS);
162 return -EINVAL;
163 }
164
165 ret = s3c24xx_spi_setupxfer(spi, NULL); 153 ret = s3c24xx_spi_setupxfer(spi, NULL);
166 if (ret < 0) { 154 if (ret < 0) {
167 dev_err(&spi->dev, "setupxfer returned %d\n", ret); 155 dev_err(&spi->dev, "setupxfer returned %d\n", ret);
168 return ret; 156 return ret;
169 } 157 }
170 158
171 dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n",
172 __func__, spi->mode, spi->bits_per_word,
173 spi->max_speed_hz);
174
175 return 0; 159 return 0;
176} 160}
177 161
@@ -290,6 +274,9 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
290 274
291 /* setup the master state. */ 275 /* setup the master state. */
292 276
277 /* the spi->mode bits understood by this driver: */
278 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
279
293 master->num_chipselect = hw->pdata->num_cs; 280 master->num_chipselect = hw->pdata->num_cs;
294 master->bus_num = pdata->bus_num; 281 master->bus_num = pdata->bus_num;
295 282
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 29cbb065618a..96057de133ad 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -110,23 +110,17 @@ static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
110 ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */ 110 ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
111} 111}
112 112
113/* the spi->mode bits understood by this driver: */
114#define MODEBITS (SPI_CS_HIGH|SPI_CPOL|SPI_CPHA)
115
116static int txx9spi_setup(struct spi_device *spi) 113static int txx9spi_setup(struct spi_device *spi)
117{ 114{
118 struct txx9spi *c = spi_master_get_devdata(spi->master); 115 struct txx9spi *c = spi_master_get_devdata(spi->master);
119 u8 bits_per_word; 116 u8 bits_per_word;
120 117
121 if (spi->mode & ~MODEBITS)
122 return -EINVAL;
123
124 if (!spi->max_speed_hz 118 if (!spi->max_speed_hz
125 || spi->max_speed_hz > c->max_speed_hz 119 || spi->max_speed_hz > c->max_speed_hz
126 || spi->max_speed_hz < c->min_speed_hz) 120 || spi->max_speed_hz < c->min_speed_hz)
127 return -EINVAL; 121 return -EINVAL;
128 122
129 bits_per_word = spi->bits_per_word ? : 8; 123 bits_per_word = spi->bits_per_word;
130 if (bits_per_word != 8 && bits_per_word != 16) 124 if (bits_per_word != 8 && bits_per_word != 16)
131 return -EINVAL; 125 return -EINVAL;
132 126
@@ -414,6 +408,9 @@ static int __init txx9spi_probe(struct platform_device *dev)
414 (unsigned long long)res->start, irq, 408 (unsigned long long)res->start, irq,
415 (c->baseclk + 500000) / 1000000); 409 (c->baseclk + 500000) / 1000000);
416 410
411 /* the spi->mode bits understood by this driver: */
412 master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
413
417 master->bus_num = dev->id; 414 master->bus_num = dev->id;
418 master->setup = txx9spi_setup; 415 master->setup = txx9spi_setup;
419 master->transfer = txx9spi_transfer; 416 master->transfer = txx9spi_transfer;
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 494d3f756e29..46b8c5c2f45e 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -158,9 +158,6 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
158 return 0; 158 return 0;
159} 159}
160 160
161/* the spi->mode bits understood by this driver: */
162#define MODEBITS (SPI_CPOL | SPI_CPHA)
163
164static int xilinx_spi_setup(struct spi_device *spi) 161static int xilinx_spi_setup(struct spi_device *spi)
165{ 162{
166 struct spi_bitbang *bitbang; 163 struct spi_bitbang *bitbang;
@@ -170,22 +167,10 @@ static int xilinx_spi_setup(struct spi_device *spi)
170 xspi = spi_master_get_devdata(spi->master); 167 xspi = spi_master_get_devdata(spi->master);
171 bitbang = &xspi->bitbang; 168 bitbang = &xspi->bitbang;
172 169
173 if (!spi->bits_per_word)
174 spi->bits_per_word = 8;
175
176 if (spi->mode & ~MODEBITS) {
177 dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
178 __func__, spi->mode & ~MODEBITS);
179 return -EINVAL;
180 }
181
182 retval = xilinx_spi_setup_transfer(spi, NULL); 170 retval = xilinx_spi_setup_transfer(spi, NULL);
183 if (retval < 0) 171 if (retval < 0)
184 return retval; 172 return retval;
185 173
186 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
187 __func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
188
189 return 0; 174 return 0;
190} 175}
191 176
@@ -333,6 +318,9 @@ static int __init xilinx_spi_of_probe(struct of_device *ofdev,
333 goto put_master; 318 goto put_master;
334 } 319 }
335 320
321 /* the spi->mode bits understood by this driver: */
322 master->mode_bits = SPI_CPOL | SPI_CPHA;
323
336 xspi = spi_master_get_devdata(master); 324 xspi = spi_master_get_devdata(master);
337 xspi->bitbang.master = spi_master_get(master); 325 xspi->bitbang.master = spi_master_get(master);
338 xspi->bitbang.chipselect = xilinx_spi_chipselect; 326 xspi->bitbang.chipselect = xilinx_spi_chipselect;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 932ffdbf86d9..d6d65ef85f54 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1122,12 +1122,14 @@ config FB_INTEL
1122 select FB_CFB_FILLRECT 1122 select FB_CFB_FILLRECT
1123 select FB_CFB_COPYAREA 1123 select FB_CFB_COPYAREA
1124 select FB_CFB_IMAGEBLIT 1124 select FB_CFB_IMAGEBLIT
1125 select FB_BOOT_VESA_SUPPORT 1125 select FB_BOOT_VESA_SUPPORT if FB_INTEL = y
1126 help 1126 help
1127 This driver supports the on-board graphics built in to the Intel 1127 This driver supports the on-board graphics built in to the Intel
1128 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets. 1128 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
1129 Say Y if you have and plan to use such a board. 1129 Say Y if you have and plan to use such a board.
1130 1130
1131 To make FB_INTELFB=Y work you need to say AGP_INTEL=y too.
1132
1131 To compile this driver as a module, choose M here: the 1133 To compile this driver as a module, choose M here: the
1132 module will be called intelfb. 1134 module will be called intelfb.
1133 1135
@@ -1460,7 +1462,7 @@ config FB_SIS
1460 select FB_CFB_FILLRECT 1462 select FB_CFB_FILLRECT
1461 select FB_CFB_COPYAREA 1463 select FB_CFB_COPYAREA
1462 select FB_CFB_IMAGEBLIT 1464 select FB_CFB_IMAGEBLIT
1463 select FB_BOOT_VESA_SUPPORT 1465 select FB_BOOT_VESA_SUPPORT if FB_SIS = y
1464 help 1466 help
1465 This is the frame buffer device driver for the SiS 300, 315, 330 1467 This is the frame buffer device driver for the SiS 300, 315, 330
1466 and 340 series as well as XGI V3XT, V5, V8, Z7 graphics chipsets. 1468 and 340 series as well as XGI V3XT, V5, V8, Z7 graphics chipsets.
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index a411702413d6..6f8866d6a905 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -74,6 +74,9 @@ static int __init w1_gpio_probe(struct platform_device *pdev)
74 if (err) 74 if (err)
75 goto free_gpio; 75 goto free_gpio;
76 76
77 if (pdata->enable_external_pullup)
78 pdata->enable_external_pullup(1);
79
77 platform_set_drvdata(pdev, master); 80 platform_set_drvdata(pdev, master);
78 81
79 return 0; 82 return 0;
@@ -91,6 +94,9 @@ static int __exit w1_gpio_remove(struct platform_device *pdev)
91 struct w1_bus_master *master = platform_get_drvdata(pdev); 94 struct w1_bus_master *master = platform_get_drvdata(pdev);
92 struct w1_gpio_platform_data *pdata = pdev->dev.platform_data; 95 struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
93 96
97 if (pdata->enable_external_pullup)
98 pdata->enable_external_pullup(0);
99
94 w1_remove_master_device(master); 100 w1_remove_master_device(master);
95 gpio_free(pdata->pin); 101 gpio_free(pdata->pin);
96 kfree(master); 102 kfree(master);
@@ -98,12 +104,41 @@ static int __exit w1_gpio_remove(struct platform_device *pdev)
98 return 0; 104 return 0;
99} 105}
100 106
107#ifdef CONFIG_PM
108
109static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
110{
111 struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
112
113 if (pdata->enable_external_pullup)
114 pdata->enable_external_pullup(0);
115
116 return 0;
117}
118
119static int w1_gpio_resume(struct platform_device *pdev)
120{
121 struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
122
123 if (pdata->enable_external_pullup)
124 pdata->enable_external_pullup(1);
125
126 return 0;
127}
128
129#else
130#define w1_gpio_suspend NULL
131#define w1_gpio_resume NULL
132#endif
133
101static struct platform_driver w1_gpio_driver = { 134static struct platform_driver w1_gpio_driver = {
102 .driver = { 135 .driver = {
103 .name = "w1-gpio", 136 .name = "w1-gpio",
104 .owner = THIS_MODULE, 137 .owner = THIS_MODULE,
105 }, 138 },
106 .remove = __exit_p(w1_gpio_remove), 139 .remove = __exit_p(w1_gpio_remove),
140 .suspend = w1_gpio_suspend,
141 .resume = w1_gpio_resume,
107}; 142};
108 143
109static int __init w1_gpio_init(void) 144static int __init w1_gpio_init(void)
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 90f98df5f106..f90afdb1b255 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -322,7 +322,8 @@ static int wdt_notify_sys(struct notifier_block *this,
322 * watchdog on reboot with no heartbeat 322 * watchdog on reboot with no heartbeat
323 */ 323 */
324 wdt_change(WDT_ENABLE); 324 wdt_change(WDT_ENABLE);
325 printk(KERN_INFO PFX "Watchdog timer is now enabled with no heartbeat - should reboot in ~1 second.\n"); 325 printk(KERN_INFO PFX "Watchdog timer is now enabled "
326 "with no heartbeat - should reboot in ~1 second.\n");
326 } 327 }
327 return NOTIFY_DONE; 328 return NOTIFY_DONE;
328} 329}
@@ -374,12 +375,17 @@ static int __init alim7101_wdt_init(void)
374 pci_dev_put(ali1543_south); 375 pci_dev_put(ali1543_south);
375 if ((tmp & 0x1e) == 0x00) { 376 if ((tmp & 0x1e) == 0x00) {
376 if (!use_gpio) { 377 if (!use_gpio) {
377 printk(KERN_INFO PFX "Detected old alim7101 revision 'a1d'. If this is a cobalt board, set the 'use_gpio' module parameter.\n"); 378 printk(KERN_INFO PFX
379 "Detected old alim7101 revision 'a1d'. "
380 "If this is a cobalt board, set the 'use_gpio' "
381 "module parameter.\n");
378 goto err_out; 382 goto err_out;
379 } 383 }
380 nowayout = 1; 384 nowayout = 1;
381 } else if ((tmp & 0x1e) != 0x12 && (tmp & 0x1e) != 0x00) { 385 } else if ((tmp & 0x1e) != 0x12 && (tmp & 0x1e) != 0x00) {
382 printk(KERN_INFO PFX "ALi 1543 South-Bridge does not have the correct revision number (???1001?) - WDT not set\n"); 386 printk(KERN_INFO PFX
387 "ALi 1543 South-Bridge does not have the correct "
388 "revision number (???1001?) - WDT not set\n");
383 goto err_out; 389 goto err_out;
384 } 390 }
385 391
@@ -409,7 +415,8 @@ static int __init alim7101_wdt_init(void)
409 if (nowayout) 415 if (nowayout)
410 __module_get(THIS_MODULE); 416 __module_get(THIS_MODULE);
411 417
412 printk(KERN_INFO PFX "WDT driver for ALi M7101 initialised. timeout=%d sec (nowayout=%d)\n", 418 printk(KERN_INFO PFX "WDT driver for ALi M7101 initialised. "
419 "timeout=%d sec (nowayout=%d)\n",
413 timeout, nowayout); 420 timeout, nowayout);
414 return 0; 421 return 0;
415 422
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 55dcbfe2bb72..3fe9742c23ca 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -246,7 +246,8 @@ static long ar7_wdt_ioctl(struct file *file,
246 static struct watchdog_info ident = { 246 static struct watchdog_info ident = {
247 .identity = LONGNAME, 247 .identity = LONGNAME,
248 .firmware_version = 1, 248 .firmware_version = 1,
249 .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING), 249 .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
250 WDIOF_MAGICCLOSE),
250 }; 251 };
251 int new_margin; 252 int new_margin;
252 253
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 29e52c237a3b..b185dafe1494 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -268,7 +268,8 @@ static int __init at91_wdt_init(void)
268 if not reset to the default */ 268 if not reset to the default */
269 if (at91_wdt_settimeout(wdt_time)) { 269 if (at91_wdt_settimeout(wdt_time)) {
270 at91_wdt_settimeout(WDT_DEFAULT_TIME); 270 at91_wdt_settimeout(WDT_DEFAULT_TIME);
271 pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time); 271 pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256"
272 ", using %d\n", wdt_time);
272 } 273 }
273 274
274 return platform_driver_register(&at91wdt_driver); 275 return platform_driver_register(&at91wdt_driver);
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 435b0573fb0a..eac26021e8da 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -156,7 +156,8 @@ static int at91_wdt_settimeout(unsigned int timeout)
156 156
157static const struct watchdog_info at91_wdt_info = { 157static const struct watchdog_info at91_wdt_info = {
158 .identity = DRV_NAME, 158 .identity = DRV_NAME,
159 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 159 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
160 WDIOF_MAGICCLOSE,
160}; 161};
161 162
162/* 163/*
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 067a57cb3f82..c7b3f9df2317 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -27,10 +27,15 @@
27#include <linux/uaccess.h> 27#include <linux/uaccess.h>
28#include <asm/blackfin.h> 28#include <asm/blackfin.h>
29 29
30#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) 30#define stamp(fmt, args...) \
31 pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
31#define stampit() stamp("here i am") 32#define stampit() stamp("here i am")
32#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) 33#define pr_devinit(fmt, args...) \
33#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); }) 34 ({ static const __devinitconst char __fmt[] = fmt; \
35 printk(__fmt, ## args); })
36#define pr_init(fmt, args...) \
37 ({ static const __initconst char __fmt[] = fmt; \
38 printk(__fmt, ## args); })
34 39
35#define WATCHDOG_NAME "bfin-wdt" 40#define WATCHDOG_NAME "bfin-wdt"
36#define PFX WATCHDOG_NAME ": " 41#define PFX WATCHDOG_NAME ": "
@@ -476,7 +481,8 @@ static int __init bfin_wdt_init(void)
476 return ret; 481 return ret;
477 } 482 }
478 483
479 bfin_wdt_device = platform_device_register_simple(WATCHDOG_NAME, -1, NULL, 0); 484 bfin_wdt_device = platform_device_register_simple(WATCHDOG_NAME,
485 -1, NULL, 0);
480 if (IS_ERR(bfin_wdt_device)) { 486 if (IS_ERR(bfin_wdt_device)) {
481 pr_init(KERN_ERR PFX "unable to register device\n"); 487 pr_init(KERN_ERR PFX "unable to register device\n");
482 platform_driver_unregister(&bfin_wdt_driver); 488 platform_driver_unregister(&bfin_wdt_driver);
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 41070e4771a0..081f2955419e 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -154,9 +154,9 @@ static struct cpwd *cpwd_device;
154 154
155static struct timer_list cpwd_timer; 155static struct timer_list cpwd_timer;
156 156
157static int wd0_timeout = 0; 157static int wd0_timeout;
158static int wd1_timeout = 0; 158static int wd1_timeout;
159static int wd2_timeout = 0; 159static int wd2_timeout;
160 160
161module_param(wd0_timeout, int, 0); 161module_param(wd0_timeout, int, 0);
162MODULE_PARM_DESC(wd0_timeout, "Default watchdog0 timeout in 1/10secs"); 162MODULE_PARM_DESC(wd0_timeout, "Default watchdog0 timeout in 1/10secs");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index c51d0b0ea0c4..83e22e7ea4a2 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -193,7 +193,7 @@ static struct miscdevice davinci_wdt_miscdev = {
193 .fops = &davinci_wdt_fops, 193 .fops = &davinci_wdt_fops,
194}; 194};
195 195
196static int davinci_wdt_probe(struct platform_device *pdev) 196static int __devinit davinci_wdt_probe(struct platform_device *pdev)
197{ 197{
198 int ret = 0, size; 198 int ret = 0, size;
199 struct resource *res; 199 struct resource *res;
@@ -237,7 +237,7 @@ static int davinci_wdt_probe(struct platform_device *pdev)
237 return ret; 237 return ret;
238} 238}
239 239
240static int davinci_wdt_remove(struct platform_device *pdev) 240static int __devexit davinci_wdt_remove(struct platform_device *pdev)
241{ 241{
242 misc_deregister(&davinci_wdt_miscdev); 242 misc_deregister(&davinci_wdt_miscdev);
243 if (wdt_mem) { 243 if (wdt_mem) {
@@ -254,7 +254,7 @@ static struct platform_driver platform_wdt_driver = {
254 .owner = THIS_MODULE, 254 .owner = THIS_MODULE,
255 }, 255 },
256 .probe = davinci_wdt_probe, 256 .probe = davinci_wdt_probe,
257 .remove = davinci_wdt_remove, 257 .remove = __devexit_p(davinci_wdt_remove),
258}; 258};
259 259
260static int __init davinci_wdt_init(void) 260static int __init davinci_wdt_init(void)
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 3137361ccbfe..c0b9169ba5d5 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -19,6 +19,7 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/nmi.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/miscdevice.h> 24#include <linux/miscdevice.h>
24#include <linux/mm.h> 25#include <linux/mm.h>
@@ -47,7 +48,7 @@
47#define PCI_BIOS32_PARAGRAPH_LEN 16 48#define PCI_BIOS32_PARAGRAPH_LEN 16
48#define PCI_ROM_BASE1 0x000F0000 49#define PCI_ROM_BASE1 0x000F0000
49#define ROM_SIZE 0x10000 50#define ROM_SIZE 0x10000
50#define HPWDT_VERSION "1.01" 51#define HPWDT_VERSION "1.1.1"
51 52
52struct bios32_service_dir { 53struct bios32_service_dir {
53 u32 signature; 54 u32 signature;
@@ -119,6 +120,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
119static char expect_release; 120static char expect_release;
120static unsigned long hpwdt_is_open; 121static unsigned long hpwdt_is_open;
121static unsigned int allow_kdump; 122static unsigned int allow_kdump;
123static int hpwdt_nmi_sourcing;
122 124
123static void __iomem *pci_mem_addr; /* the PCI-memory address */ 125static void __iomem *pci_mem_addr; /* the PCI-memory address */
124static unsigned long __iomem *hpwdt_timer_reg; 126static unsigned long __iomem *hpwdt_timer_reg;
@@ -468,21 +470,22 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
468 if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI) 470 if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
469 return NOTIFY_OK; 471 return NOTIFY_OK;
470 472
471 spin_lock_irqsave(&rom_lock, rom_pl); 473 if (hpwdt_nmi_sourcing) {
472 if (!die_nmi_called) 474 spin_lock_irqsave(&rom_lock, rom_pl);
473 asminline_call(&cmn_regs, cru_rom_addr); 475 if (!die_nmi_called)
474 die_nmi_called = 1; 476 asminline_call(&cmn_regs, cru_rom_addr);
475 spin_unlock_irqrestore(&rom_lock, rom_pl); 477 die_nmi_called = 1;
476 if (cmn_regs.u1.ral == 0) { 478 spin_unlock_irqrestore(&rom_lock, rom_pl);
477 printk(KERN_WARNING "hpwdt: An NMI occurred, " 479 if (cmn_regs.u1.ral == 0) {
478 "but unable to determine source.\n"); 480 printk(KERN_WARNING "hpwdt: An NMI occurred, "
479 } else { 481 "but unable to determine source.\n");
480 if (allow_kdump) 482 } else {
481 hpwdt_stop(); 483 if (allow_kdump)
482 panic("An NMI occurred, please see the Integrated " 484 hpwdt_stop();
483 "Management Log for details.\n"); 485 panic("An NMI occurred, please see the Integrated "
486 "Management Log for details.\n");
487 }
484 } 488 }
485
486 return NOTIFY_OK; 489 return NOTIFY_OK;
487} 490}
488 491
@@ -627,12 +630,38 @@ static struct notifier_block die_notifier = {
627 * Init & Exit 630 * Init & Exit
628 */ 631 */
629 632
633#ifdef ARCH_HAS_NMI_WATCHDOG
634static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
635{
636 /*
637 * If nmi_watchdog is turned off then we can turn on
638 * our nmi sourcing capability.
639 */
640 if (!nmi_watchdog_active())
641 hpwdt_nmi_sourcing = 1;
642 else
643 dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this "
644 "functionality you must reboot with nmi_watchdog=0.\n");
645}
646#else
647static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
648{
649 dev_warn(&dev->dev, "NMI sourcing is disabled. "
650 "Your kernel does not support a NMI Watchdog.\n");
651}
652#endif
653
630static int __devinit hpwdt_init_one(struct pci_dev *dev, 654static int __devinit hpwdt_init_one(struct pci_dev *dev,
631 const struct pci_device_id *ent) 655 const struct pci_device_id *ent)
632{ 656{
633 int retval; 657 int retval;
634 658
635 /* 659 /*
660 * Check if we can do NMI sourcing or not
661 */
662 hpwdt_check_nmi_sourcing(dev);
663
664 /*
636 * First let's find out if we are on an iLO2 server. We will 665 * First let's find out if we are on an iLO2 server. We will
637 * not run on a legacy ASM box. 666 * not run on a legacy ASM box.
638 * So we only support the G5 ProLiant servers and higher. 667 * So we only support the G5 ProLiant servers and higher.
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index d3c0f6de5523..5133bca5ccbe 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -19,7 +19,7 @@
19 19
20/* Module and version information */ 20/* Module and version information */
21#define DRV_NAME "iTCO_vendor_support" 21#define DRV_NAME "iTCO_vendor_support"
22#define DRV_VERSION "1.03" 22#define DRV_VERSION "1.04"
23#define PFX DRV_NAME ": " 23#define PFX DRV_NAME ": "
24 24
25/* Includes */ 25/* Includes */
@@ -35,20 +35,23 @@
35#include "iTCO_vendor.h" 35#include "iTCO_vendor.h"
36 36
37/* iTCO defines */ 37/* iTCO defines */
38#define SMI_EN acpibase + 0x30 /* SMI Control and Enable Register */ 38#define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */
39#define TCOBASE acpibase + 0x60 /* TCO base address */ 39#define TCOBASE (acpibase + 0x60) /* TCO base address */
40#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */ 40#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
41 41
42/* List of vendor support modes */ 42/* List of vendor support modes */
43/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */ 43/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
44#define SUPERMICRO_OLD_BOARD 1 44#define SUPERMICRO_OLD_BOARD 1
45/* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */ 45/* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */
46#define SUPERMICRO_NEW_BOARD 2 46#define SUPERMICRO_NEW_BOARD 2
47/* Broken BIOS */
48#define BROKEN_BIOS 911
47 49
48static int vendorsupport; 50static int vendorsupport;
49module_param(vendorsupport, int, 0); 51module_param(vendorsupport, int, 0);
50MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=" 52MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
51 "0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+"); 53 "0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+, "
54 "911=Broken SMI BIOS");
52 55
53/* 56/*
54 * Vendor Specific Support 57 * Vendor Specific Support
@@ -243,25 +246,92 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
243} 246}
244 247
245/* 248/*
249 * Vendor Support: 911
250 * Board: Some Intel ICHx based motherboards
251 * iTCO chipset: ICH7+
252 *
253 * Some Intel motherboards have a broken BIOS implementation: i.e.
 254 * the SMI handler clears the TIMEOUT bit in the TCO1_STS register
 255 * and does not reload the timer. Thus the TCO watchdog does not reboot
256 * the system.
257 *
258 * These are the conclusions of Andriy Gapon <avg@icyb.net.ua> after
259 * debugging: the SMI handler is quite simple - it tests value in
260 * TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. If the bit is set
261 * the handler goes into an infinite loop, apparently to allow the
262 * second timeout and reboot. Otherwise it simply clears TIMEOUT bit
263 * in TCO1_STS and that's it.
264 * So the logic seems to be reversed, because it is hard to see how
265 * TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set
266 * (other than a transitional effect).
267 *
268 * The only fix found to get the motherboard(s) to reboot is to put
269 * the glb_smi_en bit to 0. This is a dirty hack that bypasses the
270 * broken code by disabling Global SMI.
271 *
272 * WARNING: globally disabling SMI could possibly lead to dramatic
273 * problems, especially on laptops! I.e. various ACPI things where
274 * SMI is used for communication between OS and firmware.
275 *
276 * Don't use this fix if you don't need to!!!
277 */
278
279static void broken_bios_start(unsigned long acpibase)
280{
281 unsigned long val32;
282
283 val32 = inl(SMI_EN);
284 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI#
285 Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
286 val32 &= 0xffffdffe;
287 outl(val32, SMI_EN);
288}
289
290static void broken_bios_stop(unsigned long acpibase)
291{
292 unsigned long val32;
293
294 val32 = inl(SMI_EN);
295 /* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI#
296 Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
297 val32 |= 0x00002001;
298 outl(val32, SMI_EN);
299}
300
301/*
246 * Generic Support Functions 302 * Generic Support Functions
247 */ 303 */
248 304
249void iTCO_vendor_pre_start(unsigned long acpibase, 305void iTCO_vendor_pre_start(unsigned long acpibase,
250 unsigned int heartbeat) 306 unsigned int heartbeat)
251{ 307{
252 if (vendorsupport == SUPERMICRO_OLD_BOARD) 308 switch (vendorsupport) {
309 case SUPERMICRO_OLD_BOARD:
253 supermicro_old_pre_start(acpibase); 310 supermicro_old_pre_start(acpibase);
254 else if (vendorsupport == SUPERMICRO_NEW_BOARD) 311 break;
312 case SUPERMICRO_NEW_BOARD:
255 supermicro_new_pre_start(heartbeat); 313 supermicro_new_pre_start(heartbeat);
314 break;
315 case BROKEN_BIOS:
316 broken_bios_start(acpibase);
317 break;
318 }
256} 319}
257EXPORT_SYMBOL(iTCO_vendor_pre_start); 320EXPORT_SYMBOL(iTCO_vendor_pre_start);
258 321
259void iTCO_vendor_pre_stop(unsigned long acpibase) 322void iTCO_vendor_pre_stop(unsigned long acpibase)
260{ 323{
261 if (vendorsupport == SUPERMICRO_OLD_BOARD) 324 switch (vendorsupport) {
325 case SUPERMICRO_OLD_BOARD:
262 supermicro_old_pre_stop(acpibase); 326 supermicro_old_pre_stop(acpibase);
263 else if (vendorsupport == SUPERMICRO_NEW_BOARD) 327 break;
328 case SUPERMICRO_NEW_BOARD:
264 supermicro_new_pre_stop(); 329 supermicro_new_pre_stop();
330 break;
331 case BROKEN_BIOS:
332 broken_bios_stop(acpibase);
333 break;
334 }
265} 335}
266EXPORT_SYMBOL(iTCO_vendor_pre_stop); 336EXPORT_SYMBOL(iTCO_vendor_pre_stop);
267 337
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 648250b998c4..6a51edde6ea7 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -236,19 +236,19 @@ MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
236 236
237/* Address definitions for the TCO */ 237/* Address definitions for the TCO */
238/* TCO base address */ 238/* TCO base address */
239#define TCOBASE iTCO_wdt_private.ACPIBASE + 0x60 239#define TCOBASE (iTCO_wdt_private.ACPIBASE + 0x60)
240/* SMI Control and Enable Register */ 240/* SMI Control and Enable Register */
241#define SMI_EN iTCO_wdt_private.ACPIBASE + 0x30 241#define SMI_EN (iTCO_wdt_private.ACPIBASE + 0x30)
242 242
243#define TCO_RLD TCOBASE + 0x00 /* TCO Timer Reload and Curr. Value */ 243#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */
244#define TCOv1_TMR TCOBASE + 0x01 /* TCOv1 Timer Initial Value */ 244#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */
245#define TCO_DAT_IN TCOBASE + 0x02 /* TCO Data In Register */ 245#define TCO_DAT_IN (TCOBASE + 0x02) /* TCO Data In Register */
246#define TCO_DAT_OUT TCOBASE + 0x03 /* TCO Data Out Register */ 246#define TCO_DAT_OUT (TCOBASE + 0x03) /* TCO Data Out Register */
247#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */ 247#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
248#define TCO2_STS TCOBASE + 0x06 /* TCO2 Status Register */ 248#define TCO2_STS (TCOBASE + 0x06) /* TCO2 Status Register */
249#define TCO1_CNT TCOBASE + 0x08 /* TCO1 Control Register */ 249#define TCO1_CNT (TCOBASE + 0x08) /* TCO1 Control Register */
250#define TCO2_CNT TCOBASE + 0x0a /* TCO2 Control Register */ 250#define TCO2_CNT (TCOBASE + 0x0a) /* TCO2 Control Register */
251#define TCOv2_TMR TCOBASE + 0x12 /* TCOv2 Timer Initial Value */ 251#define TCOv2_TMR (TCOBASE + 0x12) /* TCOv2 Timer Initial Value */
252 252
253/* internal variables */ 253/* internal variables */
254static unsigned long is_active; 254static unsigned long is_active;
@@ -666,6 +666,11 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
666 GCS = RCBA + ICH6_GCS(0x3410). */ 666 GCS = RCBA + ICH6_GCS(0x3410). */
667 if (iTCO_wdt_private.iTCO_version == 2) { 667 if (iTCO_wdt_private.iTCO_version == 2) {
668 pci_read_config_dword(pdev, 0xf0, &base_address); 668 pci_read_config_dword(pdev, 0xf0, &base_address);
669 if ((base_address & 1) == 0) {
 670 printk(KERN_ERR PFX "RCBA is disabled by hardware\n");
671 ret = -ENODEV;
672 goto out;
673 }
669 RCBA = base_address & 0xffffc000; 674 RCBA = base_address & 0xffffc000;
670 iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4); 675 iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4);
671 } 676 }
@@ -675,7 +680,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
675 printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, " 680 printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, "
676 "reboot disabled by hardware\n"); 681 "reboot disabled by hardware\n");
677 ret = -ENODEV; /* Cannot reset NO_REBOOT bit */ 682 ret = -ENODEV; /* Cannot reset NO_REBOOT bit */
678 goto out; 683 goto out_unmap;
679 } 684 }
680 685
681 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ 686 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
@@ -686,7 +691,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
686 printk(KERN_ERR PFX 691 printk(KERN_ERR PFX
687 "I/O address 0x%04lx already in use\n", SMI_EN); 692 "I/O address 0x%04lx already in use\n", SMI_EN);
688 ret = -EIO; 693 ret = -EIO;
689 goto out; 694 goto out_unmap;
690 } 695 }
691 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */ 696 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
692 val32 = inl(SMI_EN); 697 val32 = inl(SMI_EN);
@@ -742,9 +747,10 @@ unreg_region:
742 release_region(TCOBASE, 0x20); 747 release_region(TCOBASE, 0x20);
743unreg_smi_en: 748unreg_smi_en:
744 release_region(SMI_EN, 4); 749 release_region(SMI_EN, 4);
745out: 750out_unmap:
746 if (iTCO_wdt_private.iTCO_version == 2) 751 if (iTCO_wdt_private.iTCO_version == 2)
747 iounmap(iTCO_wdt_private.gcs); 752 iounmap(iTCO_wdt_private.gcs);
753out:
748 pci_dev_put(iTCO_wdt_private.pdev); 754 pci_dev_put(iTCO_wdt_private.pdev);
749 iTCO_wdt_private.ACPIBASE = 0; 755 iTCO_wdt_private.ACPIBASE = 0;
750 return ret; 756 return ret;
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 0f761db9a27c..bea8a124a559 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -83,7 +83,6 @@ static int indydog_open(struct inode *inode, struct file *file)
83 indydog_start(); 83 indydog_start();
84 indydog_ping(); 84 indydog_ping();
85 85
86 indydog_alive = 1;
87 printk(KERN_INFO "Started watchdog timer.\n"); 86 printk(KERN_INFO "Started watchdog timer.\n");
88 87
89 return nonseekable_open(inode, file); 88 return nonseekable_open(inode, file);
@@ -113,8 +112,7 @@ static long indydog_ioctl(struct file *file, unsigned int cmd,
113{ 112{
114 int options, retval = -EINVAL; 113 int options, retval = -EINVAL;
115 static struct watchdog_info ident = { 114 static struct watchdog_info ident = {
116 .options = WDIOF_KEEPALIVEPING | 115 .options = WDIOF_KEEPALIVEPING,
117 WDIOF_MAGICCLOSE,
118 .firmware_version = 0, 116 .firmware_version = 0,
119 .identity = "Hardware Watchdog for SGI IP22", 117 .identity = "Hardware Watchdog for SGI IP22",
120 }; 118 };
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 2270ee07c01b..daed48ded7fe 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -239,7 +239,8 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
239 static struct watchdog_info ident = { 239 static struct watchdog_info ident = {
240 .identity = "IT8712F Watchdog", 240 .identity = "IT8712F Watchdog",
241 .firmware_version = 1, 241 .firmware_version = 1,
242 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 242 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
243 WDIOF_MAGICCLOSE,
243 }; 244 };
244 int value; 245 int value;
245 246
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index ae3832110acb..00b03eb43bf0 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -293,8 +293,8 @@ static int __init ks8695_wdt_init(void)
293 if not reset to the default */ 293 if not reset to the default */
294 if (ks8695_wdt_settimeout(wdt_time)) { 294 if (ks8695_wdt_settimeout(wdt_time)) {
295 ks8695_wdt_settimeout(WDT_DEFAULT_TIME); 295 ks8695_wdt_settimeout(WDT_DEFAULT_TIME);
296 pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i, using %d\n", 296 pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i"
297 wdt_time, WDT_MAX_TIME); 297 ", using %d\n", wdt_time, WDT_MAX_TIME);
298 } 298 }
299 return platform_driver_register(&ks8695wdt_driver); 299 return platform_driver_register(&ks8695wdt_driver);
300} 300}
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 2dfc27559bf7..b6b3f59ab446 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -118,7 +118,8 @@ static struct watchdog_info zf_info = {
118 */ 118 */
119static int action; 119static int action;
120module_param(action, int, 0); 120module_param(action, int, 0);
121MODULE_PARM_DESC(action, "after watchdog resets, generate: 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI"); 121MODULE_PARM_DESC(action, "after watchdog resets, generate: "
122 "0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
122 123
123static void zf_ping(unsigned long data); 124static void zf_ping(unsigned long data);
124 125
@@ -142,7 +143,8 @@ static unsigned long next_heartbeat;
142#ifndef ZF_DEBUG 143#ifndef ZF_DEBUG
143# define dprintk(format, args...) 144# define dprintk(format, args...)
144#else 145#else
145# define dprintk(format, args...) printk(KERN_DEBUG PFX ":%s:%d: " format, __func__, __LINE__ , ## args) 146# define dprintk(format, args...) printk(KERN_DEBUG PFX
147 ":%s:%d: " format, __func__, __LINE__ , ## args)
146#endif 148#endif
147 149
148 150
@@ -340,7 +342,8 @@ static int zf_close(struct inode *inode, struct file *file)
340 zf_timer_off(); 342 zf_timer_off();
341 else { 343 else {
342 del_timer(&zf_timer); 344 del_timer(&zf_timer);
343 printk(KERN_ERR PFX ": device file closed unexpectedly. Will not stop the WDT!\n"); 345 printk(KERN_ERR PFX ": device file closed unexpectedly. "
346 "Will not stop the WDT!\n");
344 } 347 }
345 clear_bit(0, &zf_is_open); 348 clear_bit(0, &zf_is_open);
346 zf_expect_close = 0; 349 zf_expect_close = 0;
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 1512ab8b175b..83fa34b214b4 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -61,7 +61,9 @@ MODULE_PARM_DESC(nowayout,
61#define ONLY_TESTING 0 61#define ONLY_TESTING 0
62static int mpcore_noboot = ONLY_TESTING; 62static int mpcore_noboot = ONLY_TESTING;
63module_param(mpcore_noboot, int, 0); 63module_param(mpcore_noboot, int, 0);
64MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, set to 1 to ignore reboots, 0 to reboot (default=" __MODULE_STRING(ONLY_TESTING) ")"); 64MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, "
65 "set to 1 to ignore reboots, 0 to reboot (default="
66 __MODULE_STRING(ONLY_TESTING) ")");
65 67
66/* 68/*
67 * This is the interrupt handler. Note that we only use this 69 * This is the interrupt handler. Note that we only use this
@@ -416,7 +418,8 @@ static struct platform_driver mpcore_wdt_driver = {
416 }, 418 },
417}; 419};
418 420
419static char banner[] __initdata = KERN_INFO "MPcore Watchdog Timer: 0.1. mpcore_noboot=%d mpcore_margin=%d sec (nowayout= %d)\n"; 421static char banner[] __initdata = KERN_INFO "MPcore Watchdog Timer: 0.1. "
422 "mpcore_noboot=%d mpcore_margin=%d sec (nowayout= %d)\n";
420 423
421static int __init mpcore_wdt_init(void) 424static int __init mpcore_wdt_init(void)
422{ 425{
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 539b6f6ba7f1..08e8a6ab74e1 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -206,7 +206,7 @@ static struct miscdevice mtx1_wdt_misc = {
206}; 206};
207 207
208 208
209static int mtx1_wdt_probe(struct platform_device *pdev) 209static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
210{ 210{
211 int ret; 211 int ret;
212 212
@@ -229,7 +229,7 @@ static int mtx1_wdt_probe(struct platform_device *pdev)
229 return 0; 229 return 0;
230} 230}
231 231
232static int mtx1_wdt_remove(struct platform_device *pdev) 232static int __devexit mtx1_wdt_remove(struct platform_device *pdev)
233{ 233{
234 /* FIXME: do we need to lock this test ? */ 234 /* FIXME: do we need to lock this test ? */
235 if (mtx1_wdt_device.queue) { 235 if (mtx1_wdt_device.queue) {
@@ -242,7 +242,7 @@ static int mtx1_wdt_remove(struct platform_device *pdev)
242 242
243static struct platform_driver mtx1_wdt = { 243static struct platform_driver mtx1_wdt = {
244 .probe = mtx1_wdt_probe, 244 .probe = mtx1_wdt_probe,
245 .remove = mtx1_wdt_remove, 245 .remove = __devexit_p(mtx1_wdt_remove),
246 .driver.name = "mtx1-wdt", 246 .driver.name = "mtx1-wdt",
247 .driver.owner = THIS_MODULE, 247 .driver.owner = THIS_MODULE,
248}; 248};
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 64135195f827..f24d04132eda 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -246,7 +246,7 @@ static struct miscdevice pnx4008_wdt_miscdev = {
246 .fops = &pnx4008_wdt_fops, 246 .fops = &pnx4008_wdt_fops,
247}; 247};
248 248
249static int pnx4008_wdt_probe(struct platform_device *pdev) 249static int __devinit pnx4008_wdt_probe(struct platform_device *pdev)
250{ 250{
251 int ret = 0, size; 251 int ret = 0, size;
252 struct resource *res; 252 struct resource *res;
@@ -299,7 +299,7 @@ out:
299 return ret; 299 return ret;
300} 300}
301 301
302static int pnx4008_wdt_remove(struct platform_device *pdev) 302static int __devexit pnx4008_wdt_remove(struct platform_device *pdev)
303{ 303{
304 misc_deregister(&pnx4008_wdt_miscdev); 304 misc_deregister(&pnx4008_wdt_miscdev);
305 if (wdt_clk) { 305 if (wdt_clk) {
@@ -321,7 +321,7 @@ static struct platform_driver platform_wdt_driver = {
321 .owner = THIS_MODULE, 321 .owner = THIS_MODULE,
322 }, 322 },
323 .probe = pnx4008_wdt_probe, 323 .probe = pnx4008_wdt_probe,
324 .remove = pnx4008_wdt_remove, 324 .remove = __devexit_p(pnx4008_wdt_remove),
325}; 325};
326 326
327static int __init pnx4008_wdt_init(void) 327static int __init pnx4008_wdt_init(void)
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 36e221beedcd..4976bfd1fce6 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -245,7 +245,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
245 return 0; 245 return 0;
246} 246}
247 247
248static int rdc321x_wdt_remove(struct platform_device *pdev) 248static int __devexit rdc321x_wdt_remove(struct platform_device *pdev)
249{ 249{
250 if (rdc321x_wdt_device.queue) { 250 if (rdc321x_wdt_device.queue) {
251 rdc321x_wdt_device.queue = 0; 251 rdc321x_wdt_device.queue = 0;
@@ -259,7 +259,7 @@ static int rdc321x_wdt_remove(struct platform_device *pdev)
259 259
260static struct platform_driver rdc321x_wdt_driver = { 260static struct platform_driver rdc321x_wdt_driver = {
261 .probe = rdc321x_wdt_probe, 261 .probe = rdc321x_wdt_probe,
262 .remove = rdc321x_wdt_remove, 262 .remove = __devexit_p(rdc321x_wdt_remove),
263 .driver = { 263 .driver = {
264 .owner = THIS_MODULE, 264 .owner = THIS_MODULE,
265 .name = "rdc321x-wdt", 265 .name = "rdc321x-wdt",
diff --git a/drivers/watchdog/rm9k_wdt.c b/drivers/watchdog/rm9k_wdt.c
index cce1982a1b58..2e4442642262 100644
--- a/drivers/watchdog/rm9k_wdt.c
+++ b/drivers/watchdog/rm9k_wdt.c
@@ -345,8 +345,8 @@ static const struct resource *wdt_gpi_get_resource(struct platform_device *pdv,
345 return platform_get_resource_byname(pdv, type, buf); 345 return platform_get_resource_byname(pdv, type, buf);
346} 346}
347 347
348/* No hotplugging on the platform bus - use __init */ 348/* No hotplugging on the platform bus - use __devinit */
349static int __init wdt_gpi_probe(struct platform_device *pdv) 349static int __devinit wdt_gpi_probe(struct platform_device *pdv)
350{ 350{
351 int res; 351 int res;
352 const struct resource 352 const struct resource
@@ -373,7 +373,7 @@ static int __init wdt_gpi_probe(struct platform_device *pdv)
373 return res; 373 return res;
374} 374}
375 375
376static int __exit wdt_gpi_remove(struct platform_device *dev) 376static int __devexit wdt_gpi_remove(struct platform_device *dev)
377{ 377{
378 int res; 378 int res;
379 379
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index e31925ee8346..b57ac6b49147 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -68,15 +68,10 @@ MODULE_PARM_DESC(tmr_atboot,
68 __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT)); 68 __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT));
69MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" 69MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
70 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 70 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
71MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to reboot (default depends on ONLY_TESTING)"); 71MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
72 "0 to reboot (default depends on ONLY_TESTING)");
72MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)"); 73MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)");
73 74
74
75typedef enum close_state {
76 CLOSE_STATE_NOT,
77 CLOSE_STATE_ALLOW = 0x4021
78} close_state_t;
79
80static unsigned long open_lock; 75static unsigned long open_lock;
81static struct device *wdt_dev; /* platform device attached to */ 76static struct device *wdt_dev; /* platform device attached to */
82static struct resource *wdt_mem; 77static struct resource *wdt_mem;
@@ -84,7 +79,7 @@ static struct resource *wdt_irq;
84static struct clk *wdt_clock; 79static struct clk *wdt_clock;
85static void __iomem *wdt_base; 80static void __iomem *wdt_base;
86static unsigned int wdt_count; 81static unsigned int wdt_count;
87static close_state_t allow_close; 82static char expect_close;
88static DEFINE_SPINLOCK(wdt_lock); 83static DEFINE_SPINLOCK(wdt_lock);
89 84
90/* watchdog control routines */ 85/* watchdog control routines */
@@ -211,7 +206,7 @@ static int s3c2410wdt_open(struct inode *inode, struct file *file)
211 if (nowayout) 206 if (nowayout)
212 __module_get(THIS_MODULE); 207 __module_get(THIS_MODULE);
213 208
214 allow_close = CLOSE_STATE_NOT; 209 expect_close = 0;
215 210
216 /* start the timer */ 211 /* start the timer */
217 s3c2410wdt_start(); 212 s3c2410wdt_start();
@@ -225,13 +220,13 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
225 * Lock it in if it's a module and we set nowayout 220 * Lock it in if it's a module and we set nowayout
226 */ 221 */
227 222
228 if (allow_close == CLOSE_STATE_ALLOW) 223 if (expect_close == 42)
229 s3c2410wdt_stop(); 224 s3c2410wdt_stop();
230 else { 225 else {
231 dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n"); 226 dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n");
232 s3c2410wdt_keepalive(); 227 s3c2410wdt_keepalive();
233 } 228 }
234 allow_close = CLOSE_STATE_NOT; 229 expect_close = 0;
235 clear_bit(0, &open_lock); 230 clear_bit(0, &open_lock);
236 return 0; 231 return 0;
237} 232}
@@ -247,7 +242,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
247 size_t i; 242 size_t i;
248 243
249 /* In case it was set long ago */ 244 /* In case it was set long ago */
250 allow_close = CLOSE_STATE_NOT; 245 expect_close = 0;
251 246
252 for (i = 0; i != len; i++) { 247 for (i = 0; i != len; i++) {
253 char c; 248 char c;
@@ -255,7 +250,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
255 if (get_user(c, data + i)) 250 if (get_user(c, data + i))
256 return -EFAULT; 251 return -EFAULT;
257 if (c == 'V') 252 if (c == 'V')
258 allow_close = CLOSE_STATE_ALLOW; 253 expect_close = 42;
259 } 254 }
260 } 255 }
261 s3c2410wdt_keepalive(); 256 s3c2410wdt_keepalive();
@@ -263,7 +258,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
263 return len; 258 return len;
264} 259}
265 260
266#define OPTIONS WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE 261#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
267 262
268static const struct watchdog_info s3c2410_wdt_ident = { 263static const struct watchdog_info s3c2410_wdt_ident = {
269 .options = OPTIONS, 264 .options = OPTIONS,
@@ -331,7 +326,7 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
331} 326}
332/* device interface */ 327/* device interface */
333 328
334static int s3c2410wdt_probe(struct platform_device *pdev) 329static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
335{ 330{
336 struct resource *res; 331 struct resource *res;
337 struct device *dev; 332 struct device *dev;
@@ -404,7 +399,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
404 "tmr_margin value out of range, default %d used\n", 399 "tmr_margin value out of range, default %d used\n",
405 CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); 400 CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
406 else 401 else
407 dev_info(dev, "default timer value is out of range, cannot start\n"); 402 dev_info(dev, "default timer value is out of range, "
403 "cannot start\n");
408 } 404 }
409 405
410 ret = misc_register(&s3c2410wdt_miscdev); 406 ret = misc_register(&s3c2410wdt_miscdev);
@@ -453,7 +449,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
453 return ret; 449 return ret;
454} 450}
455 451
456static int s3c2410wdt_remove(struct platform_device *dev) 452static int __devexit s3c2410wdt_remove(struct platform_device *dev)
457{ 453{
458 release_resource(wdt_mem); 454 release_resource(wdt_mem);
459 kfree(wdt_mem); 455 kfree(wdt_mem);
@@ -515,7 +511,7 @@ static int s3c2410wdt_resume(struct platform_device *dev)
515 511
516static struct platform_driver s3c2410wdt_driver = { 512static struct platform_driver s3c2410wdt_driver = {
517 .probe = s3c2410wdt_probe, 513 .probe = s3c2410wdt_probe,
518 .remove = s3c2410wdt_remove, 514 .remove = __devexit_p(s3c2410wdt_remove),
519 .shutdown = s3c2410wdt_shutdown, 515 .shutdown = s3c2410wdt_shutdown,
520 .suspend = s3c2410wdt_suspend, 516 .suspend = s3c2410wdt_suspend,
521 .resume = s3c2410wdt_resume, 517 .resume = s3c2410wdt_resume,
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 38f5831c9291..9748eed73196 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -93,7 +93,7 @@ static int expect_close;
93 93
94static const struct watchdog_info ident = { 94static const struct watchdog_info ident = {
95 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | 95 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT |
96 WDIOF_KEEPALIVEPING, 96 WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
97 .identity = "SiByte Watchdog", 97 .identity = "SiByte Watchdog",
98}; 98};
99 99
@@ -269,9 +269,10 @@ irqreturn_t sbwdog_interrupt(int irq, void *addr)
269 * if it's the second watchdog timer, it's for those users 269 * if it's the second watchdog timer, it's for those users
270 */ 270 */
271 if (wd_cfg_reg == user_dog) 271 if (wd_cfg_reg == user_dog)
272 printk(KERN_CRIT 272 printk(KERN_CRIT "%s in danger of initiating system reset "
273 "%s in danger of initiating system reset in %ld.%01ld seconds\n", 273 "in %ld.%01ld seconds\n",
274 ident.identity, wd_init / 1000000, (wd_init / 100000) % 10); 274 ident.identity,
275 wd_init / 1000000, (wd_init / 100000) % 10);
275 else 276 else
276 cfg |= 1; 277 cfg |= 1;
277 278
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index d1c390c7155c..626d0e8e56c3 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -372,8 +372,9 @@ static int __init sbc60xxwdt_init(void)
372 wdt_miscdev.minor, rc); 372 wdt_miscdev.minor, rc);
373 goto err_out_reboot; 373 goto err_out_reboot;
374 } 374 }
375 printk(KERN_INFO PFX "WDT driver for 60XX single board computer initialised. timeout=%d sec (nowayout=%d)\n", 375 printk(KERN_INFO PFX
376 timeout, nowayout); 376 "WDT driver for 60XX single board computer initialised. "
377 "timeout=%d sec (nowayout=%d)\n", timeout, nowayout);
377 378
378 return 0; 379 return 0;
379 380
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index b6e6799ec45d..68e2e2d6f73d 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -280,8 +280,8 @@ static int sbc8360_close(struct inode *inode, struct file *file)
280 if (expect_close == 42) 280 if (expect_close == 42)
281 sbc8360_stop(); 281 sbc8360_stop();
282 else 282 else
283 printk(KERN_CRIT PFX 283 printk(KERN_CRIT PFX "SBC8360 device closed unexpectedly. "
284 "SBC8360 device closed unexpectedly. SBC8360 will not stop!\n"); 284 "SBC8360 will not stop!\n");
285 285
286 clear_bit(0, &sbc8360_is_open); 286 clear_bit(0, &sbc8360_is_open);
287 expect_close = 0; 287 expect_close = 0;
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index e467ddcf796a..28f1214457bd 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -107,8 +107,7 @@ static long epx_c3_ioctl(struct file *file, unsigned int cmd,
107 int options, retval = -EINVAL; 107 int options, retval = -EINVAL;
108 int __user *argp = (void __user *)arg; 108 int __user *argp = (void __user *)arg;
109 static const struct watchdog_info ident = { 109 static const struct watchdog_info ident = {
110 .options = WDIOF_KEEPALIVEPING | 110 .options = WDIOF_KEEPALIVEPING,
111 WDIOF_MAGICCLOSE,
112 .firmware_version = 0, 111 .firmware_version = 0,
113 .identity = "Winsystems EPX-C3 H/W Watchdog", 112 .identity = "Winsystems EPX-C3 H/W Watchdog",
114 }; 113 };
@@ -174,8 +173,8 @@ static struct notifier_block epx_c3_notifier = {
174 .notifier_call = epx_c3_notify_sys, 173 .notifier_call = epx_c3_notify_sys,
175}; 174};
176 175
177static const char banner[] __initdata = 176static const char banner[] __initdata = KERN_INFO PFX
178 KERN_INFO PFX "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n"; 177 "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
179 178
180static int __init watchdog_init(void) 179static int __init watchdog_init(void)
181{ 180{
@@ -219,6 +218,9 @@ module_init(watchdog_init);
219module_exit(watchdog_exit); 218module_exit(watchdog_exit);
220 219
221MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>"); 220MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>");
222MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. Note that there is no way to probe for this device -- so only use it if you are *sure* you are runnning on this specific SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!"); 221MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. "
222 "Note that there is no way to probe for this device -- "
223 "so only use it if you are *sure* you are runnning on this specific "
224 "SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!");
223MODULE_LICENSE("GPL"); 225MODULE_LICENSE("GPL");
224MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 226MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index 9e19a10a5bb9..e67b76c0526c 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -108,7 +108,9 @@ static int scx200_wdt_open(struct inode *inode, struct file *file)
108static int scx200_wdt_release(struct inode *inode, struct file *file) 108static int scx200_wdt_release(struct inode *inode, struct file *file)
109{ 109{
110 if (expect_close != 42) 110 if (expect_close != 42)
111 printk(KERN_WARNING NAME ": watchdog device closed unexpectedly, will not disable the watchdog timer\n"); 111 printk(KERN_WARNING NAME
112 ": watchdog device closed unexpectedly, "
113 "will not disable the watchdog timer\n");
112 else if (!nowayout) 114 else if (!nowayout)
113 scx200_wdt_disable(); 115 scx200_wdt_disable();
114 expect_close = 0; 116 expect_close = 0;
@@ -163,7 +165,8 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
163 static const struct watchdog_info ident = { 165 static const struct watchdog_info ident = {
164 .identity = "NatSemi SCx200 Watchdog", 166 .identity = "NatSemi SCx200 Watchdog",
165 .firmware_version = 1, 167 .firmware_version = 1,
166 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 168 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
169 WDIOF_MAGICCLOSE,
167 }; 170 };
168 int new_margin; 171 int new_margin;
169 172
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index cdc7138be301..a03f84e5ee1f 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -494,7 +494,9 @@ MODULE_LICENSE("GPL");
494MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 494MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
495 495
496module_param(clock_division_ratio, int, 0); 496module_param(clock_division_ratio, int, 0);
497MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")"); 497MODULE_PARM_DESC(clock_division_ratio,
498 "Clock division ratio. Valid ranges are from 0x5 (1.31ms) "
499 "to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")");
498 500
499module_param(heartbeat, int, 0); 501module_param(heartbeat, int, 0);
500MODULE_PARM_DESC(heartbeat, 502MODULE_PARM_DESC(heartbeat,
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index ebcc9cea5e99..833f49f43d43 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -71,7 +71,9 @@ static int soft_noboot = 0;
71#endif /* ONLY_TESTING */ 71#endif /* ONLY_TESTING */
72 72
73module_param(soft_noboot, int, 0); 73module_param(soft_noboot, int, 0);
74MODULE_PARM_DESC(soft_noboot, "Softdog action, set to 1 to ignore reboots, 0 to reboot (default depends on ONLY_TESTING)"); 74MODULE_PARM_DESC(soft_noboot,
75 "Softdog action, set to 1 to ignore reboots, 0 to reboot "
76 "(default depends on ONLY_TESTING)");
75 77
76/* 78/*
77 * Our timer 79 * Our timer
@@ -264,7 +266,8 @@ static struct notifier_block softdog_notifier = {
264 .notifier_call = softdog_notify_sys, 266 .notifier_call = softdog_notify_sys,
265}; 267};
266 268
267static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n"; 269static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 "
270 "initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n";
268 271
269static int __init watchdog_init(void) 272static int __init watchdog_init(void)
270{ 273{
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index a9c7f352fcbf..af08972de506 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -413,7 +413,8 @@ static int __init wdt_init(void)
413 w83697hf_init(); 413 w83697hf_init();
414 if (early_disable) { 414 if (early_disable) {
415 if (wdt_running()) 415 if (wdt_running())
416 printk(KERN_WARNING PFX "Stopping previously enabled watchdog until userland kicks in\n"); 416 printk(KERN_WARNING PFX "Stopping previously enabled "
417 "watchdog until userland kicks in\n");
417 wdt_disable(); 418 wdt_disable();
418 } 419 }
419 420
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index a38fa4907c92..a4fe7a38d9b0 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -49,12 +49,7 @@ MODULE_LICENSE("GPL");
49MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 49MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
50MODULE_ALIAS_MISCDEV(TEMP_MINOR); 50MODULE_ALIAS_MISCDEV(TEMP_MINOR);
51 51
52#ifdef CONFIG_WATCHDOG_NOWAYOUT 52static int wdrtas_nowayout = WATCHDOG_NOWAYOUT;
53static int wdrtas_nowayout = 1;
54#else
55static int wdrtas_nowayout = 0;
56#endif
57
58static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0); 53static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0);
59static char wdrtas_expect_close; 54static char wdrtas_expect_close;
60 55