Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/topology.c | 2
-rw-r--r--  drivers/char/Kconfig | 7
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/amiserial.c | 61
-rw-r--r--  drivers/char/applicom.c | 11
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 15
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 468
-rw-r--r--  drivers/char/ppdev.c | 4
-rw-r--r--  drivers/char/ramoops.c | 162
-rw-r--r--  drivers/char/vt.c | 10
-rw-r--r--  drivers/edac/i5000_edac.c | 20
-rw-r--r--  drivers/edac/i5400_edac.c | 20
-rw-r--r--  drivers/edac/i82443bxgx_edac.c | 22
-rw-r--r--  drivers/firewire/core-card.c | 22
-rw-r--r--  drivers/firewire/core-cdev.c | 8
-rw-r--r--  drivers/firewire/core-transaction.c | 96
-rw-r--r--  drivers/firewire/core.h | 6
-rw-r--r--  drivers/firewire/ohci.c | 188
-rw-r--r--  drivers/firewire/ohci.h | 10
-rw-r--r--  drivers/gpio/Kconfig | 11
-rw-r--r--  drivers/gpio/cs5535-gpio.c | 2
-rw-r--r--  drivers/gpio/gpiolib.c | 49
-rw-r--r--  drivers/gpio/it8761e_gpio.c | 5
-rw-r--r--  drivers/gpio/langwell_gpio.c | 83
-rw-r--r--  drivers/gpio/max732x.c | 368
-rw-r--r--  drivers/gpio/pca953x.c | 2
-rw-r--r--  drivers/gpio/pl061.c | 2
-rw-r--r--  drivers/hid/hid-debug.c | 2
-rw-r--r--  drivers/ide/gayle.c | 147
-rw-r--r--  drivers/ieee1394/dv1394.c | 11
-rw-r--r--  drivers/ieee1394/raw1394.c | 3
-rw-r--r--  drivers/ieee1394/video1394.c | 5
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 2
-rw-r--r--  drivers/input/joydev.c | 10
-rw-r--r--  drivers/input/keyboard/amikbd.c | 97
-rw-r--r--  drivers/input/misc/Kconfig | 10
-rw-r--r--  drivers/input/misc/Makefile | 1
-rw-r--r--  drivers/input/misc/max8925_onkey.c | 148
-rw-r--r--  drivers/input/misc/twl4030-vibra.c | 2
-rw-r--r--  drivers/input/misc/uinput.c | 4
-rw-r--r--  drivers/input/mouse/amimouse.c | 98
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 4
-rw-r--r--  drivers/input/touchscreen/s3c2410_ts.c | 2
-rw-r--r--  drivers/input/touchscreen/usbtouchscreen.c | 7
-rw-r--r--  drivers/isdn/mISDN/timerdev.c | 2
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/message/i2o/i2o_config.c | 11
-rw-r--r--  drivers/misc/lkdtm.c | 20
-rw-r--r--  drivers/mmc/core/core.c | 3
-rw-r--r--  drivers/mmc/core/sd_ops.c | 2
-rw-r--r--  drivers/mmc/core/sdio_io.c | 30
-rw-r--r--  drivers/mmc/host/Kconfig | 20
-rw-r--r--  drivers/mmc/host/Makefile | 2
-rw-r--r--  drivers/mmc/host/at91_mci.c | 2
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 64
-rw-r--r--  drivers/mmc/host/au1xmmc.c | 2
-rw-r--r--  drivers/mmc/host/bfin_sdh.c | 2
-rw-r--r--  drivers/mmc/host/cb710-mmc.c | 2
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 111
-rw-r--r--  drivers/mmc/host/imxmmc.c | 2
-rw-r--r--  drivers/mmc/host/mmci.c | 2
-rw-r--r--  drivers/mmc/host/msm_sdcc.c | 2
-rw-r--r--  drivers/mmc/host/mvsdio.c | 2
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 2
-rw-r--r--  drivers/mmc/host/omap.c | 64
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 279
-rw-r--r--  drivers/mmc/host/pxamci.c | 2
-rw-r--r--  drivers/mmc/host/s3cmci.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-of-core.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-of-hlwd.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.c | 26
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 10
-rw-r--r--  drivers/mmc/host/sdhci-spear.c | 298
-rw-r--r--  drivers/mmc/host/sdhci.c | 25
-rw-r--r--  drivers/mmc/host/sdhci.h | 42
-rw-r--r--  drivers/mmc/host/sdricoh_cs.c | 2
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 965
-rw-r--r--  drivers/mmc/host/tifm_sd.c | 2
-rw-r--r--  drivers/mmc/host/tmio_mmc.c | 2
-rw-r--r--  drivers/mmc/host/via-sdmmc.c | 2
-rw-r--r--  drivers/mmc/host/wbsd.c | 2
-rw-r--r--  drivers/parport/parport_amiga.c | 64
-rw-r--r--  drivers/rapidio/Kconfig | 24
-rw-r--r--  drivers/rapidio/Makefile | 4
-rw-r--r--  drivers/rapidio/rio-scan.c | 424
-rw-r--r--  drivers/rapidio/rio.c | 431
-rw-r--r--  drivers/rapidio/rio.h | 44
-rw-r--r--  drivers/rapidio/switches/Kconfig | 28
-rw-r--r--  drivers/rapidio/switches/Makefile | 9
-rw-r--r--  drivers/rapidio/switches/idtcps.c | 137
-rw-r--r--  drivers/rapidio/switches/tsi500.c | 20
-rw-r--r--  drivers/rapidio/switches/tsi568.c | 146
-rw-r--r--  drivers/rapidio/switches/tsi57x.c | 315
-rw-r--r--  drivers/rtc/Kconfig | 7
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-ab8500.c | 363
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 6
-rw-r--r--  drivers/s390/block/dasd.c | 23
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 1
-rw-r--r--  drivers/s390/block/dasd_int.h | 1
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 7
-rw-r--r--  drivers/s390/cio/ccwreq.c | 15
-rw-r--r--  drivers/s390/cio/ioasm.h | 15
-rw-r--r--  drivers/scsi/a2091.c | 245
-rw-r--r--  drivers/scsi/a2091.h | 4
-rw-r--r--  drivers/scsi/a3000.c | 256
-rw-r--r--  drivers/scsi/a3000.h | 4
-rw-r--r--  drivers/scsi/a4000t.c | 101
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 4
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 29
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c | 3
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 684
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 5
-rw-r--r--  drivers/scsi/bfa/bfa_core.c | 22
-rw-r--r--  drivers/scsi/gvp11.c | 541
-rw-r--r--  drivers/scsi/gvp11.h | 11
-rw-r--r--  drivers/scsi/ipr.c | 221
-rw-r--r--  drivers/scsi/ipr.h | 31
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 6
-rw-r--r--  drivers/scsi/mvme147.c | 33
-rw-r--r--  drivers/scsi/osst.c | 9
-rw-r--r--  drivers/scsi/scsi_scan.c | 9
-rw-r--r--  drivers/scsi/st.c | 1
-rw-r--r--  drivers/sfi/sfi_acpi.c | 41
-rw-r--r--  drivers/sfi/sfi_core.c | 105
-rw-r--r--  drivers/sfi/sfi_core.h | 8
-rw-r--r--  drivers/staging/go7007/saa7134-go7007.c | 8
-rw-r--r--  drivers/telephony/ixj.c | 15
-rw-r--r--  drivers/video/bf54x-lq043fb.c | 7
-rw-r--r--  drivers/video/bfin-t350mcqb-fb.c | 7
-rw-r--r--  drivers/video/s3fb.c | 101
-rw-r--r--  drivers/video/via/viafbdev.c | 11
134 files changed, 6922 insertions(+), 1930 deletions(-)
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b13206d00..9fc630ce1ddb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
162 topology_remove_dev(cpu); 162 topology_remove_dev(cpu);
163 break; 163 break;
164 } 164 }
165 return rc ? NOTIFY_BAD : NOTIFY_OK; 165 return notifier_from_errno(rc);
166} 166}
167 167
168static int __cpuinit topology_sysfs_init(void) 168static int __cpuinit topology_sysfs_init(void)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e21175be25d0..f09fc0e2062d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1121,5 +1121,12 @@ config DEVPORT
1121 1121
1122source "drivers/s390/char/Kconfig" 1122source "drivers/s390/char/Kconfig"
1123 1123
1124config RAMOOPS
1125 tristate "Log panic/oops to a RAM buffer"
1126 default n
1127 help
1128 This enables panic and oops messages to be logged to a circular
1129 buffer in RAM where it can be read back at some later point.
1130
1124endmenu 1131endmenu
1125 1132
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d39be4cf1f5d..88d6eac69754 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
108obj-$(CONFIG_TCG_TPM) += tpm/ 108obj-$(CONFIG_TCG_TPM) += tpm/
109 109
110obj-$(CONFIG_PS3_FLASH) += ps3flash.o 110obj-$(CONFIG_PS3_FLASH) += ps3flash.o
111obj-$(CONFIG_RAMOOPS) += ramoops.o
111 112
112obj-$(CONFIG_JS_RTC) += js-rtc.o 113obj-$(CONFIG_JS_RTC) += js-rtc.o
113js-rtc-y = rtc.o 114js-rtc-y = rtc.o
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 56b27671adc4..4f8d60c25a98 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -84,6 +84,7 @@ static char *serial_version = "4.30";
84#include <linux/smp_lock.h> 84#include <linux/smp_lock.h>
85#include <linux/init.h> 85#include <linux/init.h>
86#include <linux/bitops.h> 86#include <linux/bitops.h>
87#include <linux/platform_device.h>
87 88
88#include <asm/setup.h> 89#include <asm/setup.h>
89 90
@@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = {
1954/* 1955/*
1955 * The serial driver boot-time initialization code! 1956 * The serial driver boot-time initialization code!
1956 */ 1957 */
1957static int __init rs_init(void) 1958static int __init amiga_serial_probe(struct platform_device *pdev)
1958{ 1959{
1959 unsigned long flags; 1960 unsigned long flags;
1960 struct serial_state * state; 1961 struct serial_state * state;
1961 int error; 1962 int error;
1962 1963
1963 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL))
1964 return -ENODEV;
1965
1966 serial_driver = alloc_tty_driver(1); 1964 serial_driver = alloc_tty_driver(1);
1967 if (!serial_driver) 1965 if (!serial_driver)
1968 return -ENOMEM; 1966 return -ENOMEM;
1969 1967
1970 /*
1971 * We request SERDAT and SERPER only, because the serial registers are
1972 * too spreaded over the custom register space
1973 */
1974 if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4,
1975 "amiserial [Paula]")) {
1976 error = -EBUSY;
1977 goto fail_put_tty_driver;
1978 }
1979
1980 IRQ_ports = NULL; 1968 IRQ_ports = NULL;
1981 1969
1982 show_serial_version(); 1970 show_serial_version();
@@ -1998,7 +1986,7 @@ static int __init rs_init(void)
1998 1986
1999 error = tty_register_driver(serial_driver); 1987 error = tty_register_driver(serial_driver);
2000 if (error) 1988 if (error)
2001 goto fail_release_mem_region; 1989 goto fail_put_tty_driver;
2002 1990
2003 state = rs_table; 1991 state = rs_table;
2004 state->magic = SSTATE_MAGIC; 1992 state->magic = SSTATE_MAGIC;
@@ -2050,23 +2038,24 @@ static int __init rs_init(void)
2050 ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */ 2038 ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */
2051 ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */ 2039 ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */
2052 2040
2041 platform_set_drvdata(pdev, state);
2042
2053 return 0; 2043 return 0;
2054 2044
2055fail_free_irq: 2045fail_free_irq:
2056 free_irq(IRQ_AMIGA_TBE, state); 2046 free_irq(IRQ_AMIGA_TBE, state);
2057fail_unregister: 2047fail_unregister:
2058 tty_unregister_driver(serial_driver); 2048 tty_unregister_driver(serial_driver);
2059fail_release_mem_region:
2060 release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
2061fail_put_tty_driver: 2049fail_put_tty_driver:
2062 put_tty_driver(serial_driver); 2050 put_tty_driver(serial_driver);
2063 return error; 2051 return error;
2064} 2052}
2065 2053
2066static __exit void rs_exit(void) 2054static int __exit amiga_serial_remove(struct platform_device *pdev)
2067{ 2055{
2068 int error; 2056 int error;
2069 struct async_struct *info = rs_table[0].info; 2057 struct serial_state *state = platform_get_drvdata(pdev);
2058 struct async_struct *info = state->info;
2070 2059
2071 /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ 2060 /* printk("Unloading %s: version %s\n", serial_name, serial_version); */
2072 tasklet_kill(&info->tlet); 2061 tasklet_kill(&info->tlet);
@@ -2075,19 +2064,38 @@ static __exit void rs_exit(void)
2075 error); 2064 error);
2076 put_tty_driver(serial_driver); 2065 put_tty_driver(serial_driver);
2077 2066
2078 if (info) { 2067 rs_table[0].info = NULL;
2079 rs_table[0].info = NULL; 2068 kfree(info);
2080 kfree(info);
2081 }
2082 2069
2083 free_irq(IRQ_AMIGA_TBE, rs_table); 2070 free_irq(IRQ_AMIGA_TBE, rs_table);
2084 free_irq(IRQ_AMIGA_RBF, rs_table); 2071 free_irq(IRQ_AMIGA_RBF, rs_table);
2085 2072
2086 release_mem_region(CUSTOM_PHYSADDR+0x30, 4); 2073 platform_set_drvdata(pdev, NULL);
2074
2075 return error;
2076}
2077
2078static struct platform_driver amiga_serial_driver = {
2079 .remove = __exit_p(amiga_serial_remove),
2080 .driver = {
2081 .name = "amiga-serial",
2082 .owner = THIS_MODULE,
2083 },
2084};
2085
2086static int __init amiga_serial_init(void)
2087{
2088 return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe);
2089}
2090
2091module_init(amiga_serial_init);
2092
2093static void __exit amiga_serial_exit(void)
2094{
2095 platform_driver_unregister(&amiga_serial_driver);
2087} 2096}
2088 2097
2089module_init(rs_init) 2098module_exit(amiga_serial_exit);
2090module_exit(rs_exit)
2091 2099
2092 2100
2093#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE) 2101#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
@@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init);
2154#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */ 2162#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
2155 2163
2156MODULE_LICENSE("GPL"); 2164MODULE_LICENSE("GPL");
2165MODULE_ALIAS("platform:amiga-serial");
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 63313a33ba5f..f4ae0e0fb631 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -703,14 +703,9 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
703 /* In general, the device is only openable by root anyway, so we're not 703 /* In general, the device is only openable by root anyway, so we're not
704 particularly concerned that bogus ioctls can flood the console. */ 704 particularly concerned that bogus ioctls can flood the console. */
705 705
706 adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL); 706 adgl = memdup_user(argp, sizeof(struct st_ram_io));
707 if (!adgl) 707 if (IS_ERR(adgl))
708 return -ENOMEM; 708 return PTR_ERR(adgl);
709
710 if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
711 kfree(adgl);
712 return -EFAULT;
713 }
714 709
715 lock_kernel(); 710 lock_kernel();
716 IndexCard = adgl->num_card-1; 711 IndexCard = adgl->num_card-1;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c6ad4234378d..4f3f8c9ec262 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2505 return rv; 2505 return rv;
2506 } 2506 }
2507 2507
2508 printk(KERN_INFO 2508 dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
2509 "ipmi: Found new BMC (man_id: 0x%6.6x, " 2509 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2510 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2510 bmc->id.manufacturer_id,
2511 bmc->id.manufacturer_id, 2511 bmc->id.product_id,
2512 bmc->id.product_id, 2512 bmc->id.device_id);
2513 bmc->id.device_id);
2514 } 2513 }
2515 2514
2516 /* 2515 /*
@@ -4037,8 +4036,8 @@ static void ipmi_request_event(void)
4037 4036
4038static struct timer_list ipmi_timer; 4037static struct timer_list ipmi_timer;
4039 4038
4040/* Call every ~100 ms. */ 4039/* Call every ~1000 ms. */
4041#define IPMI_TIMEOUT_TIME 100 4040#define IPMI_TIMEOUT_TIME 1000
4042 4041
4043/* How many jiffies does it take to get to the timeout time. */ 4042/* How many jiffies does it take to get to the timeout time. */
4044#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) 4043#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 47ffe4a90a95..35603dd4e6c5 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -107,6 +107,14 @@ enum si_type {
107}; 107};
108static char *si_to_str[] = { "kcs", "smic", "bt" }; 108static char *si_to_str[] = { "kcs", "smic", "bt" };
109 109
110enum ipmi_addr_src {
111 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
112 SI_PCI, SI_DEVICETREE, SI_DEFAULT
113};
114static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
115 "ACPI", "SMBIOS", "PCI",
116 "device-tree", "default" };
117
110#define DEVICE_NAME "ipmi_si" 118#define DEVICE_NAME "ipmi_si"
111 119
112static struct platform_driver ipmi_driver = { 120static struct platform_driver ipmi_driver = {
@@ -188,7 +196,7 @@ struct smi_info {
188 int (*irq_setup)(struct smi_info *info); 196 int (*irq_setup)(struct smi_info *info);
189 void (*irq_cleanup)(struct smi_info *info); 197 void (*irq_cleanup)(struct smi_info *info);
190 unsigned int io_size; 198 unsigned int io_size;
191 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ 199 enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
192 void (*addr_source_cleanup)(struct smi_info *info); 200 void (*addr_source_cleanup)(struct smi_info *info);
193 void *addr_source_data; 201 void *addr_source_data;
194 202
@@ -300,6 +308,7 @@ static int num_max_busy_us;
300 308
301static int unload_when_empty = 1; 309static int unload_when_empty = 1;
302 310
311static int add_smi(struct smi_info *smi);
303static int try_smi_init(struct smi_info *smi); 312static int try_smi_init(struct smi_info *smi);
304static void cleanup_one_si(struct smi_info *to_clean); 313static void cleanup_one_si(struct smi_info *to_clean);
305 314
@@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
314{ 323{
315 /* Deliver the message to the upper layer with the lock 324 /* Deliver the message to the upper layer with the lock
316 released. */ 325 released. */
317 spin_unlock(&(smi_info->si_lock)); 326
318 ipmi_smi_msg_received(smi_info->intf, msg); 327 if (smi_info->run_to_completion) {
319 spin_lock(&(smi_info->si_lock)); 328 ipmi_smi_msg_received(smi_info->intf, msg);
329 } else {
330 spin_unlock(&(smi_info->si_lock));
331 ipmi_smi_msg_received(smi_info->intf, msg);
332 spin_lock(&(smi_info->si_lock));
333 }
320} 334}
321 335
322static void return_hosed_msg(struct smi_info *smi_info, int cCode) 336static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
445 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 459 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
446 start_disable_irq(smi_info); 460 start_disable_irq(smi_info);
447 smi_info->interrupt_disabled = 1; 461 smi_info->interrupt_disabled = 1;
462 if (!atomic_read(&smi_info->stop_operation))
463 mod_timer(&smi_info->si_timer,
464 jiffies + SI_TIMEOUT_JIFFIES);
448 } 465 }
449} 466}
450 467
@@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
576 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); 593 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
577 if (msg[2] != 0) { 594 if (msg[2] != 0) {
578 /* Error clearing flags */ 595 /* Error clearing flags */
579 printk(KERN_WARNING 596 dev_warn(smi_info->dev,
580 "ipmi_si: Error clearing flags: %2.2x\n", 597 "Error clearing flags: %2.2x\n", msg[2]);
581 msg[2]);
582 } 598 }
583 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) 599 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
584 start_enable_irq(smi_info); 600 start_enable_irq(smi_info);
@@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
670 /* We got the flags from the SMI, now handle them. */ 686 /* We got the flags from the SMI, now handle them. */
671 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 687 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
672 if (msg[2] != 0) { 688 if (msg[2] != 0) {
673 printk(KERN_WARNING 689 dev_warn(smi_info->dev, "Could not enable interrupts"
674 "ipmi_si: Could not enable interrupts" 690 ", failed get, using polled mode.\n");
675 ", failed get, using polled mode.\n");
676 smi_info->si_state = SI_NORMAL; 691 smi_info->si_state = SI_NORMAL;
677 } else { 692 } else {
678 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 693 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
693 708
694 /* We got the flags from the SMI, now handle them. */ 709 /* We got the flags from the SMI, now handle them. */
695 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 710 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
696 if (msg[2] != 0) { 711 if (msg[2] != 0)
697 printk(KERN_WARNING 712 dev_warn(smi_info->dev, "Could not enable interrupts"
698 "ipmi_si: Could not enable interrupts" 713 ", failed set, using polled mode.\n");
699 ", failed set, using polled mode.\n"); 714 else
700 } 715 smi_info->interrupt_disabled = 0;
701 smi_info->si_state = SI_NORMAL; 716 smi_info->si_state = SI_NORMAL;
702 break; 717 break;
703 } 718 }
@@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
709 /* We got the flags from the SMI, now handle them. */ 724 /* We got the flags from the SMI, now handle them. */
710 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 725 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
711 if (msg[2] != 0) { 726 if (msg[2] != 0) {
712 printk(KERN_WARNING 727 dev_warn(smi_info->dev, "Could not disable interrupts"
713 "ipmi_si: Could not disable interrupts" 728 ", failed get.\n");
714 ", failed get.\n");
715 smi_info->si_state = SI_NORMAL; 729 smi_info->si_state = SI_NORMAL;
716 } else { 730 } else {
717 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 731 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
733 /* We got the flags from the SMI, now handle them. */ 747 /* We got the flags from the SMI, now handle them. */
734 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 748 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
735 if (msg[2] != 0) { 749 if (msg[2] != 0) {
736 printk(KERN_WARNING 750 dev_warn(smi_info->dev, "Could not disable interrupts"
737 "ipmi_si: Could not disable interrupts" 751 ", failed set.\n");
738 ", failed set.\n");
739 } 752 }
740 smi_info->si_state = SI_NORMAL; 753 smi_info->si_state = SI_NORMAL;
741 break; 754 break;
@@ -877,6 +890,11 @@ static void sender(void *send_info,
877 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 890 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
878#endif 891#endif
879 892
893 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
894
895 if (smi_info->thread)
896 wake_up_process(smi_info->thread);
897
880 if (smi_info->run_to_completion) { 898 if (smi_info->run_to_completion) {
881 /* 899 /*
882 * If we are running to completion, then throw it in 900 * If we are running to completion, then throw it in
@@ -997,6 +1015,8 @@ static int ipmi_thread(void *data)
997 ; /* do nothing */ 1015 ; /* do nothing */
998 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) 1016 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
999 schedule(); 1017 schedule();
1018 else if (smi_result == SI_SM_IDLE)
1019 schedule_timeout_interruptible(100);
1000 else 1020 else
1001 schedule_timeout_interruptible(0); 1021 schedule_timeout_interruptible(0);
1002 } 1022 }
@@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data)
1039 unsigned long flags; 1059 unsigned long flags;
1040 unsigned long jiffies_now; 1060 unsigned long jiffies_now;
1041 long time_diff; 1061 long time_diff;
1062 long timeout;
1042#ifdef DEBUG_TIMING 1063#ifdef DEBUG_TIMING
1043 struct timeval t; 1064 struct timeval t;
1044#endif 1065#endif
@@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data)
1059 1080
1060 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 1081 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1061 /* Running with interrupts, only do long timeouts. */ 1082 /* Running with interrupts, only do long timeouts. */
1062 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 1083 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1063 smi_inc_stat(smi_info, long_timeouts); 1084 smi_inc_stat(smi_info, long_timeouts);
1064 goto do_add_timer; 1085 goto do_mod_timer;
1065 } 1086 }
1066 1087
1067 /* 1088 /*
@@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data)
1070 */ 1091 */
1071 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1092 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1072 smi_inc_stat(smi_info, short_timeouts); 1093 smi_inc_stat(smi_info, short_timeouts);
1073 smi_info->si_timer.expires = jiffies + 1; 1094 timeout = jiffies + 1;
1074 } else { 1095 } else {
1075 smi_inc_stat(smi_info, long_timeouts); 1096 smi_inc_stat(smi_info, long_timeouts);
1076 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 1097 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1077 } 1098 }
1078 1099
1079 do_add_timer: 1100 do_mod_timer:
1080 add_timer(&(smi_info->si_timer)); 1101 if (smi_result != SI_SM_IDLE)
1102 mod_timer(&(smi_info->si_timer), timeout);
1081} 1103}
1082 1104
1083static irqreturn_t si_irq_handler(int irq, void *data) 1105static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info,
1144 new_smi->thread = kthread_run(ipmi_thread, new_smi, 1166 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1145 "kipmi%d", new_smi->intf_num); 1167 "kipmi%d", new_smi->intf_num);
1146 if (IS_ERR(new_smi->thread)) { 1168 if (IS_ERR(new_smi->thread)) {
1147 printk(KERN_NOTICE "ipmi_si_intf: Could not start" 1169 dev_notice(new_smi->dev, "Could not start"
1148 " kernel thread due to error %ld, only using" 1170 " kernel thread due to error %ld, only using"
1149 " timers to drive the interface\n", 1171 " timers to drive the interface\n",
1150 PTR_ERR(new_smi->thread)); 1172 PTR_ERR(new_smi->thread));
1151 new_smi->thread = NULL; 1173 new_smi->thread = NULL;
1152 } 1174 }
1153 } 1175 }
@@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info)
1308 DEVICE_NAME, 1330 DEVICE_NAME,
1309 info); 1331 info);
1310 if (rv) { 1332 if (rv) {
1311 printk(KERN_WARNING 1333 dev_warn(info->dev, "%s unable to claim interrupt %d,"
1312 "ipmi_si: %s unable to claim interrupt %d," 1334 " running polled\n",
1313 " running polled\n", 1335 DEVICE_NAME, info->irq);
1314 DEVICE_NAME, info->irq);
1315 info->irq = 0; 1336 info->irq = 0;
1316 } else { 1337 } else {
1317 info->irq_cleanup = std_irq_cleanup; 1338 info->irq_cleanup = std_irq_cleanup;
1318 printk(" Using irq %d\n", info->irq); 1339 dev_info(info->dev, "Using irq %d\n", info->irq);
1319 } 1340 }
1320 1341
1321 return rv; 1342 return rv;
@@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info)
1406 info->io.outputb = port_outl; 1427 info->io.outputb = port_outl;
1407 break; 1428 break;
1408 default: 1429 default:
1409 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", 1430 dev_warn(info->dev, "Invalid register size: %d\n",
1410 info->io.regsize); 1431 info->io.regsize);
1411 return -EINVAL; 1432 return -EINVAL;
1412 } 1433 }
1413 1434
@@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info)
1529 break; 1550 break;
1530#endif 1551#endif
1531 default: 1552 default:
1532 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", 1553 dev_warn(info->dev, "Invalid register size: %d\n",
1533 info->io.regsize); 1554 info->io.regsize);
1534 return -EINVAL; 1555 return -EINVAL;
1535 } 1556 }
1536 1557
@@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1755 goto out; 1776 goto out;
1756 } 1777 }
1757 1778
1758 info->addr_source = "hotmod"; 1779 info->addr_source = SI_HOTMOD;
1759 info->si_type = si_type; 1780 info->si_type = si_type;
1760 info->io.addr_data = addr; 1781 info->io.addr_data = addr;
1761 info->io.addr_type = addr_space; 1782 info->io.addr_type = addr_space;
@@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1777 info->irq_setup = std_irq_setup; 1798 info->irq_setup = std_irq_setup;
1778 info->slave_addr = ipmb; 1799 info->slave_addr = ipmb;
1779 1800
1780 try_smi_init(info); 1801 if (!add_smi(info))
1802 if (try_smi_init(info))
1803 cleanup_one_si(info);
1781 } else { 1804 } else {
1782 /* remove */ 1805 /* remove */
1783 struct smi_info *e, *tmp_e; 1806 struct smi_info *e, *tmp_e;
@@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void)
1813 if (!info) 1836 if (!info)
1814 return; 1837 return;
1815 1838
1816 info->addr_source = "hardcoded"; 1839 info->addr_source = SI_HARDCODED;
1840 printk(KERN_INFO PFX "probing via hardcoded address\n");
1817 1841
1818 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { 1842 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1819 info->si_type = SI_KCS; 1843 info->si_type = SI_KCS;
@@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void)
1822 } else if (strcmp(si_type[i], "bt") == 0) { 1846 } else if (strcmp(si_type[i], "bt") == 0) {
1823 info->si_type = SI_BT; 1847 info->si_type = SI_BT;
1824 } else { 1848 } else {
1825 printk(KERN_WARNING 1849 printk(KERN_WARNING PFX "Interface type specified "
1826 "ipmi_si: Interface type specified "
1827 "for interface %d, was invalid: %s\n", 1850 "for interface %d, was invalid: %s\n",
1828 i, si_type[i]); 1851 i, si_type[i]);
1829 kfree(info); 1852 kfree(info);
@@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void)
1841 info->io.addr_data = addrs[i]; 1864 info->io.addr_data = addrs[i];
1842 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1843 } else { 1866 } else {
1844 printk(KERN_WARNING 1867 printk(KERN_WARNING PFX "Interface type specified "
1845 "ipmi_si: Interface type specified " 1868 "for interface %d, but port and address were "
1846 "for interface %d, " 1869 "not set or set to zero.\n", i);
1847 "but port and address were not set or "
1848 "set to zero.\n", i);
1849 kfree(info); 1870 kfree(info);
1850 continue; 1871 continue;
1851 } 1872 }
@@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void)
1863 info->irq_setup = std_irq_setup; 1884 info->irq_setup = std_irq_setup;
1864 info->slave_addr = slave_addrs[i]; 1885 info->slave_addr = slave_addrs[i];
1865 1886
1866 try_smi_init(info); 1887 if (!add_smi(info))
1888 if (try_smi_init(info))
1889 cleanup_one_si(info);
1867 } 1890 }
1868} 1891}
1869 1892
@@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1923 &ipmi_acpi_gpe, 1946 &ipmi_acpi_gpe,
1924 info); 1947 info);
1925 if (status != AE_OK) { 1948 if (status != AE_OK) {
1926 printk(KERN_WARNING 1949 dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
1927 "ipmi_si: %s unable to claim ACPI GPE %d," 1950 " running polled\n", DEVICE_NAME, info->irq);
1928 " running polled\n",
1929 DEVICE_NAME, info->irq);
1930 info->irq = 0; 1951 info->irq = 0;
1931 return -EINVAL; 1952 return -EINVAL;
1932 } else { 1953 } else {
1933 info->irq_cleanup = acpi_gpe_irq_cleanup; 1954 info->irq_cleanup = acpi_gpe_irq_cleanup;
1934 printk(" Using ACPI GPE %d\n", info->irq); 1955 dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
1935 return 0; 1956 return 0;
1936 } 1957 }
1937} 1958}
@@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
1989 u8 addr_space; 2010 u8 addr_space;
1990 2011
1991 if (spmi->IPMIlegacy != 1) { 2012 if (spmi->IPMIlegacy != 1) {
1992 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 2013 printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1993 return -ENODEV; 2014 return -ENODEV;
1994 } 2015 }
1995 2016
1996 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 2017 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2000 2021
2001 info = kzalloc(sizeof(*info), GFP_KERNEL); 2022 info = kzalloc(sizeof(*info), GFP_KERNEL);
2002 if (!info) { 2023 if (!info) {
2003 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); 2024 printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
2004 return -ENOMEM; 2025 return -ENOMEM;
2005 } 2026 }
2006 2027
2007 info->addr_source = "SPMI"; 2028 info->addr_source = SI_SPMI;
2029 printk(KERN_INFO PFX "probing via SPMI\n");
2008 2030
2009 /* Figure out the interface type. */ 2031 /* Figure out the interface type. */
2010 switch (spmi->InterfaceType) { 2032 switch (spmi->InterfaceType) {
@@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2018 info->si_type = SI_BT; 2040 info->si_type = SI_BT;
2019 break; 2041 break;
2020 default: 2042 default:
2021 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", 2043 printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
2022 spmi->InterfaceType); 2044 spmi->InterfaceType);
2023 kfree(info); 2045 kfree(info);
2024 return -EIO; 2046 return -EIO;
2025 } 2047 }
@@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2055 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2077 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2056 } else { 2078 } else {
2057 kfree(info); 2079 kfree(info);
2058 printk(KERN_WARNING 2080 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
2059 "ipmi_si: Unknown ACPI I/O Address type\n");
2060 return -EIO; 2081 return -EIO;
2061 } 2082 }
2062 info->io.addr_data = spmi->addr.address; 2083 info->io.addr_data = spmi->addr.address;
2063 2084
2064 try_smi_init(info); 2085 add_smi(info);
2065 2086
2066 return 0; 2087 return 0;
2067} 2088}
@@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2093{ 2114{
2094 struct acpi_device *acpi_dev; 2115 struct acpi_device *acpi_dev;
2095 struct smi_info *info; 2116 struct smi_info *info;
2117 struct resource *res;
2096 acpi_handle handle; 2118 acpi_handle handle;
2097 acpi_status status; 2119 acpi_status status;
2098 unsigned long long tmp; 2120 unsigned long long tmp;
@@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2105 if (!info) 2127 if (!info)
2106 return -ENOMEM; 2128 return -ENOMEM;
2107 2129
2108 info->addr_source = "ACPI"; 2130 info->addr_source = SI_ACPI;
2131 printk(KERN_INFO PFX "probing via ACPI\n");
2109 2132
2110 handle = acpi_dev->handle; 2133 handle = acpi_dev->handle;
2111 2134
@@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2125 info->si_type = SI_BT; 2148 info->si_type = SI_BT;
2126 break; 2149 break;
2127 default: 2150 default:
2128 dev_info(&dev->dev, "unknown interface type %lld\n", tmp); 2151 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
2129 goto err_free; 2152 goto err_free;
2130 } 2153 }
2131 2154
2132 if (pnp_port_valid(dev, 0)) { 2155 res = pnp_get_resource(dev, IORESOURCE_IO, 0);
2156 if (res) {
2133 info->io_setup = port_setup; 2157 info->io_setup = port_setup;
2134 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2158 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2135 info->io.addr_data = pnp_port_start(dev, 0);
2136 } else if (pnp_mem_valid(dev, 0)) {
2137 info->io_setup = mem_setup;
2138 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2139 info->io.addr_data = pnp_mem_start(dev, 0);
2140 } else { 2159 } else {
2160 res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
2161 if (res) {
2162 info->io_setup = mem_setup;
2163 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2164 }
2165 }
2166 if (!res) {
2141 dev_err(&dev->dev, "no I/O or memory address\n"); 2167 dev_err(&dev->dev, "no I/O or memory address\n");
2142 goto err_free; 2168 goto err_free;
2143 } 2169 }
2170 info->io.addr_data = res->start;
2144 2171
2145 info->io.regspacing = DEFAULT_REGSPACING; 2172 info->io.regspacing = DEFAULT_REGSPACING;
2146 info->io.regsize = DEFAULT_REGSPACING; 2173 info->io.regsize = DEFAULT_REGSPACING;
@@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2156 info->irq_setup = std_irq_setup; 2183 info->irq_setup = std_irq_setup;
2157 } 2184 }
2158 2185
2159 info->dev = &acpi_dev->dev; 2186 info->dev = &dev->dev;
2160 pnp_set_drvdata(dev, info); 2187 pnp_set_drvdata(dev, info);
2161 2188
2162 return try_smi_init(info); 2189 dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
2190 res, info->io.regsize, info->io.regspacing,
2191 info->irq);
2192
2193 return add_smi(info);
2163 2194
2164err_free: 2195err_free:
2165 kfree(info); 2196 kfree(info);
@@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2264 2295
2265 info = kzalloc(sizeof(*info), GFP_KERNEL); 2296 info = kzalloc(sizeof(*info), GFP_KERNEL);
2266 if (!info) { 2297 if (!info) {
2267 printk(KERN_ERR 2298 printk(KERN_ERR PFX "Could not allocate SI data\n");
2268 "ipmi_si: Could not allocate SI data\n");
2269 return; 2299 return;
2270 } 2300 }
2271 2301
2272 info->addr_source = "SMBIOS"; 2302 info->addr_source = SI_SMBIOS;
2303 printk(KERN_INFO PFX "probing via SMBIOS\n");
2273 2304
2274 switch (ipmi_data->type) { 2305 switch (ipmi_data->type) {
2275 case 0x01: /* KCS */ 2306 case 0x01: /* KCS */
@@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2299 2330
2300 default: 2331 default:
2301 kfree(info); 2332 kfree(info);
2302 printk(KERN_WARNING 2333 printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
2303 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2304 ipmi_data->addr_space); 2334 ipmi_data->addr_space);
2305 return; 2335 return;
2306 } 2336 }
@@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2318 if (info->irq) 2348 if (info->irq)
2319 info->irq_setup = std_irq_setup; 2349 info->irq_setup = std_irq_setup;
2320 2350
2321 try_smi_init(info); 2351 add_smi(info);
2322} 2352}
2323 2353
2324static void __devinit dmi_find_bmc(void) 2354static void __devinit dmi_find_bmc(void)
@@ -2368,7 +2398,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2368 if (!info) 2398 if (!info)
2369 return -ENOMEM; 2399 return -ENOMEM;
2370 2400
2371 info->addr_source = "PCI"; 2401 info->addr_source = SI_PCI;
2402 dev_info(&pdev->dev, "probing via PCI");
2372 2403
2373 switch (class_type) { 2404 switch (class_type) {
2374 case PCI_ERMC_CLASSCODE_TYPE_SMIC: 2405 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2385 2416
2386 default: 2417 default:
2387 kfree(info); 2418 kfree(info);
2388 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", 2419 dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
2389 pci_name(pdev), class_type);
2390 return -ENOMEM; 2420 return -ENOMEM;
2391 } 2421 }
2392 2422
2393 rv = pci_enable_device(pdev); 2423 rv = pci_enable_device(pdev);
2394 if (rv) { 2424 if (rv) {
2395 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", 2425 dev_err(&pdev->dev, "couldn't enable PCI device\n");
2396 pci_name(pdev));
2397 kfree(info); 2426 kfree(info);
2398 return rv; 2427 return rv;
2399 } 2428 }
@@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2421 info->dev = &pdev->dev; 2450 info->dev = &pdev->dev;
2422 pci_set_drvdata(pdev, info); 2451 pci_set_drvdata(pdev, info);
2423 2452
2424 return try_smi_init(info); 2453 dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
2454 &pdev->resource[0], info->io.regsize, info->io.regspacing,
2455 info->irq);
2456
2457 return add_smi(info);
2425} 2458}
2426 2459
2427static void __devexit ipmi_pci_remove(struct pci_dev *pdev) 2460static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2473,7 +2506,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2473 int ret; 2506 int ret;
2474 int proplen; 2507 int proplen;
2475 2508
2476 dev_info(&dev->dev, PFX "probing via device tree\n"); 2509 dev_info(&dev->dev, "probing via device tree\n");
2477 2510
2478 ret = of_address_to_resource(np, 0, &resource); 2511 ret = of_address_to_resource(np, 0, &resource);
2479 if (ret) { 2512 if (ret) {
@@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2503 2536
2504 if (!info) { 2537 if (!info) {
2505 dev_err(&dev->dev, 2538 dev_err(&dev->dev,
2506 PFX "could not allocate memory for OF probe\n"); 2539 "could not allocate memory for OF probe\n");
2507 return -ENOMEM; 2540 return -ENOMEM;
2508 } 2541 }
2509 2542
2510 info->si_type = (enum si_type) match->data; 2543 info->si_type = (enum si_type) match->data;
2511 info->addr_source = "device-tree"; 2544 info->addr_source = SI_DEVICETREE;
2512 info->irq_setup = std_irq_setup; 2545 info->irq_setup = std_irq_setup;
2513 2546
2514 if (resource.flags & IORESOURCE_IO) { 2547 if (resource.flags & IORESOURCE_IO) {
@@ -2528,13 +2561,13 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2528 info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 2561 info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
2529 info->dev = &dev->dev; 2562 info->dev = &dev->dev;
2530 2563
2531 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n", 2564 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
2532 info->io.addr_data, info->io.regsize, info->io.regspacing, 2565 info->io.addr_data, info->io.regsize, info->io.regspacing,
2533 info->irq); 2566 info->irq);
2534 2567
2535 dev_set_drvdata(&dev->dev, info); 2568 dev_set_drvdata(&dev->dev, info);
2536 2569
2537 return try_smi_init(info); 2570 return add_smi(info);
2538} 2571}
2539 2572
2540static int __devexit ipmi_of_remove(struct of_device *dev) 2573static int __devexit ipmi_of_remove(struct of_device *dev)
@@ -2643,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2643 2676
2644 rv = wait_for_msg_done(smi_info); 2677 rv = wait_for_msg_done(smi_info);
2645 if (rv) { 2678 if (rv) {
2646 printk(KERN_WARNING 2679 printk(KERN_WARNING PFX "Error getting response from get"
2647 "ipmi_si: Error getting response from get global," 2680 " global enables command, the event buffer is not"
2648 " enables command, the event buffer is not"
2649 " enabled.\n"); 2681 " enabled.\n");
2650 goto out; 2682 goto out;
2651 } 2683 }
@@ -2657,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2657 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 2689 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2658 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || 2690 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
2659 resp[2] != 0) { 2691 resp[2] != 0) {
2660 printk(KERN_WARNING 2692 printk(KERN_WARNING PFX "Invalid return from get global"
2661 "ipmi_si: Invalid return from get global" 2693 " enables command, cannot enable the event buffer.\n");
2662 " enables command, cannot enable the event"
2663 " buffer.\n");
2664 rv = -EINVAL; 2694 rv = -EINVAL;
2665 goto out; 2695 goto out;
2666 } 2696 }
@@ -2676,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2676 2706
2677 rv = wait_for_msg_done(smi_info); 2707 rv = wait_for_msg_done(smi_info);
2678 if (rv) { 2708 if (rv) {
2679 printk(KERN_WARNING 2709 printk(KERN_WARNING PFX "Error getting response from set"
2680 "ipmi_si: Error getting response from set global," 2710 " global, enables command, the event buffer is not"
2681 " enables command, the event buffer is not"
2682 " enabled.\n"); 2711 " enabled.\n");
2683 goto out; 2712 goto out;
2684 } 2713 }
@@ -2689,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2689 if (resp_len < 3 || 2718 if (resp_len < 3 ||
2690 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 2719 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2691 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { 2720 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
2692 printk(KERN_WARNING 2721 printk(KERN_WARNING PFX "Invalid return from get global,"
2693 "ipmi_si: Invalid return from get global," 2722 "enables command, not enable the event buffer.\n");
2694 "enables command, not enable the event"
2695 " buffer.\n");
2696 rv = -EINVAL; 2723 rv = -EINVAL;
2697 goto out; 2724 goto out;
2698 } 2725 }
@@ -2951,7 +2978,7 @@ static __devinit void default_find_bmc(void)
2951 if (!info) 2978 if (!info)
2952 return; 2979 return;
2953 2980
2954 info->addr_source = NULL; 2981 info->addr_source = SI_DEFAULT;
2955 2982
2956 info->si_type = ipmi_defaults[i].type; 2983 info->si_type = ipmi_defaults[i].type;
2957 info->io_setup = port_setup; 2984 info->io_setup = port_setup;
@@ -2963,14 +2990,16 @@ static __devinit void default_find_bmc(void)
2963 info->io.regsize = DEFAULT_REGSPACING; 2990 info->io.regsize = DEFAULT_REGSPACING;
2964 info->io.regshift = 0; 2991 info->io.regshift = 0;
2965 2992
2966 if (try_smi_init(info) == 0) { 2993 if (add_smi(info) == 0) {
2967 /* Found one... */ 2994 if ((try_smi_init(info)) == 0) {
2968 printk(KERN_INFO "ipmi_si: Found default %s state" 2995 /* Found one... */
2969 " machine at %s address 0x%lx\n", 2996 printk(KERN_INFO PFX "Found default %s"
2970 si_to_str[info->si_type], 2997 " state machine at %s address 0x%lx\n",
2971 addr_space_to_str[info->io.addr_type], 2998 si_to_str[info->si_type],
2972 info->io.addr_data); 2999 addr_space_to_str[info->io.addr_type],
2973 return; 3000 info->io.addr_data);
3001 } else
3002 cleanup_one_si(info);
2974 } 3003 }
2975 } 3004 }
2976} 3005}
@@ -2989,34 +3018,48 @@ static int is_new_interface(struct smi_info *info)
2989 return 1; 3018 return 1;
2990} 3019}
2991 3020
2992static int try_smi_init(struct smi_info *new_smi) 3021static int add_smi(struct smi_info *new_smi)
2993{ 3022{
2994 int rv; 3023 int rv = 0;
2995 int i;
2996
2997 if (new_smi->addr_source) {
2998 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2999 " machine at %s address 0x%lx, slave address 0x%x,"
3000 " irq %d\n",
3001 new_smi->addr_source,
3002 si_to_str[new_smi->si_type],
3003 addr_space_to_str[new_smi->io.addr_type],
3004 new_smi->io.addr_data,
3005 new_smi->slave_addr, new_smi->irq);
3006 }
3007 3024
3025 printk(KERN_INFO PFX "Adding %s-specified %s state machine",
3026 ipmi_addr_src_to_str[new_smi->addr_source],
3027 si_to_str[new_smi->si_type]);
3008 mutex_lock(&smi_infos_lock); 3028 mutex_lock(&smi_infos_lock);
3009 if (!is_new_interface(new_smi)) { 3029 if (!is_new_interface(new_smi)) {
3010 printk(KERN_WARNING "ipmi_si: duplicate interface\n"); 3030 printk(KERN_CONT PFX "duplicate interface\n");
3011 rv = -EBUSY; 3031 rv = -EBUSY;
3012 goto out_err; 3032 goto out_err;
3013 } 3033 }
3014 3034
3035 printk(KERN_CONT "\n");
3036
3015 /* So we know not to free it unless we have allocated one. */ 3037 /* So we know not to free it unless we have allocated one. */
3016 new_smi->intf = NULL; 3038 new_smi->intf = NULL;
3017 new_smi->si_sm = NULL; 3039 new_smi->si_sm = NULL;
3018 new_smi->handlers = NULL; 3040 new_smi->handlers = NULL;
3019 3041
3042 list_add_tail(&new_smi->link, &smi_infos);
3043
3044out_err:
3045 mutex_unlock(&smi_infos_lock);
3046 return rv;
3047}
3048
3049static int try_smi_init(struct smi_info *new_smi)
3050{
3051 int rv = 0;
3052 int i;
3053
3054 printk(KERN_INFO PFX "Trying %s-specified %s state"
3055 " machine at %s address 0x%lx, slave address 0x%x,"
3056 " irq %d\n",
3057 ipmi_addr_src_to_str[new_smi->addr_source],
3058 si_to_str[new_smi->si_type],
3059 addr_space_to_str[new_smi->io.addr_type],
3060 new_smi->io.addr_data,
3061 new_smi->slave_addr, new_smi->irq);
3062
3020 switch (new_smi->si_type) { 3063 switch (new_smi->si_type) {
3021 case SI_KCS: 3064 case SI_KCS:
3022 new_smi->handlers = &kcs_smi_handlers; 3065 new_smi->handlers = &kcs_smi_handlers;
@@ -3039,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi)
3039 /* Allocate the state machine's data and initialize it. */ 3082 /* Allocate the state machine's data and initialize it. */
3040 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 3083 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
3041 if (!new_smi->si_sm) { 3084 if (!new_smi->si_sm) {
3042 printk(KERN_ERR "Could not allocate state machine memory\n"); 3085 printk(KERN_ERR PFX
3086 "Could not allocate state machine memory\n");
3043 rv = -ENOMEM; 3087 rv = -ENOMEM;
3044 goto out_err; 3088 goto out_err;
3045 } 3089 }
@@ -3049,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi)
3049 /* Now that we know the I/O size, we can set up the I/O. */ 3093 /* Now that we know the I/O size, we can set up the I/O. */
3050 rv = new_smi->io_setup(new_smi); 3094 rv = new_smi->io_setup(new_smi);
3051 if (rv) { 3095 if (rv) {
3052 printk(KERN_ERR "Could not set up I/O space\n"); 3096 printk(KERN_ERR PFX "Could not set up I/O space\n");
3053 goto out_err; 3097 goto out_err;
3054 } 3098 }
3055 3099
@@ -3059,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi)
3059 /* Do low-level detection first. */ 3103 /* Do low-level detection first. */
3060 if (new_smi->handlers->detect(new_smi->si_sm)) { 3104 if (new_smi->handlers->detect(new_smi->si_sm)) {
3061 if (new_smi->addr_source) 3105 if (new_smi->addr_source)
3062 printk(KERN_INFO "ipmi_si: Interface detection" 3106 printk(KERN_INFO PFX "Interface detection failed\n");
3063 " failed\n");
3064 rv = -ENODEV; 3107 rv = -ENODEV;
3065 goto out_err; 3108 goto out_err;
3066 } 3109 }
@@ -3072,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi)
3072 rv = try_get_dev_id(new_smi); 3115 rv = try_get_dev_id(new_smi);
3073 if (rv) { 3116 if (rv) {
3074 if (new_smi->addr_source) 3117 if (new_smi->addr_source)
3075 printk(KERN_INFO "ipmi_si: There appears to be no BMC" 3118 printk(KERN_INFO PFX "There appears to be no BMC"
3076 " at this location\n"); 3119 " at this location\n");
3077 goto out_err; 3120 goto out_err;
3078 } 3121 }
@@ -3088,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi)
3088 for (i = 0; i < SI_NUM_STATS; i++) 3131 for (i = 0; i < SI_NUM_STATS; i++)
3089 atomic_set(&new_smi->stats[i], 0); 3132 atomic_set(&new_smi->stats[i], 0);
3090 3133
3091 new_smi->interrupt_disabled = 0; 3134 new_smi->interrupt_disabled = 1;
3092 atomic_set(&new_smi->stop_operation, 0); 3135 atomic_set(&new_smi->stop_operation, 0);
3093 new_smi->intf_num = smi_num; 3136 new_smi->intf_num = smi_num;
3094 smi_num++; 3137 smi_num++;
@@ -3114,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi)
3114 new_smi->pdev = platform_device_alloc("ipmi_si", 3157 new_smi->pdev = platform_device_alloc("ipmi_si",
3115 new_smi->intf_num); 3158 new_smi->intf_num);
3116 if (!new_smi->pdev) { 3159 if (!new_smi->pdev) {
3117 printk(KERN_ERR 3160 printk(KERN_ERR PFX
3118 "ipmi_si_intf:" 3161 "Unable to allocate platform device\n");
3119 " Unable to allocate platform device\n");
3120 goto out_err; 3162 goto out_err;
3121 } 3163 }
3122 new_smi->dev = &new_smi->pdev->dev; 3164 new_smi->dev = &new_smi->pdev->dev;
@@ -3124,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi)
3124 3166
3125 rv = platform_device_add(new_smi->pdev); 3167 rv = platform_device_add(new_smi->pdev);
3126 if (rv) { 3168 if (rv) {
3127 printk(KERN_ERR 3169 printk(KERN_ERR PFX
3128 "ipmi_si_intf:" 3170 "Unable to register system interface device:"
3129 " Unable to register system interface device:"
3130 " %d\n", 3171 " %d\n",
3131 rv); 3172 rv);
3132 goto out_err; 3173 goto out_err;
@@ -3141,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi)
3141 "bmc", 3182 "bmc",
3142 new_smi->slave_addr); 3183 new_smi->slave_addr);
3143 if (rv) { 3184 if (rv) {
3144 printk(KERN_ERR 3185 dev_err(new_smi->dev, "Unable to register device: error %d\n",
3145 "ipmi_si: Unable to register device: error %d\n", 3186 rv);
3146 rv);
3147 goto out_err_stop_timer; 3187 goto out_err_stop_timer;
3148 } 3188 }
3149 3189
@@ -3151,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi)
3151 type_file_read_proc, 3191 type_file_read_proc,
3152 new_smi); 3192 new_smi);
3153 if (rv) { 3193 if (rv) {
3154 printk(KERN_ERR 3194 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3155 "ipmi_si: Unable to create proc entry: %d\n",
3156 rv);
3157 goto out_err_stop_timer; 3195 goto out_err_stop_timer;
3158 } 3196 }
3159 3197
@@ -3161,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi)
3161 stat_file_read_proc, 3199 stat_file_read_proc,
3162 new_smi); 3200 new_smi);
3163 if (rv) { 3201 if (rv) {
3164 printk(KERN_ERR 3202 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3165 "ipmi_si: Unable to create proc entry: %d\n",
3166 rv);
3167 goto out_err_stop_timer; 3203 goto out_err_stop_timer;
3168 } 3204 }
3169 3205
@@ -3171,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi)
3171 param_read_proc, 3207 param_read_proc,
3172 new_smi); 3208 new_smi);
3173 if (rv) { 3209 if (rv) {
3174 printk(KERN_ERR 3210 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3175 "ipmi_si: Unable to create proc entry: %d\n",
3176 rv);
3177 goto out_err_stop_timer; 3211 goto out_err_stop_timer;
3178 } 3212 }
3179 3213
3180 list_add_tail(&new_smi->link, &smi_infos); 3214 dev_info(new_smi->dev, "IPMI %s interface initialized\n",
3181 3215 si_to_str[new_smi->si_type]);
3182 mutex_unlock(&smi_infos_lock);
3183
3184 printk(KERN_INFO "IPMI %s interface initialized\n",
3185 si_to_str[new_smi->si_type]);
3186 3216
3187 return 0; 3217 return 0;
3188 3218
@@ -3191,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi)
3191 wait_for_timer_and_thread(new_smi); 3221 wait_for_timer_and_thread(new_smi);
3192 3222
3193 out_err: 3223 out_err:
3194 if (new_smi->intf) 3224 new_smi->interrupt_disabled = 1;
3225
3226 if (new_smi->intf) {
3195 ipmi_unregister_smi(new_smi->intf); 3227 ipmi_unregister_smi(new_smi->intf);
3228 new_smi->intf = NULL;
3229 }
3196 3230
3197 if (new_smi->irq_cleanup) 3231 if (new_smi->irq_cleanup) {
3198 new_smi->irq_cleanup(new_smi); 3232 new_smi->irq_cleanup(new_smi);
3233 new_smi->irq_cleanup = NULL;
3234 }
3199 3235
3200 /* 3236 /*
3201 * Wait until we know that we are out of any interrupt 3237 * Wait until we know that we are out of any interrupt
@@ -3208,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi)
3208 if (new_smi->handlers) 3244 if (new_smi->handlers)
3209 new_smi->handlers->cleanup(new_smi->si_sm); 3245 new_smi->handlers->cleanup(new_smi->si_sm);
3210 kfree(new_smi->si_sm); 3246 kfree(new_smi->si_sm);
3247 new_smi->si_sm = NULL;
3211 } 3248 }
3212 if (new_smi->addr_source_cleanup) 3249 if (new_smi->addr_source_cleanup) {
3213 new_smi->addr_source_cleanup(new_smi); 3250 new_smi->addr_source_cleanup(new_smi);
3214 if (new_smi->io_cleanup) 3251 new_smi->addr_source_cleanup = NULL;
3252 }
3253 if (new_smi->io_cleanup) {
3215 new_smi->io_cleanup(new_smi); 3254 new_smi->io_cleanup(new_smi);
3255 new_smi->io_cleanup = NULL;
3256 }
3216 3257
3217 if (new_smi->dev_registered) 3258 if (new_smi->dev_registered) {
3218 platform_device_unregister(new_smi->pdev); 3259 platform_device_unregister(new_smi->pdev);
3219 3260 new_smi->dev_registered = 0;
3220 kfree(new_smi); 3261 }
3221
3222 mutex_unlock(&smi_infos_lock);
3223 3262
3224 return rv; 3263 return rv;
3225} 3264}
@@ -3229,6 +3268,8 @@ static __devinit int init_ipmi_si(void)
3229 int i; 3268 int i;
3230 char *str; 3269 char *str;
3231 int rv; 3270 int rv;
3271 struct smi_info *e;
3272 enum ipmi_addr_src type = SI_INVALID;
3232 3273
3233 if (initialized) 3274 if (initialized)
3234 return 0; 3275 return 0;
@@ -3237,9 +3278,7 @@ static __devinit int init_ipmi_si(void)
3237 /* Register the device drivers. */ 3278 /* Register the device drivers. */
3238 rv = driver_register(&ipmi_driver.driver); 3279 rv = driver_register(&ipmi_driver.driver);
3239 if (rv) { 3280 if (rv) {
3240 printk(KERN_ERR 3281 printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
3241 "init_ipmi_si: Unable to register driver: %d\n",
3242 rv);
3243 return rv; 3282 return rv;
3244 } 3283 }
3245 3284
@@ -3263,38 +3302,81 @@ static __devinit int init_ipmi_si(void)
3263 3302
3264 hardcode_find_bmc(); 3303 hardcode_find_bmc();
3265 3304
3266#ifdef CONFIG_DMI 3305 /* If the user gave us a device, they presumably want us to use it */
3267 dmi_find_bmc(); 3306 mutex_lock(&smi_infos_lock);
3268#endif 3307 if (!list_empty(&smi_infos)) {
3308 mutex_unlock(&smi_infos_lock);
3309 return 0;
3310 }
3311 mutex_unlock(&smi_infos_lock);
3269 3312
3270#ifdef CONFIG_ACPI 3313#ifdef CONFIG_PCI
3271 spmi_find_bmc(); 3314 rv = pci_register_driver(&ipmi_pci_driver);
3315 if (rv)
3316 printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
3272#endif 3317#endif
3318
3273#ifdef CONFIG_ACPI 3319#ifdef CONFIG_ACPI
3274 pnp_register_driver(&ipmi_pnp_driver); 3320 pnp_register_driver(&ipmi_pnp_driver);
3275#endif 3321#endif
3276 3322
3277#ifdef CONFIG_PCI 3323#ifdef CONFIG_DMI
3278 rv = pci_register_driver(&ipmi_pci_driver); 3324 dmi_find_bmc();
3279 if (rv) 3325#endif
3280 printk(KERN_ERR 3326
3281 "init_ipmi_si: Unable to register PCI driver: %d\n", 3327#ifdef CONFIG_ACPI
3282 rv); 3328 spmi_find_bmc();
3283#endif 3329#endif
3284 3330
3285#ifdef CONFIG_PPC_OF 3331#ifdef CONFIG_PPC_OF
3286 of_register_platform_driver(&ipmi_of_platform_driver); 3332 of_register_platform_driver(&ipmi_of_platform_driver);
3287#endif 3333#endif
3288 3334
3335 /* We prefer devices with interrupts, but in the case of a machine
3336 with multiple BMCs we assume that there will be several instances
3337 of a given type so if we succeed in registering a type then also
3338 try to register everything else of the same type */
3339
3340 mutex_lock(&smi_infos_lock);
3341 list_for_each_entry(e, &smi_infos, link) {
3342 /* Try to register a device if it has an IRQ and we either
3343 haven't successfully registered a device yet or this
3344 device has the same type as one we successfully registered */
3345 if (e->irq && (!type || e->addr_source == type)) {
3346 if (!try_smi_init(e)) {
3347 type = e->addr_source;
3348 }
3349 }
3350 }
3351
3352 /* type will only have been set if we successfully registered an si */
3353 if (type) {
3354 mutex_unlock(&smi_infos_lock);
3355 return 0;
3356 }
3357
3358 /* Fall back to the preferred device */
3359
3360 list_for_each_entry(e, &smi_infos, link) {
3361 if (!e->irq && (!type || e->addr_source == type)) {
3362 if (!try_smi_init(e)) {
3363 type = e->addr_source;
3364 }
3365 }
3366 }
3367 mutex_unlock(&smi_infos_lock);
3368
3369 if (type)
3370 return 0;
3371
3289 if (si_trydefaults) { 3372 if (si_trydefaults) {
3290 mutex_lock(&smi_infos_lock); 3373 mutex_lock(&smi_infos_lock);
3291 if (list_empty(&smi_infos)) { 3374 if (list_empty(&smi_infos)) {
3292 /* No BMC was found, try defaults. */ 3375 /* No BMC was found, try defaults. */
3293 mutex_unlock(&smi_infos_lock); 3376 mutex_unlock(&smi_infos_lock);
3294 default_find_bmc(); 3377 default_find_bmc();
3295 } else { 3378 } else
3296 mutex_unlock(&smi_infos_lock); 3379 mutex_unlock(&smi_infos_lock);
3297 }
3298 } 3380 }
3299 3381
3300 mutex_lock(&smi_infos_lock); 3382 mutex_lock(&smi_infos_lock);
@@ -3308,8 +3390,8 @@ static __devinit int init_ipmi_si(void)
3308 of_unregister_platform_driver(&ipmi_of_platform_driver); 3390 of_unregister_platform_driver(&ipmi_of_platform_driver);
3309#endif 3391#endif
3310 driver_unregister(&ipmi_driver.driver); 3392 driver_unregister(&ipmi_driver.driver);
3311 printk(KERN_WARNING 3393 printk(KERN_WARNING PFX
3312 "ipmi_si: Unable to find any System Interface(s)\n"); 3394 "Unable to find any System Interface(s)\n");
3313 return -ENODEV; 3395 return -ENODEV;
3314 } else { 3396 } else {
3315 mutex_unlock(&smi_infos_lock); 3397 mutex_unlock(&smi_infos_lock);
@@ -3320,7 +3402,7 @@ module_init(init_ipmi_si);
3320 3402
3321static void cleanup_one_si(struct smi_info *to_clean) 3403static void cleanup_one_si(struct smi_info *to_clean)
3322{ 3404{
3323 int rv; 3405 int rv = 0;
3324 unsigned long flags; 3406 unsigned long flags;
3325 3407
3326 if (!to_clean) 3408 if (!to_clean)
@@ -3364,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean)
3364 schedule_timeout_uninterruptible(1); 3446 schedule_timeout_uninterruptible(1);
3365 } 3447 }
3366 3448
3367 rv = ipmi_unregister_smi(to_clean->intf); 3449 if (to_clean->intf)
3450 rv = ipmi_unregister_smi(to_clean->intf);
3451
3368 if (rv) { 3452 if (rv) {
3369 printk(KERN_ERR 3453 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3370 "ipmi_si: Unable to unregister device: errno=%d\n",
3371 rv); 3454 rv);
3372 } 3455 }
3373 3456
3374 to_clean->handlers->cleanup(to_clean->si_sm); 3457 if (to_clean->handlers)
3458 to_clean->handlers->cleanup(to_clean->si_sm);
3375 3459
3376 kfree(to_clean->si_sm); 3460 kfree(to_clean->si_sm);
3377 3461
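
The reworked registration order in init_ipmi_si() above is spread over two list walks: after any user-supplied (hard-coded) interface, discovered interfaces that have an IRQ are tried first, the address-source type of the first success is remembered, only further entries of that same type are registered, and IRQ-less entries are used as a fallback only if nothing with an IRQ came up. A stand-alone sketch of that selection policy, in plain userspace C with made-up entry data and a stub try_init() in place of try_smi_init():

    #include <stdio.h>

    enum src { SRC_INVALID = 0, SRC_ACPI, SRC_PCI };

    struct entry {
        int has_irq;
        enum src source;
        const char *name;
    };

    /* stand-in for the probed smi_infos list */
    static struct entry entries[] = {
        { 0, SRC_ACPI, "acpi-polled" },
        { 1, SRC_PCI,  "pci-irq"     },
        { 1, SRC_PCI,  "pci-irq-2"   },
    };

    static int try_init(struct entry *e)
    {
        printf("registering %s\n", e->name);
        return 0;                       /* 0 == success, like try_smi_init() */
    }

    int main(void)
    {
        enum src type = SRC_INVALID;
        size_t i, n = sizeof(entries) / sizeof(entries[0]);

        /* pass 1: only entries with an IRQ; lock onto the first type that works */
        for (i = 0; i < n; i++)
            if (entries[i].has_irq && (!type || entries[i].source == type))
                if (!try_init(&entries[i]))
                    type = entries[i].source;

        if (type)
            return 0;

        /* pass 2: fall back to IRQ-less entries, same one-type rule */
        for (i = 0; i < n; i++)
            if (!entries[i].has_irq && (!type || entries[i].source == type))
                if (!try_init(&entries[i]))
                    type = entries[i].source;

        return type ? 0 : 1;
    }

The same "stick with the first type that worked" test appears in both passes, which is why the fallback loop repeats the type check instead of registering everything unconditionally.
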
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index fdd37543aa79..02abfddce45a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp)
287 char *name; 287 char *name;
288 int fl; 288 int fl;
289 289
290 name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL); 290 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
291 if (name == NULL) 291 if (name == NULL)
292 return -ENOMEM; 292 return -ENOMEM;
293 293
294 sprintf (name, CHRDEV "%x", minor);
295
296 port = parport_find_number (minor); 294 port = parport_find_number (minor);
297 if (!port) { 295 if (!port) {
298 printk (KERN_WARNING "%s: no associated port!\n", name); 296 printk (KERN_WARNING "%s: no associated port!\n", name);
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 000000000000..74f00b5ffa36
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,162 @@
1/*
2 * RAM Oops/Panic logger
3 *
4 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/kmsg_dump.h>
25#include <linux/time.h>
26#include <linux/io.h>
27#include <linux/ioport.h>
28
29#define RAMOOPS_KERNMSG_HDR "===="
30#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
31
32#define RECORD_SIZE 4096
33
34static ulong mem_address;
35module_param(mem_address, ulong, 0400);
36MODULE_PARM_DESC(mem_address,
37 "start of reserved RAM used to store oops/panic logs");
38
39static ulong mem_size;
40module_param(mem_size, ulong, 0400);
41MODULE_PARM_DESC(mem_size,
42 "size of reserved RAM used to store oops/panic logs");
43
44static int dump_oops = 1;
45module_param(dump_oops, int, 0600);
46MODULE_PARM_DESC(dump_oops,
47 "set to 1 to dump oopses, 0 to only dump panics (default 1)");
48
49static struct ramoops_context {
50 struct kmsg_dumper dump;
51 void *virt_addr;
52 phys_addr_t phys_addr;
53 unsigned long size;
54 int count;
55 int max_count;
56} oops_cxt;
57
58static void ramoops_do_dump(struct kmsg_dumper *dumper,
59 enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
60 const char *s2, unsigned long l2)
61{
62 struct ramoops_context *cxt = container_of(dumper,
63 struct ramoops_context, dump);
64 unsigned long s1_start, s2_start;
65 unsigned long l1_cpy, l2_cpy;
66 int res;
67 char *buf;
68 struct timeval timestamp;
69
70 /* Only dump oopses if dump_oops is set */
71 if (reason == KMSG_DUMP_OOPS && !dump_oops)
72 return;
73
74 buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
75 memset(buf, '\0', RECORD_SIZE);
76 res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
77 buf += res;
78 do_gettimeofday(&timestamp);
79 res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
80 buf += res;
81
82 l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
83 l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
84
85 s2_start = l2 - l2_cpy;
86 s1_start = l1 - l1_cpy;
87
88 memcpy(buf, s1 + s1_start, l1_cpy);
89 memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
90
91 cxt->count = (cxt->count + 1) % cxt->max_count;
92}
93
94static int __init ramoops_init(void)
95{
96 struct ramoops_context *cxt = &oops_cxt;
97 int err = -EINVAL;
98
99 if (!mem_size) {
100 printk(KERN_ERR "ramoops: invalid size specification");
101 goto fail3;
102 }
103
104 rounddown_pow_of_two(mem_size);
105
106 if (mem_size < RECORD_SIZE) {
107 printk(KERN_ERR "ramoops: size too small");
108 goto fail3;
109 }
110
111 cxt->max_count = mem_size / RECORD_SIZE;
112 cxt->count = 0;
113 cxt->size = mem_size;
114 cxt->phys_addr = mem_address;
115
116 if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
117 printk(KERN_ERR "ramoops: request mem region failed");
118 err = -EINVAL;
119 goto fail3;
120 }
121
122 cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
123 if (!cxt->virt_addr) {
124 printk(KERN_ERR "ramoops: ioremap failed");
125 goto fail2;
126 }
127
128 cxt->dump.dump = ramoops_do_dump;
129 err = kmsg_dump_register(&cxt->dump);
130 if (err) {
131 printk(KERN_ERR "ramoops: registering kmsg dumper failed");
132 goto fail1;
133 }
134
135 return 0;
136
137fail1:
138 iounmap(cxt->virt_addr);
139fail2:
140 release_mem_region(cxt->phys_addr, cxt->size);
141fail3:
142 return err;
143}
144
145static void __exit ramoops_exit(void)
146{
147 struct ramoops_context *cxt = &oops_cxt;
148
149 if (kmsg_dump_unregister(&cxt->dump) < 0)
150 printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
151
152 iounmap(cxt->virt_addr);
153 release_mem_region(cxt->phys_addr, cxt->size);
154}
155
156
157module_init(ramoops_init);
158module_exit(ramoops_exit);
159
160MODULE_LICENSE("GPL");
161MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
162MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
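
The new ramoops driver above treats the reserved region as a ring of fixed 4 KiB records: each dump lands at count * RECORD_SIZE behind a "====" marker and a timestamp, only the tail of an oversized message is kept, and count wraps modulo mem_size / RECORD_SIZE so the newest dump overwrites the oldest. A rough userspace sketch of that slot arithmetic, with an ordinary static buffer standing in for the ioremap()ed region and a made-up message:

    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #define RECORD_SIZE 4096

    static char region[8 * RECORD_SIZE];   /* stand-in for the reserved RAM */
    static int count, max_count = sizeof(region) / RECORD_SIZE;

    static void store_dump(const char *msg)
    {
        char *buf = region + count * RECORD_SIZE;
        size_t hdr, len = strlen(msg);

        memset(buf, 0, RECORD_SIZE);
        hdr = sprintf(buf, "====%lu\n", (unsigned long)time(NULL));

        /* like the driver, keep only the tail if the message does not fit */
        if (len > RECORD_SIZE - hdr - 1)
            msg += len - (RECORD_SIZE - hdr - 1);
        strcpy(buf + hdr, msg);

        count = (count + 1) % max_count;   /* ring: oldest record gets reused */
    }

    int main(void)
    {
        store_dump("Oops: example panic text");
        printf("record 0:\n%s\n", region);
        return 0;
    }

Note that the driver itself exposes no read path; the records are meant to be picked out of the reserved region after a reboot.
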
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index bd1d1164fec5..7cdb6ee569cd 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
3967 font.charcount = op->charcount; 3967 font.charcount = op->charcount;
3968 font.height = op->height; 3968 font.height = op->height;
3969 font.width = op->width; 3969 font.width = op->width;
3970 font.data = kmalloc(size, GFP_KERNEL); 3970 font.data = memdup_user(op->data, size);
3971 if (!font.data) 3971 if (IS_ERR(font.data))
3972 return -ENOMEM; 3972 return PTR_ERR(font.data);
3973 if (copy_from_user(font.data, op->data, size)) {
3974 kfree(font.data);
3975 return -EFAULT;
3976 }
3977 acquire_console_sem(); 3973 acquire_console_sem();
3978 if (vc->vc_sw->con_font_set) 3974 if (vc->vc_sw->con_font_set)
3979 rc = vc->vc_sw->con_font_set(vc, &font, op->flags); 3975 rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index adc10a2ac5f6..996c1bdb5a34 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
774static void i5000_check_error(struct mem_ctl_info *mci) 774static void i5000_check_error(struct mem_ctl_info *mci)
775{ 775{
776 struct i5000_error_info info; 776 struct i5000_error_info info;
777 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 777 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
778 i5000_get_error_info(mci, &info); 778 i5000_get_error_info(mci, &info);
779 i5000_process_error_info(mci, &info, 1); 779 i5000_process_error_info(mci, &info, 1);
780} 780}
@@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1353 int num_dimms_per_channel; 1353 int num_dimms_per_channel;
1354 int num_csrows; 1354 int num_csrows;
1355 1355
1356 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1356 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1357 __func__, 1357 __FILE__, __func__,
1358 pdev->bus->number, 1358 pdev->bus->number,
1359 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1359 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1360 1360
@@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1389 return -ENOMEM; 1389 return -ENOMEM;
1390 1390
1391 kobject_get(&mci->edac_mci_kobj); 1391 kobject_get(&mci->edac_mci_kobj);
1392 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1392 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1393 1393
1394 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1394 mci->dev = &pdev->dev; /* record ptr to the generic device */
1395 1395
@@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1432 1432
1433 /* add this new MC control structure to EDAC's list of MCs */ 1433 /* add this new MC control structure to EDAC's list of MCs */
1434 if (edac_mc_add_mc(mci)) { 1434 if (edac_mc_add_mc(mci)) {
1435 debugf0("MC: " __FILE__ 1435 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1436 ": %s(): failed edac_mc_add_mc()\n", __func__); 1436 __FILE__, __func__);
1437 /* FIXME: perhaps some code should go here that disables error 1437 /* FIXME: perhaps some code should go here that disables error
1438 * reporting if we just enabled it 1438 * reporting if we just enabled it
1439 */ 1439 */
@@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
1478{ 1478{
1479 int rc; 1479 int rc;
1480 1480
1481 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1481 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1482 1482
1483 /* wake up device */ 1483 /* wake up device */
1484 rc = pci_enable_device(pdev); 1484 rc = pci_enable_device(pdev);
@@ -1497,7 +1497,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1497{ 1497{
1498 struct mem_ctl_info *mci; 1498 struct mem_ctl_info *mci;
1499 1499
1500 debugf0(__FILE__ ": %s()\n", __func__); 1500 debugf0("%s: %s()\n", __FILE__, __func__);
1501 1501
1502 if (i5000_pci) 1502 if (i5000_pci)
1503 edac_pci_release_generic_ctl(i5000_pci); 1503 edac_pci_release_generic_ctl(i5000_pci);
@@ -1544,7 +1544,7 @@ static int __init i5000_init(void)
1544{ 1544{
1545 int pci_rc; 1545 int pci_rc;
1546 1546
1547 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1547 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1548 1548
1549 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1549 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1550 opstate_init(); 1550 opstate_init();
@@ -1560,7 +1560,7 @@ static int __init i5000_init(void)
1560 */ 1560 */
1561static void __exit i5000_exit(void) 1561static void __exit i5000_exit(void)
1562{ 1562{
1563 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1563 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1564 pci_unregister_driver(&i5000_driver); 1564 pci_unregister_driver(&i5000_driver);
1565} 1565}
1566 1566
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f99d10655ed4..010c1d6526f5 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
694static void i5400_check_error(struct mem_ctl_info *mci) 694static void i5400_check_error(struct mem_ctl_info *mci)
695{ 695{
696 struct i5400_error_info info; 696 struct i5400_error_info info;
697 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 697 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
698 i5400_get_error_info(mci, &info); 698 i5400_get_error_info(mci, &info);
699 i5400_process_error_info(mci, &info); 699 i5400_process_error_info(mci, &info);
700} 700}
@@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1227 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1227 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1228 return -EINVAL; 1228 return -EINVAL;
1229 1229
1230 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1230 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1231 __func__, 1231 __FILE__, __func__,
1232 pdev->bus->number, 1232 pdev->bus->number,
1233 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1233 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1234 1234
@@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1256 if (mci == NULL) 1256 if (mci == NULL)
1257 return -ENOMEM; 1257 return -ENOMEM;
1258 1258
1259 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1259 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1260 1260
1261 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1261 mci->dev = &pdev->dev; /* record ptr to the generic device */
1262 1262
@@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1299 1299
1300 /* add this new MC control structure to EDAC's list of MCs */ 1300 /* add this new MC control structure to EDAC's list of MCs */
1301 if (edac_mc_add_mc(mci)) { 1301 if (edac_mc_add_mc(mci)) {
1302 debugf0("MC: " __FILE__ 1302 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1303 ": %s(): failed edac_mc_add_mc()\n", __func__); 1303 __FILE__, __func__);
1304 /* FIXME: perhaps some code should go here that disables error 1304 /* FIXME: perhaps some code should go here that disables error
1305 * reporting if we just enabled it 1305 * reporting if we just enabled it
1306 */ 1306 */
@@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
1344{ 1344{
1345 int rc; 1345 int rc;
1346 1346
1347 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1347 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1348 1348
1349 /* wake up device */ 1349 /* wake up device */
1350 rc = pci_enable_device(pdev); 1350 rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
1363{ 1363{
1364 struct mem_ctl_info *mci; 1364 struct mem_ctl_info *mci;
1365 1365
1366 debugf0(__FILE__ ": %s()\n", __func__); 1366 debugf0("%s: %s()\n", __FILE__, __func__);
1367 1367
1368 if (i5400_pci) 1368 if (i5400_pci)
1369 edac_pci_release_generic_ctl(i5400_pci); 1369 edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@ static int __init i5400_init(void)
1409{ 1409{
1410 int pci_rc; 1410 int pci_rc;
1411 1411
1412 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1412 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1413 1413
1414 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1414 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1415 opstate_init(); 1415 opstate_init();
@@ -1425,7 +1425,7 @@ static int __init i5400_init(void)
1425 */ 1425 */
1426static void __exit i5400_exit(void) 1426static void __exit i5400_exit(void)
1427{ 1427{
1428 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1428 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1429 pci_unregister_driver(&i5400_driver); 1429 pci_unregister_driver(&i5400_driver);
1430} 1430}
1431 1431
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 2bf2c5051bfe..a2fa1feed724 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
178{ 178{
179 struct i82443bxgx_edacmc_error_info info; 179 struct i82443bxgx_edacmc_error_info info;
180 180
181 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 181 debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
182 i82443bxgx_edacmc_get_error_info(mci, &info); 182 i82443bxgx_edacmc_get_error_info(mci, &info);
183 i82443bxgx_edacmc_process_error_info(mci, &info, 1); 183 i82443bxgx_edacmc_process_error_info(mci, &info, 1);
184} 184}
@@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
198 for (index = 0; index < mci->nr_csrows; index++) { 198 for (index = 0; index < mci->nr_csrows; index++) {
199 csrow = &mci->csrows[index]; 199 csrow = &mci->csrows[index];
200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
201 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n", 201 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
202 mci->mc_idx, __func__, index, drbar); 202 mci->mc_idx, __FILE__, __func__, index, drbar);
203 row_high_limit = ((u32) drbar << 23); 203 row_high_limit = ((u32) drbar << 23);
204 /* find the DRAM Chip Select Base address and mask */ 204 /* find the DRAM Chip Select Base address and mask */
205 debugf1("MC%d: " __FILE__ ": %s() Row=%d, " 205 debugf1("MC%d: %s: %s() Row=%d, "
206 "Boundry Address=%#0x, Last = %#0x \n", 206 "Boundry Address=%#0x, Last = %#0x\n",
207 mci->mc_idx, __func__, index, row_high_limit, 207 mci->mc_idx, __FILE__, __func__, index, row_high_limit,
208 row_high_limit_last); 208 row_high_limit_last);
209 209
210 /* 440GX goes to 2GB, represented with a DRB of 0. */ 210 /* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
237 enum mem_type mtype; 237 enum mem_type mtype;
238 enum edac_type edac_mode; 238 enum edac_type edac_mode;
239 239
240 debugf0("MC: " __FILE__ ": %s()\n", __func__); 240 debugf0("MC: %s: %s()\n", __FILE__, __func__);
241 241
242 /* Something is really hosed if PCI config space reads from 242 /* Something is really hosed if PCI config space reads from
243 * the MC aren't working. 243 * the MC aren't working.
@@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
250 if (mci == NULL) 250 if (mci == NULL)
251 return -ENOMEM; 251 return -ENOMEM;
252 252
253 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 253 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
254 mci->dev = &pdev->dev; 254 mci->dev = &pdev->dev;
255 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; 255 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
256 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 256 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
336 __func__); 336 __func__);
337 } 337 }
338 338
339 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 339 debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
340 return 0; 340 return 0;
341 341
342fail: 342fail:
@@ -352,7 +352,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
352{ 352{
353 int rc; 353 int rc;
354 354
355 debugf0("MC: " __FILE__ ": %s()\n", __func__); 355 debugf0("MC: %s: %s()\n", __FILE__, __func__);
356 356
357 /* don't need to call pci_enable_device() */ 357 /* don't need to call pci_enable_device() */
358 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); 358 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
367{ 367{
368 struct mem_ctl_info *mci; 368 struct mem_ctl_info *mci;
369 369
370 debugf0(__FILE__ ": %s()\n", __func__); 370 debugf0("%s: %s()\n", __FILE__, __func__);
371 371
372 if (i82443bxgx_pci) 372 if (i82443bxgx_pci)
373 edac_pci_release_generic_ctl(i82443bxgx_pci); 373 edac_pci_release_generic_ctl(i82443bxgx_pci);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 5045156c5313..9dcb30466ec0 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -30,7 +30,6 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/timer.h>
34#include <linux/workqueue.h> 33#include <linux/workqueue.h>
35 34
36#include <asm/atomic.h> 35#include <asm/atomic.h>
@@ -63,7 +62,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
63#define BIB_CRC(v) ((v) << 0) 62#define BIB_CRC(v) ((v) << 0)
64#define BIB_CRC_LENGTH(v) ((v) << 16) 63#define BIB_CRC_LENGTH(v) ((v) << 16)
65#define BIB_INFO_LENGTH(v) ((v) << 24) 64#define BIB_INFO_LENGTH(v) ((v) << 24)
66 65#define BIB_BUS_NAME 0x31333934 /* "1394" */
67#define BIB_LINK_SPEED(v) ((v) << 0) 66#define BIB_LINK_SPEED(v) ((v) << 0)
68#define BIB_GENERATION(v) ((v) << 4) 67#define BIB_GENERATION(v) ((v) << 4)
69#define BIB_MAX_ROM(v) ((v) << 8) 68#define BIB_MAX_ROM(v) ((v) << 8)
@@ -73,7 +72,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
73#define BIB_BMC ((1) << 28) 72#define BIB_BMC ((1) << 28)
74#define BIB_ISC ((1) << 29) 73#define BIB_ISC ((1) << 29)
75#define BIB_CMC ((1) << 30) 74#define BIB_CMC ((1) << 30)
76#define BIB_IMC ((1) << 31) 75#define BIB_IRMC ((1) << 31)
76#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
77 77
78static void generate_config_rom(struct fw_card *card, __be32 *config_rom) 78static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
79{ 79{
@@ -91,18 +91,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
91 91
92 config_rom[0] = cpu_to_be32( 92 config_rom[0] = cpu_to_be32(
93 BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); 93 BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
94 config_rom[1] = cpu_to_be32(0x31333934); 94 config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
95 config_rom[2] = cpu_to_be32( 95 config_rom[2] = cpu_to_be32(
96 BIB_LINK_SPEED(card->link_speed) | 96 BIB_LINK_SPEED(card->link_speed) |
97 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | 97 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
98 BIB_MAX_ROM(2) | 98 BIB_MAX_ROM(2) |
99 BIB_MAX_RECEIVE(card->max_receive) | 99 BIB_MAX_RECEIVE(card->max_receive) |
100 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC); 100 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
101 config_rom[3] = cpu_to_be32(card->guid >> 32); 101 config_rom[3] = cpu_to_be32(card->guid >> 32);
102 config_rom[4] = cpu_to_be32(card->guid); 102 config_rom[4] = cpu_to_be32(card->guid);
103 103
104 /* Generate root directory. */ 104 /* Generate root directory. */
105 config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */ 105 config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
106 i = 7; 106 i = 7;
107 j = 7 + descriptor_count; 107 j = 7 + descriptor_count;
108 108
@@ -407,13 +407,6 @@ static void fw_card_bm_work(struct work_struct *work)
407 fw_card_put(card); 407 fw_card_put(card);
408} 408}
409 409
410static void flush_timer_callback(unsigned long data)
411{
412 struct fw_card *card = (struct fw_card *)data;
413
414 fw_flush_transactions(card);
415}
416
417void fw_card_initialize(struct fw_card *card, 410void fw_card_initialize(struct fw_card *card,
418 const struct fw_card_driver *driver, 411 const struct fw_card_driver *driver,
419 struct device *device) 412 struct device *device)
@@ -432,8 +425,6 @@ void fw_card_initialize(struct fw_card *card,
432 init_completion(&card->done); 425 init_completion(&card->done);
433 INIT_LIST_HEAD(&card->transaction_list); 426 INIT_LIST_HEAD(&card->transaction_list);
434 spin_lock_init(&card->lock); 427 spin_lock_init(&card->lock);
435 setup_timer(&card->flush_timer,
436 flush_timer_callback, (unsigned long)card);
437 428
438 card->local_node = NULL; 429 card->local_node = NULL;
439 430
@@ -558,7 +549,6 @@ void fw_core_remove_card(struct fw_card *card)
558 wait_for_completion(&card->done); 549 wait_for_completion(&card->done);
559 550
560 WARN_ON(!list_empty(&card->transaction_list)); 551 WARN_ON(!list_empty(&card->transaction_list));
561 del_timer_sync(&card->flush_timer);
562} 552}
563EXPORT_SYMBOL(fw_core_remove_card); 553EXPORT_SYMBOL(fw_core_remove_card);
564 554
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14a34d99eea2..5bf106b9d791 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
227 list_add_tail(&client->link, &device->client_list); 227 list_add_tail(&client->link, &device->client_list);
228 mutex_unlock(&device->client_list_mutex); 228 mutex_unlock(&device->client_list_mutex);
229 229
230 return 0; 230 return nonseekable_open(inode, file);
231} 231}
232 232
233static void queue_event(struct client *client, struct event *event, 233static void queue_event(struct client *client, struct event *event,
@@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
1496 1496
1497const struct file_operations fw_device_ops = { 1497const struct file_operations fw_device_ops = {
1498 .owner = THIS_MODULE, 1498 .owner = THIS_MODULE,
1499 .llseek = no_llseek,
1499 .open = fw_device_op_open, 1500 .open = fw_device_op_open,
1500 .read = fw_device_op_read, 1501 .read = fw_device_op_read,
1501 .unlocked_ioctl = fw_device_op_ioctl, 1502 .unlocked_ioctl = fw_device_op_ioctl,
1502 .poll = fw_device_op_poll,
1503 .release = fw_device_op_release,
1504 .mmap = fw_device_op_mmap, 1503 .mmap = fw_device_op_mmap,
1505 1504 .release = fw_device_op_release,
1505 .poll = fw_device_op_poll,
1506#ifdef CONFIG_COMPAT 1506#ifdef CONFIG_COMPAT
1507 .compat_ioctl = fw_device_op_compat_ioctl, 1507 .compat_ioctl = fw_device_op_compat_ioctl,
1508#endif 1508#endif
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 673b03f8b4ec..fdc33ff06dc1 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction,
81 spin_lock_irqsave(&card->lock, flags); 81 spin_lock_irqsave(&card->lock, flags);
82 list_for_each_entry(t, &card->transaction_list, link) { 82 list_for_each_entry(t, &card->transaction_list, link) {
83 if (t == transaction) { 83 if (t == transaction) {
84 list_del(&t->link); 84 list_del_init(&t->link);
85 card->tlabel_mask &= ~(1ULL << t->tlabel); 85 card->tlabel_mask &= ~(1ULL << t->tlabel);
86 break; 86 break;
87 } 87 }
@@ -89,6 +89,7 @@ static int close_transaction(struct fw_transaction *transaction,
89 spin_unlock_irqrestore(&card->lock, flags); 89 spin_unlock_irqrestore(&card->lock, flags);
90 90
91 if (&t->link != &card->transaction_list) { 91 if (&t->link != &card->transaction_list) {
92 del_timer_sync(&t->split_timeout_timer);
92 t->callback(card, rcode, NULL, 0, t->callback_data); 93 t->callback(card, rcode, NULL, 0, t->callback_data);
93 return 0; 94 return 0;
94 } 95 }
@@ -121,6 +122,31 @@ int fw_cancel_transaction(struct fw_card *card,
121} 122}
122EXPORT_SYMBOL(fw_cancel_transaction); 123EXPORT_SYMBOL(fw_cancel_transaction);
123 124
125static void split_transaction_timeout_callback(unsigned long data)
126{
127 struct fw_transaction *t = (struct fw_transaction *)data;
128 struct fw_card *card = t->card;
129 unsigned long flags;
130
131 spin_lock_irqsave(&card->lock, flags);
132 if (list_empty(&t->link)) {
133 spin_unlock_irqrestore(&card->lock, flags);
134 return;
135 }
136 list_del(&t->link);
137 card->tlabel_mask &= ~(1ULL << t->tlabel);
138 spin_unlock_irqrestore(&card->lock, flags);
139
140 card->driver->cancel_packet(card, &t->packet);
141
142 /*
143 * At this point cancel_packet will never call the transaction
144 * callback, since we just took the transaction out of the list.
145 * So do it here.
146 */
147 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
148}
149
124static void transmit_complete_callback(struct fw_packet *packet, 150static void transmit_complete_callback(struct fw_packet *packet,
125 struct fw_card *card, int status) 151 struct fw_card *card, int status)
126{ 152{
@@ -229,6 +255,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
229 packet->payload_mapped = false; 255 packet->payload_mapped = false;
230} 256}
231 257
258static int allocate_tlabel(struct fw_card *card)
259{
260 int tlabel;
261
262 tlabel = card->current_tlabel;
263 while (card->tlabel_mask & (1ULL << tlabel)) {
264 tlabel = (tlabel + 1) & 0x3f;
265 if (tlabel == card->current_tlabel)
266 return -EBUSY;
267 }
268
269 card->current_tlabel = (tlabel + 1) & 0x3f;
270 card->tlabel_mask |= 1ULL << tlabel;
271
272 return tlabel;
273}
274
232/** 275/**
233 * This function provides low-level access to the IEEE1394 transaction 276 * This function provides low-level access to the IEEE1394 transaction
234 * logic. Most C programs would use either fw_read(), fw_write() or 277 * logic. Most C programs would use either fw_read(), fw_write() or
@@ -277,31 +320,26 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
277 int tlabel; 320 int tlabel;
278 321
279 /* 322 /*
280 * Bump the flush timer up 100ms first of all so we
281 * don't race with a flush timer callback.
282 */
283
284 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
285
286 /*
287 * Allocate tlabel from the bitmap and put the transaction on 323 * Allocate tlabel from the bitmap and put the transaction on
288 * the list while holding the card spinlock. 324 * the list while holding the card spinlock.
289 */ 325 */
290 326
291 spin_lock_irqsave(&card->lock, flags); 327 spin_lock_irqsave(&card->lock, flags);
292 328
293 tlabel = card->current_tlabel; 329 tlabel = allocate_tlabel(card);
294 if (card->tlabel_mask & (1ULL << tlabel)) { 330 if (tlabel < 0) {
295 spin_unlock_irqrestore(&card->lock, flags); 331 spin_unlock_irqrestore(&card->lock, flags);
296 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); 332 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
297 return; 333 return;
298 } 334 }
299 335
300 card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
301 card->tlabel_mask |= (1ULL << tlabel);
302
303 t->node_id = destination_id; 336 t->node_id = destination_id;
304 t->tlabel = tlabel; 337 t->tlabel = tlabel;
338 t->card = card;
339 setup_timer(&t->split_timeout_timer,
340 split_transaction_timeout_callback, (unsigned long)t);
341 /* FIXME: start this timer later, relative to t->timestamp */
342 mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
305 t->callback = callback; 343 t->callback = callback;
306 t->callback_data = callback_data; 344 t->callback_data = callback_data;
307 345
@@ -347,11 +385,13 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
347 struct transaction_callback_data d; 385 struct transaction_callback_data d;
348 struct fw_transaction t; 386 struct fw_transaction t;
349 387
388 init_timer_on_stack(&t.split_timeout_timer);
350 init_completion(&d.done); 389 init_completion(&d.done);
351 d.payload = payload; 390 d.payload = payload;
352 fw_send_request(card, &t, tcode, destination_id, generation, speed, 391 fw_send_request(card, &t, tcode, destination_id, generation, speed,
353 offset, payload, length, transaction_callback, &d); 392 offset, payload, length, transaction_callback, &d);
354 wait_for_completion(&d.done); 393 wait_for_completion(&d.done);
394 destroy_timer_on_stack(&t.split_timeout_timer);
355 395
356 return d.rcode; 396 return d.rcode;
357} 397}
@@ -394,30 +434,6 @@ void fw_send_phy_config(struct fw_card *card,
394 mutex_unlock(&phy_config_mutex); 434 mutex_unlock(&phy_config_mutex);
395} 435}
396 436
397void fw_flush_transactions(struct fw_card *card)
398{
399 struct fw_transaction *t, *next;
400 struct list_head list;
401 unsigned long flags;
402
403 INIT_LIST_HEAD(&list);
404 spin_lock_irqsave(&card->lock, flags);
405 list_splice_init(&card->transaction_list, &list);
406 card->tlabel_mask = 0;
407 spin_unlock_irqrestore(&card->lock, flags);
408
409 list_for_each_entry_safe(t, next, &list, link) {
410 card->driver->cancel_packet(card, &t->packet);
411
412 /*
413 * At this point cancel_packet will never call the
414 * transaction callback, since we just took all the
415 * transactions out of the list. So do it here.
416 */
417 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
418 }
419}
420
421static struct fw_address_handler *lookup_overlapping_address_handler( 437static struct fw_address_handler *lookup_overlapping_address_handler(
422 struct list_head *list, unsigned long long offset, size_t length) 438 struct list_head *list, unsigned long long offset, size_t length)
423{ 439{
@@ -827,8 +843,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
827 spin_lock_irqsave(&card->lock, flags); 843 spin_lock_irqsave(&card->lock, flags);
828 list_for_each_entry(t, &card->transaction_list, link) { 844 list_for_each_entry(t, &card->transaction_list, link) {
829 if (t->node_id == source && t->tlabel == tlabel) { 845 if (t->node_id == source && t->tlabel == tlabel) {
830 list_del(&t->link); 846 list_del_init(&t->link);
831 card->tlabel_mask &= ~(1 << t->tlabel); 847 card->tlabel_mask &= ~(1ULL << t->tlabel);
832 break; 848 break;
833 } 849 }
834 } 850 }
@@ -869,6 +885,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
869 break; 885 break;
870 } 886 }
871 887
888 del_timer_sync(&t->split_timeout_timer);
889
872 /* 890 /*
873 * The response handler may be executed while the request handler 891 * The response handler may be executed while the request handler
874 * is still pending. Cancel the request handler. 892 * is still pending. Cancel the request handler.
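
Two parts of the core-transaction rework above are easy to model in isolation: allocate_tlabel() hands out IEEE 1394 transaction labels from a 64-bit in-use mask, starting right after the last label granted and wrapping at 0x3f, and close_transaction()/the response path clear the label's bit again when the transaction retires. A small userspace model of just that allocator (the mask handling mirrors the diff; main() and the surrounding names are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t tlabel_mask;    /* bit n set => transaction label n in flight */
    static int current_tlabel;

    /* returns a free label in 0..63, or -1 when all 64 are outstanding */
    static int allocate_tlabel(void)
    {
        int tlabel = current_tlabel;

        while (tlabel_mask & (1ULL << tlabel)) {
            tlabel = (tlabel + 1) & 0x3f;
            if (tlabel == current_tlabel)
                return -1;
        }

        current_tlabel = (tlabel + 1) & 0x3f;
        tlabel_mask |= 1ULL << tlabel;
        return tlabel;
    }

    /* what close_transaction() / the response path do when a label retires */
    static void release_tlabel(int tlabel)
    {
        tlabel_mask &= ~(1ULL << tlabel);
    }

    int main(void)
    {
        int a = allocate_tlabel();
        int b = allocate_tlabel();

        printf("got labels %d and %d\n", a, b);
        release_tlabel(a);
        printf("after releasing %d the next allocation gets %d\n",
               a, allocate_tlabel());
        return 0;
    }

With labels tracked per transaction, the new per-transaction split_timeout_timer only has to cancel the one transaction that actually timed out, instead of flushing the whole transaction_list the way the removed card-wide flush timer did.
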
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index fb0321300cce..0ecfcd95f4c5 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,7 +27,12 @@ struct fw_packet;
27#define PHY_LINK_ACTIVE 0x80 27#define PHY_LINK_ACTIVE 0x80
28#define PHY_CONTENDER 0x40 28#define PHY_CONTENDER 0x40
29#define PHY_BUS_RESET 0x40 29#define PHY_BUS_RESET 0x40
30#define PHY_EXTENDED_REGISTERS 0xe0
30#define PHY_BUS_SHORT_RESET 0x40 31#define PHY_BUS_SHORT_RESET 0x40
32#define PHY_INT_STATUS_BITS 0x3c
33#define PHY_ENABLE_ACCEL 0x02
34#define PHY_ENABLE_MULTI 0x01
35#define PHY_PAGE_SELECT 0xe0
31 36
32#define BANDWIDTH_AVAILABLE_INITIAL 4915 37#define BANDWIDTH_AVAILABLE_INITIAL 4915
33#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) 38#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
@@ -215,7 +220,6 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
215void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); 220void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
216void fw_fill_response(struct fw_packet *response, u32 *request_header, 221void fw_fill_response(struct fw_packet *response, u32 *request_header,
217 int rcode, void *payload, size_t length); 222 int rcode, void *payload, size_t length);
218void fw_flush_transactions(struct fw_card *card);
219void fw_send_phy_config(struct fw_card *card, 223void fw_send_phy_config(struct fw_card *card,
220 int node_id, int generation, int gap_count); 224 int node_id, int generation, int gap_count);
221 225
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a3b083a7403a..9f627e758cfc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
236#define QUIRK_CYCLE_TIMER 1 236#define QUIRK_CYCLE_TIMER 1
237#define QUIRK_RESET_PACKET 2 237#define QUIRK_RESET_PACKET 2
238#define QUIRK_BE_HEADERS 4 238#define QUIRK_BE_HEADERS 4
239#define QUIRK_NO_1394A 8
239 240
240/* In case of multiple matches in ohci_quirks[], only the first one is used. */ 241/* In case of multiple matches in ohci_quirks[], only the first one is used. */
241static const struct { 242static const struct {
242 unsigned short vendor, device, flags; 243 unsigned short vendor, device, flags;
243} ohci_quirks[] = { 244} ohci_quirks[] = {
244 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | 245 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
245 QUIRK_RESET_PACKET}, 246 QUIRK_RESET_PACKET |
247 QUIRK_NO_1394A},
246 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, 248 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
247 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 249 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
248 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 250 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
@@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
257 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) 259 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
258 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) 260 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
259 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) 261 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
262 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
260 ")"); 263 ")");
261 264
262#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
263
264#define OHCI_PARAM_DEBUG_AT_AR 1 265#define OHCI_PARAM_DEBUG_AT_AR 1
265#define OHCI_PARAM_DEBUG_SELFIDS 2 266#define OHCI_PARAM_DEBUG_SELFIDS 2
266#define OHCI_PARAM_DEBUG_IRQS 4 267#define OHCI_PARAM_DEBUG_IRQS 4
267#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ 268#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
268 269
270#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
271
269static int param_debug; 272static int param_debug;
270module_param_named(debug, param_debug, int, 0644); 273module_param_named(debug, param_debug, int, 0644);
271MODULE_PARM_DESC(debug, "Verbose logging (default = 0" 274MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
@@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
438 441
439#else 442#else
440 443
441#define log_irqs(evt) 444#define param_debug 0
442#define log_selfids(node_id, generation, self_id_count, sid) 445static inline void log_irqs(u32 evt) {}
443#define log_ar_at_event(dir, speed, header, evt) 446static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
447static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
444 448
445#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ 449#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
446 450
@@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci)
460 reg_read(ohci, OHCI1394_Version); 464 reg_read(ohci, OHCI1394_Version);
461} 465}
462 466
463static int ohci_update_phy_reg(struct fw_card *card, int addr, 467static int read_phy_reg(struct fw_ohci *ohci, int addr)
464 int clear_bits, int set_bits)
465{ 468{
466 struct fw_ohci *ohci = fw_ohci(card); 469 u32 val;
467 u32 val, old; 470 int i;
468 471
469 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); 472 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
470 flush_writes(ohci); 473 for (i = 0; i < 10; i++) {
471 msleep(2); 474 val = reg_read(ohci, OHCI1394_PhyControl);
472 val = reg_read(ohci, OHCI1394_PhyControl); 475 if (val & OHCI1394_PhyControl_ReadDone)
473 if ((val & OHCI1394_PhyControl_ReadDone) == 0) { 476 return OHCI1394_PhyControl_ReadData(val);
474 fw_error("failed to set phy reg bits.\n"); 477
475 return -EBUSY; 478 msleep(1);
476 } 479 }
480 fw_error("failed to read phy reg\n");
481
482 return -EBUSY;
483}
484
485static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
486{
487 int i;
477 488
478 old = OHCI1394_PhyControl_ReadData(val);
479 old = (old & ~clear_bits) | set_bits;
480 reg_write(ohci, OHCI1394_PhyControl, 489 reg_write(ohci, OHCI1394_PhyControl,
481 OHCI1394_PhyControl_Write(addr, old)); 490 OHCI1394_PhyControl_Write(addr, val));
491 for (i = 0; i < 100; i++) {
492 val = reg_read(ohci, OHCI1394_PhyControl);
493 if (!(val & OHCI1394_PhyControl_WritePending))
494 return 0;
482 495
483 return 0; 496 msleep(1);
497 }
498 fw_error("failed to write phy reg\n");
499
500 return -EBUSY;
501}
502
503static int ohci_update_phy_reg(struct fw_card *card, int addr,
504 int clear_bits, int set_bits)
505{
506 struct fw_ohci *ohci = fw_ohci(card);
507 int ret;
508
509 ret = read_phy_reg(ohci, addr);
510 if (ret < 0)
511 return ret;
512
513 /*
514 * The interrupt status bits are cleared by writing a one bit.
515 * Avoid clearing them unless explicitly requested in set_bits.
516 */
517 if (addr == 5)
518 clear_bits |= PHY_INT_STATUS_BITS;
519
520 return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
521}
522
523static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
524{
525 int ret;
526
527 ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
528 if (ret < 0)
529 return ret;
530
531 return read_phy_reg(ohci, addr);
484} 532}
485 533
486static int ar_context_add_page(struct ar_context *ctx) 534static int ar_context_add_page(struct ar_context *ctx)
@@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
1495 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); 1543 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
1496} 1544}
1497 1545
1546static int configure_1394a_enhancements(struct fw_ohci *ohci)
1547{
1548 bool enable_1394a;
1549 int ret, clear, set, offset;
1550
1551 /* Check if the driver should configure link and PHY. */
1552 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
1553 OHCI1394_HCControl_programPhyEnable))
1554 return 0;
1555
1556 /* Paranoia: check whether the PHY supports 1394a, too. */
1557 enable_1394a = false;
1558 ret = read_phy_reg(ohci, 2);
1559 if (ret < 0)
1560 return ret;
1561 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
1562 ret = read_paged_phy_reg(ohci, 1, 8);
1563 if (ret < 0)
1564 return ret;
1565 if (ret >= 1)
1566 enable_1394a = true;
1567 }
1568
1569 if (ohci->quirks & QUIRK_NO_1394A)
1570 enable_1394a = false;
1571
1572 /* Configure PHY and link consistently. */
1573 if (enable_1394a) {
1574 clear = 0;
1575 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
1576 } else {
1577 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
1578 set = 0;
1579 }
1580 ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
1581 if (ret < 0)
1582 return ret;
1583
1584 if (enable_1394a)
1585 offset = OHCI1394_HCControlSet;
1586 else
1587 offset = OHCI1394_HCControlClear;
1588 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
1589
1590 /* Clean up: configuration has been taken care of. */
1591 reg_write(ohci, OHCI1394_HCControlClear,
1592 OHCI1394_HCControl_programPhyEnable);
1593
1594 return 0;
1595}
1596
1498static int ohci_enable(struct fw_card *card, 1597static int ohci_enable(struct fw_card *card,
1499 const __be32 *config_rom, size_t length) 1598 const __be32 *config_rom, size_t length)
1500{ 1599{
1501 struct fw_ohci *ohci = fw_ohci(card); 1600 struct fw_ohci *ohci = fw_ohci(card);
1502 struct pci_dev *dev = to_pci_dev(card->device); 1601 struct pci_dev *dev = to_pci_dev(card->device);
1503 u32 lps; 1602 u32 lps;
1504 int i; 1603 int i, ret;
1505 1604
1506 if (software_reset(ohci)) { 1605 if (software_reset(ohci)) {
1507 fw_error("Failed to reset ohci card.\n"); 1606 fw_error("Failed to reset ohci card.\n");
@@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card,
1565 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1664 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1566 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); 1665 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1567 1666
1667 ret = configure_1394a_enhancements(ohci);
1668 if (ret < 0)
1669 return ret;
1670
1568 /* Activate link_on bit and contender bit in our self ID packets.*/ 1671 /* Activate link_on bit and contender bit in our self ID packets.*/
1569 if (ohci_update_phy_reg(card, 4, 0, 1672 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
1570 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0) 1673 if (ret < 0)
1571 return -EIO; 1674 return ret;
1572 1675
1573 /* 1676 /*
1574 * When the link is not yet enabled, the atomic config rom 1677 * When the link is not yet enabled, the atomic config rom
@@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = {
2304}; 2407};
2305 2408
2306#ifdef CONFIG_PPC_PMAC 2409#ifdef CONFIG_PPC_PMAC
2307static void ohci_pmac_on(struct pci_dev *dev) 2410static void pmac_ohci_on(struct pci_dev *dev)
2308{ 2411{
2309 if (machine_is(powermac)) { 2412 if (machine_is(powermac)) {
2310 struct device_node *ofn = pci_device_to_OF_node(dev); 2413 struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev)
2316 } 2419 }
2317} 2420}
2318 2421
2319static void ohci_pmac_off(struct pci_dev *dev) 2422static void pmac_ohci_off(struct pci_dev *dev)
2320{ 2423{
2321 if (machine_is(powermac)) { 2424 if (machine_is(powermac)) {
2322 struct device_node *ofn = pci_device_to_OF_node(dev); 2425 struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev)
2328 } 2431 }
2329} 2432}
2330#else 2433#else
2331#define ohci_pmac_on(dev) 2434static inline void pmac_ohci_on(struct pci_dev *dev) {}
2332#define ohci_pmac_off(dev) 2435static inline void pmac_ohci_off(struct pci_dev *dev) {}
2333#endif /* CONFIG_PPC_PMAC */ 2436#endif /* CONFIG_PPC_PMAC */
2334 2437
2335static int __devinit pci_probe(struct pci_dev *dev, 2438static int __devinit pci_probe(struct pci_dev *dev,
2336 const struct pci_device_id *ent) 2439 const struct pci_device_id *ent)
2337{ 2440{
2338 struct fw_ohci *ohci; 2441 struct fw_ohci *ohci;
2339 u32 bus_options, max_receive, link_speed, version; 2442 u32 bus_options, max_receive, link_speed, version, link_enh;
2340 u64 guid; 2443 u64 guid;
2341 int i, err, n_ir, n_it; 2444 int i, err, n_ir, n_it;
2342 size_t size; 2445 size_t size;
@@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
2349 2452
2350 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 2453 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
2351 2454
2352 ohci_pmac_on(dev); 2455 pmac_ohci_on(dev);
2353 2456
2354 err = pci_enable_device(dev); 2457 err = pci_enable_device(dev);
2355 if (err) { 2458 if (err) {
@@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev,
2389 if (param_quirks) 2492 if (param_quirks)
2390 ohci->quirks = param_quirks; 2493 ohci->quirks = param_quirks;
2391 2494
2495 /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
2496 if (dev->vendor == PCI_VENDOR_ID_TI) {
2497 pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
2498
2499 /* adjust latency of ATx FIFO: use 1.7 KB threshold */
2500 link_enh &= ~TI_LinkEnh_atx_thresh_mask;
2501 link_enh |= TI_LinkEnh_atx_thresh_1_7K;
2502
2503 /* use priority arbitration for asynchronous responses */
2504 link_enh |= TI_LinkEnh_enab_unfair;
2505
2506 /* required for aPhyEnhanceEnable to work */
2507 link_enh |= TI_LinkEnh_enab_accel;
2508
2509 pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
2510 }
2511
2392 ar_context_init(&ohci->ar_request_ctx, ohci, 2512 ar_context_init(&ohci->ar_request_ctx, ohci,
2393 OHCI1394_AsReqRcvContextControlSet); 2513 OHCI1394_AsReqRcvContextControlSet);
2394 2514
@@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
2466 pci_disable_device(dev); 2586 pci_disable_device(dev);
2467 fail_free: 2587 fail_free:
2468 kfree(&ohci->card); 2588 kfree(&ohci->card);
2469 ohci_pmac_off(dev); 2589 pmac_ohci_off(dev);
2470 fail: 2590 fail:
2471 if (err == -ENOMEM) 2591 if (err == -ENOMEM)
2472 fw_error("Out of memory\n"); 2592 fw_error("Out of memory\n");
@@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev)
2509 pci_release_region(dev, 0); 2629 pci_release_region(dev, 0);
2510 pci_disable_device(dev); 2630 pci_disable_device(dev);
2511 kfree(&ohci->card); 2631 kfree(&ohci->card);
2512 ohci_pmac_off(dev); 2632 pmac_ohci_off(dev);
2513 2633
2514 fw_notify("Removed fw-ohci device.\n"); 2634 fw_notify("Removed fw-ohci device.\n");
2515} 2635}
@@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
2530 err = pci_set_power_state(dev, pci_choose_state(dev, state)); 2650 err = pci_set_power_state(dev, pci_choose_state(dev, state));
2531 if (err) 2651 if (err)
2532 fw_error("pci_set_power_state failed with %d\n", err); 2652 fw_error("pci_set_power_state failed with %d\n", err);
2533 ohci_pmac_off(dev); 2653 pmac_ohci_off(dev);
2534 2654
2535 return 0; 2655 return 0;
2536} 2656}
@@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev)
2540 struct fw_ohci *ohci = pci_get_drvdata(dev); 2660 struct fw_ohci *ohci = pci_get_drvdata(dev);
2541 int err; 2661 int err;
2542 2662
2543 ohci_pmac_on(dev); 2663 pmac_ohci_on(dev);
2544 pci_set_power_state(dev, PCI_D0); 2664 pci_set_power_state(dev, PCI_D0);
2545 pci_restore_state(dev); 2665 pci_restore_state(dev);
2546 err = pci_enable_device(dev); 2666 err = pci_enable_device(dev);
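
In the ohci.c changes above, read_phy_reg()/write_phy_reg() poll the PhyControl register with a bounded retry loop instead of a single flush plus msleep(2), and ohci_update_phy_reg() ORs PHY_INT_STATUS_BITS into clear_bits for PHY register 5 because those status bits are write-one-to-clear: writing them back unchanged would acknowledge interrupts behind the driver's back. A tiny userspace sketch of that read-modify-write rule, with a plain variable standing in for the register:

    #include <stdio.h>
    #include <stdint.h>

    #define PHY_INT_STATUS_BITS 0x3c    /* write-1-to-clear bits of PHY register 5 */

    /* models the read-modify-write done by ohci_update_phy_reg() for register 5 */
    static uint8_t update_reg5(uint8_t current, int clear_bits, int set_bits)
    {
        /*
         * The interrupt status bits are cleared by writing a one, so always
         * force zeroes there unless the caller explicitly asked to set them.
         */
        clear_bits |= PHY_INT_STATUS_BITS;

        return (uint8_t)((current & ~clear_bits) | set_bits);
    }

    int main(void)
    {
        uint8_t reg = 0x2c;     /* pretend some status bits are currently latched */

        /* setting an unrelated bit must not acknowledge the latched status bits */
        printf("0x%02x -> 0x%02x\n", (unsigned)reg,
               (unsigned)update_reg5(reg, 0, 0x02));
        return 0;
    }

The printed result shows the latched status bits going out as zeroes, so only an explicit set_bits request can clear them.
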
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index ba492d85c516..3bc9a5d744eb 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -67,7 +67,7 @@
67#define OHCI1394_PhyControl_ReadDone 0x80000000 67#define OHCI1394_PhyControl_ReadDone 0x80000000
68#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16) 68#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
69#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000) 69#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
70#define OHCI1394_PhyControl_WriteDone 0x00004000 70#define OHCI1394_PhyControl_WritePending 0x00004000
71#define OHCI1394_IsochronousCycleTimer 0x0F0 71#define OHCI1394_IsochronousCycleTimer 0x0F0
72#define OHCI1394_AsReqFilterHiSet 0x100 72#define OHCI1394_AsReqFilterHiSet 0x100
73#define OHCI1394_AsReqFilterHiClear 0x104 73#define OHCI1394_AsReqFilterHiClear 0x104
@@ -154,4 +154,12 @@
154 154
155#define OHCI1394_phy_tcode 0xe 155#define OHCI1394_phy_tcode 0xe
156 156
157/* TI extensions */
158
159#define PCI_CFG_TI_LinkEnh 0xf4
160#define TI_LinkEnh_enab_accel 0x00000002
161#define TI_LinkEnh_enab_unfair 0x00000080
162#define TI_LinkEnh_atx_thresh_mask 0x00003000
163#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
164
157#endif /* _FIREWIRE_OHCI_H */ 165#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f74a19..4fd0f276df5a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@ config GPIO_MAX732X
139 Board setup code must specify the model to use, and the start 139 Board setup code must specify the model to use, and the start
140 number for these GPIOs. 140 number for these GPIOs.
141 141
142config GPIO_MAX732X_IRQ
143 bool "Interrupt controller support for MAX732x"
144 depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
145 help
146 Say yes here to enable the max732x to be used as an interrupt
147 controller. It requires the driver to be built in the kernel.
148
142config GPIO_PCA953X 149config GPIO_PCA953X
143 tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports" 150 tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
144 depends on I2C 151 depends on I2C
@@ -264,10 +271,10 @@ config GPIO_BT8XX
264 If unsure, say N. 271 If unsure, say N.
265 272
266config GPIO_LANGWELL 273config GPIO_LANGWELL
267 bool "Intel Moorestown Platform Langwell GPIO support" 274 bool "Intel Langwell/Penwell GPIO support"
268 depends on PCI 275 depends on PCI
269 help 276 help
270 Say Y here to support Intel Moorestown platform GPIO. 277 Say Y here to support Intel Langwell/Penwell GPIO.
271 278
272config GPIO_TIMBERDALE 279config GPIO_TIMBERDALE
273 bool "Support for timberdale GPIO IP" 280 bool "Support for timberdale GPIO IP"
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0c3c498f2260..f73a1555e49d 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
197 return 0; 197 return 0;
198} 198}
199 199
200static char *cs5535_gpio_names[] = { 200static const char * const cs5535_gpio_names[] = {
201 "GPIO0", "GPIO1", "GPIO2", "GPIO3", 201 "GPIO0", "GPIO1", "GPIO2", "GPIO3",
202 "GPIO4", "GPIO5", "GPIO6", "GPIO7", 202 "GPIO4", "GPIO5", "GPIO6", "GPIO7",
203 "GPIO8", "GPIO9", "GPIO10", "GPIO11", 203 "GPIO8", "GPIO9", "GPIO10", "GPIO11",
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cae1b8c5b08c..3ca36542e338 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
722 unsigned long flags; 722 unsigned long flags;
723 struct gpio_desc *desc; 723 struct gpio_desc *desc;
724 int status = -EINVAL; 724 int status = -EINVAL;
725 char *ioname = NULL; 725 const char *ioname = NULL;
726 726
727 /* can't export until sysfs is available ... */ 727 /* can't export until sysfs is available ... */
728 if (!gpio_class.p) { 728 if (!gpio_class.p) {
@@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
753 struct device *dev; 753 struct device *dev;
754 754
755 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), 755 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
756 desc, ioname ? ioname : "gpio%d", gpio); 756 desc, ioname ? ioname : "gpio%u", gpio);
757 if (!IS_ERR(dev)) { 757 if (!IS_ERR(dev)) {
758 status = sysfs_create_group(&dev->kobj, 758 status = sysfs_create_group(&dev->kobj,
759 &gpio_attr_group); 759 &gpio_attr_group);
@@ -1106,7 +1106,7 @@ unlock:
1106fail: 1106fail:
1107 /* failures here can mean systems won't boot... */ 1107 /* failures here can mean systems won't boot... */
1108 if (status) 1108 if (status)
1109 pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", 1109 pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
1110 chip->base, chip->base + chip->ngpio - 1, 1110 chip->base, chip->base + chip->ngpio - 1,
1111 chip->label ? : "generic"); 1111 chip->label ? : "generic");
1112 return status; 1112 return status;
@@ -1447,6 +1447,49 @@ fail:
1447} 1447}
1448EXPORT_SYMBOL_GPL(gpio_direction_output); 1448EXPORT_SYMBOL_GPL(gpio_direction_output);
1449 1449
1450/**
1451 * gpio_set_debounce - sets @debounce time for a @gpio
1452 * @gpio: the gpio to set debounce time
1453 * @debounce: debounce time is microseconds
1454 */
1455int gpio_set_debounce(unsigned gpio, unsigned debounce)
1456{
1457 unsigned long flags;
1458 struct gpio_chip *chip;
1459 struct gpio_desc *desc = &gpio_desc[gpio];
1460 int status = -EINVAL;
1461
1462 spin_lock_irqsave(&gpio_lock, flags);
1463
1464 if (!gpio_is_valid(gpio))
1465 goto fail;
1466 chip = desc->chip;
1467 if (!chip || !chip->set || !chip->set_debounce)
1468 goto fail;
1469 gpio -= chip->base;
1470 if (gpio >= chip->ngpio)
1471 goto fail;
1472 status = gpio_ensure_requested(desc, gpio);
1473 if (status < 0)
1474 goto fail;
1475
1476 /* now we know the gpio is valid and chip won't vanish */
1477
1478 spin_unlock_irqrestore(&gpio_lock, flags);
1479
1480 might_sleep_if(extra_checks && chip->can_sleep);
1481
1482 return chip->set_debounce(chip, gpio, debounce);
1483
1484fail:
1485 spin_unlock_irqrestore(&gpio_lock, flags);
1486 if (status)
1487 pr_debug("%s: gpio-%d status %d\n",
1488 __func__, gpio, status);
1489
1490 return status;
1491}
1492EXPORT_SYMBOL_GPL(gpio_set_debounce);
1450 1493
1451/* I/O calls are only valid after configuration completed; the relevant 1494/* I/O calls are only valid after configuration completed; the relevant
1452 * "is this a valid GPIO" error checks should already have been done. 1495 * "is this a valid GPIO" error checks should already have been done.
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 41a9388f2fde..48fc43c4bdd1 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -217,7 +217,10 @@ gpiochip_add_err:
217static void __exit it8761e_gpio_exit(void) 217static void __exit it8761e_gpio_exit(void)
218{ 218{
219 if (gpio_ba) { 219 if (gpio_ba) {
220 gpiochip_remove(&it8761e_gpio_chip); 220 int ret = gpiochip_remove(&it8761e_gpio_chip);
221
222 WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
223 __func__, ret);
221 224
222 release_region(gpio_ba, GPIO_IOSIZE); 225 release_region(gpio_ba, GPIO_IOSIZE);
223 gpio_ba = 0; 226 gpio_ba = 0;
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 00c3a14127af..8383a8d7f994 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -17,6 +17,7 @@
17 17
18/* Supports: 18/* Supports:
19 * Moorestown platform Langwell chip. 19 * Moorestown platform Langwell chip.
20 * Medfield platform Penwell chip.
20 */ 21 */
21 22
22#include <linux/module.h> 23#include <linux/module.h>
@@ -31,44 +32,65 @@
31#include <linux/gpio.h> 32#include <linux/gpio.h>
32#include <linux/slab.h> 33#include <linux/slab.h>
33 34
34struct lnw_gpio_register { 35/*
35 u32 GPLR[2]; 36 * Langwell chip has 64 pins and thus there are 2 32bit registers to control
36 u32 GPDR[2]; 37 * each feature, while Penwell chip has 96 pins for each block, and needs 3 32bit
37 u32 GPSR[2]; 38 * registers to control them, so we only define the order here instead of a
38 u32 GPCR[2]; 39 * structure, to get a bit offset for a pin (use GPDR as an example):
39 u32 GRER[2]; 40 *
40 u32 GFER[2]; 41 * nreg = ngpio / 32;
41 u32 GEDR[2]; 42 * reg = offset / 32;
43 * bit = offset % 32;
44 * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
45 *
46 * so that bit of reg_addr controls the GPDR feature of the pin at 'offset'
47*/
48
49enum GPIO_REG {
50 GPLR = 0, /* pin level read-only */
51 GPDR, /* pin direction */
52 GPSR, /* pin set */
53 GPCR, /* pin clear */
54 GRER, /* rising edge detect */
55 GFER, /* falling edge detect */
56 GEDR, /* edge detect result */
42}; 57};
43 58
44struct lnw_gpio { 59struct lnw_gpio {
45 struct gpio_chip chip; 60 struct gpio_chip chip;
46 struct lnw_gpio_register *reg_base; 61 void *reg_base;
47 spinlock_t lock; 62 spinlock_t lock;
48 unsigned irq_base; 63 unsigned irq_base;
49}; 64};
50 65
51static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset) 66static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
67 enum GPIO_REG reg_type)
52{ 68{
53 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 69 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
70 unsigned nreg = chip->ngpio / 32;
54 u8 reg = offset / 32; 71 u8 reg = offset / 32;
55 void __iomem *gplr; 72 void __iomem *ptr;
73
74 ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
75 return ptr;
76}
77
78static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
79{
80 void __iomem *gplr = gpio_reg(chip, offset, GPLR);
56 81
57 gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
58 return readl(gplr) & BIT(offset % 32); 82 return readl(gplr) & BIT(offset % 32);
59} 83}
60 84
61static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 85static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
62{ 86{
63 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
64 u8 reg = offset / 32;
65 void __iomem *gpsr, *gpcr; 87 void __iomem *gpsr, *gpcr;
66 88
67 if (value) { 89 if (value) {
68 gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]); 90 gpsr = gpio_reg(chip, offset, GPSR);
69 writel(BIT(offset % 32), gpsr); 91 writel(BIT(offset % 32), gpsr);
70 } else { 92 } else {
71 gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]); 93 gpcr = gpio_reg(chip, offset, GPCR);
72 writel(BIT(offset % 32), gpcr); 94 writel(BIT(offset % 32), gpcr);
73 } 95 }
74} 96}
@@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
76static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 98static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
77{ 99{
78 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 100 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
79 u8 reg = offset / 32; 101 void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
80 u32 value; 102 u32 value;
81 unsigned long flags; 103 unsigned long flags;
82 void __iomem *gpdr;
83 104
84 gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
85 spin_lock_irqsave(&lnw->lock, flags); 105 spin_lock_irqsave(&lnw->lock, flags);
86 value = readl(gpdr); 106 value = readl(gpdr);
87 value &= ~BIT(offset % 32); 107 value &= ~BIT(offset % 32);
@@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
94 unsigned offset, int value) 114 unsigned offset, int value)
95{ 115{
96 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 116 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
97 u8 reg = offset / 32; 117 void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
98 unsigned long flags; 118 unsigned long flags;
99 void __iomem *gpdr;
100 119
101 lnw_gpio_set(chip, offset, value); 120 lnw_gpio_set(chip, offset, value);
102 gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
103 spin_lock_irqsave(&lnw->lock, flags); 121 spin_lock_irqsave(&lnw->lock, flags);
104 value = readl(gpdr); 122 value = readl(gpdr);
105 value |= BIT(offset % 32);; 123 value |= BIT(offset % 32);;
@@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type)
118{ 136{
119 struct lnw_gpio *lnw = get_irq_chip_data(irq); 137 struct lnw_gpio *lnw = get_irq_chip_data(irq);
120 u32 gpio = irq - lnw->irq_base; 138 u32 gpio = irq - lnw->irq_base;
121 u8 reg = gpio / 32;
122 unsigned long flags; 139 unsigned long flags;
123 u32 value; 140 u32 value;
124 void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]); 141 void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
125 void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]); 142 void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
126 143
127 if (gpio >= lnw->chip.ngpio) 144 if (gpio >= lnw->chip.ngpio)
128 return -EINVAL; 145 return -EINVAL;
@@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = {
158 .set_type = lnw_irq_type, 175 .set_type = lnw_irq_type,
159}; 176};
160 177
161static struct pci_device_id lnw_gpio_ids[] = { 178static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) }, 179 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
180 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
163 { 0, } 182 { 0, }
164}; 183};
165MODULE_DEVICE_TABLE(pci, lnw_gpio_ids); 184MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
167static void lnw_irq_handler(unsigned irq, struct irq_desc *desc) 186static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
168{ 187{
169 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq); 188 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
170 u32 reg, gpio; 189 u32 base, gpio;
171 void __iomem *gedr; 190 void __iomem *gedr;
172 u32 gedr_v; 191 u32 gedr_v;
173 192
174 /* check GPIO controller to check which pin triggered the interrupt */ 193 /* check GPIO controller to check which pin triggered the interrupt */
175 for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) { 194 for (base = 0; base < lnw->chip.ngpio; base += 32) {
176 gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]); 195 gedr = gpio_reg(&lnw->chip, base, GEDR);
177 gedr_v = readl(gedr); 196 gedr_v = readl(gedr);
178 if (!gedr_v) 197 if (!gedr_v)
179 continue; 198 continue;
180 for (gpio = reg*32; gpio < reg*32+32; gpio++) 199 for (gpio = base; gpio < base + 32; gpio++)
181 if (gedr_v & BIT(gpio % 32)) { 200 if (gedr_v & BIT(gpio % 32)) {
182 pr_debug("pin %d triggered\n", gpio); 201 pr_debug("pin %d triggered\n", gpio);
183 generic_handle_irq(lnw->irq_base + gpio); 202 generic_handle_irq(lnw->irq_base + gpio);
@@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
245 lnw->chip.set = lnw_gpio_set; 264 lnw->chip.set = lnw_gpio_set;
246 lnw->chip.to_irq = lnw_gpio_to_irq; 265 lnw->chip.to_irq = lnw_gpio_to_irq;
247 lnw->chip.base = gpio_base; 266 lnw->chip.base = gpio_base;
248 lnw->chip.ngpio = 64; 267 lnw->chip.ngpio = id->driver_data;
249 lnw->chip.can_sleep = 0; 268 lnw->chip.can_sleep = 0;
250 pci_set_drvdata(pdev, lnw); 269 pci_set_drvdata(pdev, lnw);
251 retval = gpiochip_add(&lnw->chip); 270 retval = gpiochip_add(&lnw->chip);
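
Worked example of the addressing scheme described in the comment above (not from the patch): GPDR sits at index 1 in the register order, so for pin 70 on a 96-pin Penwell bank

        nreg     = 96 / 32 = 3
        reg      = 70 / 32 = 2
        bit      = 70 % 32 = 6
        reg_addr = reg_base + GPDR * nreg * 4 + reg * 4 = reg_base + 0x14

and BIT(6) in the register at reg_base + 0x14 is pin 70's direction bit, which is exactly what gpio_reg(chip, 70, GPDR) returns.
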
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index f7868243af89..9cad60f9e962 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -17,7 +17,8 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20 20#include <linux/interrupt.h>
21#include <linux/irq.h>
21#include <linux/i2c.h> 22#include <linux/i2c.h>
22#include <linux/i2c/max732x.h> 23#include <linux/i2c/max732x.h>
23 24
@@ -31,7 +32,8 @@
31 * - Open Drain I/O 32 * - Open Drain I/O
32 * 33 *
33 * designated by 'O', 'I' and 'P' individually according to MAXIM's 34 * designated by 'O', 'I' and 'P' individually according to MAXIM's
34 * datasheets. 35 * datasheets. 'I' and 'P' ports are interrupt capable, some with
36 * a dedicated interrupt mask.
35 * 37 *
36 * There are two groups of I/O ports, each group usually includes 38 * There are two groups of I/O ports, each group usually includes
37 * up to 8 I/O ports, and is accessed by a specific I2C address: 39 * up to 8 I/O ports, and is accessed by a specific I2C address:
@@ -44,7 +46,8 @@
44 * 46 *
45 * Within each group of ports, there are five known combinations of 47 * Within each group of ports, there are five known combinations of
46 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for 48 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
47 * the detailed organization of these ports. 49 * the detailed organization of these ports. Only Group A is interrupt
50 * capable.
48 * 51 *
49 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16', 52 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
50 * and GPIOs from GROUP_A are numbered before those from GROUP_B 53 * and GPIOs from GROUP_A are numbered before those from GROUP_B
@@ -68,16 +71,47 @@
68#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */ 71#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */
69#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */ 72#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */
70 73
74#define INT_NONE 0x0 /* No interrupt capability */
75#define INT_NO_MASK 0x1 /* Has interrupts, no mask */
76#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */
77#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */
78
79#define INT_CAPS(x) (((uint64_t)(x)) << 32)
80
81enum {
82 MAX7319,
83 MAX7320,
84 MAX7321,
85 MAX7322,
86 MAX7323,
87 MAX7324,
88 MAX7325,
89 MAX7326,
90 MAX7327,
91};
92
93static uint64_t max732x_features[] = {
94 [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK),
95 [MAX7320] = GROUP_B(IO_8O),
96 [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK),
97 [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK),
98 [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK),
99 [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
100 [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
101 [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
102 [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
103};
104
71static const struct i2c_device_id max732x_id[] = { 105static const struct i2c_device_id max732x_id[] = {
72 { "max7319", GROUP_A(IO_8I) }, 106 { "max7319", MAX7319 },
73 { "max7320", GROUP_B(IO_8O) }, 107 { "max7320", MAX7320 },
74 { "max7321", GROUP_A(IO_8P) }, 108 { "max7321", MAX7321 },
75 { "max7322", GROUP_A(IO_4I4O) }, 109 { "max7322", MAX7322 },
76 { "max7323", GROUP_A(IO_4P4O) }, 110 { "max7323", MAX7323 },
77 { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) }, 111 { "max7324", MAX7324 },
78 { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) }, 112 { "max7325", MAX7325 },
79 { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) }, 113 { "max7326", MAX7326 },
80 { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) }, 114 { "max7327", MAX7327 },
81 { }, 115 { },
82}; 116};
83MODULE_DEVICE_TABLE(i2c, max732x_id); 117MODULE_DEVICE_TABLE(i2c, max732x_id);
@@ -96,9 +130,19 @@ struct max732x_chip {
96 130
97 struct mutex lock; 131 struct mutex lock;
98 uint8_t reg_out[2]; 132 uint8_t reg_out[2];
133
134#ifdef CONFIG_GPIO_MAX732X_IRQ
135 struct mutex irq_lock;
136 int irq_base;
137 uint8_t irq_mask;
138 uint8_t irq_mask_cur;
139 uint8_t irq_trig_raise;
140 uint8_t irq_trig_fall;
141 uint8_t irq_features;
142#endif
99}; 143};
100 144
101static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val) 145static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
102{ 146{
103 struct i2c_client *client; 147 struct i2c_client *client;
104 int ret; 148 int ret;
@@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
113 return 0; 157 return 0;
114} 158}
115 159
116static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val) 160static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val)
117{ 161{
118 struct i2c_client *client; 162 struct i2c_client *client;
119 int ret; 163 int ret;
@@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
142 186
143 chip = container_of(gc, struct max732x_chip, gpio_chip); 187 chip = container_of(gc, struct max732x_chip, gpio_chip);
144 188
145 ret = max732x_read(chip, is_group_a(chip, off), &reg_val); 189 ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
146 if (ret < 0) 190 if (ret < 0)
147 return 0; 191 return 0;
148 192
@@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
162 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0]; 206 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
163 reg_out = (val) ? reg_out | mask : reg_out & ~mask; 207 reg_out = (val) ? reg_out | mask : reg_out & ~mask;
164 208
165 ret = max732x_write(chip, is_group_a(chip, off), reg_out); 209 ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
166 if (ret < 0) 210 if (ret < 0)
167 goto out; 211 goto out;
168 212
@@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
188 return -EACCES; 232 return -EACCES;
189 } 233 }
190 234
235 /*
236 * Open-drain pins must be set to high impedance (which is
237 * equivalent to output-high) to be turned into an input.
238 */
239 if ((mask & chip->dir_output))
240 max732x_gpio_set_value(gc, off, 1);
241
191 return 0; 242 return 0;
192} 243}
193 244
@@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc,
209 return 0; 260 return 0;
210} 261}
211 262
263#ifdef CONFIG_GPIO_MAX732X_IRQ
264static int max732x_writew(struct max732x_chip *chip, uint16_t val)
265{
266 int ret;
267
268 val = cpu_to_le16(val);
269
270 ret = i2c_master_send(chip->client_group_a, (char *)&val, 2);
271 if (ret < 0) {
272 dev_err(&chip->client_group_a->dev, "failed writing\n");
273 return ret;
274 }
275
276 return 0;
277}
278
279static int max732x_readw(struct max732x_chip *chip, uint16_t *val)
280{
281 int ret;
282
283 ret = i2c_master_recv(chip->client_group_a, (char *)val, 2);
284 if (ret < 0) {
285 dev_err(&chip->client_group_a->dev, "failed reading\n");
286 return ret;
287 }
288
289 *val = le16_to_cpu(*val);
290 return 0;
291}
292
293static void max732x_irq_update_mask(struct max732x_chip *chip)
294{
295 uint16_t msg;
296
297 if (chip->irq_mask == chip->irq_mask_cur)
298 return;
299
300 chip->irq_mask = chip->irq_mask_cur;
301
302 if (chip->irq_features == INT_NO_MASK)
303 return;
304
305 mutex_lock(&chip->lock);
306
307 switch (chip->irq_features) {
308 case INT_INDEP_MASK:
309 msg = (chip->irq_mask << 8) | chip->reg_out[0];
310 max732x_writew(chip, msg);
311 break;
312
313 case INT_MERGED_MASK:
314 msg = chip->irq_mask | chip->reg_out[0];
315 max732x_writeb(chip, 1, (uint8_t)msg);
316 break;
317 }
318
319 mutex_unlock(&chip->lock);
320}
321
322static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
323{
324 struct max732x_chip *chip;
325
326 chip = container_of(gc, struct max732x_chip, gpio_chip);
327 return chip->irq_base + off;
328}
329
330static void max732x_irq_mask(unsigned int irq)
331{
332 struct max732x_chip *chip = get_irq_chip_data(irq);
333
334 chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
335}
336
337static void max732x_irq_unmask(unsigned int irq)
338{
339 struct max732x_chip *chip = get_irq_chip_data(irq);
340
341 chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
342}
343
344static void max732x_irq_bus_lock(unsigned int irq)
345{
346 struct max732x_chip *chip = get_irq_chip_data(irq);
347
348 mutex_lock(&chip->irq_lock);
349 chip->irq_mask_cur = chip->irq_mask;
350}
351
352static void max732x_irq_bus_sync_unlock(unsigned int irq)
353{
354 struct max732x_chip *chip = get_irq_chip_data(irq);
355
356 max732x_irq_update_mask(chip);
357 mutex_unlock(&chip->irq_lock);
358}
359
360static int max732x_irq_set_type(unsigned int irq, unsigned int type)
361{
362 struct max732x_chip *chip = get_irq_chip_data(irq);
363 uint16_t off = irq - chip->irq_base;
364 uint16_t mask = 1 << off;
365
366 if (!(mask & chip->dir_input)) {
367 dev_dbg(&chip->client->dev, "%s port %d is output only\n",
368 chip->client->name, off);
369 return -EACCES;
370 }
371
372 if (!(type & IRQ_TYPE_EDGE_BOTH)) {
373 dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
374 irq, type);
375 return -EINVAL;
376 }
377
378 if (type & IRQ_TYPE_EDGE_FALLING)
379 chip->irq_trig_fall |= mask;
380 else
381 chip->irq_trig_fall &= ~mask;
382
383 if (type & IRQ_TYPE_EDGE_RISING)
384 chip->irq_trig_raise |= mask;
385 else
386 chip->irq_trig_raise &= ~mask;
387
388 return max732x_gpio_direction_input(&chip->gpio_chip, off);
389}
390
391static struct irq_chip max732x_irq_chip = {
392 .name = "max732x",
393 .mask = max732x_irq_mask,
394 .unmask = max732x_irq_unmask,
395 .bus_lock = max732x_irq_bus_lock,
396 .bus_sync_unlock = max732x_irq_bus_sync_unlock,
397 .set_type = max732x_irq_set_type,
398};
399
400static uint8_t max732x_irq_pending(struct max732x_chip *chip)
401{
402 uint8_t cur_stat;
403 uint8_t old_stat;
404 uint8_t trigger;
405 uint8_t pending;
406 uint16_t status;
407 int ret;
408
409 ret = max732x_readw(chip, &status);
410 if (ret)
411 return 0;
412
413 trigger = status >> 8;
414 trigger &= chip->irq_mask;
415
416 if (!trigger)
417 return 0;
418
419 cur_stat = status & 0xFF;
420 cur_stat &= chip->irq_mask;
421
422 old_stat = cur_stat ^ trigger;
423
424 pending = (old_stat & chip->irq_trig_fall) |
425 (cur_stat & chip->irq_trig_raise);
426 pending &= trigger;
427
428 return pending;
429}
430
431static irqreturn_t max732x_irq_handler(int irq, void *devid)
432{
433 struct max732x_chip *chip = devid;
434 uint8_t pending;
435 uint8_t level;
436
437 pending = max732x_irq_pending(chip);
438
439 if (!pending)
440 return IRQ_HANDLED;
441
442 do {
443 level = __ffs(pending);
444 handle_nested_irq(level + chip->irq_base);
445
446 pending &= ~(1 << level);
447 } while (pending);
448
449 return IRQ_HANDLED;
450}
451
452static int max732x_irq_setup(struct max732x_chip *chip,
453 const struct i2c_device_id *id)
454{
455 struct i2c_client *client = chip->client;
456 struct max732x_platform_data *pdata = client->dev.platform_data;
457 int has_irq = max732x_features[id->driver_data] >> 32;
458 int ret;
459
460 if (pdata->irq_base && has_irq != INT_NONE) {
461 int lvl;
462
463 chip->irq_base = pdata->irq_base;
464 chip->irq_features = has_irq;
465 mutex_init(&chip->irq_lock);
466
467 for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
468 int irq = lvl + chip->irq_base;
469
470 if (!(chip->dir_input & (1 << lvl)))
471 continue;
472
473 set_irq_chip_data(irq, chip);
474 set_irq_chip_and_handler(irq, &max732x_irq_chip,
475 handle_edge_irq);
476 set_irq_nested_thread(irq, 1);
477#ifdef CONFIG_ARM
478 set_irq_flags(irq, IRQF_VALID);
479#else
480 set_irq_noprobe(irq);
481#endif
482 }
483
484 ret = request_threaded_irq(client->irq,
485 NULL,
486 max732x_irq_handler,
487 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
488 dev_name(&client->dev), chip);
489 if (ret) {
490 dev_err(&client->dev, "failed to request irq %d\n",
491 client->irq);
492 goto out_failed;
493 }
494
495 chip->gpio_chip.to_irq = max732x_gpio_to_irq;
496 }
497
498 return 0;
499
500out_failed:
501 chip->irq_base = 0;
502 return ret;
503}
504
505static void max732x_irq_teardown(struct max732x_chip *chip)
506{
507 if (chip->irq_base)
508 free_irq(chip->client->irq, chip);
509}
510#else /* CONFIG_GPIO_MAX732X_IRQ */
511static int max732x_irq_setup(struct max732x_chip *chip,
512 const struct i2c_device_id *id)
513{
514 struct i2c_client *client = chip->client;
515 struct max732x_platform_data *pdata = client->dev.platform_data;
516 int has_irq = max732x_features[id->driver_data] >> 32;
517
518 if (pdata->irq_base && has_irq != INT_NONE)
519 dev_warn(&client->dev, "interrupt support not compiled in\n");
520
521 return 0;
522}
523
524static void max732x_irq_teardown(struct max732x_chip *chip)
525{
526}
527#endif
528
212static int __devinit max732x_setup_gpio(struct max732x_chip *chip, 529static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
213 const struct i2c_device_id *id, 530 const struct i2c_device_id *id,
214 unsigned gpio_start) 531 unsigned gpio_start)
215{ 532{
216 struct gpio_chip *gc = &chip->gpio_chip; 533 struct gpio_chip *gc = &chip->gpio_chip;
217 uint32_t id_data = id->driver_data; 534 uint32_t id_data = (uint32_t)max732x_features[id->driver_data];
218 int i, port = 0; 535 int i, port = 0;
219 536
220 for (i = 0; i < 16; i++, id_data >>= 2) { 537 for (i = 0; i < 16; i++, id_data >>= 2) {
@@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client,
285 switch (client->addr & 0x70) { 602 switch (client->addr & 0x70) {
286 case 0x60: 603 case 0x60:
287 chip->client_group_a = client; 604 chip->client_group_a = client;
288 if (nr_port > 7) { 605 if (nr_port > 8) {
289 c = i2c_new_dummy(client->adapter, addr_b); 606 c = i2c_new_dummy(client->adapter, addr_b);
290 chip->client_group_b = chip->client_dummy = c; 607 chip->client_group_b = chip->client_dummy = c;
291 } 608 }
292 break; 609 break;
293 case 0x50: 610 case 0x50:
294 chip->client_group_b = client; 611 chip->client_group_b = client;
295 if (nr_port > 7) { 612 if (nr_port > 8) {
296 c = i2c_new_dummy(client->adapter, addr_a); 613 c = i2c_new_dummy(client->adapter, addr_a);
297 chip->client_group_a = chip->client_dummy = c; 614 chip->client_group_a = chip->client_dummy = c;
298 } 615 }
@@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client,
306 623
307 mutex_init(&chip->lock); 624 mutex_init(&chip->lock);
308 625
309 max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]); 626 max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
310 if (nr_port > 7) 627 if (nr_port > 8)
311 max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]); 628 max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
629
630 ret = max732x_irq_setup(chip, id);
631 if (ret)
632 goto out_failed;
312 633
313 ret = gpiochip_add(&chip->gpio_chip); 634 ret = gpiochip_add(&chip->gpio_chip);
314 if (ret) 635 if (ret)
@@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client,
325 return 0; 646 return 0;
326 647
327out_failed: 648out_failed:
649 max732x_irq_teardown(chip);
328 kfree(chip); 650 kfree(chip);
329 return ret; 651 return ret;
330} 652}
@@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client)
352 return ret; 674 return ret;
353 } 675 }
354 676
677 max732x_irq_teardown(chip);
678
355 /* unregister any dummy i2c_client */ 679 /* unregister any dummy i2c_client */
356 if (chip->client_dummy) 680 if (chip->client_dummy)
357 i2c_unregister_device(chip->client_dummy); 681 i2c_unregister_device(chip->client_dummy);
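
A short sketch (not in the patch) of how the packed max732x_features[] entries decode, mirroring the '>> 32' used in max732x_irq_setup() and the (uint32_t) cast in max732x_setup_gpio():

        uint64_t feat  = max732x_features[MAX7324];
        uint32_t ports = (uint32_t)feat; /* GROUP_A(IO_8I) | GROUP_B(IO_8O) */
        int   has_irq  = feat >> 32;     /* INT_MERGED_MASK (0x3) */

The low 32 bits keep the old per-port layout that max732x_setup_gpio() walks two bits at a time, and the bits above 31 carry the interrupt capability added by INT_CAPS().
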
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index f156ab3bb6ed..a2b12aa1f2b9 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -73,7 +73,7 @@ struct pca953x_chip {
73 struct i2c_client *client; 73 struct i2c_client *client;
74 struct pca953x_platform_data *dyn_pdata; 74 struct pca953x_platform_data *dyn_pdata;
75 struct gpio_chip gpio_chip; 75 struct gpio_chip gpio_chip;
76 char **names; 76 const char *const *names;
77}; 77};
78 78
79static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) 79static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 105701a1f05b..ee568c8fcbd0 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
164 unsigned long flags; 164 unsigned long flags;
165 u8 gpiois, gpioibe, gpioiev; 165 u8 gpiois, gpioibe, gpioiev;
166 166
167 if (offset < 0 || offset > PL061_GPIO_NR) 167 if (offset < 0 || offset >= PL061_GPIO_NR)
168 return -EINVAL; 168 return -EINVAL;
169 169
170 spin_lock_irqsave(&chip->irq_lock, flags); 170 spin_lock_irqsave(&chip->irq_lock, flags);
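
The one-character fix above is a classic off-by-one: assuming PL061_GPIO_NR is 8 (the PL061 cell has eight lines), valid offsets are 0..7, so the old 'offset > PL061_GPIO_NR' test wrongly accepted offset 8 while the new '>=' form rejects it.
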
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 56f314fbd4f9..c94026768570 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = {
811 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc", 811 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
812}; 812};
813 813
814static const char *absolutes[ABS_MAX + 1] = { 814static const char *absolutes[ABS_CNT] = {
815 [ABS_X] = "X", [ABS_Y] = "Y", 815 [ABS_X] = "X", [ABS_Y] = "Y",
816 [ABS_Z] = "Z", [ABS_RX] = "Rx", 816 [ABS_Z] = "Z", [ABS_RX] = "Rx",
817 [ABS_RY] = "Ry", [ABS_RZ] = "Rz", 817 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
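
This hunk, and the joydev and uinput hunks further down, replace open-coded ABS_MAX + 1 array sizes with ABS_CNT. For reference, include/linux/input.h defines the count macro in terms of the maximum:

        #define ABS_CNT (ABS_MAX + 1)

so the change is a readability and consistency cleanup with no change in array size.
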
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index b9e517de6a82..3feaa26410be 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/zorro.h> 17#include <linux/zorro.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/platform_device.h>
19 20
20#include <asm/setup.h> 21#include <asm/setup.h>
21#include <asm/amigahw.h> 22#include <asm/amigahw.h>
@@ -24,15 +25,6 @@
24 25
25 26
26 /* 27 /*
27 * Bases of the IDE interfaces
28 */
29
30#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
31#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
32
33#define GAYLE_IDEREG_SIZE 0x2000
34
35 /*
36 * Offsets from one of the above bases 28 * Offsets from one of the above bases
37 */ 29 */
38 30
@@ -68,20 +60,20 @@ MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
68 60
69static int gayle_test_irq(ide_hwif_t *hwif) 61static int gayle_test_irq(ide_hwif_t *hwif)
70{ 62{
71 unsigned char ch; 63 unsigned char ch;
72 64
73 ch = z_readb(hwif->io_ports.irq_addr); 65 ch = z_readb(hwif->io_ports.irq_addr);
74 if (!(ch & GAYLE_IRQ_IDE)) 66 if (!(ch & GAYLE_IRQ_IDE))
75 return 0; 67 return 0;
76 return 1; 68 return 1;
77} 69}
78 70
79static void gayle_a1200_clear_irq(ide_drive_t *drive) 71static void gayle_a1200_clear_irq(ide_drive_t *drive)
80{ 72{
81 ide_hwif_t *hwif = drive->hwif; 73 ide_hwif_t *hwif = drive->hwif;
82 74
83 (void)z_readb(hwif->io_ports.status_addr); 75 (void)z_readb(hwif->io_ports.status_addr);
84 z_writeb(0x7c, hwif->io_ports.irq_addr); 76 z_writeb(0x7c, hwif->io_ports.irq_addr);
85} 77}
86 78
87static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base, 79static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
@@ -122,64 +114,89 @@ static const struct ide_port_info gayle_port_info = {
122 * Probe for a Gayle IDE interface (and optionally for an IDE doubler) 114 * Probe for a Gayle IDE interface (and optionally for an IDE doubler)
123 */ 115 */
124 116
125static int __init gayle_init(void) 117static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
126{ 118{
127 unsigned long phys_base, res_start, res_n; 119 struct resource *res;
128 unsigned long base, ctrlport, irqport; 120 struct gayle_ide_platform_data *pdata;
129 int a4000, i, rc; 121 unsigned long base, ctrlport, irqport;
130 struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; 122 unsigned int i;
131 struct ide_port_info d = gayle_port_info; 123 int error;
132 124 struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
133 if (!MACH_IS_AMIGA) 125 struct ide_port_info d = gayle_port_info;
134 return -ENODEV; 126 struct ide_host *host;
135 127
136 if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE)) 128 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
137 goto found; 129 if (!res)
138 130 return -ENODEV;
139#ifdef CONFIG_ZORRO 131
140 if (zorro_find_device(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE, 132 if (!request_mem_region(res->start, resource_size(res), "IDE"))
141 NULL)) 133 return -EBUSY;
142 goto found; 134
143#endif 135 pdata = pdev->dev.platform_data;
144 return -ENODEV; 136 pr_info("ide: Gayle IDE controller (A%u style%s)\n",
145 137 pdata->explicit_ack ? 1200 : 4000,
146found: 138 ide_doubler ? ", IDE doubler" : "");
147 printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n", 139
148 a4000 ? 4000 : 1200, 140 base = (unsigned long)ZTWO_VADDR(pdata->base);
149 ide_doubler ? ", IDE doubler" : ""); 141 ctrlport = 0;
150 142 irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
151 if (a4000) { 143 if (pdata->explicit_ack)
152 phys_base = GAYLE_BASE_4000; 144 d.port_ops = &gayle_a1200_port_ops;
153 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); 145 else
154 d.port_ops = &gayle_a4000_port_ops; 146 d.port_ops = &gayle_a4000_port_ops;
155 } else { 147
156 phys_base = GAYLE_BASE_1200; 148 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
157 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_1200); 149 if (GAYLE_HAS_CONTROL_REG)
158 d.port_ops = &gayle_a1200_port_ops; 150 ctrlport = base + GAYLE_CONTROL;
151
152 gayle_setup_ports(&hw[i], base, ctrlport, irqport);
153 hws[i] = &hw[i];
159 } 154 }
160 155
161 res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); 156 error = ide_host_add(&d, hws, i, &host);
162 res_n = GAYLE_IDEREG_SIZE; 157 if (error)
158 goto out;
163 159
164 if (!request_mem_region(res_start, res_n, "IDE")) 160 platform_set_drvdata(pdev, host);
165 return -EBUSY; 161 return 0;
166 162
167 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { 163out:
168 base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT); 164 release_mem_region(res->start, resource_size(res));
169 ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; 165 return error;
166}
167
168static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
169{
170 struct ide_host *host = platform_get_drvdata(pdev);
171 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
172
173 ide_host_remove(host);
174 release_mem_region(res->start, resource_size(res));
175 return 0;
176}
170 177
171 gayle_setup_ports(&hw[i], base, ctrlport, irqport); 178static struct platform_driver amiga_gayle_ide_driver = {
179 .remove = __exit_p(amiga_gayle_ide_remove),
180 .driver = {
181 .name = "amiga-gayle-ide",
182 .owner = THIS_MODULE,
183 },
184};
172 185
173 hws[i] = &hw[i]; 186static int __init amiga_gayle_ide_init(void)
174 } 187{
188 return platform_driver_probe(&amiga_gayle_ide_driver,
189 amiga_gayle_ide_probe);
190}
175 191
176 rc = ide_host_add(&d, hws, i, NULL); 192module_init(amiga_gayle_ide_init);
177 if (rc)
178 release_mem_region(res_start, res_n);
179 193
180 return rc; 194static void __exit amiga_gayle_ide_exit(void)
195{
196 platform_driver_unregister(&amiga_gayle_ide_driver);
181} 197}
182 198
183module_init(gayle_init); 199module_exit(amiga_gayle_ide_exit);
184 200
185MODULE_LICENSE("GPL"); 201MODULE_LICENSE("GPL");
202MODULE_ALIAS("platform:amiga-gayle-ide");
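
A hypothetical board-side counterpart (the matching registration presumably lives in the m68k platform setup, not in this hunk): the converted driver only needs a platform device named "amiga-gayle-ide" carrying an IORESOURCE_MEM resource plus the platform data fields read above. The values below reuse the A4000 constants removed from the driver and are illustrative only:

        static struct gayle_ide_platform_data gayle_pdata = {
                .base         = 0xdd2020,        /* old GAYLE_BASE_4000 */
                .irqport      = GAYLE_IRQ_4000,
                .explicit_ack = 0,               /* A4000 style, no explicit ack */
        };
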
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 9fd4a0d3206e..adaefabc40e9 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
1824 "and will not be available in the new firewire driver stack. " 1824 "and will not be available in the new firewire driver stack. "
1825 "Try libraw1394 based programs instead.\n", current->comm); 1825 "Try libraw1394 based programs instead.\n", current->comm);
1826 1826
1827 return 0; 1827 return nonseekable_open(inode, file);
1828} 1828}
1829 1829
1830 1830
@@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev;
2153static const struct file_operations dv1394_fops= 2153static const struct file_operations dv1394_fops=
2154{ 2154{
2155 .owner = THIS_MODULE, 2155 .owner = THIS_MODULE,
2156 .poll = dv1394_poll, 2156 .poll = dv1394_poll,
2157 .unlocked_ioctl = dv1394_ioctl, 2157 .unlocked_ioctl = dv1394_ioctl,
2158#ifdef CONFIG_COMPAT 2158#ifdef CONFIG_COMPAT
2159 .compat_ioctl = dv1394_compat_ioctl, 2159 .compat_ioctl = dv1394_compat_ioctl,
2160#endif 2160#endif
2161 .mmap = dv1394_mmap, 2161 .mmap = dv1394_mmap,
2162 .open = dv1394_open, 2162 .open = dv1394_open,
2163 .write = dv1394_write, 2163 .write = dv1394_write,
2164 .read = dv1394_read, 2164 .read = dv1394_read,
2165 .release = dv1394_release, 2165 .release = dv1394_release,
2166 .fasync = dv1394_fasync, 2166 .fasync = dv1394_fasync,
2167 .llseek = no_llseek,
2167}; 2168};
2168 2169
2169 2170
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 8aa56ac07e29..b563d5e9fa2e 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
2834 2834
2835 file->private_data = fi; 2835 file->private_data = fi;
2836 2836
2837 return 0; 2837 return nonseekable_open(inode, file);
2838} 2838}
2839 2839
2840static int raw1394_release(struct inode *inode, struct file *file) 2840static int raw1394_release(struct inode *inode, struct file *file)
@@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = {
3035 .poll = raw1394_poll, 3035 .poll = raw1394_poll,
3036 .open = raw1394_open, 3036 .open = raw1394_open,
3037 .release = raw1394_release, 3037 .release = raw1394_release,
3038 .llseek = no_llseek,
3038}; 3039};
3039 3040
3040static int __init init_raw1394(void) 3041static int __init init_raw1394(void)
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 949064a05675..a42bd6893bcf 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
1239 ctx->current_ctx = NULL; 1239 ctx->current_ctx = NULL;
1240 file->private_data = ctx; 1240 file->private_data = ctx;
1241 1241
1242 return 0; 1242 return nonseekable_open(inode, file);
1243} 1243}
1244 1244
1245static int video1394_release(struct inode *inode, struct file *file) 1245static int video1394_release(struct inode *inode, struct file *file)
@@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops=
1287 .poll = video1394_poll, 1287 .poll = video1394_poll,
1288 .mmap = video1394_mmap, 1288 .mmap = video1394_mmap,
1289 .open = video1394_open, 1289 .open = video1394_open,
1290 .release = video1394_release 1290 .release = video1394_release,
1291 .llseek = no_llseek,
1291}; 1292};
1292 1293
1293/*** HOTPLUG STUFF **********************************************************/ 1294/*** HOTPLUG STUFF **********************************************************/
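
All three ieee1394 character devices above get the same two-part treatment: nonseekable_open() clears FMODE_LSEEK/FMODE_PREAD/FMODE_PWRITE at open time, and .llseek = no_llseek makes lseek() return -ESPIPE instead of falling back to default_llseek. A generic sketch of the pattern, with illustrative names:

        static int example_open(struct inode *inode, struct file *file)
        {
                return nonseekable_open(inode, file);
        }

        static const struct file_operations example_fops = {
                .owner  = THIS_MODULE,
                .open   = example_open,
                .llseek = no_llseek,
        };
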
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 07cae552cafb..e571e60ecb88 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
847 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); 847 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
848 if (!create_comp_task(pool, cpu)) { 848 if (!create_comp_task(pool, cpu)) {
849 ehca_gen_err("Can't create comp_task for cpu: %x", cpu); 849 ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
850 return NOTIFY_BAD; 850 return notifier_from_errno(-ENOMEM);
851 } 851 }
852 break; 852 break;
853 case CPU_UP_CANCELED: 853 case CPU_UP_CANCELED:
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 423e0e6031ab..34157bb97ed6 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -47,15 +47,15 @@ struct joydev {
47 struct mutex mutex; 47 struct mutex mutex;
48 struct device dev; 48 struct device dev;
49 49
50 struct js_corr corr[ABS_MAX + 1]; 50 struct js_corr corr[ABS_CNT];
51 struct JS_DATA_SAVE_TYPE glue; 51 struct JS_DATA_SAVE_TYPE glue;
52 int nabs; 52 int nabs;
53 int nkey; 53 int nkey;
54 __u16 keymap[KEY_MAX - BTN_MISC + 1]; 54 __u16 keymap[KEY_MAX - BTN_MISC + 1];
55 __u16 keypam[KEY_MAX - BTN_MISC + 1]; 55 __u16 keypam[KEY_MAX - BTN_MISC + 1];
56 __u8 absmap[ABS_MAX + 1]; 56 __u8 absmap[ABS_CNT];
57 __u8 abspam[ABS_MAX + 1]; 57 __u8 abspam[ABS_CNT];
58 __s16 abs[ABS_MAX + 1]; 58 __s16 abs[ABS_CNT];
59}; 59};
60 60
61struct joydev_client { 61struct joydev_client {
@@ -826,7 +826,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
826 joydev->handle.handler = handler; 826 joydev->handle.handler = handler;
827 joydev->handle.private = joydev; 827 joydev->handle.private = joydev;
828 828
829 for (i = 0; i < ABS_MAX + 1; i++) 829 for (i = 0; i < ABS_CNT; i++)
830 if (test_bit(i, dev->absbit)) { 830 if (test_bit(i, dev->absbit)) {
831 joydev->absmap[i] = joydev->nabs; 831 joydev->absmap[i] = joydev->nabs;
832 joydev->abspam[joydev->nabs] = i; 832 joydev->abspam[joydev->nabs] = i;
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 35149ec455a9..79172af164f2 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/keyboard.h> 37#include <linux/keyboard.h>
38#include <linux/platform_device.h>
38 39
39#include <asm/amigaints.h> 40#include <asm/amigaints.h>
40#include <asm/amigahw.h> 41#include <asm/amigahw.h>
@@ -154,10 +155,9 @@ static const char *amikbd_messages[8] = {
154 [7] = KERN_WARNING "amikbd: keyboard interrupt\n" 155 [7] = KERN_WARNING "amikbd: keyboard interrupt\n"
155}; 156};
156 157
157static struct input_dev *amikbd_dev; 158static irqreturn_t amikbd_interrupt(int irq, void *data)
158
159static irqreturn_t amikbd_interrupt(int irq, void *dummy)
160{ 159{
160 struct input_dev *dev = data;
161 unsigned char scancode, down; 161 unsigned char scancode, down;
162 162
163 scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */ 163 scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */
@@ -170,47 +170,42 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy)
170 170
171 if (scancode < 0x78) { /* scancodes < 0x78 are keys */ 171 if (scancode < 0x78) { /* scancodes < 0x78 are keys */
172 if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */ 172 if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */
173 input_report_key(amikbd_dev, scancode, 1); 173 input_report_key(dev, scancode, 1);
174 input_report_key(amikbd_dev, scancode, 0); 174 input_report_key(dev, scancode, 0);
175 } else { 175 } else {
176 input_report_key(amikbd_dev, scancode, down); 176 input_report_key(dev, scancode, down);
177 } 177 }
178 178
179 input_sync(amikbd_dev); 179 input_sync(dev);
180 } else /* scancodes >= 0x78 are error codes */ 180 } else /* scancodes >= 0x78 are error codes */
181 printk(amikbd_messages[scancode - 0x78]); 181 printk(amikbd_messages[scancode - 0x78]);
182 182
183 return IRQ_HANDLED; 183 return IRQ_HANDLED;
184} 184}
185 185
186static int __init amikbd_init(void) 186static int __init amikbd_probe(struct platform_device *pdev)
187{ 187{
188 struct input_dev *dev;
188 int i, j, err; 189 int i, j, err;
189 190
190 if (!AMIGAHW_PRESENT(AMI_KEYBOARD)) 191 dev = input_allocate_device();
191 return -ENODEV; 192 if (!dev) {
192 193 dev_err(&pdev->dev, "Not enough memory for input device\n");
193 if (!request_mem_region(CIAA_PHYSADDR-1+0xb00, 0x100, "amikeyb")) 194 return -ENOMEM;
194 return -EBUSY;
195
196 amikbd_dev = input_allocate_device();
197 if (!amikbd_dev) {
198 printk(KERN_ERR "amikbd: not enough memory for input device\n");
199 err = -ENOMEM;
200 goto fail1;
201 } 195 }
202 196
203 amikbd_dev->name = "Amiga Keyboard"; 197 dev->name = pdev->name;
204 amikbd_dev->phys = "amikbd/input0"; 198 dev->phys = "amikbd/input0";
205 amikbd_dev->id.bustype = BUS_AMIGA; 199 dev->id.bustype = BUS_AMIGA;
206 amikbd_dev->id.vendor = 0x0001; 200 dev->id.vendor = 0x0001;
207 amikbd_dev->id.product = 0x0001; 201 dev->id.product = 0x0001;
208 amikbd_dev->id.version = 0x0100; 202 dev->id.version = 0x0100;
203 dev->dev.parent = &pdev->dev;
209 204
210 amikbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 205 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
211 206
212 for (i = 0; i < 0x78; i++) 207 for (i = 0; i < 0x78; i++)
213 set_bit(i, amikbd_dev->keybit); 208 set_bit(i, dev->keybit);
214 209
215 for (i = 0; i < MAX_NR_KEYMAPS; i++) { 210 for (i = 0; i < MAX_NR_KEYMAPS; i++) {
216 static u_short temp_map[NR_KEYS] __initdata; 211 static u_short temp_map[NR_KEYS] __initdata;
@@ -229,30 +224,54 @@ static int __init amikbd_init(void)
229 memcpy(key_maps[i], temp_map, sizeof(temp_map)); 224 memcpy(key_maps[i], temp_map, sizeof(temp_map));
230 } 225 }
231 ciaa.cra &= ~0x41; /* serial data in, turn off TA */ 226 ciaa.cra &= ~0x41; /* serial data in, turn off TA */
232 if (request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", 227 err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
233 amikbd_interrupt)) { 228 dev);
234 err = -EBUSY; 229 if (err)
235 goto fail2; 230 goto fail2;
236 }
237 231
238 err = input_register_device(amikbd_dev); 232 err = input_register_device(dev);
239 if (err) 233 if (err)
240 goto fail3; 234 goto fail3;
241 235
236 platform_set_drvdata(pdev, dev);
237
242 return 0; 238 return 0;
243 239
244 fail3: free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt); 240 fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev);
245 fail2: input_free_device(amikbd_dev); 241 fail2: input_free_device(dev);
246 fail1: release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
247 return err; 242 return err;
248} 243}
249 244
250static void __exit amikbd_exit(void) 245static int __exit amikbd_remove(struct platform_device *pdev)
246{
247 struct input_dev *dev = platform_get_drvdata(pdev);
248
249 platform_set_drvdata(pdev, NULL);
250 free_irq(IRQ_AMIGA_CIAA_SP, dev);
251 input_unregister_device(dev);
252 return 0;
253}
254
255static struct platform_driver amikbd_driver = {
256 .remove = __exit_p(amikbd_remove),
257 .driver = {
258 .name = "amiga-keyboard",
259 .owner = THIS_MODULE,
260 },
261};
262
263static int __init amikbd_init(void)
251{ 264{
252 free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt); 265 return platform_driver_probe(&amikbd_driver, amikbd_probe);
253 input_unregister_device(amikbd_dev);
254 release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
255} 266}
256 267
257module_init(amikbd_init); 268module_init(amikbd_init);
269
270static void __exit amikbd_exit(void)
271{
272 platform_driver_unregister(&amikbd_driver);
273}
274
258module_exit(amikbd_exit); 275module_exit(amikbd_exit);
276
277MODULE_ALIAS("platform:amiga-keyboard");
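
The keyboard conversion above, and the mouse and Gayle IDE conversions elsewhere in this patch, share the same idiom: probe() stays __init and is passed to platform_driver_probe() at module init, so the probe code can be discarded after boot, while remove() is wrapped in __exit_p() so it is compiled out entirely when the driver is built in. For reference, __exit_p() reduces to roughly:

        #ifdef MODULE
        #define __exit_p(x)    x
        #else
        #define __exit_p(x)    NULL
        #endif
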
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 48cdabec372a..c44b9eafc556 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -80,6 +80,16 @@ config INPUT_M68K_BEEP
80 tristate "M68k Beeper support" 80 tristate "M68k Beeper support"
81 depends on M68K 81 depends on M68K
82 82
83config INPUT_MAX8925_ONKEY
84 tristate "MAX8925 ONKEY support"
85 depends on MFD_MAX8925
86 help
87 Support the ONKEY of MAX8925 PMICs as an input device
88 reporting power button status.
89
90 To compile this driver as a module, choose M here: the module
91 will be called max8925_onkey.
92
83config INPUT_APANEL 93config INPUT_APANEL
84 tristate "Fujitsu Lifebook Application Panel buttons" 94 tristate "Fujitsu Lifebook Application Panel buttons"
85 depends on X86 && I2C && LEDS_CLASS 95 depends on X86 && I2C && LEDS_CLASS
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f9f577031e06..71fe57d8023f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
20obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o 20obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
21obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o 21obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
22obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o 22obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
23obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
23obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o 24obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
24obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o 25obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
25obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o 26obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 000000000000..80af44608018
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
1/**
2 * max8925_onkey.c - MAX8925 ONKEY driver
3 *
4 * Copyright (C) 2009 Marvell International Ltd.
5 * Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file "COPYING" in the main directory of this
9 * archive for more details.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/i2c.h>
25#include <linux/input.h>
26#include <linux/interrupt.h>
27#include <linux/mfd/max8925.h>
28#include <linux/slab.h>
29
30#define HARDRESET_EN (1 << 7)
31#define PWREN_EN (1 << 7)
32
33struct max8925_onkey_info {
34 struct input_dev *idev;
35 struct i2c_client *i2c;
36 int irq;
37};
38
39/*
40 * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
 41 * max8925_set_bits() operates on the I2C bus and may sleep, so implement
 42 * it in a threaded IRQ handler.
43 */
44static irqreturn_t max8925_onkey_handler(int irq, void *data)
45{
46 struct max8925_onkey_info *info = data;
47
48 input_report_key(info->idev, KEY_POWER, 1);
49 input_sync(info->idev);
50
 51 /* Enable hardreset to halt if the system isn't shut down in time */
52 max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
53 HARDRESET_EN, HARDRESET_EN);
54
55 return IRQ_HANDLED;
56}
57
58static int __devinit max8925_onkey_probe(struct platform_device *pdev)
59{
60 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
61 struct max8925_onkey_info *info;
62 int error;
63
64 info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
65 if (!info)
66 return -ENOMEM;
67
68 info->i2c = chip->i2c;
69 info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
70
71 info->idev = input_allocate_device();
72 if (!info->idev) {
73 dev_err(chip->dev, "Failed to allocate input dev\n");
74 error = -ENOMEM;
75 goto out_input;
76 }
77
78 info->idev->name = "max8925_on";
79 info->idev->phys = "max8925_on/input0";
80 info->idev->id.bustype = BUS_I2C;
81 info->idev->dev.parent = &pdev->dev;
82 info->idev->evbit[0] = BIT_MASK(EV_KEY);
83 info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
84
85 error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
86 IRQF_ONESHOT, "onkey", info);
87 if (error < 0) {
88 dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
89 info->irq, error);
90 goto out_irq;
91 }
92
93 error = input_register_device(info->idev);
94 if (error) {
95 dev_err(chip->dev, "Can't register input device: %d\n", error);
96 goto out;
97 }
98
99 platform_set_drvdata(pdev, info);
100
101 return 0;
102
103out:
104 free_irq(info->irq, info);
105out_irq:
106 input_free_device(info->idev);
107out_input:
108 kfree(info);
109 return error;
110}
111
112static int __devexit max8925_onkey_remove(struct platform_device *pdev)
113{
114 struct max8925_onkey_info *info = platform_get_drvdata(pdev);
115
116 free_irq(info->irq, info);
117 input_unregister_device(info->idev);
118 kfree(info);
119
120 platform_set_drvdata(pdev, NULL);
121
122 return 0;
123}
124
125static struct platform_driver max8925_onkey_driver = {
126 .driver = {
127 .name = "max8925-onkey",
128 .owner = THIS_MODULE,
129 },
130 .probe = max8925_onkey_probe,
131 .remove = __devexit_p(max8925_onkey_remove),
132};
133
134static int __init max8925_onkey_init(void)
135{
136 return platform_driver_register(&max8925_onkey_driver);
137}
138module_init(max8925_onkey_init);
139
140static void __exit max8925_onkey_exit(void)
141{
142 platform_driver_unregister(&max8925_onkey_driver);
143}
144module_exit(max8925_onkey_exit);
145
146MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
147MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
148MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fee9eac8e04a..4f9b2afc24e8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
90 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 90 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
91 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL); 91 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
92 92
93 twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
94 twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL); 93 twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
94 twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
95 95
96 info->enabled = false; 96 info->enabled = false;
97} 97}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 1477466076ad..b71eb55f2dbc 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev)
300 unsigned int cnt; 300 unsigned int cnt;
301 int retval = 0; 301 int retval = 0;
302 302
303 for (cnt = 0; cnt < ABS_MAX + 1; cnt++) { 303 for (cnt = 0; cnt < ABS_CNT; cnt++) {
304 if (!test_bit(cnt, dev->absbit)) 304 if (!test_bit(cnt, dev->absbit))
305 continue; 305 continue;
306 306
@@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
387 dev->id.product = user_dev->id.product; 387 dev->id.product = user_dev->id.product;
388 dev->id.version = user_dev->id.version; 388 dev->id.version = user_dev->id.version;
389 389
390 size = sizeof(int) * (ABS_MAX + 1); 390 size = sizeof(int) * ABS_CNT;
391 memcpy(dev->absmax, user_dev->absmax, size); 391 memcpy(dev->absmax, user_dev->absmax, size);
392 memcpy(dev->absmin, user_dev->absmin, size); 392 memcpy(dev->absmin, user_dev->absmin, size);
393 memcpy(dev->absfuzz, user_dev->absfuzz, size); 393 memcpy(dev->absfuzz, user_dev->absfuzz, size);
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index a185ac78a42c..ff5f61a0fd3a 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/input.h> 22#include <linux/input.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/platform_device.h>
24 25
25#include <asm/irq.h> 26#include <asm/irq.h>
26#include <asm/setup.h> 27#include <asm/setup.h>
@@ -34,10 +35,10 @@ MODULE_DESCRIPTION("Amiga mouse driver");
34MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
35 36
36static int amimouse_lastx, amimouse_lasty; 37static int amimouse_lastx, amimouse_lasty;
37static struct input_dev *amimouse_dev;
38 38
39static irqreturn_t amimouse_interrupt(int irq, void *dummy) 39static irqreturn_t amimouse_interrupt(int irq, void *data)
40{ 40{
41 struct input_dev *dev = data;
41 unsigned short joy0dat, potgor; 42 unsigned short joy0dat, potgor;
42 int nx, ny, dx, dy; 43 int nx, ny, dx, dy;
43 44
@@ -59,14 +60,14 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
59 60
60 potgor = amiga_custom.potgor; 61 potgor = amiga_custom.potgor;
61 62
62 input_report_rel(amimouse_dev, REL_X, dx); 63 input_report_rel(dev, REL_X, dx);
63 input_report_rel(amimouse_dev, REL_Y, dy); 64 input_report_rel(dev, REL_Y, dy);
64 65
65 input_report_key(amimouse_dev, BTN_LEFT, ciaa.pra & 0x40); 66 input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40);
66 input_report_key(amimouse_dev, BTN_MIDDLE, potgor & 0x0100); 67 input_report_key(dev, BTN_MIDDLE, potgor & 0x0100);
67 input_report_key(amimouse_dev, BTN_RIGHT, potgor & 0x0400); 68 input_report_key(dev, BTN_RIGHT, potgor & 0x0400);
68 69
69 input_sync(amimouse_dev); 70 input_sync(dev);
70 71
71 return IRQ_HANDLED; 72 return IRQ_HANDLED;
72} 73}
@@ -74,63 +75,90 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
74static int amimouse_open(struct input_dev *dev) 75static int amimouse_open(struct input_dev *dev)
75{ 76{
76 unsigned short joy0dat; 77 unsigned short joy0dat;
78 int error;
77 79
78 joy0dat = amiga_custom.joy0dat; 80 joy0dat = amiga_custom.joy0dat;
79 81
80 amimouse_lastx = joy0dat & 0xff; 82 amimouse_lastx = joy0dat & 0xff;
81 amimouse_lasty = joy0dat >> 8; 83 amimouse_lasty = joy0dat >> 8;
82 84
83 if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) { 85 error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse",
84 printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB); 86 dev);
85 return -EBUSY; 87 if (error)
86 } 88 dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
87 89
88 return 0; 90 return error;
89} 91}
90 92
91static void amimouse_close(struct input_dev *dev) 93static void amimouse_close(struct input_dev *dev)
92{ 94{
93 free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt); 95 free_irq(IRQ_AMIGA_VERTB, dev);
94} 96}
95 97
96static int __init amimouse_init(void) 98static int __init amimouse_probe(struct platform_device *pdev)
97{ 99{
98 int err; 100 int err;
101 struct input_dev *dev;
99 102
100 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_MOUSE)) 103 dev = input_allocate_device();
101 return -ENODEV; 104 if (!dev)
102
103 amimouse_dev = input_allocate_device();
104 if (!amimouse_dev)
105 return -ENOMEM; 105 return -ENOMEM;
106 106
107 amimouse_dev->name = "Amiga mouse"; 107 dev->name = pdev->name;
108 amimouse_dev->phys = "amimouse/input0"; 108 dev->phys = "amimouse/input0";
109 amimouse_dev->id.bustype = BUS_AMIGA; 109 dev->id.bustype = BUS_AMIGA;
110 amimouse_dev->id.vendor = 0x0001; 110 dev->id.vendor = 0x0001;
111 amimouse_dev->id.product = 0x0002; 111 dev->id.product = 0x0002;
112 amimouse_dev->id.version = 0x0100; 112 dev->id.version = 0x0100;
113 113
114 amimouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); 114 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
115 amimouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); 115 dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
116 amimouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | 116 dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
117 BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); 117 BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
118 amimouse_dev->open = amimouse_open; 118 dev->open = amimouse_open;
119 amimouse_dev->close = amimouse_close; 119 dev->close = amimouse_close;
120 dev->dev.parent = &pdev->dev;
120 121
121 err = input_register_device(amimouse_dev); 122 err = input_register_device(dev);
122 if (err) { 123 if (err) {
123 input_free_device(amimouse_dev); 124 input_free_device(dev);
124 return err; 125 return err;
125 } 126 }
126 127
128 platform_set_drvdata(pdev, dev);
129
127 return 0; 130 return 0;
128} 131}
129 132
130static void __exit amimouse_exit(void) 133static int __exit amimouse_remove(struct platform_device *pdev)
131{ 134{
132 input_unregister_device(amimouse_dev); 135 struct input_dev *dev = platform_get_drvdata(pdev);
136
137 platform_set_drvdata(pdev, NULL);
138 input_unregister_device(dev);
139 return 0;
140}
141
142static struct platform_driver amimouse_driver = {
143 .remove = __exit_p(amimouse_remove),
144 .driver = {
145 .name = "amiga-mouse",
146 .owner = THIS_MODULE,
147 },
148};
149
150static int __init amimouse_init(void)
151{
152 return platform_driver_probe(&amimouse_driver, amimouse_probe);
133} 153}
134 154
135module_init(amimouse_init); 155module_init(amimouse_init);
156
157static void __exit amimouse_exit(void)
158{
159 platform_driver_unregister(&amimouse_driver);
160}
161
136module_exit(amimouse_exit); 162module_exit(amimouse_exit);
163
164MODULE_ALIAS("platform:amiga-mouse");
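A minimal sketch of the dev_id idiom the amimouse conversion adopts above: the input device is passed as the last argument to request_irq(), so the handler recovers it from dev_id instead of a file-scope global, and the same pointer later serves as the free_irq() cookie. The names and the IRQ number below are illustrative, not taken from the driver.

#include <linux/interrupt.h>
#include <linux/input.h>

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct input_dev *input = dev_id;	/* pointer handed to request_irq() below */

	input_report_key(input, BTN_LEFT, 0);
	input_sync(input);
	return IRQ_HANDLED;
}

static int example_open(struct input_dev *input)
{
	/* the input device doubles as the free_irq() cookie in the close path */
	return request_irq(7, example_interrupt, 0, "example-mouse", input);
}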
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 532279cda0e4..634f6f6b9b13 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
1163 1163
1164 ts->reg = regulator_get(&spi->dev, "vcc"); 1164 ts->reg = regulator_get(&spi->dev, "vcc");
1165 if (IS_ERR(ts->reg)) { 1165 if (IS_ERR(ts->reg)) {
1166 dev_err(&spi->dev, "unable to get regulator: %ld\n", 1166 err = PTR_ERR(ts->reg);
1167 PTR_ERR(ts->reg)); 1167 dev_err(&spi->dev, "unable to get regulator: %ld\n", err);
1168 goto err_free_gpio; 1168 goto err_free_gpio;
1169 } 1169 }
1170 1170
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index e0b7c834111d..ac5d0f9b0cb1 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -413,6 +413,8 @@ static struct dev_pm_ops s3c_ts_pmops = {
413#endif 413#endif
414 414
415static struct platform_device_id s3cts_driver_ids[] = { 415static struct platform_device_id s3cts_driver_ids[] = {
416 { "s3c2410-ts", 0 },
417 { "s3c2440-ts", 0 },
416 { "s3c64xx-ts", FEAT_PEN_IRQ }, 418 { "s3c64xx-ts", FEAT_PEN_IRQ },
417 { } 419 { }
418}; 420};
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 29a8bbf3f086..567d57215c28 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -857,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
857 if ((pkt[0] & 0xe0) != 0xe0) 857 if ((pkt[0] & 0xe0) != 0xe0)
858 return 0; 858 return 0;
859 859
860 if (be16_to_cpu(packet->data_len) > 0xff)
861 packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100);
862 if (be16_to_cpu(packet->x_len) > 0xff)
863 packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80);
864
860 /* send ACK */ 865 /* send ACK */
861 ret = usb_submit_urb(priv->ack, GFP_ATOMIC); 866 ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
862 867
@@ -1112,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
1112 1117
1113#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO 1118#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
1114 [DEVTYPE_NEXIO] = { 1119 [DEVTYPE_NEXIO] = {
1115 .rept_size = 128, 1120 .rept_size = 1024,
1116 .irq_always = true, 1121 .irq_always = true,
1117 .read_data = nexio_read_data, 1122 .read_data = nexio_read_data,
1118 .init = nexio_init, 1123 .init = nexio_init,
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index c3243c913ec0..81048b8ed8ad 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -98,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
98 if (*debug & DEBUG_TIMER) 98 if (*debug & DEBUG_TIMER)
99 printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__, 99 printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
100 filep, buf, (int)count, off); 100 filep, buf, (int)count, off);
101 if (*off != filep->f_pos)
102 return -ESPIPE;
103 101
104 if (list_empty(&dev->expired) && (dev->work == 0)) { 102 if (list_empty(&dev->expired) && (dev->work == 0)) {
105 if (filep->f_flags & O_NONBLOCK) 103 if (filep->f_flags & O_NONBLOCK)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9ea17d6c799b..d2c0f94fa37d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4645,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4645 kfree(percpu->scribble); 4645 kfree(percpu->scribble);
4646 pr_err("%s: failed memory allocation for cpu%ld\n", 4646 pr_err("%s: failed memory allocation for cpu%ld\n",
4647 __func__, cpu); 4647 __func__, cpu);
4648 return NOTIFY_BAD; 4648 return notifier_from_errno(-ENOMEM);
4649 } 4649 }
4650 break; 4650 break;
4651 case CPU_DEAD: 4651 case CPU_DEAD:
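For reference, notifier_from_errno() wraps a negative errno so the chain still sees a stop value (as the old NOTIFY_BAD did) while callers using notifier_to_errno() can recover the exact reason. A hedged sketch of the same pattern in a CPU-hotplug callback, with hypothetical names:

#include <linux/notifier.h>
#include <linux/slab.h>

static int example_cpu_notify(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	void *scratch = kmalloc(4096, GFP_KERNEL);

	if (!scratch)
		/* encodes -ENOMEM; behaves like NOTIFY_BAD but keeps the reason */
		return notifier_from_errno(-ENOMEM);

	kfree(scratch);
	return NOTIFY_OK;
}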
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index d33693c13368..c4b117f5fb70 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,14 +186,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
186 if (!dev) 186 if (!dev)
187 return -ENXIO; 187 return -ENXIO;
188 188
189 ops = kmalloc(kcmd.oplen, GFP_KERNEL); 189 ops = memdup_user(kcmd.opbuf, kcmd.oplen);
190 if (!ops) 190 if (IS_ERR(ops))
191 return -ENOMEM; 191 return PTR_ERR(ops);
192
193 if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
194 kfree(ops);
195 return -EFAULT;
196 }
197 192
198 /* 193 /*
199 * It's possible to have a _very_ large table 194 * It's possible to have a _very_ large table
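memdup_user() folds the kmalloc()/copy_from_user()/kfree() error handling removed above into one call that returns either a kernel copy of the user buffer or an ERR_PTR. A generic sketch of the resulting idiom (function and buffer names hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int handle_user_table(void __user *ubuf, size_t len)
{
	void *ops = memdup_user(ubuf, len);

	if (IS_ERR(ops))
		return PTR_ERR(ops);	/* -ENOMEM or -EFAULT, chosen by the helper */

	/* ... parse the table ... */
	kfree(ops);
	return 0;
}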
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 31a991161f0a..5bfb2a2041b8 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -75,6 +75,9 @@ enum ctype {
75 UNALIGNED_LOAD_STORE_WRITE, 75 UNALIGNED_LOAD_STORE_WRITE,
76 OVERWRITE_ALLOCATION, 76 OVERWRITE_ALLOCATION,
77 WRITE_AFTER_FREE, 77 WRITE_AFTER_FREE,
78 SOFTLOCKUP,
79 HARDLOCKUP,
80 HUNG_TASK,
78}; 81};
79 82
80static char* cp_name[] = { 83static char* cp_name[] = {
@@ -99,6 +102,9 @@ static char* cp_type[] = {
99 "UNALIGNED_LOAD_STORE_WRITE", 102 "UNALIGNED_LOAD_STORE_WRITE",
100 "OVERWRITE_ALLOCATION", 103 "OVERWRITE_ALLOCATION",
101 "WRITE_AFTER_FREE", 104 "WRITE_AFTER_FREE",
105 "SOFTLOCKUP",
106 "HARDLOCKUP",
107 "HUNG_TASK",
102}; 108};
103 109
104static struct jprobe lkdtm; 110static struct jprobe lkdtm;
@@ -320,6 +326,20 @@ static void lkdtm_do_action(enum ctype which)
320 memset(data, 0x78, len); 326 memset(data, 0x78, len);
321 break; 327 break;
322 } 328 }
329 case SOFTLOCKUP:
330 preempt_disable();
331 for (;;)
332 cpu_relax();
333 break;
334 case HARDLOCKUP:
335 local_irq_disable();
336 for (;;)
337 cpu_relax();
338 break;
339 case HUNG_TASK:
340 set_current_state(TASK_UNINTERRUPTIBLE);
341 schedule();
342 break;
323 case NONE: 343 case NONE:
324 default: 344 default:
325 break; 345 break;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3168ebd616b2..569e94da844c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep);
1252/** 1252/**
1253 * mmc_suspend_host - suspend a host 1253 * mmc_suspend_host - suspend a host
1254 * @host: mmc host 1254 * @host: mmc host
1255 * @state: suspend mode (PM_SUSPEND_xxx)
1256 */ 1255 */
1257int mmc_suspend_host(struct mmc_host *host, pm_message_t state) 1256int mmc_suspend_host(struct mmc_host *host)
1258{ 1257{
1259 int err = 0; 1258 int err = 0;
1260 1259
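With the pm_message_t parameter dropped from mmc_suspend_host(), host drivers simply stop forwarding the state they receive from the bus; the long run of call-site updates below all follow this shape. A hedged sketch of an adjusted suspend hook, with a hypothetical driver name:

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

static int example_mmc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);

	/* 'state' still arrives from the platform bus but is no longer passed on */
	return mmc ? mmc_suspend_host(mmc) : 0;
}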
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 0d96080d44b0..63772e7e7608 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
79 * we cannot use the retries field in mmc_command. 79 * we cannot use the retries field in mmc_command.
80 */ 80 */
81 for (i = 0;i <= retries;i++) { 81 for (i = 0;i <= retries;i++) {
82 memset(&mrq, 0, sizeof(struct mmc_request));
83
84 err = mmc_app_cmd(host, card); 82 err = mmc_app_cmd(host, card);
85 if (err) { 83 if (err) {
86 /* no point in retrying; no APP commands allowed */ 84 /* no point in retrying; no APP commands allowed */
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index ff27c8c71355..0f687cdeb064 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -406,6 +406,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
406EXPORT_SYMBOL_GPL(sdio_writeb); 406EXPORT_SYMBOL_GPL(sdio_writeb);
407 407
408/** 408/**
409 * sdio_writeb_readb - write and read a byte from SDIO function
410 * @func: SDIO function to access
411 * @write_byte: byte to write
412 * @addr: address to write to
413 * @err_ret: optional status value from transfer
414 *
 415 * Performs a RAW (Read after Write) operation as defined by the SDIO spec -
 416 * a single byte is written to the address space of a given SDIO function and
 417 * the response is read back from the same address, both using a single request.
418 * If there is a problem with the operation, 0xff is returned and
419 * @err_ret will contain the error code.
420 */
421u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
422 unsigned int addr, int *err_ret)
423{
424 int ret;
425 u8 val;
426
427 ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
428 write_byte, &val);
429 if (err_ret)
430 *err_ret = ret;
431 if (ret)
432 val = 0xff;
433
434 return val;
435}
436EXPORT_SYMBOL_GPL(sdio_writeb_readb);
437
438/**
409 * sdio_memcpy_fromio - read a chunk of memory from a SDIO function 439 * sdio_memcpy_fromio - read a chunk of memory from a SDIO function
410 * @func: SDIO function to access 440 * @func: SDIO function to access
411 * @dst: buffer to store the data 441 * @dst: buffer to store the data
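A usage sketch of the new sdio_writeb_readb() helper from an SDIO function driver's point of view: one CMD52 with the RAW flag writes the byte and returns the register's post-write value in the same transaction. The register address 0x10 and the value written are placeholders.

#include <linux/mmc/sdio_func.h>

static int example_toggle_reg(struct sdio_func *func)
{
	int err;
	u8 val;

	sdio_claim_host(func);
	/* write 0x01 and read the register back in a single read-after-write op */
	val = sdio_writeb_readb(func, 0x01, 0x10, &err);
	sdio_release_host(func);

	return err ? err : val;
}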
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e13b94769fd..e171e77f6129 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -136,6 +136,18 @@ config MMC_SDHCI_S3C
136 136
137 If unsure, say N. 137 If unsure, say N.
138 138
139config MMC_SDHCI_SPEAR
140 tristate "SDHCI support on ST SPEAr platform"
141 depends on MMC_SDHCI && PLAT_SPEAR
142 help
143 This selects the Secure Digital Host Controller Interface (SDHCI)
144 often referred to as the HSMMC block in some of the ST SPEAR range
145 of SoCs
146
147 If you have a controller with this interface, say Y or M here.
148
149 If unsure, say N.
150
139config MMC_SDHCI_S3C_DMA 151config MMC_SDHCI_S3C_DMA
140 bool "DMA support on S3C SDHCI" 152 bool "DMA support on S3C SDHCI"
141 depends on MMC_SDHCI_S3C && EXPERIMENTAL 153 depends on MMC_SDHCI_S3C && EXPERIMENTAL
@@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
412 depends on SDH_BFIN 424 depends on SDH_BFIN
413 help 425 help
414 If you say yes here SD-Cards may work on the EZkit. 426 If you say yes here SD-Cards may work on the EZkit.
427
428config MMC_SH_MMCIF
429 tristate "SuperH Internal MMCIF support"
430 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
431 help
432 This selects the MMC Host Interface controller (MMCIF).
433
434 This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4803977dfce..e30c2ee48894 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o 15obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
16obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o 16obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
17obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
17obj-$(CONFIG_MMC_WBSD) += wbsd.o 18obj-$(CONFIG_MMC_WBSD) += wbsd.o
18obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 19obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
19obj-$(CONFIG_MMC_OMAP) += omap.o 20obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
34obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 35obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
35obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 36obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
36obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 37obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
38obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
37 39
38obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 40obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
39sdhci-of-y := sdhci-of-core.o 41sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 336d9f553f3e..5f3a599ead07 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1157 enable_irq_wake(host->board->det_pin); 1157 enable_irq_wake(host->board->det_pin);
1158 1158
1159 if (mmc) 1159 if (mmc)
1160 ret = mmc_suspend_host(mmc, state); 1160 ret = mmc_suspend_host(mmc);
1161 1161
1162 return ret; 1162 return ret;
1163} 1163}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index df0e8a88d85f..95ef864ad8f9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -173,6 +173,7 @@ struct atmel_mci {
173 * @mmc: The mmc_host representing this slot. 173 * @mmc: The mmc_host representing this slot.
174 * @host: The MMC controller this slot is using. 174 * @host: The MMC controller this slot is using.
175 * @sdc_reg: Value of SDCR to be written before using this slot. 175 * @sdc_reg: Value of SDCR to be written before using this slot.
176 * @sdio_irq: SDIO irq mask for this slot.
176 * @mrq: mmc_request currently being processed or waiting to be 177 * @mrq: mmc_request currently being processed or waiting to be
177 * processed, or NULL when the slot is idle. 178 * processed, or NULL when the slot is idle.
178 * @queue_node: List node for placing this node in the @queue list of 179 * @queue_node: List node for placing this node in the @queue list of
@@ -191,6 +192,7 @@ struct atmel_mci_slot {
191 struct atmel_mci *host; 192 struct atmel_mci *host;
192 193
193 u32 sdc_reg; 194 u32 sdc_reg;
195 u32 sdio_irq;
194 196
195 struct mmc_request *mrq; 197 struct mmc_request *mrq;
196 struct list_head queue_node; 198 struct list_head queue_node;
@@ -792,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host,
792 mci_writel(host, SDCR, slot->sdc_reg); 794 mci_writel(host, SDCR, slot->sdc_reg);
793 795
794 iflags = mci_readl(host, IMR); 796 iflags = mci_readl(host, IMR);
795 if (iflags) 797 if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
796 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", 798 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
797 iflags); 799 iflags);
798 800
@@ -952,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
952 if (mci_has_rwproof()) 954 if (mci_has_rwproof())
953 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); 955 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
954 956
955 if (list_empty(&host->queue)) 957 if (atmci_is_mci2()) {
958 /* setup High Speed mode in relation with card capacity */
959 if (ios->timing == MMC_TIMING_SD_HS)
960 host->cfg_reg |= MCI_CFG_HSMODE;
961 else
962 host->cfg_reg &= ~MCI_CFG_HSMODE;
963 }
964
965 if (list_empty(&host->queue)) {
956 mci_writel(host, MR, host->mode_reg); 966 mci_writel(host, MR, host->mode_reg);
957 else 967 if (atmci_is_mci2())
968 mci_writel(host, CFG, host->cfg_reg);
969 } else {
958 host->need_clock_update = true; 970 host->need_clock_update = true;
971 }
959 972
960 spin_unlock_bh(&host->lock); 973 spin_unlock_bh(&host->lock);
961 } else { 974 } else {
@@ -1030,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc)
1030 return present; 1043 return present;
1031} 1044}
1032 1045
1046static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1047{
1048 struct atmel_mci_slot *slot = mmc_priv(mmc);
1049 struct atmel_mci *host = slot->host;
1050
1051 if (enable)
1052 mci_writel(host, IER, slot->sdio_irq);
1053 else
1054 mci_writel(host, IDR, slot->sdio_irq);
1055}
1056
1033static const struct mmc_host_ops atmci_ops = { 1057static const struct mmc_host_ops atmci_ops = {
1034 .request = atmci_request, 1058 .request = atmci_request,
1035 .set_ios = atmci_set_ios, 1059 .set_ios = atmci_set_ios,
1036 .get_ro = atmci_get_ro, 1060 .get_ro = atmci_get_ro,
1037 .get_cd = atmci_get_cd, 1061 .get_cd = atmci_get_cd,
1062 .enable_sdio_irq = atmci_enable_sdio_irq,
1038}; 1063};
1039 1064
1040/* Called with host->lock held */ 1065/* Called with host->lock held */
@@ -1052,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1052 * necessary if set_ios() is called when a different slot is 1077 * necessary if set_ios() is called when a different slot is
1053 * busy transfering data. 1078 * busy transfering data.
1054 */ 1079 */
1055 if (host->need_clock_update) 1080 if (host->need_clock_update) {
1056 mci_writel(host, MR, host->mode_reg); 1081 mci_writel(host, MR, host->mode_reg);
1082 if (atmci_is_mci2())
1083 mci_writel(host, CFG, host->cfg_reg);
1084 }
1057 1085
1058 host->cur_slot->mrq = NULL; 1086 host->cur_slot->mrq = NULL;
1059 host->mrq = NULL; 1087 host->mrq = NULL;
@@ -1483,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
1483 tasklet_schedule(&host->tasklet); 1511 tasklet_schedule(&host->tasklet);
1484} 1512}
1485 1513
1514static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1515{
1516 int i;
1517
1518 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1519 struct atmel_mci_slot *slot = host->slot[i];
1520 if (slot && (status & slot->sdio_irq)) {
1521 mmc_signal_sdio_irq(slot->mmc);
1522 }
1523 }
1524}
1525
1526
1486static irqreturn_t atmci_interrupt(int irq, void *dev_id) 1527static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1487{ 1528{
1488 struct atmel_mci *host = dev_id; 1529 struct atmel_mci *host = dev_id;
@@ -1522,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1522 1563
1523 if (pending & MCI_CMDRDY) 1564 if (pending & MCI_CMDRDY)
1524 atmci_cmd_interrupt(host, status); 1565 atmci_cmd_interrupt(host, status);
1566
1567 if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
1568 atmci_sdio_interrupt(host, status);
1569
1525 } while (pass_count++ < 5); 1570 } while (pass_count++ < 5);
1526 1571
1527 return pass_count ? IRQ_HANDLED : IRQ_NONE; 1572 return pass_count ? IRQ_HANDLED : IRQ_NONE;
@@ -1544,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1544 1589
1545static int __init atmci_init_slot(struct atmel_mci *host, 1590static int __init atmci_init_slot(struct atmel_mci *host,
1546 struct mci_slot_pdata *slot_data, unsigned int id, 1591 struct mci_slot_pdata *slot_data, unsigned int id,
1547 u32 sdc_reg) 1592 u32 sdc_reg, u32 sdio_irq)
1548{ 1593{
1549 struct mmc_host *mmc; 1594 struct mmc_host *mmc;
1550 struct atmel_mci_slot *slot; 1595 struct atmel_mci_slot *slot;
@@ -1560,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1560 slot->wp_pin = slot_data->wp_pin; 1605 slot->wp_pin = slot_data->wp_pin;
1561 slot->detect_is_active_high = slot_data->detect_is_active_high; 1606 slot->detect_is_active_high = slot_data->detect_is_active_high;
1562 slot->sdc_reg = sdc_reg; 1607 slot->sdc_reg = sdc_reg;
1608 slot->sdio_irq = sdio_irq;
1563 1609
1564 mmc->ops = &atmci_ops; 1610 mmc->ops = &atmci_ops;
1565 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); 1611 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1566 mmc->f_max = host->bus_hz / 2; 1612 mmc->f_max = host->bus_hz / 2;
1567 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1613 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1614 if (sdio_irq)
1615 mmc->caps |= MMC_CAP_SDIO_IRQ;
1616 if (atmci_is_mci2())
1617 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1568 if (slot_data->bus_width >= 4) 1618 if (slot_data->bus_width >= 4)
1569 mmc->caps |= MMC_CAP_4_BIT_DATA; 1619 mmc->caps |= MMC_CAP_4_BIT_DATA;
1570 1620
@@ -1753,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev)
1753 ret = -ENODEV; 1803 ret = -ENODEV;
1754 if (pdata->slot[0].bus_width) { 1804 if (pdata->slot[0].bus_width) {
1755 ret = atmci_init_slot(host, &pdata->slot[0], 1805 ret = atmci_init_slot(host, &pdata->slot[0],
1756 0, MCI_SDCSEL_SLOT_A); 1806 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
1757 if (!ret) 1807 if (!ret)
1758 nr_slots++; 1808 nr_slots++;
1759 } 1809 }
1760 if (pdata->slot[1].bus_width) { 1810 if (pdata->slot[1].bus_width) {
1761 ret = atmci_init_slot(host, &pdata->slot[1], 1811 ret = atmci_init_slot(host, &pdata->slot[1],
1762 1, MCI_SDCSEL_SLOT_B); 1812 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
1763 if (!ret) 1813 if (!ret)
1764 nr_slots++; 1814 nr_slots++;
1765 } 1815 }
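The SDIO interrupt support added above follows the standard mmc_host contract: the core toggles delivery through the .enable_sdio_irq op, and the controller's interrupt handler reports card interrupts with mmc_signal_sdio_irq(). A stripped-down sketch of that contract; the register access and the mask bit are placeholders, not atmel-mci code:

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

#define EXAMPLE_SDIO_IRQ	0x100	/* placeholder controller status bit */

static void example_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	/* unmask or mask EXAMPLE_SDIO_IRQ in the controller's IER/IDR here */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;
	u32 status = EXAMPLE_SDIO_IRQ;	/* read the real status register here */

	if (status & EXAMPLE_SDIO_IRQ)
		mmc_signal_sdio_irq(mmc);	/* let the SDIO core run the card's handler */

	return IRQ_HANDLED;
}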
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f5834449400e..c8da5d30a861 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1142 struct au1xmmc_host *host = platform_get_drvdata(pdev); 1142 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1143 int ret; 1143 int ret;
1144 1144
1145 ret = mmc_suspend_host(host->mmc, state); 1145 ret = mmc_suspend_host(host->mmc);
1146 if (ret) 1146 if (ret)
1147 return ret; 1147 return ret;
1148 1148
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 6919e844072c..4b0e677d7295 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
576 int ret = 0; 576 int ret = 0;
577 577
578 if (mmc) 578 if (mmc)
579 ret = mmc_suspend_host(mmc, state); 579 ret = mmc_suspend_host(mmc);
580 580
581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); 581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
582 peripheral_free_list(drv_data->pin_req); 582 peripheral_free_list(drv_data->pin_req);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 92a324f7417c..ca3bdc831900 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
675 struct mmc_host *mmc = cb710_slot_to_mmc(slot); 675 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
676 int err; 676 int err;
677 677
678 err = mmc_suspend_host(mmc, state); 678 err = mmc_suspend_host(mmc);
679 if (err) 679 if (err)
680 return err; 680 return err;
681 681
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9d..33d9f1b00862 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
137 137
138/* 138/*
139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
140 * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 140 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB) 141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
142 * than the page or two that's otherwise typical. NR_SG == 16 gives at 142 * than the page or two that's otherwise typical. nr_sg (passed from
143 * least the same throughput boost, using EDMA transfer linkage instead 143 * platform data) == 16 gives at least the same throughput boost, using
144 * of spending CPU time copying pages. 144 * EDMA transfer linkage instead of spending CPU time copying pages.
145 */ 145 */
146#define MAX_CCNT ((1 << 16) - 1) 146#define MAX_CCNT ((1 << 16) - 1)
147 147
148#define NR_SG 16 148#define MAX_NR_SG 16
149 149
150static unsigned rw_threshold = 32; 150static unsigned rw_threshold = 32;
151module_param(rw_threshold, uint, S_IRUGO); 151module_param(rw_threshold, uint, S_IRUGO);
@@ -171,6 +171,7 @@ struct mmc_davinci_host {
171#define DAVINCI_MMC_DATADIR_READ 1 171#define DAVINCI_MMC_DATADIR_READ 1
172#define DAVINCI_MMC_DATADIR_WRITE 2 172#define DAVINCI_MMC_DATADIR_WRITE 2
173 unsigned char data_dir; 173 unsigned char data_dir;
174 unsigned char suspended;
174 175
175 /* buffer is used during PIO of one scatterlist segment, and 176 /* buffer is used during PIO of one scatterlist segment, and
176 * is updated along with buffer_bytes_left. bytes_left applies 177 * is updated along with buffer_bytes_left. bytes_left applies
@@ -192,7 +193,7 @@ struct mmc_davinci_host {
192 struct edmacc_param tx_template; 193 struct edmacc_param tx_template;
193 struct edmacc_param rx_template; 194 struct edmacc_param rx_template;
194 unsigned n_link; 195 unsigned n_link;
195 u32 links[NR_SG - 1]; 196 u32 links[MAX_NR_SG - 1];
196 197
197 /* For PIO we walk scatterlists one segment at a time. */ 198 /* For PIO we walk scatterlists one segment at a time. */
198 unsigned int sg_len; 199 unsigned int sg_len;
@@ -202,6 +203,8 @@ struct mmc_davinci_host {
202 u8 version; 203 u8 version;
203 /* for ns in one cycle calculation */ 204 /* for ns in one cycle calculation */
204 unsigned ns_in_one_cycle; 205 unsigned ns_in_one_cycle;
206 /* Number of sg segments */
207 u8 nr_sg;
205#ifdef CONFIG_CPU_FREQ 208#ifdef CONFIG_CPU_FREQ
206 struct notifier_block freq_transition; 209 struct notifier_block freq_transition;
207#endif 210#endif
@@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
568 571
569static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 572static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
570{ 573{
574 u32 link_size;
571 int r, i; 575 int r, i;
572 576
573 /* Acquire master DMA write channel */ 577 /* Acquire master DMA write channel */
@@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
593 /* Allocate parameter RAM slots, which will later be bound to a 597 /* Allocate parameter RAM slots, which will later be bound to a
594 * channel as needed to handle a scatterlist. 598 * channel as needed to handle a scatterlist.
595 */ 599 */
596 for (i = 0; i < ARRAY_SIZE(host->links); i++) { 600 link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
601 for (i = 0; i < link_size; i++) {
597 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); 602 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
598 if (r < 0) { 603 if (r < 0) {
599 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", 604 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
905 } 910 }
906} 911}
907 912
908static void 913static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
909davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 914 int val)
910{ 915{
911 u32 temp; 916 u32 temp;
912 917
913 /* reset command and data state machines */
914 temp = readl(host->base + DAVINCI_MMCCTL); 918 temp = readl(host->base + DAVINCI_MMCCTL);
915 writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, 919 if (val) /* reset */
916 host->base + DAVINCI_MMCCTL); 920 temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
921 else /* enable */
922 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
917 923
918 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
919 udelay(10);
920 writel(temp, host->base + DAVINCI_MMCCTL); 924 writel(temp, host->base + DAVINCI_MMCCTL);
925 udelay(10);
926}
927
928static void
929davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
930{
931 mmc_davinci_reset_ctrl(host, 1);
932 mmc_davinci_reset_ctrl(host, 0);
921} 933}
922 934
923static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 935static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1121#endif 1133#endif
1122static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1134static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1123{ 1135{
1124 /* DAT line portion is diabled and in reset state */
1125 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
1126 host->base + DAVINCI_MMCCTL);
1127
1128 /* CMD line portion is diabled and in reset state */
1129 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
1130 host->base + DAVINCI_MMCCTL);
1131 1136
1132 udelay(10); 1137 mmc_davinci_reset_ctrl(host, 1);
1133 1138
1134 writel(0, host->base + DAVINCI_MMCCLK); 1139 writel(0, host->base + DAVINCI_MMCCLK);
1135 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1140 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1137 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1142 writel(0x1FFF, host->base + DAVINCI_MMCTOR);
1138 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1143 writel(0xFFFF, host->base + DAVINCI_MMCTOD);
1139 1144
1140 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, 1145 mmc_davinci_reset_ctrl(host, 0);
1141 host->base + DAVINCI_MMCCTL);
1142 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
1143 host->base + DAVINCI_MMCCTL);
1144
1145 udelay(10);
1146} 1146}
1147 1147
1148static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1148static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1202 1202
1203 init_mmcsd_host(host); 1203 init_mmcsd_host(host);
1204 1204
1205 if (pdata->nr_sg)
1206 host->nr_sg = pdata->nr_sg - 1;
1207
1208 if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
1209 host->nr_sg = MAX_NR_SG;
1210
1205 host->use_dma = use_dma; 1211 host->use_dma = use_dma;
1206 host->irq = irq; 1212 host->irq = irq;
1207 1213
@@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1327} 1333}
1328 1334
1329#ifdef CONFIG_PM 1335#ifdef CONFIG_PM
1330static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) 1336static int davinci_mmcsd_suspend(struct device *dev)
1331{ 1337{
1338 struct platform_device *pdev = to_platform_device(dev);
1332 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1339 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1340 int ret;
1333 1341
1334 return mmc_suspend_host(host->mmc, msg); 1342 mmc_host_enable(host->mmc);
1343 ret = mmc_suspend_host(host->mmc);
1344 if (!ret) {
1345 writel(0, host->base + DAVINCI_MMCIM);
1346 mmc_davinci_reset_ctrl(host, 1);
1347 mmc_host_disable(host->mmc);
1348 clk_disable(host->clk);
1349 host->suspended = 1;
1350 } else {
1351 host->suspended = 0;
1352 mmc_host_disable(host->mmc);
1353 }
1354
1355 return ret;
1335} 1356}
1336 1357
1337static int davinci_mmcsd_resume(struct platform_device *pdev) 1358static int davinci_mmcsd_resume(struct device *dev)
1338{ 1359{
1360 struct platform_device *pdev = to_platform_device(dev);
1339 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1361 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1362 int ret;
1363
1364 if (!host->suspended)
1365 return 0;
1340 1366
1341 return mmc_resume_host(host->mmc); 1367 clk_enable(host->clk);
1368 mmc_host_enable(host->mmc);
1369
1370 mmc_davinci_reset_ctrl(host, 0);
1371 ret = mmc_resume_host(host->mmc);
1372 if (!ret)
1373 host->suspended = 0;
1374
1375 return ret;
1342} 1376}
1377
1378static const struct dev_pm_ops davinci_mmcsd_pm = {
1379 .suspend = davinci_mmcsd_suspend,
1380 .resume = davinci_mmcsd_resume,
1381};
1382
1383#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
1343#else 1384#else
1344#define davinci_mmcsd_suspend NULL 1385#define davinci_mmcsd_pm_ops NULL
1345#define davinci_mmcsd_resume NULL
1346#endif 1386#endif
1347 1387
1348static struct platform_driver davinci_mmcsd_driver = { 1388static struct platform_driver davinci_mmcsd_driver = {
1349 .driver = { 1389 .driver = {
1350 .name = "davinci_mmc", 1390 .name = "davinci_mmc",
1351 .owner = THIS_MODULE, 1391 .owner = THIS_MODULE,
1392 .pm = davinci_mmcsd_pm_ops,
1352 }, 1393 },
1353 .remove = __exit_p(davinci_mmcsd_remove), 1394 .remove = __exit_p(davinci_mmcsd_remove),
1354 .suspend = davinci_mmcsd_suspend,
1355 .resume = davinci_mmcsd_resume,
1356}; 1395};
1357 1396
1358static int __init davinci_mmcsd_init(void) 1397static int __init davinci_mmcsd_init(void)
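The davinci change swaps the legacy platform_driver .suspend/.resume pair for dev_pm_ops, whose callbacks take a struct device and no pm_message_t. A minimal sketch of that wiring, independent of the MMC specifics (all names hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	dev_dbg(&pdev->dev, "suspending\n");	/* quiesce hardware via drvdata here */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore clocks and controller state */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example-dev",
		.owner	= THIS_MODULE,
		.pm	= &example_pm_ops,
	},
};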
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index bf98d7cc928a..9a68ff4353a2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1115 int ret = 0; 1115 int ret = 0;
1116 1116
1117 if (mmc) 1117 if (mmc)
1118 ret = mmc_suspend_host(mmc, state); 1118 ret = mmc_suspend_host(mmc);
1119 1119
1120 return ret; 1120 return ret;
1121} 1121}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ff115d920888..4917af96bae1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -824,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
824 if (mmc) { 824 if (mmc) {
825 struct mmci_host *host = mmc_priv(mmc); 825 struct mmci_host *host = mmc_priv(mmc);
826 826
827 ret = mmc_suspend_host(mmc, state); 827 ret = mmc_suspend_host(mmc);
828 if (ret == 0) 828 if (ret == 0)
829 writel(0, host->base + MMCIMASK0); 829 writel(0, host->base + MMCIMASK0);
830 } 830 }
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 61f1d27fed3f..24e09454e522 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1327,7 +1327,7 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1327 disable_irq(host->stat_irq); 1327 disable_irq(host->stat_irq);
1328 1328
1329 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) 1329 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1330 rc = mmc_suspend_host(mmc, state); 1330 rc = mmc_suspend_host(mmc);
1331 if (!rc) 1331 if (!rc)
1332 msmsdcc_writel(host, 0, MMCIMASK0); 1332 msmsdcc_writel(host, 0, MMCIMASK0);
1333 if (host->clks_on) 1333 if (host->clks_on)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 34e23489811a..366eefa77c5a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
865 int ret = 0; 865 int ret = 0;
866 866
867 if (mmc) 867 if (mmc)
868 ret = mmc_suspend_host(mmc, state); 868 ret = mmc_suspend_host(mmc);
869 869
870 return ret; 870 return ret;
871} 871}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index ec18e3b60342..d9d4a72e0ec7 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -932,7 +932,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state)
932 int ret = 0; 932 int ret = 0;
933 933
934 if (mmc) 934 if (mmc)
935 ret = mmc_suspend_host(mmc, state); 935 ret = mmc_suspend_host(mmc);
936 936
937 return ret; 937 return ret;
938} 938}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 84d280406341..2b281680e320 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -39,30 +39,30 @@
39#include <plat/fpga.h> 39#include <plat/fpga.h>
40 40
41#define OMAP_MMC_REG_CMD 0x00 41#define OMAP_MMC_REG_CMD 0x00
42#define OMAP_MMC_REG_ARGL 0x04 42#define OMAP_MMC_REG_ARGL 0x01
43#define OMAP_MMC_REG_ARGH 0x08 43#define OMAP_MMC_REG_ARGH 0x02
44#define OMAP_MMC_REG_CON 0x0c 44#define OMAP_MMC_REG_CON 0x03
45#define OMAP_MMC_REG_STAT 0x10 45#define OMAP_MMC_REG_STAT 0x04
46#define OMAP_MMC_REG_IE 0x14 46#define OMAP_MMC_REG_IE 0x05
47#define OMAP_MMC_REG_CTO 0x18 47#define OMAP_MMC_REG_CTO 0x06
48#define OMAP_MMC_REG_DTO 0x1c 48#define OMAP_MMC_REG_DTO 0x07
49#define OMAP_MMC_REG_DATA 0x20 49#define OMAP_MMC_REG_DATA 0x08
50#define OMAP_MMC_REG_BLEN 0x24 50#define OMAP_MMC_REG_BLEN 0x09
51#define OMAP_MMC_REG_NBLK 0x28 51#define OMAP_MMC_REG_NBLK 0x0a
52#define OMAP_MMC_REG_BUF 0x2c 52#define OMAP_MMC_REG_BUF 0x0b
53#define OMAP_MMC_REG_SDIO 0x34 53#define OMAP_MMC_REG_SDIO 0x0d
54#define OMAP_MMC_REG_REV 0x3c 54#define OMAP_MMC_REG_REV 0x0f
55#define OMAP_MMC_REG_RSP0 0x40 55#define OMAP_MMC_REG_RSP0 0x10
56#define OMAP_MMC_REG_RSP1 0x44 56#define OMAP_MMC_REG_RSP1 0x11
57#define OMAP_MMC_REG_RSP2 0x48 57#define OMAP_MMC_REG_RSP2 0x12
58#define OMAP_MMC_REG_RSP3 0x4c 58#define OMAP_MMC_REG_RSP3 0x13
59#define OMAP_MMC_REG_RSP4 0x50 59#define OMAP_MMC_REG_RSP4 0x14
60#define OMAP_MMC_REG_RSP5 0x54 60#define OMAP_MMC_REG_RSP5 0x15
61#define OMAP_MMC_REG_RSP6 0x58 61#define OMAP_MMC_REG_RSP6 0x16
62#define OMAP_MMC_REG_RSP7 0x5c 62#define OMAP_MMC_REG_RSP7 0x17
63#define OMAP_MMC_REG_IOSR 0x60 63#define OMAP_MMC_REG_IOSR 0x18
64#define OMAP_MMC_REG_SYSC 0x64 64#define OMAP_MMC_REG_SYSC 0x19
65#define OMAP_MMC_REG_SYSS 0x68 65#define OMAP_MMC_REG_SYSS 0x1a
66 66
67#define OMAP_MMC_STAT_CARD_ERR (1 << 14) 67#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
68#define OMAP_MMC_STAT_CARD_IRQ (1 << 13) 68#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
@@ -78,8 +78,9 @@
78#define OMAP_MMC_STAT_CARD_BUSY (1 << 2) 78#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
79#define OMAP_MMC_STAT_END_OF_CMD (1 << 0) 79#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
80 80
81#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg) 81#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
82#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg) 82#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
83#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
83 84
84/* 85/*
85 * Command types 86 * Command types
@@ -133,6 +134,7 @@ struct mmc_omap_host {
133 int irq; 134 int irq;
134 unsigned char bus_mode; 135 unsigned char bus_mode;
135 unsigned char hw_bus_mode; 136 unsigned char hw_bus_mode;
137 unsigned int reg_shift;
136 138
137 struct work_struct cmd_abort_work; 139 struct work_struct cmd_abort_work;
138 unsigned abort:1; 140 unsigned abort:1;
@@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
680 host->data->bytes_xfered += n; 682 host->data->bytes_xfered += n;
681 683
682 if (write) { 684 if (write) {
683 __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); 685 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
684 } else { 686 } else {
685 __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); 687 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
686 } 688 }
687} 689}
688 690
@@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
900 int dst_port = 0; 902 int dst_port = 0;
901 int sync_dev = 0; 903 int sync_dev = 0;
902 904
903 data_addr = host->phys_base + OMAP_MMC_REG_DATA; 905 data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
904 frame = data->blksz; 906 frame = data->blksz;
905 count = sg_dma_len(sg); 907 count = sg_dma_len(sg);
906 908
@@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1493 } 1495 }
1494 } 1496 }
1495 1497
1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1499
1496 return 0; 1500 return 0;
1497 1501
1498err_plat_cleanup: 1502err_plat_cleanup:
@@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1557 struct mmc_omap_slot *slot; 1561 struct mmc_omap_slot *slot;
1558 1562
1559 slot = host->slots[i]; 1563 slot = host->slots[i];
1560 ret = mmc_suspend_host(slot->mmc, mesg); 1564 ret = mmc_suspend_host(slot->mmc);
1561 if (ret < 0) { 1565 if (ret < 0) {
1562 while (--i >= 0) { 1566 while (--i >= 0) {
1563 slot = host->slots[i]; 1567 slot = host->slots[i];
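The omap.c register defines above now hold word indices that are shifted per SoC: OMAP7xx spaces its MMC registers 2 bytes apart (shift 1), later parts use 4-byte spacing (shift 2). A worked example of the arithmetic, using the DATA register as in the patch; the macro is simplified here to take the shift directly rather than reading it from the host structure:

#define OMAP_MMC_REG_DATA		0x08			/* word index from the patch */
#define OMAP_MMC_REG(shift, reg)	(OMAP_MMC_REG_##reg << (shift))

/*
 * OMAP_MMC_REG(1, DATA) == 0x10 : byte offset on OMAP7xx (16-bit spacing)
 * OMAP_MMC_REG(2, DATA) == 0x20 : byte offset elsewhere  (32-bit spacing),
 *                                 matching the old hard-coded 0x20.
 */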
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf694c59e..b032828c6126 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@ struct omap_hsmmc_host {
157 */ 157 */
158 struct regulator *vcc; 158 struct regulator *vcc;
159 struct regulator *vcc_aux; 159 struct regulator *vcc_aux;
160 struct semaphore sem;
161 struct work_struct mmc_carddetect_work; 160 struct work_struct mmc_carddetect_work;
162 void __iomem *base; 161 void __iomem *base;
163 resource_size_t mapbase; 162 resource_size_t mapbase;
164 spinlock_t irq_lock; /* Prevent races with irq handler */ 163 spinlock_t irq_lock; /* Prevent races with irq handler */
165 unsigned long flags;
166 unsigned int id; 164 unsigned int id;
167 unsigned int dma_len; 165 unsigned int dma_len;
168 unsigned int dma_sg_idx; 166 unsigned int dma_sg_idx;
@@ -183,6 +181,7 @@ struct omap_hsmmc_host {
183 int protect_card; 181 int protect_card;
184 int reqs_blocked; 182 int reqs_blocked;
185 int use_reg; 183 int use_reg;
184 int req_in_progress;
186 185
187 struct omap_mmc_platform_data *pdata; 186 struct omap_mmc_platform_data *pdata;
188}; 187};
@@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
524 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); 523 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n");
525} 524}
526 525
526static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
527{
528 unsigned int irq_mask;
529
530 if (host->use_dma)
531 irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
532 else
533 irq_mask = INT_EN_MASK;
534
535 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
536 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
537 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
538}
539
540static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
541{
542 OMAP_HSMMC_WRITE(host->base, ISE, 0);
543 OMAP_HSMMC_WRITE(host->base, IE, 0);
544 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
545}
546
527#ifdef CONFIG_PM 547#ifdef CONFIG_PM
528 548
529/* 549/*
@@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
592 && time_before(jiffies, timeout)) 612 && time_before(jiffies, timeout))
593 ; 613 ;
594 614
595 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); 615 omap_hsmmc_disable_irq(host);
596 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
597 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
598 616
599 /* Do not initialize card-specific things if the power is off */ 617 /* Do not initialize card-specific things if the power is off */
600 if (host->power_mode == MMC_POWER_OFF) 618 if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)
697 return; 715 return;
698 716
699 disable_irq(host->irq); 717 disable_irq(host->irq);
718
719 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
700 OMAP_HSMMC_WRITE(host->base, CON, 720 OMAP_HSMMC_WRITE(host->base, CON,
701 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); 721 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
702 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); 722 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
762 mmc_hostname(host->mmc), cmd->opcode, cmd->arg); 782 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
763 host->cmd = cmd; 783 host->cmd = cmd;
764 784
765 /* 785 omap_hsmmc_enable_irq(host);
766 * Clear status bits and enable interrupts
767 */
768 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
769 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
770
771 if (host->use_dma)
772 OMAP_HSMMC_WRITE(host->base, IE,
773 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
774 else
775 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
776 786
777 host->response_busy = 0; 787 host->response_busy = 0;
778 if (cmd->flags & MMC_RSP_PRESENT) { 788 if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
806 if (host->use_dma) 816 if (host->use_dma)
807 cmdreg |= DMA_EN; 817 cmdreg |= DMA_EN;
808 818
809 /* 819 host->req_in_progress = 1;
810 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
811 * by the interrupt handler, otherwise (i.e. for a new request) it is
812 * unlocked here.
813 */
814 if (!in_interrupt())
815 spin_unlock_irqrestore(&host->irq_lock, host->flags);
816 820
817 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); 821 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
818 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); 822 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
827 return DMA_FROM_DEVICE; 831 return DMA_FROM_DEVICE;
828} 832}
829 833
834static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
835{
836 int dma_ch;
837
838 spin_lock(&host->irq_lock);
839 host->req_in_progress = 0;
840 dma_ch = host->dma_ch;
841 spin_unlock(&host->irq_lock);
842
843 omap_hsmmc_disable_irq(host);
844 /* Do not complete the request if DMA is still in progress */
845 if (mrq->data && host->use_dma && dma_ch != -1)
846 return;
847 host->mrq = NULL;
848 mmc_request_done(host->mmc, mrq);
849}
850
830/* 851/*
831 * Notify the transfer complete to MMC core 852 * Notify the transfer complete to MMC core
832 */ 853 */
@@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
843 return; 864 return;
844 } 865 }
845 866
846 host->mrq = NULL; 867 omap_hsmmc_request_done(host, mrq);
847 mmc_request_done(host->mmc, mrq);
848 return; 868 return;
849 } 869 }
850 870
851 host->data = NULL; 871 host->data = NULL;
852 872
853 if (host->use_dma && host->dma_ch != -1)
854 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
855 omap_hsmmc_get_dma_dir(host, data));
856
857 if (!data->error) 873 if (!data->error)
858 data->bytes_xfered += data->blocks * (data->blksz); 874 data->bytes_xfered += data->blocks * (data->blksz);
859 else 875 else
860 data->bytes_xfered = 0; 876 data->bytes_xfered = 0;
861 877
862 if (!data->stop) { 878 if (!data->stop) {
863 host->mrq = NULL; 879 omap_hsmmc_request_done(host, data->mrq);
864 mmc_request_done(host->mmc, data->mrq);
865 return; 880 return;
866 } 881 }
867 omap_hsmmc_start_command(host, data->stop, NULL); 882 omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
887 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); 902 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
888 } 903 }
889 } 904 }
890 if ((host->data == NULL && !host->response_busy) || cmd->error) { 905 if ((host->data == NULL && !host->response_busy) || cmd->error)
891 host->mrq = NULL; 906 omap_hsmmc_request_done(host, cmd->mrq);
892 mmc_request_done(host->mmc, cmd->mrq);
893 }
894} 907}
895 908
896/* 909/*
@@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
898 */ 911 */
899static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) 912static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
900{ 913{
914 int dma_ch;
915
901 host->data->error = errno; 916 host->data->error = errno;
902 917
903 if (host->use_dma && host->dma_ch != -1) { 918 spin_lock(&host->irq_lock);
919 dma_ch = host->dma_ch;
920 host->dma_ch = -1;
921 spin_unlock(&host->irq_lock);
922
923 if (host->use_dma && dma_ch != -1) {
904 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 924 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
905 omap_hsmmc_get_dma_dir(host, host->data)); 925 omap_hsmmc_get_dma_dir(host, host->data));
906 omap_free_dma(host->dma_ch); 926 omap_free_dma(dma_ch);
907 host->dma_ch = -1;
908 up(&host->sem);
909 } 927 }
910 host->data = NULL; 928 host->data = NULL;
911} 929}
@@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
967 __func__); 985 __func__);
968} 986}
969 987
970/* 988static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
971 * MMC controller IRQ handler
972 */
973static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
974{ 989{
975 struct omap_hsmmc_host *host = dev_id;
976 struct mmc_data *data; 990 struct mmc_data *data;
977 int end_cmd = 0, end_trans = 0, status; 991 int end_cmd = 0, end_trans = 0;
978 992
979 spin_lock(&host->irq_lock); 993 if (!host->req_in_progress) {
980 994 do {
981 if (host->mrq == NULL) { 995 OMAP_HSMMC_WRITE(host->base, STAT, status);
982 OMAP_HSMMC_WRITE(host->base, STAT, 996 /* Flush posted write */
983 OMAP_HSMMC_READ(host->base, STAT)); 997 status = OMAP_HSMMC_READ(host->base, STAT);
984 /* Flush posted write */ 998 } while (status & INT_EN_MASK);
985 OMAP_HSMMC_READ(host->base, STAT); 999 return;
986 spin_unlock(&host->irq_lock);
987 return IRQ_HANDLED;
988 } 1000 }
989 1001
990 data = host->data; 1002 data = host->data;
991 status = OMAP_HSMMC_READ(host->base, STAT);
992 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); 1003 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
993 1004
994 if (status & ERR) { 1005 if (status & ERR) {
@@ -1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1041 } 1052 }
1042 1053
1043 OMAP_HSMMC_WRITE(host->base, STAT, status); 1054 OMAP_HSMMC_WRITE(host->base, STAT, status);
1044 /* Flush posted write */
1045 OMAP_HSMMC_READ(host->base, STAT);
1046 1055
1047 if (end_cmd || ((status & CC) && host->cmd)) 1056 if (end_cmd || ((status & CC) && host->cmd))
1048 omap_hsmmc_cmd_done(host, host->cmd); 1057 omap_hsmmc_cmd_done(host, host->cmd);
1049 if ((end_trans || (status & TC)) && host->mrq) 1058 if ((end_trans || (status & TC)) && host->mrq)
1050 omap_hsmmc_xfer_done(host, data); 1059 omap_hsmmc_xfer_done(host, data);
1060}
1051 1061
1052 spin_unlock(&host->irq_lock); 1062/*
1063 * MMC controller IRQ handler
1064 */
1065static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1066{
1067 struct omap_hsmmc_host *host = dev_id;
1068 int status;
1069
1070 status = OMAP_HSMMC_READ(host->base, STAT);
1071 do {
1072 omap_hsmmc_do_irq(host, status);
1073 /* Flush posted write */
1074 status = OMAP_HSMMC_READ(host->base, STAT);
1075 } while (status & INT_EN_MASK);
1053 1076
1054 return IRQ_HANDLED; 1077 return IRQ_HANDLED;
1055} 1078}
@@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
1244/* 1267/*
1245 * DMA call back function 1268 * DMA call back function
1246 */ 1269 */
1247static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) 1270static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1248{ 1271{
1249 struct omap_hsmmc_host *host = data; 1272 struct omap_hsmmc_host *host = cb_data;
1273 struct mmc_data *data = host->mrq->data;
1274 int dma_ch, req_in_progress;
1250 1275
1251 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) 1276 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
1252 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); 1277 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
1253 1278
1254 if (host->dma_ch < 0) 1279 spin_lock(&host->irq_lock);
1280 if (host->dma_ch < 0) {
1281 spin_unlock(&host->irq_lock);
1255 return; 1282 return;
1283 }
1256 1284
1257 host->dma_sg_idx++; 1285 host->dma_sg_idx++;
1258 if (host->dma_sg_idx < host->dma_len) { 1286 if (host->dma_sg_idx < host->dma_len) {
1259 /* Fire up the next transfer. */ 1287 /* Fire up the next transfer. */
1260 omap_hsmmc_config_dma_params(host, host->data, 1288 omap_hsmmc_config_dma_params(host, data,
1261 host->data->sg + host->dma_sg_idx); 1289 data->sg + host->dma_sg_idx);
1290 spin_unlock(&host->irq_lock);
1262 return; 1291 return;
1263 } 1292 }
1264 1293
1265 omap_free_dma(host->dma_ch); 1294 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
1295 omap_hsmmc_get_dma_dir(host, data));
1296
1297 req_in_progress = host->req_in_progress;
1298 dma_ch = host->dma_ch;
1266 host->dma_ch = -1; 1299 host->dma_ch = -1;
1267 /* 1300 spin_unlock(&host->irq_lock);
1268 * DMA Callback: run in interrupt context. 1301
1269 * mutex_unlock will throw a kernel warning if used. 1302 omap_free_dma(dma_ch);
1270 */ 1303
1271 up(&host->sem); 1304 /* If DMA has finished after TC, complete the request */
1305 if (!req_in_progress) {
1306 struct mmc_request *mrq = host->mrq;
1307
1308 host->mrq = NULL;
1309 mmc_request_done(host->mmc, mrq);
1310 }
1272} 1311}
1273 1312
1274/* 1313/*
@@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
1277static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, 1316static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1278 struct mmc_request *req) 1317 struct mmc_request *req)
1279{ 1318{
1280 int dma_ch = 0, ret = 0, err = 1, i; 1319 int dma_ch = 0, ret = 0, i;
1281 struct mmc_data *data = req->data; 1320 struct mmc_data *data = req->data;
1282 1321
1283 /* Sanity check: all the SG entries must be aligned by block size. */ 1322 /* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1294 */ 1333 */
1295 return -EINVAL; 1334 return -EINVAL;
1296 1335
1297 /* 1336 BUG_ON(host->dma_ch != -1);
1298 * If for some reason the DMA transfer is still active,
1299 * we wait for timeout period and free the dma
1300 */
1301 if (host->dma_ch != -1) {
1302 set_current_state(TASK_UNINTERRUPTIBLE);
1303 schedule_timeout(100);
1304 if (down_trylock(&host->sem)) {
1305 omap_free_dma(host->dma_ch);
1306 host->dma_ch = -1;
1307 up(&host->sem);
1308 return err;
1309 }
1310 } else {
1311 if (down_trylock(&host->sem))
1312 return err;
1313 }
1314 1337
1315 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), 1338 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
1316 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); 1339 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1410 struct omap_hsmmc_host *host = mmc_priv(mmc); 1433 struct omap_hsmmc_host *host = mmc_priv(mmc);
1411 int err; 1434 int err;
1412 1435
1413 /* 1436 BUG_ON(host->req_in_progress);
1414 * Prevent races with the interrupt handler because of unexpected 1437 BUG_ON(host->dma_ch != -1);
1415 * interrupts, but not if we are already in interrupt context i.e. 1438 if (host->protect_card) {
1416 * retries. 1439 if (host->reqs_blocked < 3) {
1417 */ 1440 /*
1418 if (!in_interrupt()) { 1441 * Ensure the controller is left in a consistent
1419 spin_lock_irqsave(&host->irq_lock, host->flags); 1442 * state by resetting the command and data state
1420 /* 1443 * machines.
1421 * Protect the card from I/O if there is a possibility 1444 */
1422 * it can be removed. 1445 omap_hsmmc_reset_controller_fsm(host, SRD);
1423 */ 1446 omap_hsmmc_reset_controller_fsm(host, SRC);
1424 if (host->protect_card) { 1447 host->reqs_blocked += 1;
1425 if (host->reqs_blocked < 3) { 1448 }
1426 /* 1449 req->cmd->error = -EBADF;
1427 * Ensure the controller is left in a consistent 1450 if (req->data)
1428 * state by resetting the command and data state 1451 req->data->error = -EBADF;
1429 * machines. 1452 req->cmd->retries = 0;
1430 */ 1453 mmc_request_done(mmc, req);
1431 omap_hsmmc_reset_controller_fsm(host, SRD); 1454 return;
1432 omap_hsmmc_reset_controller_fsm(host, SRC); 1455 } else if (host->reqs_blocked)
1433 host->reqs_blocked += 1; 1456 host->reqs_blocked = 0;
1434 }
1435 req->cmd->error = -EBADF;
1436 if (req->data)
1437 req->data->error = -EBADF;
1438 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1439 mmc_request_done(mmc, req);
1440 return;
1441 } else if (host->reqs_blocked)
1442 host->reqs_blocked = 0;
1443 }
1444 WARN_ON(host->mrq != NULL); 1457 WARN_ON(host->mrq != NULL);
1445 host->mrq = req; 1458 host->mrq = req;
1446 err = omap_hsmmc_prepare_data(host, req); 1459 err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1449 if (req->data) 1462 if (req->data)
1450 req->data->error = err; 1463 req->data->error = err;
1451 host->mrq = NULL; 1464 host->mrq = NULL;
1452 if (!in_interrupt())
1453 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1454 mmc_request_done(mmc, req); 1465 mmc_request_done(mmc, req);
1455 return; 1466 return;
1456 } 1467 }
@@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2019 mmc->f_min = 400000; 2030 mmc->f_min = 400000;
2020 mmc->f_max = 52000000; 2031 mmc->f_max = 52000000;
2021 2032
2022 sema_init(&host->sem, 1);
2023 spin_lock_init(&host->irq_lock); 2033 spin_lock_init(&host->irq_lock);
2024 2034
2025 host->iclk = clk_get(&pdev->dev, "ick"); 2035 host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2162 } 2172 }
2163 } 2173 }
2164 2174
2165 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); 2175 omap_hsmmc_disable_irq(host);
2166 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
2167 2176
2168 mmc_host_lazy_disable(host->mmc); 2177 mmc_host_lazy_disable(host->mmc);
2169 2178
@@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2258} 2267}
2259 2268
2260#ifdef CONFIG_PM 2269#ifdef CONFIG_PM
2261static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) 2270static int omap_hsmmc_suspend(struct device *dev)
2262{ 2271{
2263 int ret = 0; 2272 int ret = 0;
2273 struct platform_device *pdev = to_platform_device(dev);
2264 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2274 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2275 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2265 2276
2266 if (host && host->suspended) 2277 if (host && host->suspended)
2267 return 0; 2278 return 0;
@@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2281 } 2292 }
2282 cancel_work_sync(&host->mmc_carddetect_work); 2293 cancel_work_sync(&host->mmc_carddetect_work);
2283 mmc_host_enable(host->mmc); 2294 mmc_host_enable(host->mmc);
2284 ret = mmc_suspend_host(host->mmc, state); 2295 ret = mmc_suspend_host(host->mmc);
2285 if (ret == 0) { 2296 if (ret == 0) {
2286 OMAP_HSMMC_WRITE(host->base, ISE, 0); 2297 omap_hsmmc_disable_irq(host);
2287 OMAP_HSMMC_WRITE(host->base, IE, 0);
2288
2289
2290 OMAP_HSMMC_WRITE(host->base, HCTL, 2298 OMAP_HSMMC_WRITE(host->base, HCTL,
2291 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2299 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2292 mmc_host_disable(host->mmc); 2300 mmc_host_disable(host->mmc);
@@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2310} 2318}
2311 2319
2312/* Routine to resume the MMC device */ 2320/* Routine to resume the MMC device */
2313static int omap_hsmmc_resume(struct platform_device *pdev) 2321static int omap_hsmmc_resume(struct device *dev)
2314{ 2322{
2315 int ret = 0; 2323 int ret = 0;
2324 struct platform_device *pdev = to_platform_device(dev);
2316 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2325 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2317 2326
2318 if (host && !host->suspended) 2327 if (host && !host->suspended)
@@ -2363,13 +2372,17 @@ clk_en_err:
2363#define omap_hsmmc_resume NULL 2372#define omap_hsmmc_resume NULL
2364#endif 2373#endif
2365 2374
2366static struct platform_driver omap_hsmmc_driver = { 2375static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2367 .remove = omap_hsmmc_remove,
2368 .suspend = omap_hsmmc_suspend, 2376 .suspend = omap_hsmmc_suspend,
2369 .resume = omap_hsmmc_resume, 2377 .resume = omap_hsmmc_resume,
2378};
2379
2380static struct platform_driver omap_hsmmc_driver = {
2381 .remove = omap_hsmmc_remove,
2370 .driver = { 2382 .driver = {
2371 .name = DRIVER_NAME, 2383 .name = DRIVER_NAME,
2372 .owner = THIS_MODULE, 2384 .owner = THIS_MODULE,
2385 .pm = &omap_hsmmc_dev_pm_ops,
2373 }, 2386 },
2374}; 2387};
2375 2388
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e4f00e70a749..0a4e43f37140 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev)
813 int ret = 0; 813 int ret = 0;
814 814
815 if (mmc) 815 if (mmc)
816 ret = mmc_suspend_host(mmc, PMSG_SUSPEND); 816 ret = mmc_suspend_host(mmc);
817 817
818 return ret; 818 return ret;
819} 819}
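The same mechanical change recurs throughout the host-driver hunks in this series: mmc_suspend_host() has dropped its pm_message_t argument, so callers now pass only the host. A minimal before/after sketch of the call site:

	/* before: the MMC core took (and ignored) a pm_message_t */
	ret = mmc_suspend_host(mmc, PMSG_SUSPEND);

	/* after: only the host is passed; the resume path is unchanged */
	ret = mmc_suspend_host(mmc);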
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fdf7689ae6c..2e16e0a90a5e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1881,9 +1881,8 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1881static int s3cmci_suspend(struct device *dev) 1881static int s3cmci_suspend(struct device *dev)
1882{ 1882{
1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); 1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1884 struct pm_message event = { PM_EVENT_SUSPEND };
1885 1884
1886 return mmc_suspend_host(mmc, event); 1885 return mmc_suspend_host(mmc);
1887} 1886}
1888 1887
1889static int s3cmci_resume(struct device *dev) 1888static int s3cmci_resume(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index 7802a543d8fc..a2e9820cd42f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
89{ 89{
90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); 90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
91 91
92 return mmc_suspend_host(host->mmc, state); 92 return mmc_suspend_host(host->mmc);
93} 93}
94 94
95static int sdhci_of_resume(struct of_device *ofdev) 95static int sdhci_of_resume(struct of_device *ofdev)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d5b11a17e648..c8623de13af3 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = {
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | 129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET, 130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = { 131 .ops = {
132 .readl = sdhci_be32bs_readl, 132 .read_l = sdhci_be32bs_readl,
133 .readw = esdhc_readw, 133 .read_w = esdhc_readw,
134 .readb = sdhci_be32bs_readb, 134 .read_b = sdhci_be32bs_readb,
135 .writel = sdhci_be32bs_writel, 135 .write_l = sdhci_be32bs_writel,
136 .writew = esdhc_writew, 136 .write_w = esdhc_writew,
137 .writeb = esdhc_writeb, 137 .write_b = esdhc_writeb,
138 .set_clock = esdhc_set_clock, 138 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma, 139 .enable_dma = esdhc_enable_dma,
140 .get_max_clock = esdhc_get_max_clock, 140 .get_max_clock = esdhc_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 35117f3ed757..68ddb7546ae2 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE, 56 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = { 57 .ops = {
58 .readl = sdhci_be32bs_readl, 58 .read_l = sdhci_be32bs_readl,
59 .readw = sdhci_be32bs_readw, 59 .read_w = sdhci_be32bs_readw,
60 .readb = sdhci_be32bs_readb, 60 .read_b = sdhci_be32bs_readb,
61 .writel = sdhci_hlwd_writel, 61 .write_l = sdhci_hlwd_writel,
62 .writew = sdhci_hlwd_writew, 62 .write_w = sdhci_hlwd_writew,
63 .writeb = sdhci_hlwd_writeb, 63 .write_b = sdhci_hlwd_writeb,
64 }, 64 },
65}; 65};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6701af629c30..65483fdea45b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); 628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
629 if (IS_ERR(host)) { 629 if (IS_ERR(host)) {
630 dev_err(&pdev->dev, "cannot allocate host\n"); 630 dev_err(&pdev->dev, "cannot allocate host\n");
631 return ERR_PTR(PTR_ERR(host)); 631 return ERR_CAST(host);
632 } 632 }
633 633
634 slot = sdhci_priv(host); 634 slot = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 297f40ae6ad5..b6ee0d719698 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
29#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
30 30
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/sdhci-pltfm.h>
32 33
33#include "sdhci.h" 34#include "sdhci.h"
34 35
@@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = {
49 50
50static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) 51static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
51{ 52{
53 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
52 struct sdhci_host *host; 54 struct sdhci_host *host;
53 struct resource *iomem; 55 struct resource *iomem;
54 int ret; 56 int ret;
55 57
56 BUG_ON(pdev == NULL);
57
58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59 if (!iomem) { 59 if (!iomem) {
60 ret = -ENOMEM; 60 ret = -ENOMEM;
61 goto err; 61 goto err;
62 } 62 }
63 63
64 if (resource_size(iomem) != 0x100) 64 if (resource_size(iomem) < 0x100)
65 dev_err(&pdev->dev, "Invalid iomem size. You may " 65 dev_err(&pdev->dev, "Invalid iomem size. You may "
66 "experience problems.\n"); 66 "experience problems.\n");
67 67
@@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
76 } 76 }
77 77
78 host->hw_name = "platform"; 78 host->hw_name = "platform";
79 host->ops = &sdhci_pltfm_ops; 79 if (pdata && pdata->ops)
80 host->ops = pdata->ops;
81 else
82 host->ops = &sdhci_pltfm_ops;
83 if (pdata)
84 host->quirks = pdata->quirks;
80 host->irq = platform_get_irq(pdev, 0); 85 host->irq = platform_get_irq(pdev, 0);
81 86
82 if (!request_mem_region(iomem->start, resource_size(iomem), 87 if (!request_mem_region(iomem->start, resource_size(iomem),
@@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
93 goto err_remap; 98 goto err_remap;
94 } 99 }
95 100
101 if (pdata && pdata->init) {
102 ret = pdata->init(host);
103 if (ret)
104 goto err_plat_init;
105 }
106
96 ret = sdhci_add_host(host); 107 ret = sdhci_add_host(host);
97 if (ret) 108 if (ret)
98 goto err_add_host; 109 goto err_add_host;
@@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
102 return 0; 113 return 0;
103 114
104err_add_host: 115err_add_host:
116 if (pdata && pdata->exit)
117 pdata->exit(host);
118err_plat_init:
105 iounmap(host->ioaddr); 119 iounmap(host->ioaddr);
106err_remap: 120err_remap:
107 release_mem_region(iomem->start, resource_size(iomem)); 121 release_mem_region(iomem->start, resource_size(iomem));
@@ -114,6 +128,7 @@ err:
114 128
115static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) 129static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
116{ 130{
131 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
117 struct sdhci_host *host = platform_get_drvdata(pdev); 132 struct sdhci_host *host = platform_get_drvdata(pdev);
118 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 133 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
119 int dead; 134 int dead;
@@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
125 dead = 1; 140 dead = 1;
126 141
127 sdhci_remove_host(host, dead); 142 sdhci_remove_host(host, dead);
143 if (pdata && pdata->exit)
144 pdata->exit(host);
128 iounmap(host->ioaddr); 145 iounmap(host->ioaddr);
129 release_mem_region(iomem->start, resource_size(iomem)); 146 release_mem_region(iomem->start, resource_size(iomem));
130 sdhci_free_host(host); 147 sdhci_free_host(host);
@@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
165MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 182MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
166MODULE_LICENSE("GPL v2"); 183MODULE_LICENSE("GPL v2");
167MODULE_ALIAS("platform:sdhci"); 184MODULE_ALIAS("platform:sdhci");
168
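With the change above, sdhci-pltfm consults an optional struct sdhci_pltfm_data passed as platform_data (see the new <linux/sdhci-pltfm.h> include): custom ops, quirks, and init/exit hooks. A hedged board-file sketch; the foo_* hooks, their bodies, and the chosen quirk are illustrative, not from this patch:

static int foo_sdhci_init(struct sdhci_host *host)
{
	/* e.g. enable a board-level clock or pin mux (hypothetical) */
	return 0;
}

static void foo_sdhci_exit(struct sdhci_host *host)
{
	/* undo whatever foo_sdhci_init() set up (hypothetical) */
}

static struct sdhci_pltfm_data foo_sdhci_pdata = {
	.quirks	= SDHCI_QUIRK_BROKEN_DMA,	/* example quirk only */
	.init	= foo_sdhci_init,
	.exit	= foo_sdhci_exit,
	/* .ops left NULL: the driver falls back to its own sdhci_pltfm_ops */
};

static struct platform_device foo_sdhci_device = {
	.name	= "sdhci",
	.id	= -1,
	.dev	= {
		.platform_data	= &foo_sdhci_pdata,
	},
	/* .resource / .num_resources: the controller's MMIO region and IRQ */
};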
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2136794c0cfa..af217924a76e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
317 host->irq = irq; 317 host->irq = irq;
318 318
319 /* Setup quirks for the controller */ 319 /* Setup quirks for the controller */
320 320 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
321 /* Currently with ADMA enabled we are getting some length
322 * interrupts that are not being dealt with, do disable
323 * ADMA until this is sorted out. */
324 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
325 host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
326 321
327#ifndef CONFIG_MMC_SDHCI_S3C_DMA 322#ifndef CONFIG_MMC_SDHCI_S3C_DMA
328 323
@@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
330 * support as well. */ 325 * support as well. */
331 host->quirks |= SDHCI_QUIRK_BROKEN_DMA; 326 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
332 327
333 /* PIO currently has problems with multi-block IO */
334 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
335
336#endif /* CONFIG_MMC_SDHCI_S3C_DMA */ 328#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
337 329
338 /* It seems we do not get an DATA transfer complete on non-busy 330 /* It seems we do not get an DATA transfer complete on non-busy
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 000000000000..d70c54c7b70a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,298 @@
1/*
2 * drivers/mmc/host/sdhci-spear.c
3 *
 4 * Support for SDHCI platform devices of the SPEAr SoC family
5 *
6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * Inspired by sdhci-pltfm.c
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/gpio.h>
19#include <linux/highmem.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/sdhci-spear.h>
26#include <linux/io.h>
27#include "sdhci.h"
28
29struct spear_sdhci {
30 struct clk *clk;
31 struct sdhci_plat_data *data;
32};
33
34/* sdhci ops */
35static struct sdhci_ops sdhci_pltfm_ops = {
36 /* Nothing to do for now. */
37};
38
39/* gpio card detection interrupt handler */
40static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
41{
42 struct platform_device *pdev = dev_id;
43 struct sdhci_host *host = platform_get_drvdata(pdev);
44 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
45 unsigned long gpio_irq_type;
46 int val;
47
48 val = gpio_get_value(sdhci->data->card_int_gpio);
49
50 /* val == 1 -> card removed, val == 0 -> card inserted */
51 /* if card removed - set irq for low level, else vice versa */
52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
53 set_irq_type(irq, gpio_irq_type);
54
55 if (sdhci->data->card_power_gpio >= 0) {
56 if (!sdhci->data->power_always_enb) {
57 /* if card inserted, give power, otherwise remove it */
 58 val = sdhci->data->power_active_high ? !val : val;
59 gpio_set_value(sdhci->data->card_power_gpio, val);
60 }
61 }
62
63 /* inform sdhci driver about card insertion/removal */
64 tasklet_schedule(&host->card_tasklet);
65
66 return IRQ_HANDLED;
67}
68
69static int __devinit sdhci_probe(struct platform_device *pdev)
70{
71 struct sdhci_host *host;
72 struct resource *iomem;
73 struct spear_sdhci *sdhci;
74 int ret;
75
76 BUG_ON(pdev == NULL);
77
78 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
79 if (!iomem) {
80 ret = -ENOMEM;
81 dev_dbg(&pdev->dev, "memory resource not defined\n");
82 goto err;
83 }
84
85 if (!request_mem_region(iomem->start, resource_size(iomem),
86 "spear-sdhci")) {
87 ret = -EBUSY;
88 dev_dbg(&pdev->dev, "cannot request region\n");
89 goto err;
90 }
91
92 sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
93 if (!sdhci) {
94 ret = -ENOMEM;
95 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
96 goto err_kzalloc;
97 }
98
99 /* clk enable */
100 sdhci->clk = clk_get(&pdev->dev, NULL);
101 if (IS_ERR(sdhci->clk)) {
102 ret = PTR_ERR(sdhci->clk);
103 dev_dbg(&pdev->dev, "Error getting clock\n");
104 goto err_clk_get;
105 }
106
107 ret = clk_enable(sdhci->clk);
108 if (ret) {
109 dev_dbg(&pdev->dev, "Error enabling clock\n");
110 goto err_clk_enb;
111 }
112
113 /* overwrite platform_data */
114 sdhci->data = dev_get_platdata(&pdev->dev);
115 pdev->dev.platform_data = sdhci;
116
117 if (pdev->dev.parent)
118 host = sdhci_alloc_host(pdev->dev.parent, 0);
119 else
120 host = sdhci_alloc_host(&pdev->dev, 0);
121
122 if (IS_ERR(host)) {
123 ret = PTR_ERR(host);
124 dev_dbg(&pdev->dev, "error allocating host\n");
125 goto err_alloc_host;
126 }
127
128 host->hw_name = "sdhci";
129 host->ops = &sdhci_pltfm_ops;
130 host->irq = platform_get_irq(pdev, 0);
131 host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
132
133 host->ioaddr = ioremap(iomem->start, resource_size(iomem));
134 if (!host->ioaddr) {
135 ret = -ENOMEM;
136 dev_dbg(&pdev->dev, "failed to remap registers\n");
137 goto err_ioremap;
138 }
139
140 ret = sdhci_add_host(host);
141 if (ret) {
142 dev_dbg(&pdev->dev, "error adding host\n");
143 goto err_add_host;
144 }
145
146 platform_set_drvdata(pdev, host);
147
148 /*
 149	 * Using GPIOs for sdhci power control and card-detect interrupts is
 150	 * optional. If sdhci->data is NULL, the controller's native lines are
 151	 * used; otherwise the GPIO lines are.
 152	 * If a GPIO is selected for power control, power should be disabled
 153	 * after card removal and enabled again when the card-insertion
 154	 * interrupt occurs.
 155	 */
156 if (!sdhci->data)
157 return 0;
158
159 if (sdhci->data->card_power_gpio >= 0) {
160 int val = 0;
161
162 ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
163 if (ret < 0) {
164 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
165 sdhci->data->card_power_gpio);
166 goto err_pgpio_request;
167 }
168
169 if (sdhci->data->power_always_enb)
170 val = sdhci->data->power_active_high;
171 else
172 val = !sdhci->data->power_active_high;
173
174 ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
175 if (ret) {
176 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
177 sdhci->data->card_power_gpio);
178 goto err_pgpio_direction;
179 }
180
181 gpio_set_value(sdhci->data->card_power_gpio, 1);
182 }
183
184 if (sdhci->data->card_int_gpio >= 0) {
185 ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
186 if (ret < 0) {
187 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
188 sdhci->data->card_int_gpio);
189 goto err_igpio_request;
190 }
191
192 ret = gpio_direction_input(sdhci->data->card_int_gpio);
193 if (ret) {
194 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
195 sdhci->data->card_int_gpio);
196 goto err_igpio_direction;
197 }
198 ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
199 sdhci_gpio_irq, IRQF_TRIGGER_LOW,
200 mmc_hostname(host->mmc), pdev);
201 if (ret) {
202 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
203 sdhci->data->card_int_gpio);
204 goto err_igpio_request_irq;
205 }
206
207 }
208
209 return 0;
210
211err_igpio_request_irq:
212err_igpio_direction:
213 if (sdhci->data->card_int_gpio >= 0)
214 gpio_free(sdhci->data->card_int_gpio);
215err_igpio_request:
216err_pgpio_direction:
217 if (sdhci->data->card_power_gpio >= 0)
218 gpio_free(sdhci->data->card_power_gpio);
219err_pgpio_request:
220 platform_set_drvdata(pdev, NULL);
221 sdhci_remove_host(host, 1);
222err_add_host:
223 iounmap(host->ioaddr);
224err_ioremap:
225 sdhci_free_host(host);
226err_alloc_host:
227 clk_disable(sdhci->clk);
228err_clk_enb:
229 clk_put(sdhci->clk);
230err_clk_get:
231 kfree(sdhci);
232err_kzalloc:
233 release_mem_region(iomem->start, resource_size(iomem));
234err:
235 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
236 return ret;
237}
238
239static int __devexit sdhci_remove(struct platform_device *pdev)
240{
241 struct sdhci_host *host = platform_get_drvdata(pdev);
242 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
243 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
244 int dead;
245 u32 scratch;
246
247 if (sdhci->data) {
248 if (sdhci->data->card_int_gpio >= 0) {
249 free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
250 gpio_free(sdhci->data->card_int_gpio);
251 }
252
253 if (sdhci->data->card_power_gpio >= 0)
254 gpio_free(sdhci->data->card_power_gpio);
255 }
256
257 platform_set_drvdata(pdev, NULL);
258 dead = 0;
259 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
260 if (scratch == (u32)-1)
261 dead = 1;
262
263 sdhci_remove_host(host, dead);
264 iounmap(host->ioaddr);
265 sdhci_free_host(host);
266 clk_disable(sdhci->clk);
267 clk_put(sdhci->clk);
268 kfree(sdhci);
269 if (iomem)
270 release_mem_region(iomem->start, resource_size(iomem));
271
272 return 0;
273}
274
275static struct platform_driver sdhci_driver = {
276 .driver = {
277 .name = "sdhci",
278 .owner = THIS_MODULE,
279 },
280 .probe = sdhci_probe,
281 .remove = __devexit_p(sdhci_remove),
282};
283
284static int __init sdhci_init(void)
285{
286 return platform_driver_register(&sdhci_driver);
287}
288module_init(sdhci_init);
289
290static void __exit sdhci_exit(void)
291{
292 platform_driver_unregister(&sdhci_driver);
293}
294module_exit(sdhci_exit);
295
296MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
297MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
298MODULE_LICENSE("GPL v2");
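The SPEAr glue above reads its GPIO configuration from a struct sdhci_plat_data (declared in <linux/mmc/sdhci-spear.h>). A hedged board-code sketch; the GPIO numbers and polarity are hypothetical:

static struct sdhci_plat_data spear_sdhci_pdata = {
	.card_int_gpio		= 12,	/* hypothetical card-detect GPIO */
	.card_power_gpio	= 13,	/* hypothetical slot-power GPIO */
	.power_active_high	= 1,	/* slot power enabled when GPIO is high */
	.power_always_enb	= 0,	/* toggle power on insert/removal */
};

Either GPIO can be set to a negative value to fall back to the controller's native card-detect or power lines, as the probe code above checks with ">= 0".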
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9d4fdfa685e5..c6d1bd8d4ac4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
496 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 496 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
497 } 497 }
498 498
499 /* 499 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
500 * Add a terminating entry. 500 /*
501 */ 501 * Mark the last descriptor as the terminating descriptor
502 */
503 if (desc != host->adma_desc) {
504 desc -= 8;
505 desc[0] |= 0x2; /* end */
506 }
507 } else {
508 /*
509 * Add a terminating entry.
510 */
502 511
503 /* nop, end, valid */ 512 /* nop, end, valid */
504 sdhci_set_adma_desc(desc, 0, 0, 0x3); 513 sdhci_set_adma_desc(desc, 0, 0, 0x3);
514 }
505 515
506 /* 516 /*
507 * Resync align buffer as we might have changed it. 517 * Resync align buffer as we might have changed it.
@@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1587 1597
1588 sdhci_disable_card_detection(host); 1598 sdhci_disable_card_detection(host);
1589 1599
1590 ret = mmc_suspend_host(host->mmc, state); 1600 ret = mmc_suspend_host(host->mmc);
1591 if (ret) 1601 if (ret)
1592 return ret; 1602 return ret;
1593 1603
@@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host)
1744 host->max_clk = 1754 host->max_clk =
1745 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1755 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1746 host->max_clk *= 1000000; 1756 host->max_clk *= 1000000;
1747 if (host->max_clk == 0) { 1757 if (host->max_clk == 0 || host->quirks &
1758 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
1748 if (!host->ops->get_max_clock) { 1759 if (!host->ops->get_max_clock) {
1749 printk(KERN_ERR 1760 printk(KERN_ERR
1750 "%s: Hardware doesn't specify base clock " 1761 "%s: Hardware doesn't specify base clock "
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 842f46f94284..c8468134adc9 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -127,7 +127,7 @@
127#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ 127#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
128 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ 128 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
129 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ 129 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
130 SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR) 130 SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR)
131#define SDHCI_INT_ALL_MASK ((unsigned int)-1) 131#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
132 132
133#define SDHCI_ACMD12_ERR 0x3C 133#define SDHCI_ACMD12_ERR 0x3C
@@ -236,6 +236,10 @@ struct sdhci_host {
236#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) 236#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
237/* Controller uses SDCLK instead of TMCLK for data timeouts */ 237/* Controller uses SDCLK instead of TMCLK for data timeouts */
238#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) 238#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
239/* Controller reports wrong base clock capability */
240#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
241/* Controller cannot support End Attribute in NOP ADMA descriptor */
242#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
239 243
240 int irq; /* Device IRQ */ 244 int irq; /* Device IRQ */
241 void __iomem * ioaddr; /* Mapped address */ 245 void __iomem * ioaddr; /* Mapped address */
@@ -294,12 +298,12 @@ struct sdhci_host {
294 298
295struct sdhci_ops { 299struct sdhci_ops {
296#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 300#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
297 u32 (*readl)(struct sdhci_host *host, int reg); 301 u32 (*read_l)(struct sdhci_host *host, int reg);
298 u16 (*readw)(struct sdhci_host *host, int reg); 302 u16 (*read_w)(struct sdhci_host *host, int reg);
299 u8 (*readb)(struct sdhci_host *host, int reg); 303 u8 (*read_b)(struct sdhci_host *host, int reg);
300 void (*writel)(struct sdhci_host *host, u32 val, int reg); 304 void (*write_l)(struct sdhci_host *host, u32 val, int reg);
301 void (*writew)(struct sdhci_host *host, u16 val, int reg); 305 void (*write_w)(struct sdhci_host *host, u16 val, int reg);
302 void (*writeb)(struct sdhci_host *host, u8 val, int reg); 306 void (*write_b)(struct sdhci_host *host, u8 val, int reg);
303#endif 307#endif
304 308
305 void (*set_clock)(struct sdhci_host *host, unsigned int clock); 309 void (*set_clock)(struct sdhci_host *host, unsigned int clock);
@@ -314,48 +318,48 @@ struct sdhci_ops {
314 318
315static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) 319static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
316{ 320{
317 if (unlikely(host->ops->writel)) 321 if (unlikely(host->ops->write_l))
318 host->ops->writel(host, val, reg); 322 host->ops->write_l(host, val, reg);
319 else 323 else
320 writel(val, host->ioaddr + reg); 324 writel(val, host->ioaddr + reg);
321} 325}
322 326
323static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) 327static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
324{ 328{
325 if (unlikely(host->ops->writew)) 329 if (unlikely(host->ops->write_w))
326 host->ops->writew(host, val, reg); 330 host->ops->write_w(host, val, reg);
327 else 331 else
328 writew(val, host->ioaddr + reg); 332 writew(val, host->ioaddr + reg);
329} 333}
330 334
331static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) 335static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
332{ 336{
333 if (unlikely(host->ops->writeb)) 337 if (unlikely(host->ops->write_b))
334 host->ops->writeb(host, val, reg); 338 host->ops->write_b(host, val, reg);
335 else 339 else
336 writeb(val, host->ioaddr + reg); 340 writeb(val, host->ioaddr + reg);
337} 341}
338 342
339static inline u32 sdhci_readl(struct sdhci_host *host, int reg) 343static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
340{ 344{
341 if (unlikely(host->ops->readl)) 345 if (unlikely(host->ops->read_l))
342 return host->ops->readl(host, reg); 346 return host->ops->read_l(host, reg);
343 else 347 else
344 return readl(host->ioaddr + reg); 348 return readl(host->ioaddr + reg);
345} 349}
346 350
347static inline u16 sdhci_readw(struct sdhci_host *host, int reg) 351static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
348{ 352{
349 if (unlikely(host->ops->readw)) 353 if (unlikely(host->ops->read_w))
350 return host->ops->readw(host, reg); 354 return host->ops->read_w(host, reg);
351 else 355 else
352 return readw(host->ioaddr + reg); 356 return readw(host->ioaddr + reg);
353} 357}
354 358
355static inline u8 sdhci_readb(struct sdhci_host *host, int reg) 359static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
356{ 360{
357 if (unlikely(host->ops->readb)) 361 if (unlikely(host->ops->read_b))
358 return host->ops->readb(host, reg); 362 return host->ops->read_b(host, reg);
359 else 363 else
360 return readb(host->ioaddr + reg); 364 return readb(host->ioaddr + reg);
361} 365}
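The accessor members of struct sdhci_ops are renamed here (readl becomes read_l, and so on), presumably to avoid clashing with the readl()/writel() macros, and the inline wrappers above dispatch to them when CONFIG_MMC_SDHCI_IO_ACCESSORS is enabled. A hedged sketch of a glue driver supplying big-endian accessors in the style of the esdhc/hlwd tables earlier in this series; the foo_* names are placeholders:

static u32 foo_readl(struct sdhci_host *host, int reg)
{
	return ioread32be(host->ioaddr + reg);	/* hypothetical BE controller */
}

static void foo_writel(struct sdhci_host *host, u32 val, int reg)
{
	iowrite32be(val, host->ioaddr + reg);
}

static struct sdhci_ops foo_sdhci_ops = {
	.read_l		= foo_readl,
	.write_l	= foo_writel,
	/* .read_w/.read_b/.write_w/.write_b supplied the same way if needed */
};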
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index cb41e9c3ac07..e7507af3856e 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
519{ 519{
520 struct mmc_host *mmc = link->priv; 520 struct mmc_host *mmc = link->priv;
521 dev_dbg(&link->dev, "suspend\n"); 521 dev_dbg(&link->dev, "suspend\n");
522 mmc_suspend_host(mmc, PMSG_SUSPEND); 522 mmc_suspend_host(mmc);
523 return 0; 523 return 0;
524} 524}
525 525
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 000000000000..eb97830c0344
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,965 @@
1/*
2 * MMCIF eMMC driver.
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Yusuke Goda <yusuke.goda.sx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License.
10 *
11 *
12 * TODO
13 * 1. DMA
14 * 2. Power management
15 * 3. Handle MMC errors better
16 *
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/mmc/host.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/core.h>
23#include <linux/mmc/mmc.h>
24#include <linux/mmc/sdio.h>
25#include <linux/delay.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <linux/mmc/sh_mmcif.h>
29
30#define DRIVER_NAME "sh_mmcif"
31#define DRIVER_VERSION "2010-04-28"
32
33#define MMCIF_CE_CMD_SET 0x00000000
34#define MMCIF_CE_ARG 0x00000008
35#define MMCIF_CE_ARG_CMD12 0x0000000C
36#define MMCIF_CE_CMD_CTRL 0x00000010
37#define MMCIF_CE_BLOCK_SET 0x00000014
38#define MMCIF_CE_CLK_CTRL 0x00000018
39#define MMCIF_CE_BUF_ACC 0x0000001C
40#define MMCIF_CE_RESP3 0x00000020
41#define MMCIF_CE_RESP2 0x00000024
42#define MMCIF_CE_RESP1 0x00000028
43#define MMCIF_CE_RESP0 0x0000002C
44#define MMCIF_CE_RESP_CMD12 0x00000030
45#define MMCIF_CE_DATA 0x00000034
46#define MMCIF_CE_INT 0x00000040
47#define MMCIF_CE_INT_MASK 0x00000044
48#define MMCIF_CE_HOST_STS1 0x00000048
49#define MMCIF_CE_HOST_STS2 0x0000004C
50#define MMCIF_CE_VERSION 0x0000007C
51
52/* CE_CMD_SET */
53#define CMD_MASK 0x3f000000
54#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
55#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
56#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
57#define CMD_SET_RBSY (1 << 21) /* R1b */
58#define CMD_SET_CCSEN (1 << 20)
59#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
60#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
61#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
62#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
63#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
64#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
65#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
66#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/
67#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/
68#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/
69#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/
70#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
 71#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
72#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
73#define CMD_SET_CCSH (1 << 5)
74#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
75#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
76#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
77
78/* CE_CMD_CTRL */
79#define CMD_CTRL_BREAK (1 << 0)
80
81/* CE_BLOCK_SET */
82#define BLOCK_SIZE_MASK 0x0000ffff
83
84/* CE_CLK_CTRL */
85#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
86#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
87#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
88#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
89#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
90 (1 << 9) | (1 << 8)) /* resp busy timeout */
91#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
92 (1 << 5) | (1 << 4)) /* read/write timeout */
93#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
94 (1 << 1) | (1 << 0)) /* ccs timeout */
95
96/* CE_BUF_ACC */
97#define BUF_ACC_DMAWEN (1 << 25)
98#define BUF_ACC_DMAREN (1 << 24)
99#define BUF_ACC_BUSW_32 (0 << 17)
100#define BUF_ACC_BUSW_16 (1 << 17)
101#define BUF_ACC_ATYP (1 << 16)
102
103/* CE_INT */
104#define INT_CCSDE (1 << 29)
105#define INT_CMD12DRE (1 << 26)
106#define INT_CMD12RBE (1 << 25)
107#define INT_CMD12CRE (1 << 24)
108#define INT_DTRANE (1 << 23)
109#define INT_BUFRE (1 << 22)
110#define INT_BUFWEN (1 << 21)
111#define INT_BUFREN (1 << 20)
112#define INT_CCSRCV (1 << 19)
113#define INT_RBSYE (1 << 17)
114#define INT_CRSPE (1 << 16)
115#define INT_CMDVIO (1 << 15)
116#define INT_BUFVIO (1 << 14)
117#define INT_WDATERR (1 << 11)
118#define INT_RDATERR (1 << 10)
119#define INT_RIDXERR (1 << 9)
120#define INT_RSPERR (1 << 8)
121#define INT_CCSTO (1 << 5)
122#define INT_CRCSTO (1 << 4)
123#define INT_WDATTO (1 << 3)
124#define INT_RDATTO (1 << 2)
125#define INT_RBSYTO (1 << 1)
126#define INT_RSPTO (1 << 0)
127#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
128 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
129 INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
130 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
131
132/* CE_INT_MASK */
133#define MASK_ALL 0x00000000
134#define MASK_MCCSDE (1 << 29)
135#define MASK_MCMD12DRE (1 << 26)
136#define MASK_MCMD12RBE (1 << 25)
137#define MASK_MCMD12CRE (1 << 24)
138#define MASK_MDTRANE (1 << 23)
139#define MASK_MBUFRE (1 << 22)
140#define MASK_MBUFWEN (1 << 21)
141#define MASK_MBUFREN (1 << 20)
142#define MASK_MCCSRCV (1 << 19)
143#define MASK_MRBSYE (1 << 17)
144#define MASK_MCRSPE (1 << 16)
145#define MASK_MCMDVIO (1 << 15)
146#define MASK_MBUFVIO (1 << 14)
147#define MASK_MWDATERR (1 << 11)
148#define MASK_MRDATERR (1 << 10)
149#define MASK_MRIDXERR (1 << 9)
150#define MASK_MRSPERR (1 << 8)
151#define MASK_MCCSTO (1 << 5)
152#define MASK_MCRCSTO (1 << 4)
153#define MASK_MWDATTO (1 << 3)
154#define MASK_MRDATTO (1 << 2)
155#define MASK_MRBSYTO (1 << 1)
156#define MASK_MRSPTO (1 << 0)
157
158/* CE_HOST_STS1 */
159#define STS1_CMDSEQ (1 << 31)
160
161/* CE_HOST_STS2 */
162#define STS2_CRCSTE (1 << 31)
163#define STS2_CRC16E (1 << 30)
164#define STS2_AC12CRCE (1 << 29)
165#define STS2_RSPCRC7E (1 << 28)
166#define STS2_CRCSTEBE (1 << 27)
167#define STS2_RDATEBE (1 << 26)
168#define STS2_AC12REBE (1 << 25)
169#define STS2_RSPEBE (1 << 24)
170#define STS2_AC12IDXE (1 << 23)
171#define STS2_RSPIDXE (1 << 22)
172#define STS2_CCSTO (1 << 15)
173#define STS2_RDATTO (1 << 14)
174#define STS2_DATBSYTO (1 << 13)
175#define STS2_CRCSTTO (1 << 12)
176#define STS2_AC12BSYTO (1 << 11)
177#define STS2_RSPBSYTO (1 << 10)
178#define STS2_AC12RSPTO (1 << 9)
179#define STS2_RSPTO (1 << 8)
180#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
181 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
182#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
183 STS2_DATBSYTO | STS2_CRCSTTO | \
184 STS2_AC12BSYTO | STS2_RSPBSYTO | \
185 STS2_AC12RSPTO | STS2_RSPTO)
186
187/* CE_VERSION */
188#define SOFT_RST_ON (1 << 31)
189#define SOFT_RST_OFF (0 << 31)
190
191#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
192#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
193#define CLKDEV_INIT 400000 /* 400 KHz */
194
195struct sh_mmcif_host {
196 struct mmc_host *mmc;
197 struct mmc_data *data;
198 struct mmc_command *cmd;
199 struct platform_device *pd;
200 struct clk *hclk;
201 unsigned int clk;
202 int bus_width;
203 u16 wait_int;
204 u16 sd_error;
205 long timeout;
206 void __iomem *addr;
207 wait_queue_head_t intr_wait;
208};
209
210static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg)
211{
212 return readl(host->addr + reg);
213}
214
215static inline void sh_mmcif_writel(struct sh_mmcif_host *host,
216 unsigned int reg, u32 val)
217{
218 writel(val, host->addr + reg);
219}
220
221static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
222 unsigned int reg, u32 val)
223{
224 writel(val | sh_mmcif_readl(host, reg), host->addr + reg);
225}
226
227static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
228 unsigned int reg, u32 val)
229{
230 writel(~val & sh_mmcif_readl(host, reg), host->addr + reg);
231}
232
233
234static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
235{
236 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
237
238 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
239 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
240
241 if (!clk)
242 return;
243 if (p->sup_pclk && clk == host->clk)
244 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
245 else
246 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
247 (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
248
249 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
250}
251
252static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
253{
254 u32 tmp;
255
256 tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL);
257
258 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON);
259 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF);
260 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
261 SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
262 /* byte swap on */
263 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
264}
265
266static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
267{
268 u32 state1, state2;
269 int ret, timeout = 10000000;
270
271 host->sd_error = 0;
272 host->wait_int = 0;
273
274 state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1);
275 state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2);
276 pr_debug("%s: ERR HOST_STS1 = %08x\n", \
277 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1));
278 pr_debug("%s: ERR HOST_STS2 = %08x\n", \
279 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2));
280
281 if (state1 & STS1_CMDSEQ) {
282 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
283 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
284 while (1) {
285 timeout--;
286 if (timeout < 0) {
287 pr_err(DRIVER_NAME": Forceed end of " \
288 "command sequence timeout err\n");
289 return -EIO;
290 }
291 if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)
292 & STS1_CMDSEQ))
293 break;
294 mdelay(1);
295 }
296 sh_mmcif_sync_reset(host);
297 pr_debug(DRIVER_NAME": Forced end of command sequence\n");
298 return -EIO;
299 }
300
301 if (state2 & STS2_CRC_ERR) {
302 pr_debug(DRIVER_NAME": Happened CRC error\n");
303 ret = -EIO;
304 } else if (state2 & STS2_TIMEOUT_ERR) {
305 pr_debug(DRIVER_NAME": Happened Timeout error\n");
306 ret = -ETIMEDOUT;
307 } else {
308 pr_debug(DRIVER_NAME": Happened End/Index error\n");
309 ret = -EIO;
310 }
311 return ret;
312}
313
314static int sh_mmcif_single_read(struct sh_mmcif_host *host,
315 struct mmc_request *mrq)
316{
317 struct mmc_data *data = mrq->data;
318 long time;
319 u32 blocksize, i, *p = sg_virt(data->sg);
320
321 host->wait_int = 0;
322
323 /* buf read enable */
324 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
325 time = wait_event_interruptible_timeout(host->intr_wait,
326 host->wait_int == 1 ||
327 host->sd_error == 1, host->timeout);
328 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
329 return sh_mmcif_error_manage(host);
330
331 host->wait_int = 0;
332 blocksize = (BLOCK_SIZE_MASK &
333 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
334 for (i = 0; i < blocksize / 4; i++)
335 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
336
337 /* buffer read end */
338 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
339 time = wait_event_interruptible_timeout(host->intr_wait,
340 host->wait_int == 1 ||
341 host->sd_error == 1, host->timeout);
342 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
343 return sh_mmcif_error_manage(host);
344
345 host->wait_int = 0;
346 return 0;
347}
348
349static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
350 struct mmc_request *mrq)
351{
352 struct mmc_data *data = mrq->data;
353 long time;
354 u32 blocksize, i, j, sec, *p;
355
356 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
357 for (j = 0; j < data->sg_len; j++) {
358 p = sg_virt(data->sg);
359 host->wait_int = 0;
360 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
361 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
362 /* buf read enable */
363 time = wait_event_interruptible_timeout(host->intr_wait,
364 host->wait_int == 1 ||
365 host->sd_error == 1, host->timeout);
366
367 if (host->wait_int != 1 &&
368 (time == 0 || host->sd_error != 0))
369 return sh_mmcif_error_manage(host);
370
371 host->wait_int = 0;
372 for (i = 0; i < blocksize / 4; i++)
373 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
374 }
375 if (j < data->sg_len - 1)
376 data->sg++;
377 }
378 return 0;
379}
380
381static int sh_mmcif_single_write(struct sh_mmcif_host *host,
382 struct mmc_request *mrq)
383{
384 struct mmc_data *data = mrq->data;
385 long time;
386 u32 blocksize, i, *p = sg_virt(data->sg);
387
388 host->wait_int = 0;
389 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
390
391 /* buf write enable */
392 time = wait_event_interruptible_timeout(host->intr_wait,
393 host->wait_int == 1 ||
394 host->sd_error == 1, host->timeout);
395 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
396 return sh_mmcif_error_manage(host);
397
398 host->wait_int = 0;
399 blocksize = (BLOCK_SIZE_MASK &
400 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
401 for (i = 0; i < blocksize / 4; i++)
402 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
403
404 /* buffer write end */
405 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
406
407 time = wait_event_interruptible_timeout(host->intr_wait,
408 host->wait_int == 1 ||
409 host->sd_error == 1, host->timeout);
410 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
411 return sh_mmcif_error_manage(host);
412
413 host->wait_int = 0;
414 return 0;
415}
416
417static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
418 struct mmc_request *mrq)
419{
420 struct mmc_data *data = mrq->data;
421 long time;
422 u32 i, sec, j, blocksize, *p;
423
424 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
425
426 for (j = 0; j < data->sg_len; j++) {
427 p = sg_virt(data->sg);
428 host->wait_int = 0;
429 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
430 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
431 /* buf write enable*/
432 time = wait_event_interruptible_timeout(host->intr_wait,
433 host->wait_int == 1 ||
434 host->sd_error == 1, host->timeout);
435
436 if (host->wait_int != 1 &&
437 (time == 0 || host->sd_error != 0))
438 return sh_mmcif_error_manage(host);
439
440 host->wait_int = 0;
441 for (i = 0; i < blocksize / 4; i++)
442 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
443 }
444 if (j < data->sg_len - 1)
445 data->sg++;
446 }
447 return 0;
448}
449
450static void sh_mmcif_get_response(struct sh_mmcif_host *host,
451 struct mmc_command *cmd)
452{
453 if (cmd->flags & MMC_RSP_136) {
454 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3);
455 cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2);
456 cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1);
457 cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
458 } else
459 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
460}
461
462static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
463 struct mmc_command *cmd)
464{
465 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12);
466}
467
468static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
469 struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
470{
471 u32 tmp = 0;
472
473 /* Response Type check */
474 switch (mmc_resp_type(cmd)) {
475 case MMC_RSP_NONE:
476 tmp |= CMD_SET_RTYP_NO;
477 break;
478 case MMC_RSP_R1:
479 case MMC_RSP_R1B:
480 case MMC_RSP_R3:
481 tmp |= CMD_SET_RTYP_6B;
482 break;
483 case MMC_RSP_R2:
484 tmp |= CMD_SET_RTYP_17B;
485 break;
486 default:
487 pr_err(DRIVER_NAME": Not support type response.\n");
488 break;
489 }
490 switch (opc) {
491 /* RBSY */
492 case MMC_SWITCH:
493 case MMC_STOP_TRANSMISSION:
494 case MMC_SET_WRITE_PROT:
495 case MMC_CLR_WRITE_PROT:
496 case MMC_ERASE:
497 case MMC_GEN_CMD:
498 tmp |= CMD_SET_RBSY;
499 break;
500 }
501 /* WDAT / DATW */
502 if (host->data) {
503 tmp |= CMD_SET_WDAT;
504 switch (host->bus_width) {
505 case MMC_BUS_WIDTH_1:
506 tmp |= CMD_SET_DATW_1;
507 break;
508 case MMC_BUS_WIDTH_4:
509 tmp |= CMD_SET_DATW_4;
510 break;
511 case MMC_BUS_WIDTH_8:
512 tmp |= CMD_SET_DATW_8;
513 break;
514 default:
515 pr_err(DRIVER_NAME": Not support bus width.\n");
516 break;
517 }
518 }
519 /* DWEN */
520 if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
521 tmp |= CMD_SET_DWEN;
522 /* CMLTE/CMD12EN */
523 if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
524 tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
525 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
526 mrq->data->blocks << 16);
527 }
528 /* RIDXC[1:0] check bits */
529 if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
530 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
531 tmp |= CMD_SET_RIDXC_BITS;
532 /* RCRC7C[1:0] check bits */
533 if (opc == MMC_SEND_OP_COND)
534 tmp |= CMD_SET_CRC7C_BITS;
535 /* RCRC7C[1:0] internal CRC7 */
536 if (opc == MMC_ALL_SEND_CID ||
537 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
538 tmp |= CMD_SET_CRC7C_INTERNAL;
539
540 return opc = ((opc << 24) | tmp);
541}
542
543static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
544 struct mmc_request *mrq, u32 opc)
545{
546 u32 ret;
547
548 switch (opc) {
549 case MMC_READ_MULTIPLE_BLOCK:
550 ret = sh_mmcif_multi_read(host, mrq);
551 break;
552 case MMC_WRITE_MULTIPLE_BLOCK:
553 ret = sh_mmcif_multi_write(host, mrq);
554 break;
555 case MMC_WRITE_BLOCK:
556 ret = sh_mmcif_single_write(host, mrq);
557 break;
558 case MMC_READ_SINGLE_BLOCK:
559 case MMC_SEND_EXT_CSD:
560 ret = sh_mmcif_single_read(host, mrq);
561 break;
562 default:
563 pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc);
564 ret = -EINVAL;
565 break;
566 }
567 return ret;
568}
569
570static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
571 struct mmc_request *mrq, struct mmc_command *cmd)
572{
573 long time;
574 int ret = 0, mask = 0;
575 u32 opc = cmd->opcode;
576
577 host->cmd = cmd;
578
579 switch (opc) {
 580 /* response busy check */
581 case MMC_SWITCH:
582 case MMC_STOP_TRANSMISSION:
583 case MMC_SET_WRITE_PROT:
584 case MMC_CLR_WRITE_PROT:
585 case MMC_ERASE:
586 case MMC_GEN_CMD:
587 mask = MASK_MRBSYE;
588 break;
589 default:
590 mask = MASK_MCRSPE;
591 break;
592 }
593 mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
594 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
595 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
596 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
597
598 if (host->data) {
599 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0);
600 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz);
601 }
602 opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
603
604 sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0);
605 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask);
606 /* set arg */
607 sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg);
608 host->wait_int = 0;
609 /* set cmd */
610 sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc);
611
612 time = wait_event_interruptible_timeout(host->intr_wait,
613 host->wait_int == 1 || host->sd_error == 1, host->timeout);
614 if (host->wait_int != 1 && time == 0) {
615 cmd->error = sh_mmcif_error_manage(host);
616 return;
617 }
618 if (host->sd_error) {
619 switch (cmd->opcode) {
620 case MMC_ALL_SEND_CID:
621 case MMC_SELECT_CARD:
622 case MMC_APP_CMD:
623 cmd->error = -ETIMEDOUT;
624 break;
625 default:
626 pr_debug("%s: Cmd(d'%d) err\n",
627 DRIVER_NAME, cmd->opcode);
628 cmd->error = sh_mmcif_error_manage(host);
629 break;
630 }
631 host->sd_error = 0;
632 host->wait_int = 0;
633 return;
634 }
635 if (!(cmd->flags & MMC_RSP_PRESENT)) {
636 cmd->error = ret;
637 host->wait_int = 0;
638 return;
639 }
640 if (host->wait_int == 1) {
641 sh_mmcif_get_response(host, cmd);
642 host->wait_int = 0;
643 }
644 if (host->data) {
645 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
646 if (ret < 0)
647 mrq->data->bytes_xfered = 0;
648 else
649 mrq->data->bytes_xfered =
650 mrq->data->blocks * mrq->data->blksz;
651 }
652 cmd->error = ret;
653}
654
655static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
656 struct mmc_request *mrq, struct mmc_command *cmd)
657{
658 long time;
659
660 if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
661 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
662 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
663 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
664 else {
665 pr_err(DRIVER_NAME": not support stop cmd\n");
666 cmd->error = sh_mmcif_error_manage(host);
667 return;
668 }
669
670 time = wait_event_interruptible_timeout(host->intr_wait,
671 host->wait_int == 1 ||
672 host->sd_error == 1, host->timeout);
673 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
674 cmd->error = sh_mmcif_error_manage(host);
675 return;
676 }
677 sh_mmcif_get_cmd12response(host, cmd);
678 host->wait_int = 0;
679 cmd->error = 0;
680}
681
682static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
683{
684 struct sh_mmcif_host *host = mmc_priv(mmc);
685
686 switch (mrq->cmd->opcode) {
 687 /* MMCIF does not support SD/SDIO commands */
688 case SD_IO_SEND_OP_COND:
689 case MMC_APP_CMD:
690 mrq->cmd->error = -ETIMEDOUT;
691 mmc_request_done(mmc, mrq);
692 return;
693 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
694 if (!mrq->data) {
 695 /* send_if_cond cmd (not supported) */
696 mrq->cmd->error = -ETIMEDOUT;
697 mmc_request_done(mmc, mrq);
698 return;
699 }
700 break;
701 default:
702 break;
703 }
704 host->data = mrq->data;
705 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
706 host->data = NULL;
707
708 if (mrq->cmd->error != 0) {
709 mmc_request_done(mmc, mrq);
710 return;
711 }
712 if (mrq->stop)
713 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
714 mmc_request_done(mmc, mrq);
715}
716
717static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
718{
719 struct sh_mmcif_host *host = mmc_priv(mmc);
720 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
721
722 if (ios->power_mode == MMC_POWER_OFF) {
723 /* clock stop */
724 sh_mmcif_clock_control(host, 0);
725 if (p->down_pwr)
726 p->down_pwr(host->pd);
727 return;
728 } else if (ios->power_mode == MMC_POWER_UP) {
729 if (p->set_pwr)
730 p->set_pwr(host->pd, ios->power_mode);
731 }
732
733 if (ios->clock)
734 sh_mmcif_clock_control(host, ios->clock);
735
736 host->bus_width = ios->bus_width;
737}
738
739static struct mmc_host_ops sh_mmcif_ops = {
740 .request = sh_mmcif_request,
741 .set_ios = sh_mmcif_set_ios,
742};
743
744static void sh_mmcif_detect(struct mmc_host *mmc)
745{
746 mmc_detect_change(mmc, 0);
747}
748
749static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
750{
751 struct sh_mmcif_host *host = dev_id;
752 u32 state = 0;
753 int err = 0;
754
755 state = sh_mmcif_readl(host, MMCIF_CE_INT);
756
757 if (state & INT_RBSYE) {
758 sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE));
759 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
760 } else if (state & INT_CRSPE) {
761 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE);
762 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
763 } else if (state & INT_BUFREN) {
764 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN);
765 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
766 } else if (state & INT_BUFWEN) {
767 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN);
768 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
769 } else if (state & INT_CMD12DRE) {
770 sh_mmcif_writel(host, MMCIF_CE_INT,
771 ~(INT_CMD12DRE | INT_CMD12RBE |
772 INT_CMD12CRE | INT_BUFRE));
773 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
774 } else if (state & INT_BUFRE) {
775 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE);
776 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
777 } else if (state & INT_DTRANE) {
778 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE);
779 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
780 } else if (state & INT_CMD12RBE) {
781 sh_mmcif_writel(host, MMCIF_CE_INT,
782 ~(INT_CMD12RBE | INT_CMD12CRE));
783 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
784 } else if (state & INT_ERR_STS) {
785 /* err interrupts */
786 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
787 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
788 err = 1;
789 } else {
790 pr_debug("%s: Not support int\n", DRIVER_NAME);
791 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
792 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
793 err = 1;
794 }
795 if (err) {
796 host->sd_error = 1;
797 pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
798 }
799 host->wait_int = 1;
800 wake_up(&host->intr_wait);
801
802 return IRQ_HANDLED;
803}
804
805static int __devinit sh_mmcif_probe(struct platform_device *pdev)
806{
807 int ret = 0, irq[2];
808 struct mmc_host *mmc;
809 struct sh_mmcif_host *host = NULL;
810 struct sh_mmcif_plat_data *pd = NULL;
811 struct resource *res;
812 void __iomem *reg;
813 char clk_name[8];
814
815 irq[0] = platform_get_irq(pdev, 0);
816 irq[1] = platform_get_irq(pdev, 1);
817 if (irq[0] < 0 || irq[1] < 0) {
818 pr_err(DRIVER_NAME": Get irq error\n");
819 return -ENXIO;
820 }
821 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
822 if (!res) {
823 dev_err(&pdev->dev, "platform_get_resource error.\n");
824 return -ENXIO;
825 }
826 reg = ioremap(res->start, resource_size(res));
827 if (!reg) {
828 dev_err(&pdev->dev, "ioremap error.\n");
829 return -ENOMEM;
830 }
831 pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
832 if (!pd) {
833 dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
834 ret = -ENXIO;
835 goto clean_up;
836 }
837 mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
838 if (!mmc) {
839 ret = -ENOMEM;
840 goto clean_up;
841 }
842 host = mmc_priv(mmc);
843 host->mmc = mmc;
844 host->addr = reg;
845 host->timeout = 1000;
846
847 snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
848 host->hclk = clk_get(&pdev->dev, clk_name);
849 if (IS_ERR(host->hclk)) {
850 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
851 ret = PTR_ERR(host->hclk);
852 goto clean_up1;
853 }
854 clk_enable(host->hclk);
855 host->clk = clk_get_rate(host->hclk);
856 host->pd = pdev;
857
858 init_waitqueue_head(&host->intr_wait);
859
860 mmc->ops = &sh_mmcif_ops;
861 mmc->f_max = host->clk;
862 /* close to 400KHz */
863 if (mmc->f_max < 51200000)
864 mmc->f_min = mmc->f_max / 128;
865 else if (mmc->f_max < 102400000)
866 mmc->f_min = mmc->f_max / 256;
867 else
868 mmc->f_min = mmc->f_max / 512;
869 if (pd->ocr)
870 mmc->ocr_avail = pd->ocr;
871 mmc->caps = MMC_CAP_MMC_HIGHSPEED;
872 if (pd->caps)
873 mmc->caps |= pd->caps;
874 mmc->max_phys_segs = 128;
875 mmc->max_hw_segs = 128;
876 mmc->max_blk_size = 512;
877 mmc->max_blk_count = 65535;
878 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
879 mmc->max_seg_size = mmc->max_req_size;
880
881 sh_mmcif_sync_reset(host);
882 platform_set_drvdata(pdev, host);
883 mmc_add_host(mmc);
884
885 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
886 if (ret) {
887 pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
888 goto clean_up2;
889 }
890 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
891 if (ret) {
892 free_irq(irq[0], host);
893 pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
894 goto clean_up2;
895 }
896
897 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
898 sh_mmcif_detect(host->mmc);
899
900 pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
901 pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
902 sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
903 return ret;
904
905clean_up2:
906 clk_disable(host->hclk);
907clean_up1:
908 mmc_free_host(mmc);
909clean_up:
910 if (reg)
911 iounmap(reg);
912 return ret;
913}
914
915static int __devexit sh_mmcif_remove(struct platform_device *pdev)
916{
917 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
918 int irq[2];
919
920 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
921
922 irq[0] = platform_get_irq(pdev, 0);
923 irq[1] = platform_get_irq(pdev, 1);
924
925 if (host->addr)
926 iounmap(host->addr);
927
928 platform_set_drvdata(pdev, NULL);
929 mmc_remove_host(host->mmc);
930
931 free_irq(irq[0], host);
932 free_irq(irq[1], host);
933
934 clk_disable(host->hclk);
935 mmc_free_host(host->mmc);
936
937 return 0;
938}
939
940static struct platform_driver sh_mmcif_driver = {
941 .probe = sh_mmcif_probe,
942 .remove = sh_mmcif_remove,
943 .driver = {
944 .name = DRIVER_NAME,
945 },
946};
947
948static int __init sh_mmcif_init(void)
949{
950 return platform_driver_register(&sh_mmcif_driver);
951}
952
953static void __exit sh_mmcif_exit(void)
954{
955 platform_driver_unregister(&sh_mmcif_driver);
956}
957
958module_init(sh_mmcif_init);
959module_exit(sh_mmcif_exit);
960
961
962MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
963MODULE_LICENSE("GPL");
964MODULE_ALIAS(DRIVER_NAME);
965MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
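
For context, a minimal board-file sketch of how a platform device could be wired up for the probe routine above: one MMIO region plus the "error" and "int" IRQs, and optional OCR/caps passed through struct sh_mmcif_plat_data. The base address, IRQ numbers, header path and the "sh_mmcif" device name are illustrative assumptions, not taken from a real board file.

    #include <linux/platform_device.h>
    #include <linux/mmc/host.h>
    #include <linux/mmc/sh_mmcif.h>		/* assumed location of sh_mmcif_plat_data */

    static struct sh_mmcif_plat_data sh_mmcif_plat = {
    	.ocr	= MMC_VDD_32_33 | MMC_VDD_33_34,
    	.caps	= MMC_CAP_4_BIT_DATA,
    };

    static struct resource sh_mmcif_resources[] = {
    	{
    		.start	= 0xa4ca0000,		/* placeholder MMIO base */
    		.end	= 0xa4ca00ff,
    		.flags	= IORESOURCE_MEM,
    	}, {
    		.start	= 100,			/* placeholder "error" IRQ */
    		.end	= 100,
    		.flags	= IORESOURCE_IRQ,
    	}, {
    		.start	= 101,			/* placeholder "int" IRQ */
    		.end	= 101,
    		.flags	= IORESOURCE_IRQ,
    	},
    };

    static struct platform_device sh_mmcif_device = {
    	.name		= "sh_mmcif",		/* assumed to match DRIVER_NAME */
    	.id		= 0,			/* probe looks up clock "mmc0" */
    	.dev		= { .platform_data = &sh_mmcif_plat },
    	.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
    	.resource	= sh_mmcif_resources,
    };
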
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 82554ddec6b3..cec99958b652 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
1032 1032
1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) 1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
1034{ 1034{
1035 return mmc_suspend_host(tifm_get_drvdata(sock), state); 1035 return mmc_suspend_host(tifm_get_drvdata(sock));
1036} 1036}
1037 1037
1038static int tifm_sd_resume(struct tifm_dev *sock) 1038static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 883fcac21004..ee7d0a5a51c4 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -768,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
768 struct mmc_host *mmc = platform_get_drvdata(dev); 768 struct mmc_host *mmc = platform_get_drvdata(dev);
769 int ret; 769 int ret;
770 770
771 ret = mmc_suspend_host(mmc, state); 771 ret = mmc_suspend_host(mmc);
772 772
773 /* Tell MFD core it can disable us now.*/ 773 /* Tell MFD core it can disable us now.*/
774 if (!ret && cell->disable) 774 if (!ret && cell->disable)
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 632858a94376..19f2d72dbca5 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
1280 via_save_pcictrlreg(host); 1280 via_save_pcictrlreg(host);
1281 via_save_sdcreg(host); 1281 via_save_sdcreg(host);
1282 1282
1283 ret = mmc_suspend_host(host->mmc, state); 1283 ret = mmc_suspend_host(host->mmc);
1284 1284
1285 pci_save_state(pcidev); 1285 pci_save_state(pcidev);
1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); 1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 69efe01eece8..0012f5d13d28 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
1819{ 1819{
1820 BUG_ON(host == NULL); 1820 BUG_ON(host == NULL);
1821 1821
1822 return mmc_suspend_host(host->mmc, state); 1822 return mmc_suspend_host(host->mmc);
1823} 1823}
1824 1824
1825static int wbsd_resume(struct wbsd_host *host) 1825static int wbsd_resume(struct wbsd_host *host)
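
The four hunks above all follow from the mmc_suspend_host() signature change: the pm_message_t argument is gone. A host driver's platform suspend callback now reduces to roughly the following sketch (the driver name is illustrative):

    static int foo_mmc_suspend(struct platform_device *dev, pm_message_t state)
    {
    	struct mmc_host *mmc = platform_get_drvdata(dev);

    	/* pm_message_t is still part of the platform suspend callback,
    	 * but mmc_suspend_host() no longer takes it */
    	return mmc ? mmc_suspend_host(mmc) : 0;
    }
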
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 1586e1caa2f5..8bef6d60f88b 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -18,6 +18,8 @@
18#include <linux/parport.h> 18#include <linux/parport.h>
19#include <linux/ioport.h> 19#include <linux/ioport.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22
21#include <asm/setup.h> 23#include <asm/setup.h>
22#include <asm/amigahw.h> 24#include <asm/amigahw.h>
23#include <asm/irq.h> 25#include <asm/irq.h>
@@ -31,7 +33,6 @@
31#define DPRINTK(x...) do { } while (0) 33#define DPRINTK(x...) do { } while (0)
32#endif 34#endif
33 35
34static struct parport *this_port = NULL;
35 36
36static void amiga_write_data(struct parport *p, unsigned char data) 37static void amiga_write_data(struct parport *p, unsigned char data)
37{ 38{
@@ -227,18 +228,11 @@ static struct parport_operations pp_amiga_ops = {
227 228
228/* ----------- Initialisation code --------------------------------- */ 229/* ----------- Initialisation code --------------------------------- */
229 230
230static int __init parport_amiga_init(void) 231static int __init amiga_parallel_probe(struct platform_device *pdev)
231{ 232{
232 struct parport *p; 233 struct parport *p;
233 int err; 234 int err;
234 235
235 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_PARALLEL))
236 return -ENODEV;
237
238 err = -EBUSY;
239 if (!request_mem_region(CIAA_PHYSADDR-1+0x100, 0x100, "parallel"))
240 goto out_mem;
241
242 ciaa.ddrb = 0xff; 236 ciaa.ddrb = 0xff;
243 ciab.ddra &= 0xf8; 237 ciab.ddra &= 0xf8;
244 mb(); 238 mb();
@@ -246,41 +240,63 @@ static int __init parport_amiga_init(void)
246 p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG, 240 p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG,
247 PARPORT_DMA_NONE, &pp_amiga_ops); 241 PARPORT_DMA_NONE, &pp_amiga_ops);
248 if (!p) 242 if (!p)
249 goto out_port; 243 return -EBUSY;
250 244
251 err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name, p); 245 err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name,
246 p);
252 if (err) 247 if (err)
253 goto out_irq; 248 goto out_irq;
254 249
255 this_port = p;
256 printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name); 250 printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
257 /* XXX: set operating mode */ 251 /* XXX: set operating mode */
258 parport_announce_port(p); 252 parport_announce_port(p);
259 253
254 platform_set_drvdata(pdev, p);
255
260 return 0; 256 return 0;
261 257
262out_irq: 258out_irq:
263 parport_put_port(p); 259 parport_put_port(p);
264out_port:
265 release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100);
266out_mem:
267 return err; 260 return err;
268} 261}
269 262
270static void __exit parport_amiga_exit(void) 263static int __exit amiga_parallel_remove(struct platform_device *pdev)
264{
265 struct parport *port = platform_get_drvdata(pdev);
266
267 parport_remove_port(port);
268 if (port->irq != PARPORT_IRQ_NONE)
269 free_irq(IRQ_AMIGA_CIAA_FLG, port);
270 parport_put_port(port);
271 platform_set_drvdata(pdev, NULL);
272 return 0;
273}
274
275static struct platform_driver amiga_parallel_driver = {
276 .remove = __exit_p(amiga_parallel_remove),
277 .driver = {
278 .name = "amiga-parallel",
279 .owner = THIS_MODULE,
280 },
281};
282
283static int __init amiga_parallel_init(void)
284{
285 return platform_driver_probe(&amiga_parallel_driver,
286 amiga_parallel_probe);
287}
288
289module_init(amiga_parallel_init);
290
291static void __exit amiga_parallel_exit(void)
271{ 292{
272 parport_remove_port(this_port); 293 platform_driver_unregister(&amiga_parallel_driver);
273 if (this_port->irq != PARPORT_IRQ_NONE)
274 free_irq(IRQ_AMIGA_CIAA_FLG, this_port);
275 parport_put_port(this_port);
276 release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100);
277} 294}
278 295
296module_exit(amiga_parallel_exit);
279 297
280MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); 298MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
281MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port"); 299MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port");
282MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port"); 300MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port");
283MODULE_LICENSE("GPL"); 301MODULE_LICENSE("GPL");
284 302MODULE_ALIAS("platform:amiga-parallel");
285module_init(parport_amiga_init)
286module_exit(parport_amiga_exit)
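
The driver now binds to a platform device named "amiga-parallel", so a matching device is expected to be registered from the Amiga platform setup code, roughly along these lines (a sketch of the arch side, not the actual arch/m68k change):

    static int __init amiga_init_parallel(void)
    {
    	struct platform_device *pdev;

    	if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_PARALLEL))
    		return -ENODEV;

    	/* name must match amiga_parallel_driver.driver.name */
    	pdev = platform_device_register_simple("amiga-parallel", -1, NULL, 0);
    	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
    }
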
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index c32822ad84a4..070211a5955c 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -8,3 +8,27 @@ config RAPIDIO_DISC_TIMEOUT
8 ---help--- 8 ---help---
9 Amount of time a discovery node waits for a host to complete 9 Amount of time a discovery node waits for a host to complete
10 enumeration before giving up. 10 enumeration before giving up.
11
12config RAPIDIO_ENABLE_RX_TX_PORTS
13 bool "Enable RapidIO Input/Output Ports"
14 depends on RAPIDIO
15 ---help---
16	  The RapidIO specification describes an Output port transmit
17	  enable and an Input port receive enable. The recommended state
18	  for both Input and Output ports is disabled. When this switch
19	  is set, the RapidIO subsystem will enable all ports for
20	  Input/Output direction to allow traffic other than Maintenance
21	  transfers.
22
23source "drivers/rapidio/switches/Kconfig"
24
25config RAPIDIO_DEBUG
26 bool "RapidIO subsystem debug messages"
27 depends on RAPIDIO
28 help
29 Say Y here if you want the RapidIO subsystem to produce a bunch of
30 debug messages to the system log. Select this if you are having a
31 problem with the RapidIO subsystem and want to see more of what is
32 going on.
33
34 If you are unsure about this, say N here.
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 7c0e1818de51..b6139fe187bf 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -4,3 +4,7 @@
4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o 4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
5 5
6obj-$(CONFIG_RAPIDIO) += switches/ 6obj-$(CONFIG_RAPIDIO) += switches/
7
8ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
9EXTRA_CFLAGS += -DDEBUG
10endif
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 45415096c294..566432106cc5 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -4,6 +4,14 @@
4 * Copyright 2005 MontaVista Software, Inc. 4 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 5 * Matt Porter <mporter@kernel.crashing.org>
6 * 6 *
7 * Copyright 2009 Integrated Device Technology, Inc.
8 * Alex Bounine <alexandre.bounine@idt.com>
9 * - Added Port-Write/Error Management initialization and handling
10 *
11 * Copyright 2009 Sysgo AG
12 * Thomas Moll <thomas.moll@sysgo.com>
13 * - Added Input- Output- enable functionality, to allow full communication
14 *
7 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
@@ -31,15 +39,16 @@
31LIST_HEAD(rio_devices); 39LIST_HEAD(rio_devices);
32static LIST_HEAD(rio_switches); 40static LIST_HEAD(rio_switches);
33 41
34#define RIO_ENUM_CMPL_MAGIC 0xdeadbeef
35
36static void rio_enum_timeout(unsigned long); 42static void rio_enum_timeout(unsigned long);
37 43
44static void rio_init_em(struct rio_dev *rdev);
45
38DEFINE_SPINLOCK(rio_global_list_lock); 46DEFINE_SPINLOCK(rio_global_list_lock);
39 47
40static int next_destid = 0; 48static int next_destid = 0;
41static int next_switchid = 0; 49static int next_switchid = 0;
42static int next_net = 0; 50static int next_net = 0;
51static int next_comptag;
43 52
44static struct timer_list rio_enum_timer = 53static struct timer_list rio_enum_timer =
45TIMER_INITIALIZER(rio_enum_timeout, 0, 0); 54TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -52,12 +61,6 @@ static int rio_mport_phys_table[] = {
52 -1, 61 -1,
53}; 62};
54 63
55static int rio_sport_phys_table[] = {
56 RIO_EFB_PAR_EP_FREE_ID,
57 RIO_EFB_SER_EP_FREE_ID,
58 -1,
59};
60
61/** 64/**
62 * rio_get_device_id - Get the base/extended device id for a device 65 * rio_get_device_id - Get the base/extended device id for a device
63 * @port: RIO master port 66 * @port: RIO master port
@@ -118,12 +121,26 @@ static int rio_clear_locks(struct rio_mport *port)
118 u32 result; 121 u32 result;
119 int ret = 0; 122 int ret = 0;
120 123
121 /* Write component tag CSR magic complete value */ 124 /* Assign component tag to all devices */
122 rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, 125 next_comptag = 1;
123 RIO_ENUM_CMPL_MAGIC); 126 rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
124 list_for_each_entry(rdev, &rio_devices, global_list) 127
125 rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, 128 list_for_each_entry(rdev, &rio_devices, global_list) {
126 RIO_ENUM_CMPL_MAGIC); 129 /* Mark device as discovered */
130 rio_read_config_32(rdev,
131 rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
132 &result);
133 rio_write_config_32(rdev,
134 rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
135 result | RIO_PORT_GEN_DISCOVERED);
136
137 rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
138 rdev->comp_tag = next_comptag++;
139 if (next_comptag >= 0x10000) {
140 pr_err("RIO: Component Tag Counter Overflow\n");
141 break;
142 }
143 }
127 144
128 /* Release host device id locks */ 145 /* Release host device id locks */
129 rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, 146 rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
@@ -229,27 +246,37 @@ static int rio_is_switch(struct rio_dev *rdev)
229} 246}
230 247
231/** 248/**
232 * rio_route_set_ops- Sets routing operations for a particular vendor switch 249 * rio_switch_init - Sets switch operations for a particular vendor switch
233 * @rdev: RIO device 250 * @rdev: RIO device
251 * @do_enum: Enumeration/Discovery mode flag
234 * 252 *
235 * Searches the RIO route ops table for known switch types. If the vid 253 * Searches the RIO switch ops table for known switch types. If the vid
236 * and did match a switch table entry, then set the add_entry() and 254 * and did match a switch table entry, then call switch initialization
237 * get_entry() ops to the table entry values. 255 * routine to setup switch-specific routines.
238 */ 256 */
239static void rio_route_set_ops(struct rio_dev *rdev) 257static void rio_switch_init(struct rio_dev *rdev, int do_enum)
240{ 258{
241 struct rio_route_ops *cur = __start_rio_route_ops; 259 struct rio_switch_ops *cur = __start_rio_switch_ops;
242 struct rio_route_ops *end = __end_rio_route_ops; 260 struct rio_switch_ops *end = __end_rio_switch_ops;
243 261
244 while (cur < end) { 262 while (cur < end) {
245 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { 263 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
246 pr_debug("RIO: adding routing ops for %s\n", rio_name(rdev)); 264 pr_debug("RIO: calling init routine for %s\n",
247 rdev->rswitch->add_entry = cur->add_hook; 265 rio_name(rdev));
248 rdev->rswitch->get_entry = cur->get_hook; 266 cur->init_hook(rdev, do_enum);
267 break;
249 } 268 }
250 cur++; 269 cur++;
251 } 270 }
252 271
272 if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
273 pr_debug("RIO: adding STD routing ops for %s\n",
274 rio_name(rdev));
275 rdev->rswitch->add_entry = rio_std_route_add_entry;
276 rdev->rswitch->get_entry = rio_std_route_get_entry;
277 rdev->rswitch->clr_table = rio_std_route_clr_table;
278 }
279
253 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) 280 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
254 printk(KERN_ERR "RIO: missing routing ops for %s\n", 281 printk(KERN_ERR "RIO: missing routing ops for %s\n",
255 rio_name(rdev)); 282 rio_name(rdev));
@@ -281,6 +308,65 @@ static int __devinit rio_add_device(struct rio_dev *rdev)
281} 308}
282 309
283/** 310/**
311 * rio_enable_rx_tx_port - enable input receiver and output transmitter of
312 * given port
313 * @port: Master port associated with the RIO network
314 * @local: local=1 selects the local port, otherwise a remote device is accessed
315 * @destid: Destination ID of the device to check host bit
316 * @hopcount: Number of hops to reach the target
317 * @port_num: Port number (on a switch) to enable on a far-end device
318 *
319 * Returns 0 on success, or -EIO if the maintenance access to the
320 * port control register fails.
321 */
322inline int rio_enable_rx_tx_port(struct rio_mport *port,
323 int local, u16 destid,
324 u8 hopcount, u8 port_num) {
325#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
326 u32 regval;
327 u32 ext_ftr_ptr;
328
329 /*
330 * enable rx input tx output port
331 */
332 pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
333 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
334
335 ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
336
337 if (local) {
338 rio_local_read_config_32(port, ext_ftr_ptr +
339 RIO_PORT_N_CTL_CSR(0),
340 &regval);
341 } else {
342 if (rio_mport_read_config_32(port, destid, hopcount,
343 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
344 return -EIO;
345 }
346
347 if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
348 /* serial */
349 regval = regval | RIO_PORT_N_CTL_EN_RX_SER
350 | RIO_PORT_N_CTL_EN_TX_SER;
351 } else {
352 /* parallel */
353 regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
354 | RIO_PORT_N_CTL_EN_TX_PAR;
355 }
356
357 if (local) {
358 rio_local_write_config_32(port, ext_ftr_ptr +
359 RIO_PORT_N_CTL_CSR(0), regval);
360 } else {
361 if (rio_mport_write_config_32(port, destid, hopcount,
362 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
363 return -EIO;
364 }
365#endif
366 return 0;
367}
368
369/**
284 * rio_setup_device- Allocates and sets up a RIO device 370 * rio_setup_device- Allocates and sets up a RIO device
285 * @net: RIO network 371 * @net: RIO network
286 * @port: Master port to send transactions 372 * @port: Master port to send transactions
@@ -325,8 +411,14 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
325 rdev->asm_rev = result >> 16; 411 rdev->asm_rev = result >> 16;
326 rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, 412 rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
327 &rdev->pef); 413 &rdev->pef);
328 if (rdev->pef & RIO_PEF_EXT_FEATURES) 414 if (rdev->pef & RIO_PEF_EXT_FEATURES) {
329 rdev->efptr = result & 0xffff; 415 rdev->efptr = result & 0xffff;
416 rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
417 hopcount);
418
419 rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
420 hopcount, RIO_EFB_ERR_MGMNT);
421 }
330 422
331 rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, 423 rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
332 &rdev->src_ops); 424 &rdev->src_ops);
@@ -349,12 +441,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
349 if (rio_is_switch(rdev)) { 441 if (rio_is_switch(rdev)) {
350 rio_mport_read_config_32(port, destid, hopcount, 442 rio_mport_read_config_32(port, destid, hopcount,
351 RIO_SWP_INFO_CAR, &rdev->swpinfo); 443 RIO_SWP_INFO_CAR, &rdev->swpinfo);
352 rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL); 444 rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
353 if (!rswitch) 445 if (!rswitch)
354 goto cleanup; 446 goto cleanup;
355 rswitch->switchid = next_switchid; 447 rswitch->switchid = next_switchid;
356 rswitch->hopcount = hopcount; 448 rswitch->hopcount = hopcount;
357 rswitch->destid = destid; 449 rswitch->destid = destid;
450 rswitch->port_ok = 0;
358 rswitch->route_table = kzalloc(sizeof(u8)* 451 rswitch->route_table = kzalloc(sizeof(u8)*
359 RIO_MAX_ROUTE_ENTRIES(port->sys_size), 452 RIO_MAX_ROUTE_ENTRIES(port->sys_size),
360 GFP_KERNEL); 453 GFP_KERNEL);
@@ -367,13 +460,22 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
367 rdev->rswitch = rswitch; 460 rdev->rswitch = rswitch;
368 dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, 461 dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
369 rdev->rswitch->switchid); 462 rdev->rswitch->switchid);
370 rio_route_set_ops(rdev); 463 rio_switch_init(rdev, do_enum);
464
465 if (do_enum && rdev->rswitch->clr_table)
466 rdev->rswitch->clr_table(port, destid, hopcount,
467 RIO_GLOBAL_TABLE);
371 468
372 list_add_tail(&rswitch->node, &rio_switches); 469 list_add_tail(&rswitch->node, &rio_switches);
373 470
374 } else 471 } else {
472 if (do_enum)
473			/* Enable Input Output Port (transmitter/receiver) */
474 rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
475
375 dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, 476 dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
376 rdev->destid); 477 rdev->destid);
478 }
377 479
378 rdev->dev.bus = &rio_bus_type; 480 rdev->dev.bus = &rio_bus_type;
379 481
@@ -414,23 +516,29 @@ cleanup:
414 * 516 *
415 * Reads the port error status CSR for a particular switch port to 517 * Reads the port error status CSR for a particular switch port to
416 * determine if the port has an active link. Returns 518 * determine if the port has an active link. Returns
417 * %PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is 519 * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
418 * inactive. 520 * inactive.
419 */ 521 */
420static int 522static int
421rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) 523rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
422{ 524{
423 u32 result; 525 u32 result = 0;
424 u32 ext_ftr_ptr; 526 u32 ext_ftr_ptr;
425 527
426 int *entry = rio_sport_phys_table; 528 ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
427
428 do {
429 if ((ext_ftr_ptr =
430 rio_mport_get_feature(port, 0, destid, hopcount, *entry)))
431 529
530 while (ext_ftr_ptr) {
531 rio_mport_read_config_32(port, destid, hopcount,
532 ext_ftr_ptr, &result);
533 result = RIO_GET_BLOCK_ID(result);
534 if ((result == RIO_EFB_SER_EP_FREE_ID) ||
535 (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
536 (result == RIO_EFB_SER_EP_FREC_ID))
432 break; 537 break;
433 } while (*++entry >= 0); 538
539 ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
540 ext_ftr_ptr);
541 }
434 542
435 if (ext_ftr_ptr) 543 if (ext_ftr_ptr)
436 rio_mport_read_config_32(port, destid, hopcount, 544 rio_mport_read_config_32(port, destid, hopcount,
@@ -438,7 +546,81 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
438 RIO_PORT_N_ERR_STS_CSR(sport), 546 RIO_PORT_N_ERR_STS_CSR(sport),
439 &result); 547 &result);
440 548
441 return (result & PORT_N_ERR_STS_PORT_OK); 549 return result & RIO_PORT_N_ERR_STS_PORT_OK;
550}
551
552/**
553 * rio_lock_device - Acquires host device lock for specified device
554 * @port: Master port to send transaction
555 * @destid: Destination ID for device/switch
556 * @hopcount: Hopcount to reach switch
557 * @wait_ms: Max wait time in msec (0 = no timeout)
558 *
559 * Attempts to acquire the host device lock for the specified device.
560 * Returns 0 if the device lock is acquired or -EINVAL if the timeout expires.
561 */
562static int
563rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms)
564{
565 u32 result;
566 int tcnt = 0;
567
568 /* Attempt to acquire device lock */
569 rio_mport_write_config_32(port, destid, hopcount,
570 RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
571 rio_mport_read_config_32(port, destid, hopcount,
572 RIO_HOST_DID_LOCK_CSR, &result);
573
574 while (result != port->host_deviceid) {
575 if (wait_ms != 0 && tcnt == wait_ms) {
576 pr_debug("RIO: timeout when locking device %x:%x\n",
577 destid, hopcount);
578 return -EINVAL;
579 }
580
581 /* Delay a bit */
582 mdelay(1);
583 tcnt++;
584 /* Try to acquire device lock again */
585 rio_mport_write_config_32(port, destid,
586 hopcount,
587 RIO_HOST_DID_LOCK_CSR,
588 port->host_deviceid);
589 rio_mport_read_config_32(port, destid,
590 hopcount,
591 RIO_HOST_DID_LOCK_CSR, &result);
592 }
593
594 return 0;
595}
596
597/**
598 * rio_unlock_device - Releases host device lock for specified device
599 * @port: Master port to send transaction
600 * @destid: Destination ID for device/switch
601 * @hopcount: Hopcount to reach switch
602 *
603 * Returns 0 if the device lock is released or -EINVAL on failure.
604 */
605static int
606rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
607{
608 u32 result;
609
610 /* Release device lock */
611 rio_mport_write_config_32(port, destid,
612 hopcount,
613 RIO_HOST_DID_LOCK_CSR,
614 port->host_deviceid);
615 rio_mport_read_config_32(port, destid, hopcount,
616 RIO_HOST_DID_LOCK_CSR, &result);
617 if ((result & 0xffff) != 0xffff) {
618 pr_debug("RIO: badness when releasing device lock %x:%x\n",
619 destid, hopcount);
620 return -EINVAL;
621 }
622
623 return 0;
442} 624}
443 625
444/** 626/**
@@ -448,6 +630,7 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
448 * @table: Routing table ID 630 * @table: Routing table ID
449 * @route_destid: Destination ID to be routed 631 * @route_destid: Destination ID to be routed
450 * @route_port: Port number to be routed 632 * @route_port: Port number to be routed
633 * @lock: lock switch device flag
451 * 634 *
452 * Calls the switch specific add_entry() method to add a route entry 635 * Calls the switch specific add_entry() method to add a route entry
453 * on a switch. The route table can be specified using the @table 636 * on a switch. The route table can be specified using the @table
@@ -456,12 +639,26 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
456 * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL 639 * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL
457 * on failure. 640 * on failure.
458 */ 641 */
459static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, 642static int
460 u16 table, u16 route_destid, u8 route_port) 643rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
644 u16 table, u16 route_destid, u8 route_port, int lock)
461{ 645{
462 return rswitch->add_entry(mport, rswitch->destid, 646 int rc;
647
648 if (lock) {
649 rc = rio_lock_device(mport, rswitch->destid,
650 rswitch->hopcount, 1000);
651 if (rc)
652 return rc;
653 }
654
655 rc = rswitch->add_entry(mport, rswitch->destid,
463 rswitch->hopcount, table, 656 rswitch->hopcount, table,
464 route_destid, route_port); 657 route_destid, route_port);
658 if (lock)
659 rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
660
661 return rc;
465} 662}
466 663
467/** 664/**
@@ -471,6 +668,7 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
471 * @table: Routing table ID 668 * @table: Routing table ID
472 * @route_destid: Destination ID to be routed 669 * @route_destid: Destination ID to be routed
473 * @route_port: Pointer to read port number into 670 * @route_port: Pointer to read port number into
671 * @lock: lock switch device flag
474 * 672 *
475 * Calls the switch specific get_entry() method to read a route entry 673 * Calls the switch specific get_entry() method to read a route entry
476 * in a switch. The route table can be specified using the @table 674 * in a switch. The route table can be specified using the @table
@@ -481,11 +679,24 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
481 */ 679 */
482static int 680static int
483rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table, 681rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
484 u16 route_destid, u8 * route_port) 682 u16 route_destid, u8 *route_port, int lock)
485{ 683{
486 return rswitch->get_entry(mport, rswitch->destid, 684 int rc;
685
686 if (lock) {
687 rc = rio_lock_device(mport, rswitch->destid,
688 rswitch->hopcount, 1000);
689 if (rc)
690 return rc;
691 }
692
693 rc = rswitch->get_entry(mport, rswitch->destid,
487 rswitch->hopcount, table, 694 rswitch->hopcount, table,
488 route_destid, route_port); 695 route_destid, route_port);
696 if (lock)
697 rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
698
699 return rc;
489} 700}
490 701
491/** 702/**
@@ -625,14 +836,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
625 sw_inport = rio_get_swpinfo_inport(port, 836 sw_inport = rio_get_swpinfo_inport(port,
626 RIO_ANY_DESTID(port->sys_size), hopcount); 837 RIO_ANY_DESTID(port->sys_size), hopcount);
627 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 838 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
628 port->host_deviceid, sw_inport); 839 port->host_deviceid, sw_inport, 0);
629 rdev->rswitch->route_table[port->host_deviceid] = sw_inport; 840 rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
630 841
631 for (destid = 0; destid < next_destid; destid++) { 842 for (destid = 0; destid < next_destid; destid++) {
632 if (destid == port->host_deviceid) 843 if (destid == port->host_deviceid)
633 continue; 844 continue;
634 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 845 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
635 destid, sw_inport); 846 destid, sw_inport, 0);
636 rdev->rswitch->route_table[destid] = sw_inport; 847 rdev->rswitch->route_table[destid] = sw_inport;
637 } 848 }
638 849
@@ -644,8 +855,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
644 rio_name(rdev), rdev->vid, rdev->did, num_ports); 855 rio_name(rdev), rdev->vid, rdev->did, num_ports);
645 sw_destid = next_destid; 856 sw_destid = next_destid;
646 for (port_num = 0; port_num < num_ports; port_num++) { 857 for (port_num = 0; port_num < num_ports; port_num++) {
647 if (sw_inport == port_num) 858			/* Enable Input Output Port (transmitter/receiver) */
859 rio_enable_rx_tx_port(port, 0,
860 RIO_ANY_DESTID(port->sys_size),
861 hopcount, port_num);
862
863 if (sw_inport == port_num) {
864 rdev->rswitch->port_ok |= (1 << port_num);
648 continue; 865 continue;
866 }
649 867
650 cur_destid = next_destid; 868 cur_destid = next_destid;
651 869
@@ -655,10 +873,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
655 pr_debug( 873 pr_debug(
656 "RIO: scanning device on port %d\n", 874 "RIO: scanning device on port %d\n",
657 port_num); 875 port_num);
876 rdev->rswitch->port_ok |= (1 << port_num);
658 rio_route_add_entry(port, rdev->rswitch, 877 rio_route_add_entry(port, rdev->rswitch,
659 RIO_GLOBAL_TABLE, 878 RIO_GLOBAL_TABLE,
660 RIO_ANY_DESTID(port->sys_size), 879 RIO_ANY_DESTID(port->sys_size),
661 port_num); 880 port_num, 0);
662 881
663 if (rio_enum_peer(net, port, hopcount + 1) < 0) 882 if (rio_enum_peer(net, port, hopcount + 1) < 0)
664 return -1; 883 return -1;
@@ -672,15 +891,35 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
672 rio_route_add_entry(port, rdev->rswitch, 891 rio_route_add_entry(port, rdev->rswitch,
673 RIO_GLOBAL_TABLE, 892 RIO_GLOBAL_TABLE,
674 destid, 893 destid,
675 port_num); 894 port_num,
895 0);
676 rdev->rswitch-> 896 rdev->rswitch->
677 route_table[destid] = 897 route_table[destid] =
678 port_num; 898 port_num;
679 } 899 }
680 } 900 }
901 } else {
902 /* If switch supports Error Management,
903 * set PORT_LOCKOUT bit for unused port
904 */
905 if (rdev->em_efptr)
906 rio_set_port_lockout(rdev, port_num, 1);
907
908 rdev->rswitch->port_ok &= ~(1 << port_num);
681 } 909 }
682 } 910 }
683 911
912	/* Direct Port-write messages to the enumerating host */
913 if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) &&
914 (rdev->em_efptr)) {
915 rio_write_config_32(rdev,
916 rdev->em_efptr + RIO_EM_PW_TGT_DEVID,
917 (port->host_deviceid << 16) |
918 (port->sys_size << 15));
919 }
920
921 rio_init_em(rdev);
922
684 /* Check for empty switch */ 923 /* Check for empty switch */
685 if (next_destid == sw_destid) { 924 if (next_destid == sw_destid) {
686 next_destid++; 925 next_destid++;
@@ -700,21 +939,16 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
700 * rio_enum_complete- Tests if enumeration of a network is complete 939 * rio_enum_complete- Tests if enumeration of a network is complete
701 * @port: Master port to send transaction 940 * @port: Master port to send transaction
702 * 941 *
703 * Tests the Component Tag CSR for presence of the magic enumeration 942 * Tests the Component Tag CSR for non-zero value (enumeration
704 * complete flag. Return %1 if enumeration is complete or %0 if 943 * complete flag). Return %1 if enumeration is complete or %0 if
705 * enumeration is incomplete. 944 * enumeration is incomplete.
706 */ 945 */
707static int rio_enum_complete(struct rio_mport *port) 946static int rio_enum_complete(struct rio_mport *port)
708{ 947{
709 u32 tag_csr; 948 u32 tag_csr;
710 int ret = 0;
711 949
712 rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr); 950 rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
713 951 return (tag_csr & 0xffff) ? 1 : 0;
714 if (tag_csr == RIO_ENUM_CMPL_MAGIC)
715 ret = 1;
716
717 return ret;
718} 952}
719 953
720/** 954/**
@@ -763,17 +997,21 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
763 pr_debug( 997 pr_debug(
764 "RIO: scanning device on port %d\n", 998 "RIO: scanning device on port %d\n",
765 port_num); 999 port_num);
1000
1001 rio_lock_device(port, destid, hopcount, 1000);
1002
766 for (ndestid = 0; 1003 for (ndestid = 0;
767 ndestid < RIO_ANY_DESTID(port->sys_size); 1004 ndestid < RIO_ANY_DESTID(port->sys_size);
768 ndestid++) { 1005 ndestid++) {
769 rio_route_get_entry(port, rdev->rswitch, 1006 rio_route_get_entry(port, rdev->rswitch,
770 RIO_GLOBAL_TABLE, 1007 RIO_GLOBAL_TABLE,
771 ndestid, 1008 ndestid,
772 &route_port); 1009 &route_port, 0);
773 if (route_port == port_num) 1010 if (route_port == port_num)
774 break; 1011 break;
775 } 1012 }
776 1013
1014 rio_unlock_device(port, destid, hopcount);
777 if (rio_disc_peer 1015 if (rio_disc_peer
778 (net, port, ndestid, hopcount + 1) < 0) 1016 (net, port, ndestid, hopcount + 1) < 0)
779 return -1; 1017 return -1;
@@ -792,7 +1030,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
792 * 1030 *
793 * Reads the port error status CSR for the master port to 1031 * Reads the port error status CSR for the master port to
794 * determine if the port has an active link. Returns 1032 * determine if the port has an active link. Returns
795 * %PORT_N_ERR_STS_PORT_OK if the master port is active 1033 * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active
796 * or %0 if it is inactive. 1034 * or %0 if it is inactive.
797 */ 1035 */
798static int rio_mport_is_active(struct rio_mport *port) 1036static int rio_mport_is_active(struct rio_mport *port)
@@ -813,7 +1051,7 @@ static int rio_mport_is_active(struct rio_mport *port)
813 RIO_PORT_N_ERR_STS_CSR(port->index), 1051 RIO_PORT_N_ERR_STS_CSR(port->index),
814 &result); 1052 &result);
815 1053
816 return (result & PORT_N_ERR_STS_PORT_OK); 1054 return result & RIO_PORT_N_ERR_STS_PORT_OK;
817} 1055}
818 1056
819/** 1057/**
@@ -866,12 +1104,17 @@ static void rio_update_route_tables(struct rio_mport *port)
866 continue; 1104 continue;
867 1105
868 if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { 1106 if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
1107				/* Skip if destid ends in an empty switch */
1108 if (rswitch->destid == destid)
1109 continue;
869 1110
870 sport = rio_get_swpinfo_inport(port, 1111 sport = rio_get_swpinfo_inport(port,
871 rswitch->destid, rswitch->hopcount); 1112 rswitch->destid, rswitch->hopcount);
872 1113
873 if (rswitch->add_entry) { 1114 if (rswitch->add_entry) {
874 rio_route_add_entry(port, rswitch, RIO_GLOBAL_TABLE, destid, sport); 1115 rio_route_add_entry(port, rswitch,
1116 RIO_GLOBAL_TABLE, destid,
1117 sport, 0);
875 rswitch->route_table[destid] = sport; 1118 rswitch->route_table[destid] = sport;
876 } 1119 }
877 } 1120 }
@@ -880,6 +1123,32 @@ static void rio_update_route_tables(struct rio_mport *port)
880} 1123}
881 1124
882/** 1125/**
1126 * rio_init_em - Initializes RIO Error Management (for switches)
1127 * @rdev: RIO device (switch) to initialize
1128 *
1129 * For the given enumerated switch, calls the device-specific error management
1130 * initialization routine (if supplied by the switch driver).
1131 */
1132static void rio_init_em(struct rio_dev *rdev)
1133{
1134 if (rio_is_switch(rdev) && (rdev->em_efptr) &&
1135 (rdev->rswitch->em_init)) {
1136 rdev->rswitch->em_init(rdev);
1137 }
1138}
1139
1140/**
1141 * rio_pw_enable - Enables/disables port-write handling by a master port
1142 * @port: Master port associated with port-write handling
1143 * @enable: 1=enable, 0=disable
1144 */
1145static void rio_pw_enable(struct rio_mport *port, int enable)
1146{
1147 if (port->ops->pwenable)
1148 port->ops->pwenable(port, enable);
1149}
1150
1151/**
883 * rio_enum_mport- Start enumeration through a master port 1152 * rio_enum_mport- Start enumeration through a master port
884 * @mport: Master port to send transactions 1153 * @mport: Master port to send transactions
885 * 1154 *
@@ -911,6 +1180,10 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
911 rc = -ENOMEM; 1180 rc = -ENOMEM;
912 goto out; 1181 goto out;
913 } 1182 }
1183
1184	/* Enable Input Output Port (transmitter/receiver) */
1185 rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
1186
914 if (rio_enum_peer(net, mport, 0) < 0) { 1187 if (rio_enum_peer(net, mport, 0) < 0) {
915 /* A higher priority host won enumeration, bail. */ 1188 /* A higher priority host won enumeration, bail. */
916 printk(KERN_INFO 1189 printk(KERN_INFO
@@ -922,6 +1195,7 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
922 } 1195 }
923 rio_update_route_tables(mport); 1196 rio_update_route_tables(mport);
924 rio_clear_locks(mport); 1197 rio_clear_locks(mport);
1198 rio_pw_enable(mport, 1);
925 } else { 1199 } else {
926 printk(KERN_INFO "RIO: master port %d link inactive\n", 1200 printk(KERN_INFO "RIO: master port %d link inactive\n",
927 mport->id); 1201 mport->id);
@@ -945,15 +1219,22 @@ static void rio_build_route_tables(void)
945 u8 sport; 1219 u8 sport;
946 1220
947 list_for_each_entry(rdev, &rio_devices, global_list) 1221 list_for_each_entry(rdev, &rio_devices, global_list)
948 if (rio_is_switch(rdev)) 1222 if (rio_is_switch(rdev)) {
949 for (i = 0; 1223 rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
950 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); 1224 rdev->rswitch->hopcount, 1000);
951 i++) { 1225 for (i = 0;
952 if (rio_route_get_entry 1226 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
953 (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE, 1227 i++) {
954 i, &sport) < 0) 1228 if (rio_route_get_entry
955 continue; 1229 (rdev->net->hport, rdev->rswitch,
956 rdev->rswitch->route_table[i] = sport; 1230 RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
1231 continue;
1232 rdev->rswitch->route_table[i] = sport;
1233 }
1234
1235 rio_unlock_device(rdev->net->hport,
1236 rdev->rswitch->destid,
1237 rdev->rswitch->hopcount);
957 } 1238 }
958} 1239}
959 1240
@@ -1012,6 +1293,13 @@ int __devinit rio_disc_mport(struct rio_mport *mport)
1012 del_timer_sync(&rio_enum_timer); 1293 del_timer_sync(&rio_enum_timer);
1013 1294
1014 pr_debug("done\n"); 1295 pr_debug("done\n");
1296
1297 /* Read DestID assigned by enumerator */
1298 rio_local_read_config_32(mport, RIO_DID_CSR,
1299 &mport->host_deviceid);
1300 mport->host_deviceid = RIO_GET_DID(mport->sys_size,
1301 mport->host_deviceid);
1302
1015 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), 1303 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
1016 0) < 0) { 1304 0) < 0) {
1017 printk(KERN_INFO 1305 printk(KERN_INFO
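
The new rio_lock_device()/rio_unlock_device() helpers serialize maintenance accesses to a device against other hosts via the Host Device ID Lock CSR. Within rio-scan.c the usage pattern is simply lock, access, unlock, as in this sketch (equivalent to what rio_route_get_entry() does when its lock flag is set; mport, rswitch and the route variables are assumed to be in scope):

    	if (rio_lock_device(mport, rswitch->destid,
    			    rswitch->hopcount, 1000) == 0) {
    		/* exclusive maintenance access to the switch */
    		rc = rswitch->get_entry(mport, rswitch->destid,
    					rswitch->hopcount, RIO_GLOBAL_TABLE,
    					route_destid, &route_port);
    		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
    	}
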
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 6395c780008b..777e099a3d8f 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -5,6 +5,10 @@
5 * Copyright 2005 MontaVista Software, Inc. 5 * Copyright 2005 MontaVista Software, Inc.
6 * Matt Porter <mporter@kernel.crashing.org> 6 * Matt Porter <mporter@kernel.crashing.org>
7 * 7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write/Error Management initialization and handling
11 *
8 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -333,6 +337,328 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
333} 337}
334 338
335/** 339/**
340 * rio_request_inb_pwrite - request inbound port-write message service
341 * @rdev: RIO device to register the inbound port-write callback with
342 * @pwcback: Callback routine to execute when port-write is received
343 *
344 * Binds a port-write callback function to the RapidIO device.
345 * Returns 0 if the request has been satisfied.
346 */
347int rio_request_inb_pwrite(struct rio_dev *rdev,
348 int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
349{
350 int rc = 0;
351
352 spin_lock(&rio_global_list_lock);
353 if (rdev->pwcback != NULL)
354 rc = -ENOMEM;
355 else
356 rdev->pwcback = pwcback;
357
358 spin_unlock(&rio_global_list_lock);
359 return rc;
360}
361EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
362
363/**
364 * rio_release_inb_pwrite - release inbound port-write message service
365 * @rdev: RIO device which registered for inbound port-write callback
366 *
367 * Removes callback from the rio_dev structure. Returns 0 if the request
368 * has been satisfied.
369 */
370int rio_release_inb_pwrite(struct rio_dev *rdev)
371{
372 int rc = -ENOMEM;
373
374 spin_lock(&rio_global_list_lock);
375 if (rdev->pwcback) {
376 rdev->pwcback = NULL;
377 rc = 0;
378 }
379
380 spin_unlock(&rio_global_list_lock);
381 return rc;
382}
383EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
384
385/**
386 * rio_mport_get_physefb - Helper function that returns register offset
387 * for Physical Layer Extended Features Block.
388 * @port: Master port to issue the transaction; @local, @destid and @hopcount address the device
389 */
390u32
391rio_mport_get_physefb(struct rio_mport *port, int local,
392 u16 destid, u8 hopcount)
393{
394 u32 ext_ftr_ptr;
395 u32 ftr_header;
396
397 ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);
398
399 while (ext_ftr_ptr) {
400 if (local)
401 rio_local_read_config_32(port, ext_ftr_ptr,
402 &ftr_header);
403 else
404 rio_mport_read_config_32(port, destid, hopcount,
405 ext_ftr_ptr, &ftr_header);
406
407 ftr_header = RIO_GET_BLOCK_ID(ftr_header);
408 switch (ftr_header) {
409
410 case RIO_EFB_SER_EP_ID_V13P:
411 case RIO_EFB_SER_EP_REC_ID_V13P:
412 case RIO_EFB_SER_EP_FREE_ID_V13P:
413 case RIO_EFB_SER_EP_ID:
414 case RIO_EFB_SER_EP_REC_ID:
415 case RIO_EFB_SER_EP_FREE_ID:
416 case RIO_EFB_SER_EP_FREC_ID:
417
418 return ext_ftr_ptr;
419
420 default:
421 break;
422 }
423
424 ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
425 hopcount, ext_ftr_ptr);
426 }
427
428 return ext_ftr_ptr;
429}
430
431/**
432 * rio_get_comptag - Begin or continue searching for a RIO device by component tag
433 * @comp_tag: RIO component tag to match
434 * @from: Previous RIO device found in search, or %NULL for new search
435 *
436 * Iterates through the list of known RIO devices. If a RIO device is
437 * found with a matching @comp_tag, a pointer to its device
438 * structure is returned. Otherwise, %NULL is returned. A new search
439 * is initiated by passing %NULL to the @from argument. Otherwise, if
440 * @from is not %NULL, searches continue from next device on the global
441 * list.
442 */
443static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
444{
445 struct list_head *n;
446 struct rio_dev *rdev;
447
448 spin_lock(&rio_global_list_lock);
449 n = from ? from->global_list.next : rio_devices.next;
450
451 while (n && (n != &rio_devices)) {
452 rdev = rio_dev_g(n);
453 if (rdev->comp_tag == comp_tag)
454 goto exit;
455 n = n->next;
456 }
457 rdev = NULL;
458exit:
459 spin_unlock(&rio_global_list_lock);
460 return rdev;
461}
462
463/**
464 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
465 * @rdev: Pointer to RIO device control structure
466 * @pnum: Switch port number to set LOCKOUT bit
467 * @lock: Operation : set (=1) or clear (=0)
468 */
469int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
470{
471 u8 hopcount = 0xff;
472 u16 destid = rdev->destid;
473 u32 regval;
474
475 if (rdev->rswitch) {
476 destid = rdev->rswitch->destid;
477 hopcount = rdev->rswitch->hopcount;
478 }
479
480 rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
481 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
482 &regval);
483 if (lock)
484 regval |= RIO_PORT_N_CTL_LOCKOUT;
485 else
486 regval &= ~RIO_PORT_N_CTL_LOCKOUT;
487
488 rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
489 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
490 regval);
491 return 0;
492}
493
494/**
495 * rio_inb_pwrite_handler - process inbound port-write message
496 * @pw_msg: pointer to inbound port-write message
497 *
498 * Processes an inbound port-write message. Returns 0 if the request
499 * has been satisfied.
500 */
501int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
502{
503 struct rio_dev *rdev;
504 struct rio_mport *mport;
505 u8 hopcount;
506 u16 destid;
507 u32 err_status;
508 int rc, portnum;
509
510 rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
511 if (rdev == NULL) {
512		/* Something bad here (probably enumeration error) */
513 pr_err("RIO: %s No matching device for CTag 0x%08x\n",
514 __func__, pw_msg->em.comptag);
515 return -EIO;
516 }
517
518 pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
519
520#ifdef DEBUG_PW
521 {
522 u32 i;
523 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
524 pr_debug("0x%02x: %08x %08x %08x %08x",
525 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
526 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
527 i += 4;
528 }
529 pr_debug("\n");
530 }
531#endif
532
533 /* Call an external service function (if such is registered
534 * for this device). This may be the service for endpoints that send
535 * device-specific port-write messages. End-point messages expected
536 * to be handled completely by EP specific device driver.
537 * For switches rc==0 signals that no standard processing required.
538 */
539 if (rdev->pwcback != NULL) {
540 rc = rdev->pwcback(rdev, pw_msg, 0);
541 if (rc == 0)
542 return 0;
543 }
544
545 /* For End-point devices processing stops here */
546 if (!(rdev->pef & RIO_PEF_SWITCH))
547 return 0;
548
549 if (rdev->phys_efptr == 0) {
550 pr_err("RIO_PW: Bad switch initialization for %s\n",
551 rio_name(rdev));
552 return 0;
553 }
554
555 mport = rdev->net->hport;
556 destid = rdev->rswitch->destid;
557 hopcount = rdev->rswitch->hopcount;
558
559 /*
560 * Process the port-write notification from switch
561 */
562
563 portnum = pw_msg->em.is_port & 0xFF;
564
565 if (rdev->rswitch->em_handle)
566 rdev->rswitch->em_handle(rdev, portnum);
567
568 rio_mport_read_config_32(mport, destid, hopcount,
569 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
570 &err_status);
571 pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
572
573 if (pw_msg->em.errdetect) {
574 pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
575 portnum, pw_msg->em.errdetect);
576 /* Clear EM Port N Error Detect CSR */
577 rio_mport_write_config_32(mport, destid, hopcount,
578 rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
579 }
580
581 if (pw_msg->em.ltlerrdet) {
582 pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
583 pw_msg->em.ltlerrdet);
584 /* Clear EM L/T Layer Error Detect CSR */
585 rio_mport_write_config_32(mport, destid, hopcount,
586 rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
587 }
588
589 /* Clear Port Errors */
590 rio_mport_write_config_32(mport, destid, hopcount,
591 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
592 err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
593
594 if (rdev->rswitch->port_ok & (1 << portnum)) {
595 if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
596 rdev->rswitch->port_ok &= ~(1 << portnum);
597 rio_set_port_lockout(rdev, portnum, 1);
598
599 rio_mport_write_config_32(mport, destid, hopcount,
600 rdev->phys_efptr +
601 RIO_PORT_N_ACK_STS_CSR(portnum),
602 RIO_PORT_N_ACK_CLEAR);
603
604 /* Schedule Extraction Service */
605 pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
606 rio_name(rdev), portnum);
607 }
608 } else {
609 if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
610 rdev->rswitch->port_ok |= (1 << portnum);
611 rio_set_port_lockout(rdev, portnum, 0);
612
613 /* Schedule Insertion Service */
614 pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
615 rio_name(rdev), portnum);
616 }
617 }
618
619 /* Clear Port-Write Pending bit */
620 rio_mport_write_config_32(mport, destid, hopcount,
621 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
622 RIO_PORT_N_ERR_STS_PW_PEND);
623
624 return 0;
625}
626EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
627
628/**
629 * rio_mport_get_efb - get pointer to next extended features block
630 * @port: Master port to issue transaction
631 * @local: Indicate a local master port or remote device access
632 * @destid: Destination ID of the device
633 * @hopcount: Number of switch hops to the device
634 * @from: Offset of current Extended Feature block header (if 0 starts
635 * from ExtFeaturePtr)
636 */
637u32
638rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
639 u8 hopcount, u32 from)
640{
641 u32 reg_val;
642
643 if (from == 0) {
644 if (local)
645 rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
646 &reg_val);
647 else
648 rio_mport_read_config_32(port, destid, hopcount,
649 RIO_ASM_INFO_CAR, &reg_val);
650 return reg_val & RIO_EXT_FTR_PTR_MASK;
651 } else {
652 if (local)
653 rio_local_read_config_32(port, from, &reg_val);
654 else
655 rio_mport_read_config_32(port, destid, hopcount,
656 from, &reg_val);
657 return RIO_GET_BLOCK_ID(reg_val);
658 }
659}
660
661/**
336 * rio_mport_get_feature - query for devices' extended features 662 * rio_mport_get_feature - query for devices' extended features
337 * @port: Master port to issue transaction 663 * @port: Master port to issue transaction
338 * @local: Indicate a local master port or remote device access 664 * @local: Indicate a local master port or remote device access
@@ -451,6 +777,111 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
451 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from); 777 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
452} 778}
453 779
780/**
781 * rio_std_route_add_entry - Add switch route table entry using standard
782 * registers defined in RIO specification rev.1.3
783 * @mport: Master port to issue transaction
784 * @destid: Destination ID of the device
785 * @hopcount: Number of switch hops to the device
786 * @table: routing table ID (global or port-specific)
787 * @route_destid: destID entry in the RT
788 * @route_port: destination port for specified destID
789 */
790int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
791 u16 table, u16 route_destid, u8 route_port)
792{
793 if (table == RIO_GLOBAL_TABLE) {
794 rio_mport_write_config_32(mport, destid, hopcount,
795 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
796 (u32)route_destid);
797 rio_mport_write_config_32(mport, destid, hopcount,
798 RIO_STD_RTE_CONF_PORT_SEL_CSR,
799 (u32)route_port);
800 }
801
802 udelay(10);
803 return 0;
804}
805
806/**
807 * rio_std_route_get_entry - Read switch route table entry (port number)
808 * associated with the specified destID using standard registers defined in RIO
809 * specification rev.1.3
810 * @mport: Master port to issue transaction
811 * @destid: Destination ID of the device
812 * @hopcount: Number of switch hops to the device
813 * @table: routing table ID (global or port-specific)
814 * @route_destid: destID entry in the RT
815 * @route_port: returned destination port for specified destID
816 */
817int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
818 u16 table, u16 route_destid, u8 *route_port)
819{
820 u32 result;
821
822 if (table == RIO_GLOBAL_TABLE) {
823 rio_mport_write_config_32(mport, destid, hopcount,
824 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
825 rio_mport_read_config_32(mport, destid, hopcount,
826 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
827
828 *route_port = (u8)result;
829 }
830
831 return 0;
832}
833
834/**
835 * rio_std_route_clr_table - Clear switch route table using standard registers
836 * defined in RIO specification rev.1.3.
837 * @mport: Master port to issue transaction
839 * @destid: Destination ID of the device
840 * @hopcount: Number of switch hops to the device
841 * @table: routing table ID (global or port-specific)
842 */
843int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
844 u16 table)
845{
846 u32 max_destid = 0xff;
847 u32 i, pef, id_inc = 1, ext_cfg = 0;
848 u32 port_sel = RIO_INVALID_ROUTE;
849
850 if (table == RIO_GLOBAL_TABLE) {
851 rio_mport_read_config_32(mport, destid, hopcount,
852 RIO_PEF_CAR, &pef);
853
854 if (mport->sys_size) {
855 rio_mport_read_config_32(mport, destid, hopcount,
856 RIO_SWITCH_RT_LIMIT,
857 &max_destid);
858 max_destid &= RIO_RT_MAX_DESTID;
859 }
860
861 if (pef & RIO_PEF_EXT_RT) {
862 ext_cfg = 0x80000000;
863 id_inc = 4;
864 port_sel = (RIO_INVALID_ROUTE << 24) |
865 (RIO_INVALID_ROUTE << 16) |
866 (RIO_INVALID_ROUTE << 8) |
867 RIO_INVALID_ROUTE;
868 }
869
870 for (i = 0; i <= max_destid;) {
871 rio_mport_write_config_32(mport, destid, hopcount,
872 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
873 ext_cfg | i);
874 rio_mport_write_config_32(mport, destid, hopcount,
875 RIO_STD_RTE_CONF_PORT_SEL_CSR,
876 port_sel);
877 i += id_inc;
878 }
879 }
880
881 udelay(10);
882 return 0;
883}
884
454static void rio_fixup_device(struct rio_dev *dev) 885static void rio_fixup_device(struct rio_dev *dev)
455{ 886{
456} 887}
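
A sketch of how a driver might consume the newly exported inbound port-write service; the driver and callback names are illustrative, and returning 0 tells rio_inb_pwrite_handler() that no further standard processing is required:

    static int foo_pw_handler(struct rio_dev *rdev, union rio_pw_msg *msg,
    			  int step)
    {
    	pr_debug("PW from %s, port %d\n", rio_name(rdev),
    		 msg->em.is_port & 0xff);
    	return 0;	/* 0: no further standard processing wanted */
    }

    static int foo_attach(struct rio_dev *rdev)
    {
    	return rio_request_inb_pwrite(rdev, foo_pw_handler);
    }
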
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 7786d02581f2..f27b7a9c47d2 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -18,38 +18,50 @@
18 18
19extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, 19extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
20 u8 hopcount, int ftr); 20 u8 hopcount, int ftr);
21extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
22 u16 destid, u8 hopcount);
23extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
24 u8 hopcount, u32 from);
21extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); 25extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
22extern int rio_enum_mport(struct rio_mport *mport); 26extern int rio_enum_mport(struct rio_mport *mport);
23extern int rio_disc_mport(struct rio_mport *mport); 27extern int rio_disc_mport(struct rio_mport *mport);
28extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
29 u8 hopcount, u16 table, u16 route_destid,
30 u8 route_port);
31extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
32 u8 hopcount, u16 table, u16 route_destid,
33 u8 *route_port);
34extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
35 u8 hopcount, u16 table);
36extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
24 37
25/* Structures internal to the RIO core code */ 38/* Structures internal to the RIO core code */
26extern struct device_attribute rio_dev_attrs[]; 39extern struct device_attribute rio_dev_attrs[];
27extern spinlock_t rio_global_list_lock; 40extern spinlock_t rio_global_list_lock;
28 41
29extern struct rio_route_ops __start_rio_route_ops[]; 42extern struct rio_switch_ops __start_rio_switch_ops[];
30extern struct rio_route_ops __end_rio_route_ops[]; 43extern struct rio_switch_ops __end_rio_switch_ops[];
31 44
32/* Helpers internal to the RIO core code */ 45/* Helpers internal to the RIO core code */
33#define DECLARE_RIO_ROUTE_SECTION(section, vid, did, add_hook, get_hook) \ 46#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \
34 static struct rio_route_ops __rio_route_ops __used \ 47 static const struct rio_switch_ops __rio_switch_##name __used \
35 __section(section)= { vid, did, add_hook, get_hook }; 48 __section(section) = { vid, did, init_hook };
36 49
37/** 50/**
38 * DECLARE_RIO_ROUTE_OPS - Registers switch routing operations 51 * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine
39 * @vid: RIO vendor ID 52 * @vid: RIO vendor ID
40 * @did: RIO device ID 53 * @did: RIO device ID
41 * @add_hook: Callback that adds a route entry 54 * @init_hook: Callback that performs switch-specific initialization
42 * @get_hook: Callback that gets a route entry
43 * 55 *
44 * Manipulating switch route tables in RIO is switch specific. This 56 * Manipulating switch route tables and error management in RIO
 45 * registers a switch by vendor and device ID with two callbacks for 57 * is switch specific. This registers a switch by vendor and device ID with an
46 * modifying and retrieving route entries in a switch. A &struct 58 * initialization callback for setting up switch operations and (if required)
 47 * rio_route_ops is initialized with the ops and placed into a 59 * hardware initialization. A &struct rio_switch_ops is initialized with a
48 * RIO-specific kernel section. 60 * pointer to the init routine and placed into a RIO-specific kernel section.
49 */ 61 */
50#define DECLARE_RIO_ROUTE_OPS(vid, did, add_hook, get_hook) \ 62#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook) \
51 DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \ 63 DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \
52 vid, did, add_hook, get_hook) 64 vid, did, init_hook)
53 65
54#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) 66#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
55#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16)) 67#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
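Every switch driver touched by this patch follows the same registration model: a single init hook fills in the per-switch callbacks on rdev->rswitch, and DECLARE_RIO_SWITCH_INIT places the vendor/device/init tuple into the .rio_switch_ops section. A minimal sketch for a hypothetical device (MY_VID, MY_DID and all "my_" names are invented for illustration; the usual <linux/rio.h>, <linux/rio_drv.h>, <linux/rio_ids.h> and "../rio.h" includes are assumed):

/* Placeholder routing callbacks; a real driver would program the device's
 * routing CSRs here (compare idtcps_route_add_entry() below).
 */
static int my_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
			      u16 table, u16 route_destid, u8 route_port)
{
	return 0;
}

static int my_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
			      u16 table, u16 route_destid, u8 *route_port)
{
	*route_port = RIO_INVALID_ROUTE;
	return 0;
}

static int my_switch_init(struct rio_dev *rdev, int do_enum)
{
	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
	rdev->rswitch->add_entry  = my_route_add_entry;
	rdev->rswitch->get_entry  = my_route_get_entry;
	rdev->rswitch->clr_table  = NULL;	/* optional */
	rdev->rswitch->set_domain = NULL;	/* optional */
	rdev->rswitch->get_domain = NULL;	/* optional */
	rdev->rswitch->em_init    = NULL;	/* error management, optional */
	rdev->rswitch->em_handle  = NULL;
	return 0;
}

/* MY_VID and MY_DID stand in for real RIO_VID_xxx / RIO_DID_xxx constants. */
DECLARE_RIO_SWITCH_INIT(MY_VID, MY_DID, my_switch_init);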
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
new file mode 100644
index 000000000000..2b4e9b2b6631
--- /dev/null
+++ b/drivers/rapidio/switches/Kconfig
@@ -0,0 +1,28 @@
1#
2# RapidIO switches configuration
3#
4config RAPIDIO_TSI57X
5 bool "IDT Tsi57x SRIO switches support"
6 depends on RAPIDIO
7 ---help---
8 Includes support for the IDT Tsi57x family of serial RapidIO switches.
9
10config RAPIDIO_CPS_XX
11 bool "IDT CPS-xx SRIO switches support"
12 depends on RAPIDIO
13 ---help---
14 Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
15
16config RAPIDIO_TSI568
17 bool "Tsi568 SRIO switch support"
18 depends on RAPIDIO
19 default n
20 ---help---
21 Includes support for the IDT Tsi568 serial RapidIO switch.
22
23config RAPIDIO_TSI500
24 bool "Tsi500 Parallel RapidIO switch support"
25 depends on RAPIDIO
26 default n
27 ---help---
28 Includes support for the IDT Tsi500 parallel RapidIO switch.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index b924f8301761..fe4adc3e8d5f 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -2,4 +2,11 @@
2# Makefile for RIO switches 2# Makefile for RIO switches
3# 3#
4 4
5obj-$(CONFIG_RAPIDIO) += tsi500.o 5obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
6obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
7obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
8obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o
9
10ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
11EXTRA_CFLAGS += -DDEBUG
12endif
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
new file mode 100644
index 000000000000..2c790c144f89
--- /dev/null
+++ b/drivers/rapidio/switches/idtcps.c
@@ -0,0 +1,137 @@
1/*
2 * IDT CPS RapidIO switches support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/rio.h>
14#include <linux/rio_drv.h>
15#include <linux/rio_ids.h>
16#include "../rio.h"
17
18#define CPS_DEFAULT_ROUTE 0xde
19#define CPS_NO_ROUTE 0xdf
20
21#define IDTCPS_RIO_DOMAIN 0xf20020
22
23static int
24idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
25 u16 table, u16 route_destid, u8 route_port)
26{
27 u32 result;
28
29 if (table == RIO_GLOBAL_TABLE) {
30 rio_mport_write_config_32(mport, destid, hopcount,
31 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
32
33 rio_mport_read_config_32(mport, destid, hopcount,
34 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
35
36 result = (0xffffff00 & result) | (u32)route_port;
37 rio_mport_write_config_32(mport, destid, hopcount,
38 RIO_STD_RTE_CONF_PORT_SEL_CSR, result);
39 }
40
41 return 0;
42}
43
44static int
45idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
46 u16 table, u16 route_destid, u8 *route_port)
47{
48 u32 result;
49
50 if (table == RIO_GLOBAL_TABLE) {
51 rio_mport_write_config_32(mport, destid, hopcount,
52 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
53
54 rio_mport_read_config_32(mport, destid, hopcount,
55 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
56
57 if (CPS_DEFAULT_ROUTE == (u8)result ||
58 CPS_NO_ROUTE == (u8)result)
59 *route_port = RIO_INVALID_ROUTE;
60 else
61 *route_port = (u8)result;
62 }
63
64 return 0;
65}
66
67static int
68idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
69 u16 table)
70{
71 u32 i;
72
73 if (table == RIO_GLOBAL_TABLE) {
74 for (i = 0x80000000; i <= 0x800000ff;) {
75 rio_mport_write_config_32(mport, destid, hopcount,
76 RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
77 rio_mport_write_config_32(mport, destid, hopcount,
78 RIO_STD_RTE_CONF_PORT_SEL_CSR,
79 (CPS_DEFAULT_ROUTE << 24) |
80 (CPS_DEFAULT_ROUTE << 16) |
81 (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE);
82 i += 4;
83 }
84 }
85
86 return 0;
87}
88
89static int
90idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
91 u8 sw_domain)
92{
93 /*
94 * Switch domain configuration operates only at global level
95 */
96 rio_mport_write_config_32(mport, destid, hopcount,
97 IDTCPS_RIO_DOMAIN, (u32)sw_domain);
98 return 0;
99}
100
101static int
102idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
103 u8 *sw_domain)
104{
105 u32 regval;
106
107 /*
108 * Switch domain configuration operates only at global level
109 */
110 rio_mport_read_config_32(mport, destid, hopcount,
111 IDTCPS_RIO_DOMAIN, &regval);
112
113 *sw_domain = (u8)(regval & 0xff);
114
115 return 0;
116}
117
118static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
119{
120 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
121 rdev->rswitch->add_entry = idtcps_route_add_entry;
122 rdev->rswitch->get_entry = idtcps_route_get_entry;
123 rdev->rswitch->clr_table = idtcps_route_clr_table;
124 rdev->rswitch->set_domain = idtcps_set_domain;
125 rdev->rswitch->get_domain = idtcps_get_domain;
126 rdev->rswitch->em_init = NULL;
127 rdev->rswitch->em_handle = NULL;
128
129 return 0;
130}
131
132DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init);
133DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init);
134DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init);
135DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init);
136DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init);
137DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init);
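The set_domain/get_domain hooks installed by idtcps_switch_init() let the enumeration code assign a routing domain to the switch and read it back. A hedged sketch of how a caller in the core might drive these hooks (the call site and helper name are hypothetical; the rdev fields used are the same ones the em_init routines below rely on):

/* Hypothetical caller: program and verify the domain of one switch. */
static int example_set_switch_domain(struct rio_dev *rdev, u8 domain)
{
	struct rio_mport *mport = rdev->net->hport;
	u16 destid = rdev->rswitch->destid;
	u8 hopcount = rdev->rswitch->hopcount;
	u8 readback = 0;
	int rc;

	if (!rdev->rswitch->set_domain || !rdev->rswitch->get_domain)
		return -EOPNOTSUPP;	/* switch has no domain support */

	rc = rdev->rswitch->set_domain(mport, destid, hopcount, domain);
	if (rc)
		return rc;

	rc = rdev->rswitch->get_domain(mport, destid, hopcount, &readback);
	if (rc)
		return rc;

	return (readback == domain) ? 0 : -EIO;
}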
diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c
index c77c23bd9840..914eddd5aa42 100644
--- a/drivers/rapidio/switches/tsi500.c
+++ b/drivers/rapidio/switches/tsi500.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * RapidIO Tsi500 switch support 2 * RapidIO Tsi500 switch support
3 * 3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Modified switch operations initialization.
7 *
4 * Copyright 2005 MontaVista Software, Inc. 8 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 9 * Matt Porter <mporter@kernel.crashing.org>
6 * 10 *
@@ -57,4 +61,18 @@ tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 tab
57 return ret; 61 return ret;
58} 62}
59 63
60DECLARE_RIO_ROUTE_OPS(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_route_add_entry, tsi500_route_get_entry); 64static int tsi500_switch_init(struct rio_dev *rdev, int do_enum)
65{
66 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
67 rdev->rswitch->add_entry = tsi500_route_add_entry;
68 rdev->rswitch->get_entry = tsi500_route_get_entry;
69 rdev->rswitch->clr_table = NULL;
70 rdev->rswitch->set_domain = NULL;
71 rdev->rswitch->get_domain = NULL;
72 rdev->rswitch->em_init = NULL;
73 rdev->rswitch->em_handle = NULL;
74
75 return 0;
76}
77
78DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init);
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
new file mode 100644
index 000000000000..f7fd7898606e
--- /dev/null
+++ b/drivers/rapidio/switches/tsi568.c
@@ -0,0 +1,146 @@
1/*
2 * RapidIO Tsi568 switch support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Added EM support
7 * - Modified switch operations initialization.
8 *
9 * Copyright 2005 MontaVista Software, Inc.
10 * Matt Porter <mporter@kernel.crashing.org>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/rio.h>
19#include <linux/rio_drv.h>
20#include <linux/rio_ids.h>
21#include <linux/delay.h>
22#include "../rio.h"
23
24/* Global (broadcast) route registers */
25#define SPBC_ROUTE_CFG_DESTID 0x10070
26#define SPBC_ROUTE_CFG_PORT 0x10074
27
28/* Per port route registers */
29#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
30#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
31
32#define TSI568_SP_MODE_BC 0x10004
33#define TSI568_SP_MODE_PW_DIS 0x08000000
34
35static int
36tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
37 u16 table, u16 route_destid, u8 route_port)
38{
39 if (table == RIO_GLOBAL_TABLE) {
40 rio_mport_write_config_32(mport, destid, hopcount,
41 SPBC_ROUTE_CFG_DESTID, route_destid);
42 rio_mport_write_config_32(mport, destid, hopcount,
43 SPBC_ROUTE_CFG_PORT, route_port);
44 } else {
45 rio_mport_write_config_32(mport, destid, hopcount,
46 SPP_ROUTE_CFG_DESTID(table),
47 route_destid);
48 rio_mport_write_config_32(mport, destid, hopcount,
49 SPP_ROUTE_CFG_PORT(table), route_port);
50 }
51
52 udelay(10);
53
54 return 0;
55}
56
57static int
58tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
59 u16 table, u16 route_destid, u8 *route_port)
60{
61 int ret = 0;
62 u32 result;
63
64 if (table == RIO_GLOBAL_TABLE) {
65 rio_mport_write_config_32(mport, destid, hopcount,
66 SPBC_ROUTE_CFG_DESTID, route_destid);
67 rio_mport_read_config_32(mport, destid, hopcount,
68 SPBC_ROUTE_CFG_PORT, &result);
69 } else {
70 rio_mport_write_config_32(mport, destid, hopcount,
71 SPP_ROUTE_CFG_DESTID(table),
72 route_destid);
73 rio_mport_read_config_32(mport, destid, hopcount,
74 SPP_ROUTE_CFG_PORT(table), &result);
75 }
76
77 *route_port = result;
78 if (*route_port > 15)
79 ret = -1;
80
81 return ret;
82}
83
84static int
85tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
86 u16 table)
87{
88 u32 route_idx;
89 u32 lut_size;
90
91 lut_size = (mport->sys_size) ? 0x1ff : 0xff;
92
93 if (table == RIO_GLOBAL_TABLE) {
94 rio_mport_write_config_32(mport, destid, hopcount,
95 SPBC_ROUTE_CFG_DESTID, 0x80000000);
96 for (route_idx = 0; route_idx <= lut_size; route_idx++)
97 rio_mport_write_config_32(mport, destid, hopcount,
98 SPBC_ROUTE_CFG_PORT,
99 RIO_INVALID_ROUTE);
100 } else {
101 rio_mport_write_config_32(mport, destid, hopcount,
102 SPP_ROUTE_CFG_DESTID(table),
103 0x80000000);
104 for (route_idx = 0; route_idx <= lut_size; route_idx++)
105 rio_mport_write_config_32(mport, destid, hopcount,
106 SPP_ROUTE_CFG_PORT(table),
107 RIO_INVALID_ROUTE);
108 }
109
110 return 0;
111}
112
113static int
114tsi568_em_init(struct rio_dev *rdev)
115{
116 struct rio_mport *mport = rdev->net->hport;
117 u16 destid = rdev->rswitch->destid;
118 u8 hopcount = rdev->rswitch->hopcount;
119 u32 regval;
120
121 pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
122
123 /* Make sure that Port-Writes are disabled (for all ports) */
124 rio_mport_read_config_32(mport, destid, hopcount,
125 TSI568_SP_MODE_BC, &regval);
126 rio_mport_write_config_32(mport, destid, hopcount,
127 TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
128
129 return 0;
130}
131
132static int tsi568_switch_init(struct rio_dev *rdev, int do_enum)
133{
134 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
135 rdev->rswitch->add_entry = tsi568_route_add_entry;
136 rdev->rswitch->get_entry = tsi568_route_get_entry;
137 rdev->rswitch->clr_table = tsi568_route_clr_table;
138 rdev->rswitch->set_domain = NULL;
139 rdev->rswitch->get_domain = NULL;
140 rdev->rswitch->em_init = tsi568_em_init;
141 rdev->rswitch->em_handle = NULL;
142
143 return 0;
144}
145
146DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init);
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
new file mode 100644
index 000000000000..d34df722d95f
--- /dev/null
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -0,0 +1,315 @@
1/*
2 * RapidIO Tsi57x switch family support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Added EM support
7 * - Modified switch operations initialization.
8 *
9 * Copyright 2005 MontaVista Software, Inc.
10 * Matt Porter <mporter@kernel.crashing.org>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/rio.h>
19#include <linux/rio_drv.h>
20#include <linux/rio_ids.h>
21#include <linux/delay.h>
22#include "../rio.h"
23
24/* Global (broadcast) route registers */
25#define SPBC_ROUTE_CFG_DESTID 0x10070
26#define SPBC_ROUTE_CFG_PORT 0x10074
27
28/* Per port route registers */
29#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
30#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
31
32#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
33#define TSI578_SP_MODE_GLBL 0x10004
34#define TSI578_SP_MODE_PW_DIS 0x08000000
35#define TSI578_SP_MODE_LUT_512 0x01000000
36
37#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
38#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
39#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
40#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
41
42#define TSI578_GLBL_ROUTE_BASE 0x10078
43
44static int
45tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
46 u16 table, u16 route_destid, u8 route_port)
47{
48 if (table == RIO_GLOBAL_TABLE) {
49 rio_mport_write_config_32(mport, destid, hopcount,
50 SPBC_ROUTE_CFG_DESTID, route_destid);
51 rio_mport_write_config_32(mport, destid, hopcount,
52 SPBC_ROUTE_CFG_PORT, route_port);
53 } else {
54 rio_mport_write_config_32(mport, destid, hopcount,
55 SPP_ROUTE_CFG_DESTID(table), route_destid);
56 rio_mport_write_config_32(mport, destid, hopcount,
57 SPP_ROUTE_CFG_PORT(table), route_port);
58 }
59
60 udelay(10);
61
62 return 0;
63}
64
65static int
66tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
67 u16 table, u16 route_destid, u8 *route_port)
68{
69 int ret = 0;
70 u32 result;
71
72 if (table == RIO_GLOBAL_TABLE) {
73 /* Use local RT of the ingress port to avoid possible
74 race condition */
75 rio_mport_read_config_32(mport, destid, hopcount,
76 RIO_SWP_INFO_CAR, &result);
77 table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
78 }
79
80 rio_mport_write_config_32(mport, destid, hopcount,
81 SPP_ROUTE_CFG_DESTID(table), route_destid);
82 rio_mport_read_config_32(mport, destid, hopcount,
83 SPP_ROUTE_CFG_PORT(table), &result);
84
85 *route_port = (u8)result;
86 if (*route_port > 15)
87 ret = -1;
88
89 return ret;
90}
91
92static int
93tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
94 u16 table)
95{
96 u32 route_idx;
97 u32 lut_size;
98
99 lut_size = (mport->sys_size) ? 0x1ff : 0xff;
100
101 if (table == RIO_GLOBAL_TABLE) {
102 rio_mport_write_config_32(mport, destid, hopcount,
103 SPBC_ROUTE_CFG_DESTID, 0x80000000);
104 for (route_idx = 0; route_idx <= lut_size; route_idx++)
105 rio_mport_write_config_32(mport, destid, hopcount,
106 SPBC_ROUTE_CFG_PORT,
107 RIO_INVALID_ROUTE);
108 } else {
109 rio_mport_write_config_32(mport, destid, hopcount,
110 SPP_ROUTE_CFG_DESTID(table), 0x80000000);
111 for (route_idx = 0; route_idx <= lut_size; route_idx++)
112 rio_mport_write_config_32(mport, destid, hopcount,
113 SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE);
114 }
115
116 return 0;
117}
118
119static int
120tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
121 u8 sw_domain)
122{
123 u32 regval;
124
125 /*
126 * Switch domain configuration operates only at global level
127 */
128
129 /* Turn off flat (LUT_512) mode */
130 rio_mport_read_config_32(mport, destid, hopcount,
131 TSI578_SP_MODE_GLBL, &regval);
132 rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
133 regval & ~TSI578_SP_MODE_LUT_512);
134 /* Set switch domain base */
135 rio_mport_write_config_32(mport, destid, hopcount,
136 TSI578_GLBL_ROUTE_BASE,
137 (u32)(sw_domain << 24));
138 return 0;
139}
140
141static int
142tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
143 u8 *sw_domain)
144{
145 u32 regval;
146
147 /*
148 * Switch domain configuration operates only at global level
149 */
150 rio_mport_read_config_32(mport, destid, hopcount,
151 TSI578_GLBL_ROUTE_BASE, &regval);
152
153 *sw_domain = (u8)(regval >> 24);
154
155 return 0;
156}
157
158static int
159tsi57x_em_init(struct rio_dev *rdev)
160{
161 struct rio_mport *mport = rdev->net->hport;
162 u16 destid = rdev->rswitch->destid;
163 u8 hopcount = rdev->rswitch->hopcount;
164 u32 regval;
165 int portnum;
166
167 pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
168
169 for (portnum = 0; portnum < 16; portnum++) {
170 /* Make sure that Port-Writes are enabled (for all ports) */
171 rio_mport_read_config_32(mport, destid, hopcount,
172 TSI578_SP_MODE(portnum), &regval);
173 rio_mport_write_config_32(mport, destid, hopcount,
174 TSI578_SP_MODE(portnum),
175 regval & ~TSI578_SP_MODE_PW_DIS);
176
177 /* Clear all pending interrupts */
178 rio_mport_read_config_32(mport, destid, hopcount,
179 rdev->phys_efptr +
180 RIO_PORT_N_ERR_STS_CSR(portnum),
181 &regval);
182 rio_mport_write_config_32(mport, destid, hopcount,
183 rdev->phys_efptr +
184 RIO_PORT_N_ERR_STS_CSR(portnum),
185 regval & 0x07120214);
186
187 rio_mport_read_config_32(mport, destid, hopcount,
188 TSI578_SP_INT_STATUS(portnum), &regval);
189 rio_mport_write_config_32(mport, destid, hopcount,
190 TSI578_SP_INT_STATUS(portnum),
191 regval & 0x000700bd);
192
193 /* Enable all interrupts to allow ports to send a port-write */
194 rio_mport_read_config_32(mport, destid, hopcount,
195 TSI578_SP_CTL_INDEP(portnum), &regval);
196 rio_mport_write_config_32(mport, destid, hopcount,
197 TSI578_SP_CTL_INDEP(portnum),
198 regval | 0x000b0000);
199
200 /* Skip next (odd) port if the current port is in x4 mode */
201 rio_mport_read_config_32(mport, destid, hopcount,
202 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
203 &regval);
204 if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
205 portnum++;
206 }
207
208 return 0;
209}
210
211static int
212tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
213{
214 struct rio_mport *mport = rdev->net->hport;
215 u16 destid = rdev->rswitch->destid;
216 u8 hopcount = rdev->rswitch->hopcount;
217 u32 intstat, err_status;
218 int sendcount, checkcount;
219 u8 route_port;
220 u32 regval;
221
222 rio_mport_read_config_32(mport, destid, hopcount,
223 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
224 &err_status);
225
226 if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
227 (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
228 RIO_PORT_N_ERR_STS_PW_INP_ES))) {
229 /* Remove any queued packets by locking/unlocking port */
230 rio_mport_read_config_32(mport, destid, hopcount,
231 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
232 &regval);
233 if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
234 rio_mport_write_config_32(mport, destid, hopcount,
235 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
236 regval | RIO_PORT_N_CTL_LOCKOUT);
237 udelay(50);
238 rio_mport_write_config_32(mport, destid, hopcount,
239 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
240 regval);
241 }
242
243 /* Read from link maintenance response register to clear
244 * valid bit
245 */
246 rio_mport_read_config_32(mport, destid, hopcount,
247 rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
248 &regval);
249
250 /* Send a Packet-Not-Accepted/Link-Request-Input-Status control
251 * symbol to recover from IES/OES
252 */
253 sendcount = 3;
254 while (sendcount) {
255 rio_mport_write_config_32(mport, destid, hopcount,
256 TSI578_SP_CS_TX(portnum), 0x40fc8000);
257 checkcount = 3;
258 while (checkcount--) {
259 udelay(50);
260 rio_mport_read_config_32(
261 mport, destid, hopcount,
262 rdev->phys_efptr +
263 RIO_PORT_N_MNT_RSP_CSR(portnum),
264 &regval);
265 if (regval & RIO_PORT_N_MNT_RSP_RVAL)
266 goto exit_es;
267 }
268
269 sendcount--;
270 }
271 }
272
273exit_es:
274 /* Clear implementation specific error status bits */
275 rio_mport_read_config_32(mport, destid, hopcount,
276 TSI578_SP_INT_STATUS(portnum), &intstat);
277 pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
278 destid, hopcount, portnum, intstat);
279
280 if (intstat & 0x10000) {
281 rio_mport_read_config_32(mport, destid, hopcount,
282 TSI578_SP_LUT_PEINF(portnum), &regval);
283 regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
284 route_port = rdev->rswitch->route_table[regval];
285 pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
286 rio_name(rdev), portnum, regval);
287 tsi57x_route_add_entry(mport, destid, hopcount,
288 RIO_GLOBAL_TABLE, regval, route_port);
289 }
290
291 rio_mport_write_config_32(mport, destid, hopcount,
292 TSI578_SP_INT_STATUS(portnum),
293 intstat & 0x000700bd);
294
295 return 0;
296}
297
298static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
299{
300 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
301 rdev->rswitch->add_entry = tsi57x_route_add_entry;
302 rdev->rswitch->get_entry = tsi57x_route_get_entry;
303 rdev->rswitch->clr_table = tsi57x_route_clr_table;
304 rdev->rswitch->set_domain = tsi57x_set_domain;
305 rdev->rswitch->get_domain = tsi57x_get_domain;
306 rdev->rswitch->em_init = tsi57x_em_init;
307 rdev->rswitch->em_handle = tsi57x_em_handler;
308
309 return 0;
310}
311
312DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init);
313DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init);
314DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init);
315DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init);
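With em_init and em_handle populated, the core can hand an inbound port-write from a Tsi57x switch to the driver-specific handler above, which drains the faulted port and repairs LUT parity errors. A minimal sketch of such a dispatch point (the surrounding port-write plumbing is not part of this file; the helper name is invented):

/* Hypothetical dispatch: 'rdev' generated the port-write and 'portnum'
 * was extracted from the port-write payload by the caller.
 */
static int example_dispatch_pw(struct rio_dev *rdev, u8 portnum)
{
	if (rdev->rswitch && rdev->rswitch->em_handle)
		return rdev->rswitch->em_handle(rdev, portnum);

	return -ENODEV;	/* no error-management support for this switch */
}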
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f1598324344c..10ba12c8c5e0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -611,6 +611,13 @@ config RTC_DRV_AB3100
611 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC 611 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
612 support. This chip contains a battery- and capacitor-backed RTC. 612 support. This chip contains a battery- and capacitor-backed RTC.
613 613
614config RTC_DRV_AB8500
615 tristate "ST-Ericsson AB8500 RTC"
616 depends on AB8500_CORE
617 help
618 Select this to enable the ST-Ericsson AB8500 power management IC RTC
619 support. This chip contains a battery- and capacitor-backed RTC.
620
614config RTC_DRV_NUC900 621config RTC_DRV_NUC900
615 tristate "NUC910/NUC920 RTC driver" 622 tristate "NUC910/NUC920 RTC driver"
616 depends on RTC_CLASS && ARCH_W90X900 623 depends on RTC_CLASS && ARCH_W90X900
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 245311a1348f..5adbba7cf89c 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -18,6 +18,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
18# Keep the list ordered. 18# Keep the list ordered.
19 19
20obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o 20obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
21obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
21obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o 22obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
22obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o 23obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
23obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o 24obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
new file mode 100644
index 000000000000..2fda03125e55
--- /dev/null
+++ b/drivers/rtc/rtc-ab8500.c
@@ -0,0 +1,363 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License terms: GNU General Public License (GPL) version 2
5 * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>
6 *
7 * RTC clock driver for the RTC part of the AB8500 Power management chip.
8 * Based on RTC clock driver for the AB3100 Analog Baseband Chip by
9 * Linus Walleij <linus.walleij@stericsson.com>
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/platform_device.h>
16#include <linux/rtc.h>
17#include <linux/mfd/ab8500.h>
18#include <linux/delay.h>
19
20#define AB8500_RTC_SOFF_STAT_REG 0x0F00
21#define AB8500_RTC_CC_CONF_REG 0x0F01
22#define AB8500_RTC_READ_REQ_REG 0x0F02
23#define AB8500_RTC_WATCH_TSECMID_REG 0x0F03
24#define AB8500_RTC_WATCH_TSECHI_REG 0x0F04
25#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x0F05
26#define AB8500_RTC_WATCH_TMIN_MID_REG 0x0F06
27#define AB8500_RTC_WATCH_TMIN_HI_REG 0x0F07
28#define AB8500_RTC_ALRM_MIN_LOW_REG 0x0F08
29#define AB8500_RTC_ALRM_MIN_MID_REG 0x0F09
30#define AB8500_RTC_ALRM_MIN_HI_REG 0x0F0A
31#define AB8500_RTC_STAT_REG 0x0F0B
32#define AB8500_RTC_BKUP_CHG_REG 0x0F0C
33#define AB8500_RTC_FORCE_BKUP_REG 0x0F0D
34#define AB8500_RTC_CALIB_REG 0x0F0E
35#define AB8500_RTC_SWITCH_STAT_REG 0x0F0F
36#define AB8500_REV_REG 0x1080
37
38/* RtcReadRequest bits */
39#define RTC_READ_REQUEST 0x01
40#define RTC_WRITE_REQUEST 0x02
41
42/* RtcCtrl bits */
43#define RTC_ALARM_ENA 0x04
44#define RTC_STATUS_DATA 0x01
45
46#define COUNTS_PER_SEC (0xF000 / 60)
47#define AB8500_RTC_EPOCH 2000
48
49static const unsigned long ab8500_rtc_time_regs[] = {
50 AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG,
51 AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG,
52 AB8500_RTC_WATCH_TSECMID_REG
53};
54
55static const unsigned long ab8500_rtc_alarm_regs[] = {
56 AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG,
57 AB8500_RTC_ALRM_MIN_LOW_REG
58};
59
60/* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */
61static unsigned long get_elapsed_seconds(int year)
62{
63 unsigned long secs;
64 struct rtc_time tm = {
65 .tm_year = year - 1900,
66 .tm_mday = 1,
67 };
68
69 /*
70 * This function calculates secs from 1970 and not from
71 * 1900, even if we supply the offset from year 1900.
72 */
73 rtc_tm_to_time(&tm, &secs);
74 return secs;
75}
76
77static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
78{
79 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
80 unsigned long timeout = jiffies + HZ;
81 int retval, i;
82 unsigned long mins, secs;
83 unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
84
85 /* Request a data read */
86 retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG,
87 RTC_READ_REQUEST);
88 if (retval < 0)
89 return retval;
90
91 /* Early AB8500 chips will not clear the rtc read request bit */
92 if (ab8500->revision == 0) {
93 msleep(1);
94 } else {
95 /* Wait for some cycles after enabling the rtc read in ab8500 */
96 while (time_before(jiffies, timeout)) {
97 retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG);
98 if (retval < 0)
99 return retval;
100
101 if (!(retval & RTC_READ_REQUEST))
102 break;
103
104 msleep(1);
105 }
106 }
107
108 /* Read the Watchtime registers */
109 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
110 retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]);
111 if (retval < 0)
112 return retval;
113 buf[i] = retval;
114 }
115
116 mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
117
118 secs = (buf[3] << 8) | buf[4];
119 secs = secs / COUNTS_PER_SEC;
120 secs = secs + (mins * 60);
121
122 /* Add back the initially subtracted number of seconds */
123 secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
124
125 rtc_time_to_tm(secs, tm);
126 return rtc_valid_tm(tm);
127}
128
129static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
130{
131 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
132 int retval, i;
133 unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
134 unsigned long no_secs, no_mins, secs = 0;
135
136 if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) {
137 dev_dbg(dev, "year should be equal to or greater than %d\n",
138 AB8500_RTC_EPOCH);
139 return -EINVAL;
140 }
141
142 /* Get the number of seconds since 1970 */
143 rtc_tm_to_time(tm, &secs);
144
145 /*
146 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
147 * we only have a small counter in the RTC.
148 */
149 secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
150
151 no_mins = secs / 60;
152
153 no_secs = secs % 60;
154 /* Make the seconds count as per the RTC resolution */
155 no_secs = no_secs * COUNTS_PER_SEC;
156
157 buf[4] = no_secs & 0xFF;
158 buf[3] = (no_secs >> 8) & 0xFF;
159
160 buf[2] = no_mins & 0xFF;
161 buf[1] = (no_mins >> 8) & 0xFF;
162 buf[0] = (no_mins >> 16) & 0xFF;
163
164 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
165 retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]);
166 if (retval < 0)
167 return retval;
168 }
169
170 /* Request a data write */
171 return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST);
172}
173
174static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
175{
176 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
177 int retval, i;
178 int rtc_ctrl;
179 unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
180 unsigned long secs, mins;
181
182 /* Check if the alarm is enabled or not */
183 rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
184 if (rtc_ctrl < 0)
185 return rtc_ctrl;
186
187 if (rtc_ctrl & RTC_ALARM_ENA)
188 alarm->enabled = 1;
189 else
190 alarm->enabled = 0;
191
192 alarm->pending = 0;
193
194 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
195 retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]);
196 if (retval < 0)
197 return retval;
198 buf[i] = retval;
199 }
200
201 mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
202 secs = mins * 60;
203
204 /* Add back the initially subtracted number of seconds */
205 secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
206
207 rtc_time_to_tm(secs, &alarm->time);
208
209 return rtc_valid_tm(&alarm->time);
210}
211
212static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled)
213{
214 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
215
216 return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA,
217 enabled ? RTC_ALARM_ENA : 0);
218}
219
220static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
221{
222 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
223 int retval, i;
224 unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
225 unsigned long mins, secs = 0;
226
227 if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
228 dev_dbg(dev, "year should be equal to or greater than %d\n",
229 AB8500_RTC_EPOCH);
230 return -EINVAL;
231 }
232
233 /* Get the number of seconds since 1970 */
234 rtc_tm_to_time(&alarm->time, &secs);
235
236 /*
237 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
238 * we only have a small counter in the RTC.
239 */
240 secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
241
242 mins = secs / 60;
243
244 buf[2] = mins & 0xFF;
245 buf[1] = (mins >> 8) & 0xFF;
246 buf[0] = (mins >> 16) & 0xFF;
247
248 /* Set the alarm time */
249 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
250 retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]);
251 if (retval < 0)
252 return retval;
253 }
254
255 return ab8500_rtc_irq_enable(dev, alarm->enabled);
256}
257
258static irqreturn_t rtc_alarm_handler(int irq, void *data)
259{
260 struct rtc_device *rtc = data;
261 unsigned long events = RTC_IRQF | RTC_AF;
262
263 dev_dbg(&rtc->dev, "%s\n", __func__);
264 rtc_update_irq(rtc, 1, events);
265
266 return IRQ_HANDLED;
267}
268
269static const struct rtc_class_ops ab8500_rtc_ops = {
270 .read_time = ab8500_rtc_read_time,
271 .set_time = ab8500_rtc_set_time,
272 .read_alarm = ab8500_rtc_read_alarm,
273 .set_alarm = ab8500_rtc_set_alarm,
274 .alarm_irq_enable = ab8500_rtc_irq_enable,
275};
276
277static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
278{
279 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
280 int err;
281 struct rtc_device *rtc;
282 int rtc_ctrl;
283 int irq;
284
285 irq = platform_get_irq_byname(pdev, "ALARM");
286 if (irq < 0)
287 return irq;
288
289 /* For RTC supply test */
290 err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA,
291 RTC_STATUS_DATA);
292 if (err < 0)
293 return err;
294
295 /* Wait for reset by the PorRtc */
296 msleep(1);
297
298 rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
299 if (rtc_ctrl < 0)
300 return rtc_ctrl;
301
302 /* Check if the RTC Supply fails */
303 if (!(rtc_ctrl & RTC_STATUS_DATA)) {
304 dev_err(&pdev->dev, "RTC supply failure\n");
305 return -ENODEV;
306 }
307
308 rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
309 THIS_MODULE);
310 if (IS_ERR(rtc)) {
311 dev_err(&pdev->dev, "Registration failed\n");
312 err = PTR_ERR(rtc);
313 return err;
314 }
315
316 err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
317 "ab8500-rtc", rtc);
318 if (err < 0) {
319 rtc_device_unregister(rtc);
320 return err;
321 }
322
323 platform_set_drvdata(pdev, rtc);
324
325 return 0;
326}
327
328static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
329{
330 struct rtc_device *rtc = platform_get_drvdata(pdev);
331 int irq = platform_get_irq_byname(pdev, "ALARM");
332
333 free_irq(irq, rtc);
334 rtc_device_unregister(rtc);
335 platform_set_drvdata(pdev, NULL);
336
337 return 0;
338}
339
340static struct platform_driver ab8500_rtc_driver = {
341 .driver = {
342 .name = "ab8500-rtc",
343 .owner = THIS_MODULE,
344 },
345 .probe = ab8500_rtc_probe,
346 .remove = __devexit_p(ab8500_rtc_remove),
347};
348
349static int __init ab8500_rtc_init(void)
350{
351 return platform_driver_register(&ab8500_rtc_driver);
352}
353
354static void __exit ab8500_rtc_exit(void)
355{
356 platform_driver_unregister(&ab8500_rtc_driver);
357}
358
359module_init(ab8500_rtc_init);
360module_exit(ab8500_rtc_exit);
361MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
362MODULE_DESCRIPTION("AB8500 RTC Driver");
363MODULE_LICENSE("GPL v2");
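The time arithmetic in ab8500_rtc_read_time()/ab8500_rtc_set_time() above relies on the hardware layout: minutes since the year-2000 epoch are kept in three 8-bit registers, and a 16-bit sub-minute counter wraps at 0xF000, so COUNTS_PER_SEC = 0xF000 / 60 = 1024 ticks per second; the seconds between 1970 and 2000 are added back (or subtracted) via get_elapsed_seconds(). A worked sketch of the register-to-seconds direction (the helper name and sample values are illustrative only):

/* Convert the five raw watch-time register bytes to seconds since the
 * AB8500 epoch (add get_elapsed_seconds(AB8500_RTC_EPOCH) for UNIX time).
 */
static unsigned long example_regs_to_secs(const unsigned char buf[5])
{
	unsigned long mins  = (buf[0] << 16) | (buf[1] << 8) | buf[2];
	unsigned long ticks = (buf[3] << 8) | buf[4];

	/* e.g. mins = 1, ticks = 30720 -> 60 + 30720/1024 = 90 seconds */
	return mins * 60 + ticks / COUNTS_PER_SEC;
}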
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 038095d99976..6dc4e6241418 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -595,10 +595,6 @@ static void wdt_disable(void)
595static ssize_t wdt_write(struct file *file, const char __user *buf, 595static ssize_t wdt_write(struct file *file, const char __user *buf,
596 size_t count, loff_t *ppos) 596 size_t count, loff_t *ppos)
597{ 597{
598 /* Can't seek (pwrite) on this device
599 if (ppos != &file->f_pos)
600 return -ESPIPE;
601 */
602 if (count) { 598 if (count) {
603 wdt_ping(); 599 wdt_ping();
604 return 1; 600 return 1;
@@ -707,7 +703,7 @@ static int wdt_open(struct inode *inode, struct file *file)
707 */ 703 */
708 wdt_is_open = 1; 704 wdt_is_open = 1;
709 unlock_kernel(); 705 unlock_kernel();
710 return 0; 706 return nonseekable_open(inode, file);
711 } 707 }
712 return -ENODEV; 708 return -ENODEV;
713} 709}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0e86247d791e..33975e922d65 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1186,6 +1186,29 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1186 dasd_schedule_device_bh(device); 1186 dasd_schedule_device_bh(device);
1187} 1187}
1188 1188
1189enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1190{
1191 struct dasd_device *device;
1192
1193 device = dasd_device_from_cdev_locked(cdev);
1194
1195 if (IS_ERR(device))
1196 goto out;
1197 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1198 device->state != device->target ||
1199 !device->discipline->handle_unsolicited_interrupt){
1200 dasd_put_device(device);
1201 goto out;
1202 }
1203
1204 dasd_device_clear_timer(device);
1205 device->discipline->handle_unsolicited_interrupt(device, irb);
1206 dasd_put_device(device);
1207out:
1208 return UC_TODO_RETRY;
1209}
1210EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1211
1189/* 1212/*
1190 * If we have an error on a dasd_block layer request then we cancel 1213 * If we have an error on a dasd_block layer request then we cancel
1191 * and return all further requests from the same dasd_block as well. 1214 * and return all further requests from the same dasd_block as well.
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5b1cd8d6e971..ab84da5592e8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3436,6 +3436,7 @@ static struct ccw_driver dasd_eckd_driver = {
3436 .freeze = dasd_generic_pm_freeze, 3436 .freeze = dasd_generic_pm_freeze,
3437 .thaw = dasd_generic_restore_device, 3437 .thaw = dasd_generic_restore_device,
3438 .restore = dasd_generic_restore_device, 3438 .restore = dasd_generic_restore_device,
3439 .uc_handler = dasd_generic_uc_handler,
3439}; 3440};
3440 3441
3441/* 3442/*
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 32fac186ba3f..49b431d135e0 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -617,6 +617,7 @@ int dasd_generic_notify(struct ccw_device *, int);
617void dasd_generic_handle_state_change(struct dasd_device *); 617void dasd_generic_handle_state_change(struct dasd_device *);
618int dasd_generic_pm_freeze(struct ccw_device *); 618int dasd_generic_pm_freeze(struct ccw_device *);
619int dasd_generic_restore_device(struct ccw_device *); 619int dasd_generic_restore_device(struct ccw_device *);
620enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
620 621
621int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 622int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
622char *dasd_get_sense(struct irb *); 623char *dasd_get_sense(struct irb *);
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5f97ea2ee6b1..97b25d68e3e7 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -123,8 +123,10 @@ ccwgroup_release (struct device *dev)
123 123
124 for (i = 0; i < gdev->count; i++) { 124 for (i = 0; i < gdev->count; i++) {
125 if (gdev->cdev[i]) { 125 if (gdev->cdev[i]) {
126 spin_lock_irq(gdev->cdev[i]->ccwlock);
126 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) 127 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
127 dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 128 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
129 spin_unlock_irq(gdev->cdev[i]->ccwlock);
128 put_device(&gdev->cdev[i]->dev); 130 put_device(&gdev->cdev[i]->dev);
129 } 131 }
130 } 132 }
@@ -262,11 +264,14 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
262 goto error; 264 goto error;
263 } 265 }
264 /* Don't allow a device to belong to more than one group. */ 266 /* Don't allow a device to belong to more than one group. */
267 spin_lock_irq(gdev->cdev[i]->ccwlock);
265 if (dev_get_drvdata(&gdev->cdev[i]->dev)) { 268 if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
269 spin_unlock_irq(gdev->cdev[i]->ccwlock);
266 rc = -EINVAL; 270 rc = -EINVAL;
267 goto error; 271 goto error;
268 } 272 }
269 dev_set_drvdata(&gdev->cdev[i]->dev, gdev); 273 dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
274 spin_unlock_irq(gdev->cdev[i]->ccwlock);
270 } 275 }
271 /* Check for sufficient number of bus ids. */ 276 /* Check for sufficient number of bus ids. */
272 if (i < num_devices && !curr_buf) { 277 if (i < num_devices && !curr_buf) {
@@ -303,8 +308,10 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
303error: 308error:
304 for (i = 0; i < num_devices; i++) 309 for (i = 0; i < num_devices; i++)
305 if (gdev->cdev[i]) { 310 if (gdev->cdev[i]) {
311 spin_lock_irq(gdev->cdev[i]->ccwlock);
306 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) 312 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
307 dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 313 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
314 spin_unlock_irq(gdev->cdev[i]->ccwlock);
308 put_device(&gdev->cdev[i]->dev); 315 put_device(&gdev->cdev[i]->dev);
309 gdev->cdev[i] = NULL; 316 gdev->cdev[i] = NULL;
310 } 317 }
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 37df42af05ec..7f206ed44fdf 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -159,6 +159,7 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
159{ 159{
160 struct irb *irb = &cdev->private->irb; 160 struct irb *irb = &cdev->private->irb;
161 struct cmd_scsw *scsw = &irb->scsw.cmd; 161 struct cmd_scsw *scsw = &irb->scsw.cmd;
162 enum uc_todo todo;
162 163
163 /* Perform BASIC SENSE if needed. */ 164 /* Perform BASIC SENSE if needed. */
164 if (ccw_device_accumulate_and_sense(cdev, lcirb)) 165 if (ccw_device_accumulate_and_sense(cdev, lcirb))
@@ -178,6 +179,20 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
178 /* Check for command reject. */ 179 /* Check for command reject. */
179 if (irb->ecw[0] & SNS0_CMD_REJECT) 180 if (irb->ecw[0] & SNS0_CMD_REJECT)
180 return IO_REJECTED; 181 return IO_REJECTED;
182 /* Ask the driver what to do */
183 if (cdev->drv && cdev->drv->uc_handler) {
184 todo = cdev->drv->uc_handler(cdev, lcirb);
185 switch (todo) {
186 case UC_TODO_RETRY:
187 return IO_STATUS_ERROR;
188 case UC_TODO_RETRY_ON_NEW_PATH:
189 return IO_PATH_ERROR;
190 case UC_TODO_STOP:
191 return IO_REJECTED;
192 default:
193 return IO_STATUS_ERROR;
194 }
195 }
181 /* Assume that unexpected SENSE data implies an error. */ 196 /* Assume that unexpected SENSE data implies an error. */
182 return IO_STATUS_ERROR; 197 return IO_STATUS_ERROR;
183 } 198 }
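The new uc_handler hook lets a CCW device driver decide how the common I/O layer reacts to an unsolicited unit check: UC_TODO_RETRY maps to IO_STATUS_ERROR, UC_TODO_RETRY_ON_NEW_PATH to IO_PATH_ERROR, and UC_TODO_STOP to IO_REJECTED, as the switch statement above shows. A minimal, purely illustrative handler (the policy is invented; compare dasd_generic_uc_handler earlier in this patch, which applies its own device-state checks first):

/* Illustrative only: stop on command reject, otherwise retry on the
 * same path, which matches the pre-patch default behaviour.
 */
static enum uc_todo example_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	if (irb->ecw[0] & SNS0_CMD_REJECT)
		return UC_TODO_STOP;

	return UC_TODO_RETRY;
}

A driver opts in by setting the .uc_handler member of its struct ccw_driver, as the dasd_eckd_driver hunk above does.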
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 759262792633..fac06155773f 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -23,21 +23,6 @@ struct tpi_info {
23 * Some S390 specific IO instructions as inline 23 * Some S390 specific IO instructions as inline
24 */ 24 */
25 25
26static inline int stsch(struct subchannel_id schid, struct schib *addr)
27{
28 register struct subchannel_id reg1 asm ("1") = schid;
29 int ccode;
30
31 asm volatile(
32 " stsch 0(%3)\n"
33 " ipm %0\n"
34 " srl %0,28"
35 : "=d" (ccode), "=m" (*addr)
36 : "d" (reg1), "a" (addr)
37 : "cc");
38 return ccode;
39}
40
41static inline int stsch_err(struct subchannel_id schid, struct schib *addr) 26static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
42{ 27{
43 register struct subchannel_id reg1 asm ("1") = schid; 28 register struct subchannel_id reg1 asm ("1") = schid;
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 308541ff85cf..1bb5d3f0e260 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -1,34 +1,31 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/init.h> 2#include <linux/init.h>
6#include <linux/interrupt.h> 3#include <linux/interrupt.h>
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/spinlock.h>
7#include <linux/zorro.h>
7 8
8#include <asm/setup.h>
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/amigaints.h> 11#include <asm/amigaints.h>
12#include <asm/amigahw.h> 12#include <asm/amigahw.h>
13#include <linux/zorro.h>
14#include <asm/irq.h>
15#include <linux/spinlock.h>
16 13
17#include "scsi.h" 14#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 15#include "wd33c93.h"
20#include "a2091.h" 16#include "a2091.h"
21 17
22#include <linux/stat.h>
23
24 18
25static int a2091_release(struct Scsi_Host *instance); 19struct a2091_hostdata {
20 struct WD33C93_hostdata wh;
21 struct a2091_scsiregs *regs;
22};
26 23
27static irqreturn_t a2091_intr(int irq, void *data) 24static irqreturn_t a2091_intr(int irq, void *data)
28{ 25{
29 struct Scsi_Host *instance = data; 26 struct Scsi_Host *instance = data;
30 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 27 struct a2091_hostdata *hdata = shost_priv(instance);
31 unsigned int status = regs->ISTR; 28 unsigned int status = hdata->regs->ISTR;
32 unsigned long flags; 29 unsigned long flags;
33 30
34 if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) 31 if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
@@ -43,38 +40,39 @@ static irqreturn_t a2091_intr(int irq, void *data)
43static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 40static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
44{ 41{
45 struct Scsi_Host *instance = cmd->device->host; 42 struct Scsi_Host *instance = cmd->device->host;
46 struct WD33C93_hostdata *hdata = shost_priv(instance); 43 struct a2091_hostdata *hdata = shost_priv(instance);
47 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 44 struct WD33C93_hostdata *wh = &hdata->wh;
45 struct a2091_scsiregs *regs = hdata->regs;
48 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 46 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
49 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 47 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
50 48
51 /* don't allow DMA if the physical address is bad */ 49 /* don't allow DMA if the physical address is bad */
52 if (addr & A2091_XFER_MASK) { 50 if (addr & A2091_XFER_MASK) {
53 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 51 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
54 hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, 52 wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
55 GFP_KERNEL); 53 GFP_KERNEL);
56 54
57 /* can't allocate memory; use PIO */ 55 /* can't allocate memory; use PIO */
58 if (!hdata->dma_bounce_buffer) { 56 if (!wh->dma_bounce_buffer) {
59 hdata->dma_bounce_len = 0; 57 wh->dma_bounce_len = 0;
60 return 1; 58 return 1;
61 } 59 }
62 60
63 /* get the physical address of the bounce buffer */ 61 /* get the physical address of the bounce buffer */
64 addr = virt_to_bus(hdata->dma_bounce_buffer); 62 addr = virt_to_bus(wh->dma_bounce_buffer);
65 63
66 /* the bounce buffer may not be in the first 16M of physmem */ 64 /* the bounce buffer may not be in the first 16M of physmem */
67 if (addr & A2091_XFER_MASK) { 65 if (addr & A2091_XFER_MASK) {
68 /* we could use chipmem... maybe later */ 66 /* we could use chipmem... maybe later */
69 kfree(hdata->dma_bounce_buffer); 67 kfree(wh->dma_bounce_buffer);
70 hdata->dma_bounce_buffer = NULL; 68 wh->dma_bounce_buffer = NULL;
71 hdata->dma_bounce_len = 0; 69 wh->dma_bounce_len = 0;
72 return 1; 70 return 1;
73 } 71 }
74 72
75 if (!dir_in) { 73 if (!dir_in) {
76 /* copy to bounce buffer for a write */ 74 /* copy to bounce buffer for a write */
77 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 75 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
78 cmd->SCp.this_residual); 76 cmd->SCp.this_residual);
79 } 77 }
80 } 78 }
@@ -84,7 +82,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
84 cntr |= CNTR_DDIR; 82 cntr |= CNTR_DDIR;
85 83
86 /* remember direction */ 84 /* remember direction */
87 hdata->dma_dir = dir_in; 85 wh->dma_dir = dir_in;
88 86
89 regs->CNTR = cntr; 87 regs->CNTR = cntr;
90 88
@@ -108,20 +106,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
108static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 106static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
109 int status) 107 int status)
110{ 108{
111 struct WD33C93_hostdata *hdata = shost_priv(instance); 109 struct a2091_hostdata *hdata = shost_priv(instance);
112 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 110 struct WD33C93_hostdata *wh = &hdata->wh;
111 struct a2091_scsiregs *regs = hdata->regs;
113 112
114 /* disable SCSI interrupts */ 113 /* disable SCSI interrupts */
115 unsigned short cntr = CNTR_PDMD; 114 unsigned short cntr = CNTR_PDMD;
116 115
117 if (!hdata->dma_dir) 116 if (!wh->dma_dir)
118 cntr |= CNTR_DDIR; 117 cntr |= CNTR_DDIR;
119 118
120 /* disable SCSI interrupts */ 119 /* disable SCSI interrupts */
121 regs->CNTR = cntr; 120 regs->CNTR = cntr;
122 121
123 /* flush if we were reading */ 122 /* flush if we were reading */
124 if (hdata->dma_dir) { 123 if (wh->dma_dir) {
125 regs->FLUSH = 1; 124 regs->FLUSH = 1;
126 while (!(regs->ISTR & ISTR_FE_FLG)) 125 while (!(regs->ISTR & ISTR_FE_FLG))
127 ; 126 ;
@@ -137,95 +136,37 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
137 regs->CNTR = CNTR_PDMD | CNTR_INTEN; 136 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
138 137
139 /* copy from a bounce buffer, if necessary */ 138 /* copy from a bounce buffer, if necessary */
140 if (status && hdata->dma_bounce_buffer) { 139 if (status && wh->dma_bounce_buffer) {
141 if (hdata->dma_dir) 140 if (wh->dma_dir)
142 memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, 141 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
143 SCpnt->SCp.this_residual); 142 SCpnt->SCp.this_residual);
144 kfree(hdata->dma_bounce_buffer); 143 kfree(wh->dma_bounce_buffer);
145 hdata->dma_bounce_buffer = NULL; 144 wh->dma_bounce_buffer = NULL;
146 hdata->dma_bounce_len = 0; 145 wh->dma_bounce_len = 0;
147 }
148}
149
150static int __init a2091_detect(struct scsi_host_template *tpnt)
151{
152 static unsigned char called = 0;
153 struct Scsi_Host *instance;
154 unsigned long address;
155 struct zorro_dev *z = NULL;
156 wd33c93_regs wdregs;
157 a2091_scsiregs *regs;
158 struct WD33C93_hostdata *hdata;
159 int num_a2091 = 0;
160
161 if (!MACH_IS_AMIGA || called)
162 return 0;
163 called = 1;
164
165 tpnt->proc_name = "A2091";
166 tpnt->proc_info = &wd33c93_proc_info;
167
168 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
169 if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
170 z->id != ZORRO_PROD_CBM_A590_A2091_2)
171 continue;
172 address = z->resource.start;
173 if (!request_mem_region(address, 256, "wd33c93"))
174 continue;
175
176 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
177 if (instance == NULL)
178 goto release;
179 instance->base = ZTWO_VADDR(address);
180 instance->irq = IRQ_AMIGA_PORTS;
181 instance->unique_id = z->slotaddr;
182 regs = (a2091_scsiregs *)(instance->base);
183 regs->DAWR = DAWR_A2091;
184 wdregs.SASR = &regs->SASR;
185 wdregs.SCMD = &regs->SCMD;
186 hdata = shost_priv(instance);
187 hdata->no_sync = 0xff;
188 hdata->fast = 0;
189 hdata->dma_mode = CTRL_DMA;
190 wd33c93_init(instance, wdregs, dma_setup, dma_stop,
191 WD33C93_FS_8_10);
192 if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
193 "A2091 SCSI", instance))
194 goto unregister;
195 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
196 num_a2091++;
197 continue;
198
199unregister:
200 scsi_unregister(instance);
201release:
202 release_mem_region(address, 256);
203 } 146 }
204
205 return num_a2091;
206} 147}
207 148
208static int a2091_bus_reset(struct scsi_cmnd *cmd) 149static int a2091_bus_reset(struct scsi_cmnd *cmd)
209{ 150{
151 struct Scsi_Host *instance = cmd->device->host;
152
210 /* FIXME perform bus-specific reset */ 153 /* FIXME perform bus-specific reset */
211 154
212 /* FIXME 2: kill this function, and let midlayer fall back 155 /* FIXME 2: kill this function, and let midlayer fall back
213 to the same action, calling wd33c93_host_reset() */ 156 to the same action, calling wd33c93_host_reset() */
214 157
215 spin_lock_irq(cmd->device->host->host_lock); 158 spin_lock_irq(instance->host_lock);
216 wd33c93_host_reset(cmd); 159 wd33c93_host_reset(cmd);
217 spin_unlock_irq(cmd->device->host->host_lock); 160 spin_unlock_irq(instance->host_lock);
218 161
219 return SUCCESS; 162 return SUCCESS;
220} 163}
221 164
222#define HOSTS_C 165static struct scsi_host_template a2091_scsi_template = {
223 166 .module = THIS_MODULE,
224static struct scsi_host_template driver_template = {
225 .proc_name = "A2901",
226 .name = "Commodore A2091/A590 SCSI", 167 .name = "Commodore A2091/A590 SCSI",
227 .detect = a2091_detect, 168 .proc_info = wd33c93_proc_info,
228 .release = a2091_release, 169 .proc_name = "A2901",
229 .queuecommand = wd33c93_queuecommand, 170 .queuecommand = wd33c93_queuecommand,
230 .eh_abort_handler = wd33c93_abort, 171 .eh_abort_handler = wd33c93_abort,
231 .eh_bus_reset_handler = a2091_bus_reset, 172 .eh_bus_reset_handler = a2091_bus_reset,
@@ -237,19 +178,103 @@ static struct scsi_host_template driver_template = {
237 .use_clustering = DISABLE_CLUSTERING 178 .use_clustering = DISABLE_CLUSTERING
238}; 179};
239 180
181static int __devinit a2091_probe(struct zorro_dev *z,
182 const struct zorro_device_id *ent)
183{
184 struct Scsi_Host *instance;
185 int error;
186 struct a2091_scsiregs *regs;
187 wd33c93_regs wdregs;
188 struct a2091_hostdata *hdata;
240 189
241#include "scsi_module.c" 190 if (!request_mem_region(z->resource.start, 256, "wd33c93"))
191 return -EBUSY;
242 192
243static int a2091_release(struct Scsi_Host *instance) 193 instance = scsi_host_alloc(&a2091_scsi_template,
194 sizeof(struct a2091_hostdata));
195 if (!instance) {
196 error = -ENOMEM;
197 goto fail_alloc;
198 }
199
200 instance->irq = IRQ_AMIGA_PORTS;
201 instance->unique_id = z->slotaddr;
202
203 regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
204 regs->DAWR = DAWR_A2091;
205
206 wdregs.SASR = &regs->SASR;
207 wdregs.SCMD = &regs->SCMD;
208
209 hdata = shost_priv(instance);
210 hdata->wh.no_sync = 0xff;
211 hdata->wh.fast = 0;
212 hdata->wh.dma_mode = CTRL_DMA;
213 hdata->regs = regs;
214
215 wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10);
216 error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
217 "A2091 SCSI", instance);
218 if (error)
219 goto fail_irq;
220
221 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
222
223 error = scsi_add_host(instance, NULL);
224 if (error)
225 goto fail_host;
226
227 zorro_set_drvdata(z, instance);
228
229 scsi_scan_host(instance);
230 return 0;
231
232fail_host:
233 free_irq(IRQ_AMIGA_PORTS, instance);
234fail_irq:
235 scsi_host_put(instance);
236fail_alloc:
237 release_mem_region(z->resource.start, 256);
238 return error;
239}
240
241static void __devexit a2091_remove(struct zorro_dev *z)
244{ 242{
245#ifdef MODULE 243 struct Scsi_Host *instance = zorro_get_drvdata(z);
246 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 244 struct a2091_hostdata *hdata = shost_priv(instance);
247 245
248 regs->CNTR = 0; 246 hdata->regs->CNTR = 0;
249 release_mem_region(ZTWO_PADDR(instance->base), 256); 247 scsi_remove_host(instance);
250 free_irq(IRQ_AMIGA_PORTS, instance); 248 free_irq(IRQ_AMIGA_PORTS, instance);
251#endif 249 scsi_host_put(instance);
252 return 1; 250 release_mem_region(z->resource.start, 256);
251}
252
253static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = {
254 { ZORRO_PROD_CBM_A590_A2091_1 },
255 { ZORRO_PROD_CBM_A590_A2091_2 },
256 { 0 }
257};
258MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl);
259
260static struct zorro_driver a2091_driver = {
261 .name = "a2091",
262 .id_table = a2091_zorro_tbl,
263 .probe = a2091_probe,
264 .remove = __devexit_p(a2091_remove),
265};
266
267static int __init a2091_init(void)
268{
269 return zorro_register_driver(&a2091_driver);
270}
271module_init(a2091_init);
272
273static void __exit a2091_exit(void)
274{
275 zorro_unregister_driver(&a2091_driver);
253} 276}
277module_exit(a2091_exit);
254 278
279MODULE_DESCRIPTION("Commodore A2091/A590 SCSI");
255MODULE_LICENSE("GPL"); 280MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 1c3daa1fd754..794b8e65c711 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -25,7 +25,7 @@
25 */ 25 */
26#define A2091_XFER_MASK (0xff000001) 26#define A2091_XFER_MASK (0xff000001)
27 27
28typedef struct { 28struct a2091_scsiregs {
29 unsigned char pad1[64]; 29 unsigned char pad1[64];
30 volatile unsigned short ISTR; 30 volatile unsigned short ISTR;
31 volatile unsigned short CNTR; 31 volatile unsigned short CNTR;
@@ -44,7 +44,7 @@ typedef struct {
44 volatile unsigned short CINT; 44 volatile unsigned short CINT;
45 unsigned char pad7[2]; 45 unsigned char pad7[2];
46 volatile unsigned short FLUSH; 46 volatile unsigned short FLUSH;
47} a2091_scsiregs; 47};
48 48
49#define DAWR_A2091 (3) 49#define DAWR_A2091 (3)
50 50
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index bc6eb69f5fd0..d9468027fb61 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -1,53 +1,52 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h> 2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/ioport.h> 3#include <linux/ioport.h>
6#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/slab.h>
7#include <linux/spinlock.h> 6#include <linux/spinlock.h>
8#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <linux/platform_device.h>
9 9
10#include <asm/setup.h>
11#include <asm/page.h> 10#include <asm/page.h>
12#include <asm/pgtable.h> 11#include <asm/pgtable.h>
13#include <asm/amigaints.h> 12#include <asm/amigaints.h>
14#include <asm/amigahw.h> 13#include <asm/amigahw.h>
15#include <asm/irq.h>
16 14
17#include "scsi.h" 15#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 16#include "wd33c93.h"
20#include "a3000.h" 17#include "a3000.h"
21 18
22#include <linux/stat.h>
23
24 19
25#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base)) 20struct a3000_hostdata {
26 21 struct WD33C93_hostdata wh;
27static struct Scsi_Host *a3000_host = NULL; 22 struct a3000_scsiregs *regs;
28 23};
29static int a3000_release(struct Scsi_Host *instance);
30 24
31static irqreturn_t a3000_intr(int irq, void *dummy) 25static irqreturn_t a3000_intr(int irq, void *data)
32{ 26{
27 struct Scsi_Host *instance = data;
28 struct a3000_hostdata *hdata = shost_priv(instance);
29 unsigned int status = hdata->regs->ISTR;
33 unsigned long flags; 30 unsigned long flags;
34 unsigned int status = DMA(a3000_host)->ISTR;
35 31
36 if (!(status & ISTR_INT_P)) 32 if (!(status & ISTR_INT_P))
37 return IRQ_NONE; 33 return IRQ_NONE;
38 if (status & ISTR_INTS) { 34 if (status & ISTR_INTS) {
39 spin_lock_irqsave(a3000_host->host_lock, flags); 35 spin_lock_irqsave(instance->host_lock, flags);
40 wd33c93_intr(a3000_host); 36 wd33c93_intr(instance);
41 spin_unlock_irqrestore(a3000_host->host_lock, flags); 37 spin_unlock_irqrestore(instance->host_lock, flags);
42 return IRQ_HANDLED; 38 return IRQ_HANDLED;
43 } 39 }
44 printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status); 40 pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
45 return IRQ_NONE; 41 return IRQ_NONE;
46} 42}
47 43
48static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 44static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
49{ 45{
50 struct WD33C93_hostdata *hdata = shost_priv(a3000_host); 46 struct Scsi_Host *instance = cmd->device->host;
47 struct a3000_hostdata *hdata = shost_priv(instance);
48 struct WD33C93_hostdata *wh = &hdata->wh;
49 struct a3000_scsiregs *regs = hdata->regs;
51 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 50 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
52 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 51 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
53 52
@@ -58,23 +57,23 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
58 * buffer 57 * buffer
59 */ 58 */
60 if (addr & A3000_XFER_MASK) { 59 if (addr & A3000_XFER_MASK) {
61 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 60 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
62 hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, 61 wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
63 GFP_KERNEL); 62 GFP_KERNEL);
64 63
65 /* can't allocate memory; use PIO */ 64 /* can't allocate memory; use PIO */
66 if (!hdata->dma_bounce_buffer) { 65 if (!wh->dma_bounce_buffer) {
67 hdata->dma_bounce_len = 0; 66 wh->dma_bounce_len = 0;
68 return 1; 67 return 1;
69 } 68 }
70 69
71 if (!dir_in) { 70 if (!dir_in) {
72 /* copy to bounce buffer for a write */ 71 /* copy to bounce buffer for a write */
73 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 72 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
74 cmd->SCp.this_residual); 73 cmd->SCp.this_residual);
75 } 74 }
76 75
77 addr = virt_to_bus(hdata->dma_bounce_buffer); 76 addr = virt_to_bus(wh->dma_bounce_buffer);
78 } 77 }
79 78
80 /* setup dma direction */ 79 /* setup dma direction */
@@ -82,12 +81,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
82 cntr |= CNTR_DDIR; 81 cntr |= CNTR_DDIR;
83 82
84 /* remember direction */ 83 /* remember direction */
85 hdata->dma_dir = dir_in; 84 wh->dma_dir = dir_in;
86 85
87 DMA(a3000_host)->CNTR = cntr; 86 regs->CNTR = cntr;
88 87
89 /* setup DMA *physical* address */ 88 /* setup DMA *physical* address */
90 DMA(a3000_host)->ACR = addr; 89 regs->ACR = addr;
91 90
92 if (dir_in) { 91 if (dir_in) {
93 /* invalidate any cache */ 92 /* invalidate any cache */
@@ -99,7 +98,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
99 98
100 /* start DMA */ 99 /* start DMA */
101 mb(); /* make sure setup is completed */ 100 mb(); /* make sure setup is completed */
102 DMA(a3000_host)->ST_DMA = 1; 101 regs->ST_DMA = 1;
103 mb(); /* make sure DMA has started before next IO */ 102 mb(); /* make sure DMA has started before next IO */
104 103
105 /* return success */ 104 /* return success */
@@ -109,22 +108,24 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
109static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 108static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
110 int status) 109 int status)
111{ 110{
112 struct WD33C93_hostdata *hdata = shost_priv(instance); 111 struct a3000_hostdata *hdata = shost_priv(instance);
112 struct WD33C93_hostdata *wh = &hdata->wh;
113 struct a3000_scsiregs *regs = hdata->regs;
113 114
114 /* disable SCSI interrupts */ 115 /* disable SCSI interrupts */
115 unsigned short cntr = CNTR_PDMD; 116 unsigned short cntr = CNTR_PDMD;
116 117
117 if (!hdata->dma_dir) 118 if (!wh->dma_dir)
118 cntr |= CNTR_DDIR; 119 cntr |= CNTR_DDIR;
119 120
120 DMA(instance)->CNTR = cntr; 121 regs->CNTR = cntr;
121 mb(); /* make sure CNTR is updated before next IO */ 122 mb(); /* make sure CNTR is updated before next IO */
122 123
123 /* flush if we were reading */ 124 /* flush if we were reading */
124 if (hdata->dma_dir) { 125 if (wh->dma_dir) {
125 DMA(instance)->FLUSH = 1; 126 regs->FLUSH = 1;
126 mb(); /* don't allow prefetch */ 127 mb(); /* don't allow prefetch */
127 while (!(DMA(instance)->ISTR & ISTR_FE_FLG)) 128 while (!(regs->ISTR & ISTR_FE_FLG))
128 barrier(); 129 barrier();
129 mb(); /* no IO until FLUSH is done */ 130 mb(); /* no IO until FLUSH is done */
130 } 131 }
@@ -133,96 +134,54 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
133 /* I think that this CINT is only necessary if you are 134 /* I think that this CINT is only necessary if you are
134 * using the terminal count features. HM 7 Mar 1994 135 * using the terminal count features. HM 7 Mar 1994
135 */ 136 */
136 DMA(instance)->CINT = 1; 137 regs->CINT = 1;
137 138
138 /* stop DMA */ 139 /* stop DMA */
139 DMA(instance)->SP_DMA = 1; 140 regs->SP_DMA = 1;
140 mb(); /* make sure DMA is stopped before next IO */ 141 mb(); /* make sure DMA is stopped before next IO */
141 142
142 /* restore the CONTROL bits (minus the direction flag) */ 143 /* restore the CONTROL bits (minus the direction flag) */
143 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; 144 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
144 mb(); /* make sure CNTR is updated before next IO */ 145 mb(); /* make sure CNTR is updated before next IO */
145 146
146 /* copy from a bounce buffer, if necessary */ 147 /* copy from a bounce buffer, if necessary */
147 if (status && hdata->dma_bounce_buffer) { 148 if (status && wh->dma_bounce_buffer) {
148 if (SCpnt) { 149 if (SCpnt) {
149 if (hdata->dma_dir && SCpnt) 150 if (wh->dma_dir && SCpnt)
150 memcpy(SCpnt->SCp.ptr, 151 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
151 hdata->dma_bounce_buffer,
152 SCpnt->SCp.this_residual); 152 SCpnt->SCp.this_residual);
153 kfree(hdata->dma_bounce_buffer); 153 kfree(wh->dma_bounce_buffer);
154 hdata->dma_bounce_buffer = NULL; 154 wh->dma_bounce_buffer = NULL;
155 hdata->dma_bounce_len = 0; 155 wh->dma_bounce_len = 0;
156 } else { 156 } else {
157 kfree(hdata->dma_bounce_buffer); 157 kfree(wh->dma_bounce_buffer);
158 hdata->dma_bounce_buffer = NULL; 158 wh->dma_bounce_buffer = NULL;
159 hdata->dma_bounce_len = 0; 159 wh->dma_bounce_len = 0;
160 } 160 }
161 } 161 }
162} 162}
163 163
164static int __init a3000_detect(struct scsi_host_template *tpnt)
165{
166 wd33c93_regs regs;
167 struct WD33C93_hostdata *hdata;
168
169 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
170 return 0;
171 if (!request_mem_region(0xDD0000, 256, "wd33c93"))
172 return 0;
173
174 tpnt->proc_name = "A3000";
175 tpnt->proc_info = &wd33c93_proc_info;
176
177 a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
178 if (a3000_host == NULL)
179 goto fail_register;
180
181 a3000_host->base = ZTWO_VADDR(0xDD0000);
182 a3000_host->irq = IRQ_AMIGA_PORTS;
183 DMA(a3000_host)->DAWR = DAWR_A3000;
184 regs.SASR = &(DMA(a3000_host)->SASR);
185 regs.SCMD = &(DMA(a3000_host)->SCMD);
186 hdata = shost_priv(a3000_host);
187 hdata->no_sync = 0xff;
188 hdata->fast = 0;
189 hdata->dma_mode = CTRL_DMA;
190 wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
191 if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
192 a3000_intr))
193 goto fail_irq;
194 DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
195
196 return 1;
197
198fail_irq:
199 scsi_unregister(a3000_host);
200fail_register:
201 release_mem_region(0xDD0000, 256);
202 return 0;
203}
204
205static int a3000_bus_reset(struct scsi_cmnd *cmd) 164static int a3000_bus_reset(struct scsi_cmnd *cmd)
206{ 165{
166 struct Scsi_Host *instance = cmd->device->host;
167
207 /* FIXME perform bus-specific reset */ 168 /* FIXME perform bus-specific reset */
208 169
209 /* FIXME 2: kill this entire function, which should 170 /* FIXME 2: kill this entire function, which should
210 cause mid-layer to call wd33c93_host_reset anyway? */ 171 cause mid-layer to call wd33c93_host_reset anyway? */
211 172
212 spin_lock_irq(cmd->device->host->host_lock); 173 spin_lock_irq(instance->host_lock);
213 wd33c93_host_reset(cmd); 174 wd33c93_host_reset(cmd);
214 spin_unlock_irq(cmd->device->host->host_lock); 175 spin_unlock_irq(instance->host_lock);
215 176
216 return SUCCESS; 177 return SUCCESS;
217} 178}
218 179
219#define HOSTS_C 180static struct scsi_host_template amiga_a3000_scsi_template = {
220 181 .module = THIS_MODULE,
221static struct scsi_host_template driver_template = {
222 .proc_name = "A3000",
223 .name = "Amiga 3000 built-in SCSI", 182 .name = "Amiga 3000 built-in SCSI",
224 .detect = a3000_detect, 183 .proc_info = wd33c93_proc_info,
225 .release = a3000_release, 184 .proc_name = "A3000",
226 .queuecommand = wd33c93_queuecommand, 185 .queuecommand = wd33c93_queuecommand,
227 .eh_abort_handler = wd33c93_abort, 186 .eh_abort_handler = wd33c93_abort,
228 .eh_bus_reset_handler = a3000_bus_reset, 187 .eh_bus_reset_handler = a3000_bus_reset,
@@ -234,15 +193,104 @@ static struct scsi_host_template driver_template = {
234 .use_clustering = ENABLE_CLUSTERING 193 .use_clustering = ENABLE_CLUSTERING
235}; 194};
236 195
196static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
197{
198 struct resource *res;
199 struct Scsi_Host *instance;
200 int error;
201 struct a3000_scsiregs *regs;
202 wd33c93_regs wdregs;
203 struct a3000_hostdata *hdata;
204
205 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
206 if (!res)
207 return -ENODEV;
208
209 if (!request_mem_region(res->start, resource_size(res), "wd33c93"))
210 return -EBUSY;
211
212 instance = scsi_host_alloc(&amiga_a3000_scsi_template,
213 sizeof(struct a3000_hostdata));
214 if (!instance) {
215 error = -ENOMEM;
216 goto fail_alloc;
217 }
218
219 instance->irq = IRQ_AMIGA_PORTS;
237 220
238#include "scsi_module.c" 221 regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start);
222 regs->DAWR = DAWR_A3000;
223
224 wdregs.SASR = &regs->SASR;
225 wdregs.SCMD = &regs->SCMD;
226
227 hdata = shost_priv(instance);
228 hdata->wh.no_sync = 0xff;
229 hdata->wh.fast = 0;
230 hdata->wh.dma_mode = CTRL_DMA;
231 hdata->regs = regs;
232
233 wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15);
234 error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED,
235 "A3000 SCSI", instance);
236 if (error)
237 goto fail_irq;
238
239 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
240
241 error = scsi_add_host(instance, NULL);
242 if (error)
243 goto fail_host;
244
245 platform_set_drvdata(pdev, instance);
246
247 scsi_scan_host(instance);
248 return 0;
249
250fail_host:
251 free_irq(IRQ_AMIGA_PORTS, instance);
252fail_irq:
253 scsi_host_put(instance);
254fail_alloc:
255 release_mem_region(res->start, resource_size(res));
256 return error;
257}
258
259static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
260{
261 struct Scsi_Host *instance = platform_get_drvdata(pdev);
262 struct a3000_hostdata *hdata = shost_priv(instance);
263 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
264
265 hdata->regs->CNTR = 0;
266 scsi_remove_host(instance);
267 free_irq(IRQ_AMIGA_PORTS, instance);
268 scsi_host_put(instance);
269 release_mem_region(res->start, resource_size(res));
270 return 0;
271}
272
273static struct platform_driver amiga_a3000_scsi_driver = {
274 .remove = __exit_p(amiga_a3000_scsi_remove),
275 .driver = {
276 .name = "amiga-a3000-scsi",
277 .owner = THIS_MODULE,
278 },
279};
280
281static int __init amiga_a3000_scsi_init(void)
282{
283 return platform_driver_probe(&amiga_a3000_scsi_driver,
284 amiga_a3000_scsi_probe);
285}
286module_init(amiga_a3000_scsi_init);
239 287
240static int a3000_release(struct Scsi_Host *instance) 288static void __exit amiga_a3000_scsi_exit(void)
241{ 289{
242 DMA(instance)->CNTR = 0; 290 platform_driver_unregister(&amiga_a3000_scsi_driver);
243 release_mem_region(0xDD0000, 256);
244 free_irq(IRQ_AMIGA_PORTS, a3000_intr);
245 return 1;
246} 291}
292module_exit(amiga_a3000_scsi_exit);
247 293
294MODULE_DESCRIPTION("Amiga 3000 built-in SCSI");
248MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
296MODULE_ALIAS("platform:amiga-a3000-scsi");
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index 684813ee378c..49db4a335aab 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -25,7 +25,7 @@
25 */ 25 */
26#define A3000_XFER_MASK (0x00000003) 26#define A3000_XFER_MASK (0x00000003)
27 27
28typedef struct { 28struct a3000_scsiregs {
29 unsigned char pad1[2]; 29 unsigned char pad1[2];
30 volatile unsigned short DAWR; 30 volatile unsigned short DAWR;
31 volatile unsigned int WTC; 31 volatile unsigned int WTC;
@@ -46,7 +46,7 @@ typedef struct {
46 volatile unsigned char SASR; 46 volatile unsigned char SASR;
47 unsigned char pad9; 47 unsigned char pad9;
48 volatile unsigned char SCMD; 48 volatile unsigned char SCMD;
49} a3000_scsiregs; 49};
50 50
51#define DAWR_A3000 (3) 51#define DAWR_A3000 (3)
52 52
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 11ae6be8aeaf..23c76f41883c 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -20,10 +20,6 @@
20 20
21#include "53c700.h" 21#include "53c700.h"
22 22
23MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
24MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
25MODULE_LICENSE("GPL");
26
27 23
28static struct scsi_host_template a4000t_scsi_driver_template = { 24static struct scsi_host_template a4000t_scsi_driver_template = {
29 .name = "A4000T builtin SCSI", 25 .name = "A4000T builtin SCSI",
@@ -32,30 +28,35 @@ static struct scsi_host_template a4000t_scsi_driver_template = {
32 .module = THIS_MODULE, 28 .module = THIS_MODULE,
33}; 29};
34 30
35static struct platform_device *a4000t_scsi_device;
36 31
37#define A4000T_SCSI_ADDR 0xdd0040 32#define A4000T_SCSI_OFFSET 0x40
38 33
39static int __devinit a4000t_probe(struct platform_device *dev) 34static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
40{ 35{
41 struct Scsi_Host *host; 36 struct resource *res;
37 phys_addr_t scsi_addr;
42 struct NCR_700_Host_Parameters *hostdata; 38 struct NCR_700_Host_Parameters *hostdata;
39 struct Scsi_Host *host;
43 40
44 if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI))) 41 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
45 goto out; 42 if (!res)
43 return -ENODEV;
46 44
47 if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000, 45 if (!request_mem_region(res->start, resource_size(res),
48 "A4000T builtin SCSI")) 46 "A4000T builtin SCSI"))
49 goto out; 47 return -EBUSY;
50 48
51 hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); 49 hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters),
50 GFP_KERNEL);
52 if (!hostdata) { 51 if (!hostdata) {
53 printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n"); 52 dev_err(&pdev->dev, "Failed to allocate host data\n");
54 goto out_release; 53 goto out_release;
55 } 54 }
56 55
56 scsi_addr = res->start + A4000T_SCSI_OFFSET;
57
57 /* Fill in the required pieces of hostdata */ 58 /* Fill in the required pieces of hostdata */
58 hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR); 59 hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr);
59 hostdata->clock = 50; 60 hostdata->clock = 50;
60 hostdata->chip710 = 1; 61 hostdata->chip710 = 1;
61 hostdata->dmode_extra = DMODE_FC2; 62 hostdata->dmode_extra = DMODE_FC2;
@@ -63,26 +64,25 @@ static int __devinit a4000t_probe(struct platform_device *dev)
63 64
64 /* and register the chip */ 65 /* and register the chip */
65 host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, 66 host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata,
66 &dev->dev); 67 &pdev->dev);
67 if (!host) { 68 if (!host) {
68 printk(KERN_ERR "a4000t-scsi: No host detected; " 69 dev_err(&pdev->dev,
69 "board configuration problem?\n"); 70 "No host detected; board configuration problem?\n");
70 goto out_free; 71 goto out_free;
71 } 72 }
72 73
73 host->this_id = 7; 74 host->this_id = 7;
74 host->base = A4000T_SCSI_ADDR; 75 host->base = scsi_addr;
75 host->irq = IRQ_AMIGA_PORTS; 76 host->irq = IRQ_AMIGA_PORTS;
76 77
77 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", 78 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi",
78 host)) { 79 host)) {
79 printk(KERN_ERR "a4000t-scsi: request_irq failed\n"); 80 dev_err(&pdev->dev, "request_irq failed\n");
80 goto out_put_host; 81 goto out_put_host;
81 } 82 }
82 83
83 platform_set_drvdata(dev, host); 84 platform_set_drvdata(pdev, host);
84 scsi_scan_host(host); 85 scsi_scan_host(host);
85
86 return 0; 86 return 0;
87 87
88 out_put_host: 88 out_put_host:
@@ -90,58 +90,49 @@ static int __devinit a4000t_probe(struct platform_device *dev)
90 out_free: 90 out_free:
91 kfree(hostdata); 91 kfree(hostdata);
92 out_release: 92 out_release:
93 release_mem_region(A4000T_SCSI_ADDR, 0x1000); 93 release_mem_region(res->start, resource_size(res));
94 out:
95 return -ENODEV; 94 return -ENODEV;
96} 95}
97 96
98static __devexit int a4000t_device_remove(struct platform_device *dev) 97static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
99{ 98{
100 struct Scsi_Host *host = platform_get_drvdata(dev); 99 struct Scsi_Host *host = platform_get_drvdata(pdev);
101 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); 100 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
101 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
102 102
103 scsi_remove_host(host); 103 scsi_remove_host(host);
104
105 NCR_700_release(host); 104 NCR_700_release(host);
106 kfree(hostdata); 105 kfree(hostdata);
107 free_irq(host->irq, host); 106 free_irq(host->irq, host);
108 release_mem_region(A4000T_SCSI_ADDR, 0x1000); 107 release_mem_region(res->start, resource_size(res));
109
110 return 0; 108 return 0;
111} 109}
112 110
113static struct platform_driver a4000t_scsi_driver = { 111static struct platform_driver amiga_a4000t_scsi_driver = {
114 .driver = { 112 .remove = __exit_p(amiga_a4000t_scsi_remove),
115 .name = "a4000t-scsi", 113 .driver = {
116 .owner = THIS_MODULE, 114 .name = "amiga-a4000t-scsi",
115 .owner = THIS_MODULE,
117 }, 116 },
118 .probe = a4000t_probe,
119 .remove = __devexit_p(a4000t_device_remove),
120}; 117};
121 118
122static int __init a4000t_scsi_init(void) 119static int __init amiga_a4000t_scsi_init(void)
123{ 120{
124 int err; 121 return platform_driver_probe(&amiga_a4000t_scsi_driver,
125 122 amiga_a4000t_scsi_probe);
126 err = platform_driver_register(&a4000t_scsi_driver);
127 if (err)
128 return err;
129
130 a4000t_scsi_device = platform_device_register_simple("a4000t-scsi",
131 -1, NULL, 0);
132 if (IS_ERR(a4000t_scsi_device)) {
133 platform_driver_unregister(&a4000t_scsi_driver);
134 return PTR_ERR(a4000t_scsi_device);
135 }
136
137 return err;
138} 123}
139 124
140static void __exit a4000t_scsi_exit(void) 125module_init(amiga_a4000t_scsi_init);
126
127static void __exit amiga_a4000t_scsi_exit(void)
141{ 128{
142 platform_device_unregister(a4000t_scsi_device); 129 platform_driver_unregister(&amiga_a4000t_scsi_driver);
143 platform_driver_unregister(&a4000t_scsi_driver);
144} 130}
145 131
146module_init(a4000t_scsi_init); 132module_exit(amiga_a4000t_scsi_exit);
147module_exit(a4000t_scsi_exit); 133
134MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / "
135 "Kars de Jong <jongk@linux-m68k.org>");
136MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
137MODULE_LICENSE("GPL");
138MODULE_ALIAS("platform:amiga-a4000t-scsi");
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9c0c91178538..1a5bf5724750 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
655 /* Does this really need to be GFP_DMA? */ 655 /* Does this really need to be GFP_DMA? */
656 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); 656 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
657 if(!p) { 657 if(!p) {
658 kfree (usg); 658 dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
659 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
660 usg->sg[i].count,i,usg->count)); 659 usg->sg[i].count,i,usg->count));
660 kfree(usg);
661 rcode = -ENOMEM; 661 rcode = -ENOMEM;
662 goto cleanup; 662 goto cleanup;
663 } 663 }
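
The aacraid hunk above appears to fix a use-after-free in an error path: the old code called kfree(usg) and only then passed usg->sg[i].count and usg->count to the debug print, while the new code prints first and frees afterwards. A minimal user-space sketch of the same "report, then free" ordering (hypothetical names, not aacraid code):

#include <stdio.h>
#include <stdlib.h>

struct usg_like {
        unsigned int count;     /* size we are about to allocate */
};

/* Error path in the style of the fixed hunk: log the failure while the
 * descriptor is still valid, and only then release it. */
static int alloc_sg_buffer(struct usg_like *usg)
{
        void *p = malloc(usg->count);

        if (!p) {
                fprintf(stderr, "could not allocate SG buffer - size = %u\n",
                        usg->count);    /* usg is still live here */
                free(usg);              /* freed only after the message */
                return -1;
        }
        free(p);
        return 0;
}

int main(void)
{
        struct usg_like *usg = malloc(sizeof(*usg));

        if (!usg)
                return 1;
        usg->count = 64;
        if (alloc_sg_buffer(usg))
                return 1;       /* usg already freed on the error path */
        free(usg);
        return 0;
}

The ordering is the whole fix: any field of usg that the log statement reads must be read before kfree()/free() runs.
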
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ab646e580d64..ce5371b3cdd5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct device_attribute;
48/*The limit of outstanding scsi command that firmware can handle*/ 48/*The limit of outstanding scsi command that firmware can handle*/
49#define ARCMSR_MAX_OUTSTANDING_CMD 256 49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 320 50#define ARCMSR_MAX_FREECCB_NUM 320
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" 51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03"
52#define ARCMSR_SCSI_INITIATOR_ID 255 52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512 53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_XFER_SECTORS_B 4096 54#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -110,6 +110,8 @@ struct CMD_MESSAGE_FIELD
110#define FUNCTION_SAY_HELLO 0x0807 110#define FUNCTION_SAY_HELLO 0x0807
111#define FUNCTION_SAY_GOODBYE 0x0808 111#define FUNCTION_SAY_GOODBYE 0x0808
112#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 112#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
113#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
114#define FUNCTION_HARDWARE_RESET 0x080B
113/* ARECA IO CONTROL CODE*/ 115/* ARECA IO CONTROL CODE*/
114#define ARCMSR_MESSAGE_READ_RQBUFFER \ 116#define ARCMSR_MESSAGE_READ_RQBUFFER \
115 ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER 117 ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER
@@ -133,6 +135,7 @@ struct CMD_MESSAGE_FIELD
133#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 135#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
134#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 136#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
135#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F 137#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
138#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088
136/* 139/*
137************************************************************* 140*************************************************************
138** structure for holding DMA address data 141** structure for holding DMA address data
@@ -341,13 +344,13 @@ struct MessageUnit_B
341 uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; 344 uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
342 uint32_t postq_index; 345 uint32_t postq_index;
343 uint32_t doneq_index; 346 uint32_t doneq_index;
344 void __iomem *drv2iop_doorbell_reg; 347 uint32_t __iomem *drv2iop_doorbell_reg;
345 void __iomem *drv2iop_doorbell_mask_reg; 348 uint32_t __iomem *drv2iop_doorbell_mask_reg;
346 void __iomem *iop2drv_doorbell_reg; 349 uint32_t __iomem *iop2drv_doorbell_reg;
347 void __iomem *iop2drv_doorbell_mask_reg; 350 uint32_t __iomem *iop2drv_doorbell_mask_reg;
348 void __iomem *msgcode_rwbuffer_reg; 351 uint32_t __iomem *msgcode_rwbuffer_reg;
349 void __iomem *ioctl_wbuffer_reg; 352 uint32_t __iomem *ioctl_wbuffer_reg;
350 void __iomem *ioctl_rbuffer_reg; 353 uint32_t __iomem *ioctl_rbuffer_reg;
351}; 354};
352 355
353/* 356/*
@@ -375,6 +378,7 @@ struct AdapterControlBlock
375 /* message unit ATU inbound base address0 */ 378 /* message unit ATU inbound base address0 */
376 379
377 uint32_t acb_flags; 380 uint32_t acb_flags;
381 uint8_t adapter_index;
378 #define ACB_F_SCSISTOPADAPTER 0x0001 382 #define ACB_F_SCSISTOPADAPTER 0x0001
379 #define ACB_F_MSG_STOP_BGRB 0x0002 383 #define ACB_F_MSG_STOP_BGRB 0x0002
380 /* stop RAID background rebuild */ 384 /* stop RAID background rebuild */
@@ -390,7 +394,7 @@ struct AdapterControlBlock
390 #define ACB_F_BUS_RESET 0x0080 394 #define ACB_F_BUS_RESET 0x0080
391 #define ACB_F_IOP_INITED 0x0100 395 #define ACB_F_IOP_INITED 0x0100
392 /* iop init */ 396 /* iop init */
393 397 #define ACB_F_FIRMWARE_TRAP 0x0400
394 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; 398 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
395 /* used for memory free */ 399 /* used for memory free */
396 struct list_head ccb_free_list; 400 struct list_head ccb_free_list;
@@ -423,12 +427,19 @@ struct AdapterControlBlock
423#define ARECA_RAID_GOOD 0xaa 427#define ARECA_RAID_GOOD 0xaa
424 uint32_t num_resets; 428 uint32_t num_resets;
425 uint32_t num_aborts; 429 uint32_t num_aborts;
430 uint32_t signature;
426 uint32_t firm_request_len; 431 uint32_t firm_request_len;
427 uint32_t firm_numbers_queue; 432 uint32_t firm_numbers_queue;
428 uint32_t firm_sdram_size; 433 uint32_t firm_sdram_size;
429 uint32_t firm_hd_channels; 434 uint32_t firm_hd_channels;
430 char firm_model[12]; 435 char firm_model[12];
431 char firm_version[20]; 436 char firm_version[20];
437 char device_map[20]; /*21,84-99*/
438 struct work_struct arcmsr_do_message_isr_bh;
439 struct timer_list eternal_timer;
440 unsigned short fw_state;
441 atomic_t rq_map_token;
442 int ante_token_value;
432};/* HW_DEVICE_EXTENSION */ 443};/* HW_DEVICE_EXTENSION */
433/* 444/*
434******************************************************************************* 445*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c50c436..07fdfe57e38e 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -192,6 +192,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
192 .attr = { 192 .attr = {
193 .name = "mu_read", 193 .name = "mu_read",
194 .mode = S_IRUSR , 194 .mode = S_IRUSR ,
195 .owner = THIS_MODULE,
195 }, 196 },
196 .size = 1032, 197 .size = 1032,
197 .read = arcmsr_sysfs_iop_message_read, 198 .read = arcmsr_sysfs_iop_message_read,
@@ -201,6 +202,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
201 .attr = { 202 .attr = {
202 .name = "mu_write", 203 .name = "mu_write",
203 .mode = S_IWUSR, 204 .mode = S_IWUSR,
205 .owner = THIS_MODULE,
204 }, 206 },
205 .size = 1032, 207 .size = 1032,
206 .write = arcmsr_sysfs_iop_message_write, 208 .write = arcmsr_sysfs_iop_message_write,
@@ -210,6 +212,7 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
210 .attr = { 212 .attr = {
211 .name = "mu_clear", 213 .name = "mu_clear",
212 .mode = S_IWUSR, 214 .mode = S_IWUSR,
215 .owner = THIS_MODULE,
213 }, 216 },
214 .size = 1, 217 .size = 1,
215 .write = arcmsr_sysfs_iop_message_clear, 218 .write = arcmsr_sysfs_iop_message_clear,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ffbe2192da3c..ffa54792bb33 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -72,8 +72,16 @@
72#include <scsi/scsicam.h> 72#include <scsi/scsicam.h>
73#include "arcmsr.h" 73#include "arcmsr.h"
74 74
75#ifdef CONFIG_SCSI_ARCMSR_RESET
76 static int sleeptime = 20;
77 static int retrycount = 12;
78 module_param(sleeptime, int, S_IRUGO|S_IWUSR);
79 MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset");
80 module_param(retrycount, int, S_IRUGO|S_IWUSR);
81 MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset");
82#endif
75MODULE_AUTHOR("Erich Chen <support@areca.com.tw>"); 83MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
76MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter"); 84MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter");
77MODULE_LICENSE("Dual BSD/GPL"); 85MODULE_LICENSE("Dual BSD/GPL");
78MODULE_VERSION(ARCMSR_DRIVER_VERSION); 86MODULE_VERSION(ARCMSR_DRIVER_VERSION);
79 87
@@ -96,6 +104,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
96static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); 104static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
97static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); 105static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
98static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); 106static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
107static void arcmsr_request_device_map(unsigned long pacb);
108static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
109static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
110static void arcmsr_message_isr_bh_fn(struct work_struct *work);
111static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode);
112static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
113
99static const char *arcmsr_info(struct Scsi_Host *); 114static const char *arcmsr_info(struct Scsi_Host *);
100static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); 115static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
101static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, 116static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
@@ -112,7 +127,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
112 127
113static struct scsi_host_template arcmsr_scsi_host_template = { 128static struct scsi_host_template arcmsr_scsi_host_template = {
114 .module = THIS_MODULE, 129 .module = THIS_MODULE,
115 .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter" 130 .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter"
116 ARCMSR_DRIVER_VERSION, 131 ARCMSR_DRIVER_VERSION,
117 .info = arcmsr_info, 132 .info = arcmsr_info,
118 .queuecommand = arcmsr_queue_command, 133 .queuecommand = arcmsr_queue_command,
@@ -128,16 +143,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
128 .use_clustering = ENABLE_CLUSTERING, 143 .use_clustering = ENABLE_CLUSTERING,
129 .shost_attrs = arcmsr_host_attrs, 144 .shost_attrs = arcmsr_host_attrs,
130}; 145};
131#ifdef CONFIG_SCSI_ARCMSR_AER
132static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
133static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
134 pci_channel_state_t state);
135
136static struct pci_error_handlers arcmsr_pci_error_handlers = {
137 .error_detected = arcmsr_pci_error_detected,
138 .slot_reset = arcmsr_pci_slot_reset,
139};
140#endif
141static struct pci_device_id arcmsr_device_id_table[] = { 146static struct pci_device_id arcmsr_device_id_table[] = {
142 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, 147 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
143 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, 148 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
@@ -166,9 +171,6 @@ static struct pci_driver arcmsr_pci_driver = {
166 .probe = arcmsr_probe, 171 .probe = arcmsr_probe,
167 .remove = arcmsr_remove, 172 .remove = arcmsr_remove,
168 .shutdown = arcmsr_shutdown, 173 .shutdown = arcmsr_shutdown,
169 #ifdef CONFIG_SCSI_ARCMSR_AER
170 .err_handler = &arcmsr_pci_error_handlers,
171 #endif
172}; 174};
173 175
174static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) 176static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
@@ -236,10 +238,9 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
236 void *dma_coherent; 238 void *dma_coherent;
237 dma_addr_t dma_coherent_handle, dma_addr; 239 dma_addr_t dma_coherent_handle, dma_addr;
238 struct CommandControlBlock *ccb_tmp; 240 struct CommandControlBlock *ccb_tmp;
239 uint32_t intmask_org;
240 int i, j; 241 int i, j;
241 242
242 acb->pmuA = pci_ioremap_bar(pdev, 0); 243 acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
243 if (!acb->pmuA) { 244 if (!acb->pmuA) {
244 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", 245 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
245 acb->host->host_no); 246 acb->host->host_no);
@@ -281,12 +282,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
281 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 282 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
282 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 283 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
283 acb->devstate[i][j] = ARECA_RAID_GONE; 284 acb->devstate[i][j] = ARECA_RAID_GONE;
284
285 /*
286 ** here we need to tell iop 331 our ccb_tmp.HighPart
287 ** if ccb_tmp.HighPart is not zero
288 */
289 intmask_org = arcmsr_disable_outbound_ints(acb);
290 } 285 }
291 break; 286 break;
292 287
@@ -297,7 +292,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
297 void __iomem *mem_base0, *mem_base1; 292 void __iomem *mem_base0, *mem_base1;
298 void *dma_coherent; 293 void *dma_coherent;
299 dma_addr_t dma_coherent_handle, dma_addr; 294 dma_addr_t dma_coherent_handle, dma_addr;
300 uint32_t intmask_org;
301 struct CommandControlBlock *ccb_tmp; 295 struct CommandControlBlock *ccb_tmp;
302 int i, j; 296 int i, j;
303 297
@@ -333,11 +327,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
333 reg = (struct MessageUnit_B *)(dma_coherent + 327 reg = (struct MessageUnit_B *)(dma_coherent +
334 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); 328 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
335 acb->pmuB = reg; 329 acb->pmuB = reg;
336 mem_base0 = pci_ioremap_bar(pdev, 0); 330 mem_base0 = ioremap(pci_resource_start(pdev, 0),
331 pci_resource_len(pdev, 0));
337 if (!mem_base0) 332 if (!mem_base0)
338 goto out; 333 goto out;
339 334
340 mem_base1 = pci_ioremap_bar(pdev, 2); 335 mem_base1 = ioremap(pci_resource_start(pdev, 2),
336 pci_resource_len(pdev, 2));
341 if (!mem_base1) { 337 if (!mem_base1) {
342 iounmap(mem_base0); 338 iounmap(mem_base0);
343 goto out; 339 goto out;
@@ -357,12 +353,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
357 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 353 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
358 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 354 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
359 acb->devstate[i][j] = ARECA_RAID_GOOD; 355 acb->devstate[i][j] = ARECA_RAID_GOOD;
360
361 /*
362 ** here we need to tell iop 331 our ccb_tmp.HighPart
363 ** if ccb_tmp.HighPart is not zero
364 */
365 intmask_org = arcmsr_disable_outbound_ints(acb);
366 } 356 }
367 break; 357 break;
368 } 358 }
@@ -374,6 +364,88 @@ out:
374 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); 364 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
375 return -ENOMEM; 365 return -ENOMEM;
376} 366}
367static void arcmsr_message_isr_bh_fn(struct work_struct *work)
368{
369 struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh);
370
371 switch (acb->adapter_type) {
372 case ACB_ADAPTER_TYPE_A: {
373
374 struct MessageUnit_A __iomem *reg = acb->pmuA;
375 char *acb_dev_map = (char *)acb->device_map;
376 uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]);
377 char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]);
378 int target, lun;
379 struct scsi_device *psdev;
380 char diff;
381
382 atomic_inc(&acb->rq_map_token);
383 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
384 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
385 diff = (*acb_dev_map)^readb(devicemap);
386 if (diff != 0) {
387 char temp;
388 *acb_dev_map = readb(devicemap);
389 temp = *acb_dev_map;
390 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
391 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
392 scsi_add_device(acb->host, 0, target, lun);
393 } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
394 psdev = scsi_device_lookup(acb->host, 0, target, lun);
395 if (psdev != NULL) {
396 scsi_remove_device(psdev);
397 scsi_device_put(psdev);
398 }
399 }
400 temp >>= 1;
401 diff >>= 1;
402 }
403 }
404 devicemap++;
405 acb_dev_map++;
406 }
407 }
408 break;
409 }
410
411 case ACB_ADAPTER_TYPE_B: {
412 struct MessageUnit_B *reg = acb->pmuB;
413 char *acb_dev_map = (char *)acb->device_map;
414 uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]);
415 char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]);
416 int target, lun;
417 struct scsi_device *psdev;
418 char diff;
419
420 atomic_inc(&acb->rq_map_token);
421 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
422 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
423 diff = (*acb_dev_map)^readb(devicemap);
424 if (diff != 0) {
425 char temp;
426 *acb_dev_map = readb(devicemap);
427 temp = *acb_dev_map;
428 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
429 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
430 scsi_add_device(acb->host, 0, target, lun);
431 } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
432 psdev = scsi_device_lookup(acb->host, 0, target, lun);
433 if (psdev != NULL) {
434 scsi_remove_device(psdev);
435 scsi_device_put(psdev);
436 }
437 }
438 temp >>= 1;
439 diff >>= 1;
440 }
441 }
442 devicemap++;
443 acb_dev_map++;
444 }
445 }
446 }
447 }
448}
377 449
378static int arcmsr_probe(struct pci_dev *pdev, 450static int arcmsr_probe(struct pci_dev *pdev,
379 const struct pci_device_id *id) 451 const struct pci_device_id *id)
@@ -432,17 +504,17 @@ static int arcmsr_probe(struct pci_dev *pdev,
432 ACB_F_MESSAGE_WQBUFFER_READED); 504 ACB_F_MESSAGE_WQBUFFER_READED);
433 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 505 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
434 INIT_LIST_HEAD(&acb->ccb_free_list); 506 INIT_LIST_HEAD(&acb->ccb_free_list);
435 507 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
436 error = arcmsr_alloc_ccb_pool(acb); 508 error = arcmsr_alloc_ccb_pool(acb);
437 if (error) 509 if (error)
438 goto out_release_regions; 510 goto out_release_regions;
439 511
512 arcmsr_iop_init(acb);
440 error = request_irq(pdev->irq, arcmsr_do_interrupt, 513 error = request_irq(pdev->irq, arcmsr_do_interrupt,
441 IRQF_SHARED, "arcmsr", acb); 514 IRQF_SHARED, "arcmsr", acb);
442 if (error) 515 if (error)
443 goto out_free_ccb_pool; 516 goto out_free_ccb_pool;
444 517
445 arcmsr_iop_init(acb);
446 pci_set_drvdata(pdev, host); 518 pci_set_drvdata(pdev, host);
447 if (strncmp(acb->firm_version, "V1.42", 5) >= 0) 519 if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
448 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; 520 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
@@ -459,6 +531,14 @@ static int arcmsr_probe(struct pci_dev *pdev,
459 #ifdef CONFIG_SCSI_ARCMSR_AER 531 #ifdef CONFIG_SCSI_ARCMSR_AER
460 pci_enable_pcie_error_reporting(pdev); 532 pci_enable_pcie_error_reporting(pdev);
461 #endif 533 #endif
534 atomic_set(&acb->rq_map_token, 16);
535 acb->fw_state = true;
536 init_timer(&acb->eternal_timer);
537 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ);
538 acb->eternal_timer.data = (unsigned long) acb;
539 acb->eternal_timer.function = &arcmsr_request_device_map;
540 add_timer(&acb->eternal_timer);
541
462 return 0; 542 return 0;
463 out_free_sysfs: 543 out_free_sysfs:
464 out_free_irq: 544 out_free_irq:
@@ -518,40 +598,48 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
518 return 0xff; 598 return 0xff;
519} 599}
520 600
521static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) 601static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
522{ 602{
523 struct MessageUnit_A __iomem *reg = acb->pmuA; 603 struct MessageUnit_A __iomem *reg = acb->pmuA;
524 604
525 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); 605 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
526 if (arcmsr_hba_wait_msgint_ready(acb)) 606 if (arcmsr_hba_wait_msgint_ready(acb)) {
527 printk(KERN_NOTICE 607 printk(KERN_NOTICE
528 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 608 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
529 , acb->host->host_no); 609 , acb->host->host_no);
610 return 0xff;
611 }
612 return 0x00;
530} 613}
531 614
532static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) 615static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
533{ 616{
534 struct MessageUnit_B *reg = acb->pmuB; 617 struct MessageUnit_B *reg = acb->pmuB;
535 618
536 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); 619 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
537 if (arcmsr_hbb_wait_msgint_ready(acb)) 620 if (arcmsr_hbb_wait_msgint_ready(acb)) {
538 printk(KERN_NOTICE 621 printk(KERN_NOTICE
539 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 622 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
540 , acb->host->host_no); 623 , acb->host->host_no);
624 return 0xff;
625 }
626 return 0x00;
541} 627}
542 628
543static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 629static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
544{ 630{
631 uint8_t rtnval = 0;
545 switch (acb->adapter_type) { 632 switch (acb->adapter_type) {
546 case ACB_ADAPTER_TYPE_A: { 633 case ACB_ADAPTER_TYPE_A: {
547 arcmsr_abort_hba_allcmd(acb); 634 rtnval = arcmsr_abort_hba_allcmd(acb);
548 } 635 }
549 break; 636 break;
550 637
551 case ACB_ADAPTER_TYPE_B: { 638 case ACB_ADAPTER_TYPE_B: {
552 arcmsr_abort_hbb_allcmd(acb); 639 rtnval = arcmsr_abort_hbb_allcmd(acb);
553 } 640 }
554 } 641 }
642 return rtnval;
555} 643}
556 644
557static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) 645static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
@@ -649,8 +737,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
649 737
650 case ACB_ADAPTER_TYPE_A : { 738 case ACB_ADAPTER_TYPE_A : {
651 struct MessageUnit_A __iomem *reg = acb->pmuA; 739 struct MessageUnit_A __iomem *reg = acb->pmuA;
652 orig_mask = readl(&reg->outbound_intmask)|\ 740 orig_mask = readl(&reg->outbound_intmask);
653 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
654 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ 741 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
655 &reg->outbound_intmask); 742 &reg->outbound_intmask);
656 } 743 }
@@ -658,8 +745,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
658 745
659 case ACB_ADAPTER_TYPE_B : { 746 case ACB_ADAPTER_TYPE_B : {
660 struct MessageUnit_B *reg = acb->pmuB; 747 struct MessageUnit_B *reg = acb->pmuB;
661 orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ 748 orig_mask = readl(reg->iop2drv_doorbell_mask_reg);
662 (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
663 writel(0, reg->iop2drv_doorbell_mask_reg); 749 writel(0, reg->iop2drv_doorbell_mask_reg);
664 } 750 }
665 break; 751 break;
@@ -795,12 +881,13 @@ static void arcmsr_remove(struct pci_dev *pdev)
795 struct AdapterControlBlock *acb = 881 struct AdapterControlBlock *acb =
796 (struct AdapterControlBlock *) host->hostdata; 882 (struct AdapterControlBlock *) host->hostdata;
797 int poll_count = 0; 883 int poll_count = 0;
798
799 arcmsr_free_sysfs_attr(acb); 884 arcmsr_free_sysfs_attr(acb);
800 scsi_remove_host(host); 885 scsi_remove_host(host);
886 flush_scheduled_work();
887 del_timer_sync(&acb->eternal_timer);
888 arcmsr_disable_outbound_ints(acb);
801 arcmsr_stop_adapter_bgrb(acb); 889 arcmsr_stop_adapter_bgrb(acb);
802 arcmsr_flush_adapter_cache(acb); 890 arcmsr_flush_adapter_cache(acb);
803 arcmsr_disable_outbound_ints(acb);
804 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 891 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
805 acb->acb_flags &= ~ACB_F_IOP_INITED; 892 acb->acb_flags &= ~ACB_F_IOP_INITED;
806 893
@@ -841,7 +928,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
841 struct Scsi_Host *host = pci_get_drvdata(pdev); 928 struct Scsi_Host *host = pci_get_drvdata(pdev);
842 struct AdapterControlBlock *acb = 929 struct AdapterControlBlock *acb =
843 (struct AdapterControlBlock *)host->hostdata; 930 (struct AdapterControlBlock *)host->hostdata;
844 931 del_timer_sync(&acb->eternal_timer);
932 arcmsr_disable_outbound_ints(acb);
933 flush_scheduled_work();
845 arcmsr_stop_adapter_bgrb(acb); 934 arcmsr_stop_adapter_bgrb(acb);
846 arcmsr_flush_adapter_cache(acb); 935 arcmsr_flush_adapter_cache(acb);
847} 936}
@@ -861,7 +950,7 @@ static void arcmsr_module_exit(void)
861module_init(arcmsr_module_init); 950module_init(arcmsr_module_init);
862module_exit(arcmsr_module_exit); 951module_exit(arcmsr_module_exit);
863 952
864static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ 953static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
865 u32 intmask_org) 954 u32 intmask_org)
866{ 955{
867 u32 mask; 956 u32 mask;
@@ -871,7 +960,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
871 case ACB_ADAPTER_TYPE_A : { 960 case ACB_ADAPTER_TYPE_A : {
872 struct MessageUnit_A __iomem *reg = acb->pmuA; 961 struct MessageUnit_A __iomem *reg = acb->pmuA;
873 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | 962 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
874 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 963 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
964 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
875 writel(mask, &reg->outbound_intmask); 965 writel(mask, &reg->outbound_intmask);
876 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; 966 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
877 } 967 }
@@ -879,8 +969,10 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
879 969
880 case ACB_ADAPTER_TYPE_B : { 970 case ACB_ADAPTER_TYPE_B : {
881 struct MessageUnit_B *reg = acb->pmuB; 971 struct MessageUnit_B *reg = acb->pmuB;
882 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ 972 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
883 ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); 973 ARCMSR_IOP2DRV_DATA_READ_OK |
974 ARCMSR_IOP2DRV_CDB_DONE |
975 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
884 writel(mask, reg->iop2drv_doorbell_mask_reg); 976 writel(mask, reg->iop2drv_doorbell_mask_reg);
885 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; 977 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
886 } 978 }
@@ -1048,8 +1140,8 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1048 } 1140 }
1049 case ACB_ADAPTER_TYPE_B: { 1141 case ACB_ADAPTER_TYPE_B: {
1050 struct MessageUnit_B *reg = acb->pmuB; 1142 struct MessageUnit_B *reg = acb->pmuB;
1051 iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); 1143 iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
1052 iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); 1144 iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
1053 dma_free_coherent(&acb->pdev->dev, 1145 dma_free_coherent(&acb->pdev->dev,
1054 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + 1146 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
1055 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); 1147 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
@@ -1249,13 +1341,36 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1249 reg->doneq_index = index; 1341 reg->doneq_index = index;
1250 } 1342 }
1251} 1343}
1344/*
1345**********************************************************************************
1346** Handle a message interrupt
1347**
1348** The only message interrupt we expect is in response to a query for the current adapter config.
1349** We want this in order to compare the drivemap so that we can detect newly-attached drives.
1350**********************************************************************************
1351*/
1352static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
1353{
1354 struct MessageUnit_A *reg = acb->pmuA;
1355
1356 /*clear interrupt and message state*/
1357 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
1358 schedule_work(&acb->arcmsr_do_message_isr_bh);
1359}
1360static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
1361{
1362 struct MessageUnit_B *reg = acb->pmuB;
1252 1363
1364 /*clear interrupt and message state*/
1365 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
1366 schedule_work(&acb->arcmsr_do_message_isr_bh);
1367}
1253static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) 1368static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1254{ 1369{
1255 uint32_t outbound_intstatus; 1370 uint32_t outbound_intstatus;
1256 struct MessageUnit_A __iomem *reg = acb->pmuA; 1371 struct MessageUnit_A __iomem *reg = acb->pmuA;
1257 1372
1258 outbound_intstatus = readl(&reg->outbound_intstatus) & \ 1373 outbound_intstatus = readl(&reg->outbound_intstatus) &
1259 acb->outbound_int_enable; 1374 acb->outbound_int_enable;
1260 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { 1375 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
1261 return 1; 1376 return 1;
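
The block comment a few lines up spells out the intent of these message interrupts: after a GET_CONFIG completes, the bottom half (arcmsr_message_isr_bh_fn, added earlier in this patch) XORs each cached per-target byte against the byte the firmware reports and attaches or detaches a SCSI device for every LUN bit that changed. A plain user-space sketch of that bit-diff walk, with hypothetical names and printf() standing in for scsi_add_device()/scsi_remove_device():

#include <stdio.h>

#define MAX_LUN 8

/* Compare one cached target byte with the freshly reported byte and
 * report every LUN whose presence bit flipped. */
static void diff_target_map(unsigned char *cached, unsigned char fresh,
                            int target)
{
        unsigned char diff = *cached ^ fresh;
        unsigned char temp = fresh;
        int lun;

        if (!diff)
                return;

        *cached = fresh;                        /* remember the new map */
        for (lun = 0; lun < MAX_LUN; lun++) {
                if ((diff & 0x01) && (temp & 0x01))
                        printf("target %d lun %d: attach\n", target, lun);
                else if ((diff & 0x01) && !(temp & 0x01))
                        printf("target %d lun %d: detach\n", target, lun);
                temp >>= 1;
                diff >>= 1;
        }
}

int main(void)
{
        unsigned char cached = 0x03;            /* LUNs 0 and 1 present */

        diff_target_map(&cached, 0x05, 0);      /* LUN 1 removed, LUN 2 added */
        return 0;
}

In the driver the same walk runs once per target, reading the firmware's copy with readb() from message_rwbuffer (type A) or msgcode_rwbuffer_reg (type B), starting at offset 21 as shown in the earlier hunk.
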
@@ -1267,6 +1382,10 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1267 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 1382 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1268 arcmsr_hba_postqueue_isr(acb); 1383 arcmsr_hba_postqueue_isr(acb);
1269 } 1384 }
1385 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1386 /* messenger of "driver to iop commands" */
1387 arcmsr_hba_message_isr(acb);
1388 }
1270 return 0; 1389 return 0;
1271} 1390}
1272 1391
@@ -1275,13 +1394,14 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1275 uint32_t outbound_doorbell; 1394 uint32_t outbound_doorbell;
1276 struct MessageUnit_B *reg = acb->pmuB; 1395 struct MessageUnit_B *reg = acb->pmuB;
1277 1396
1278 outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ 1397 outbound_doorbell = readl(reg->iop2drv_doorbell_reg) &
1279 acb->outbound_int_enable; 1398 acb->outbound_int_enable;
1280 if (!outbound_doorbell) 1399 if (!outbound_doorbell)
1281 return 1; 1400 return 1;
1282 1401
1283 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); 1402 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
1284 /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/ 1403 /*in case the last action of doorbell interrupt clearance is cached,
1404 this action can push HW to write down the clear bit*/
1285 readl(reg->iop2drv_doorbell_reg); 1405 readl(reg->iop2drv_doorbell_reg);
1286 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); 1406 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
1287 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1407 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
@@ -1293,6 +1413,10 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1293 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { 1413 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1294 arcmsr_hbb_postqueue_isr(acb); 1414 arcmsr_hbb_postqueue_isr(acb);
1295 } 1415 }
1416 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1417 /* messenger of "driver to iop commands" */
1418 arcmsr_hbb_message_isr(acb);
1419 }
1296 1420
1297 return 0; 1421 return 0;
1298} 1422}
@@ -1360,7 +1484,7 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1360 } 1484 }
1361} 1485}
1362 1486
1363static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ 1487static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1364 struct scsi_cmnd *cmd) 1488 struct scsi_cmnd *cmd)
1365{ 1489{
1366 struct CMD_MESSAGE_FIELD *pcmdmessagefld; 1490 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
@@ -1398,6 +1522,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1398 retvalue = ARCMSR_MESSAGE_FAIL; 1522 retvalue = ARCMSR_MESSAGE_FAIL;
1399 goto message_out; 1523 goto message_out;
1400 } 1524 }
1525
1526 if (!acb->fw_state) {
1527 pcmdmessagefld->cmdmessage.ReturnCode =
1528 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1529 goto message_out;
1530 }
1531
1401 ptmpQbuffer = ver_addr; 1532 ptmpQbuffer = ver_addr;
1402 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1533 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1403 && (allxfer_len < 1031)) { 1534 && (allxfer_len < 1031)) {
@@ -1444,6 +1575,12 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1444 retvalue = ARCMSR_MESSAGE_FAIL; 1575 retvalue = ARCMSR_MESSAGE_FAIL;
1445 goto message_out; 1576 goto message_out;
1446 } 1577 }
1578 if (!acb->fw_state) {
1579 pcmdmessagefld->cmdmessage.ReturnCode =
1580 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1581 goto message_out;
1582 }
1583
1447 ptmpuserbuffer = ver_addr; 1584 ptmpuserbuffer = ver_addr;
1448 user_len = pcmdmessagefld->cmdmessage.Length; 1585 user_len = pcmdmessagefld->cmdmessage.Length;
1449 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); 1586 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
@@ -1496,6 +1633,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1496 1633
1497 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1634 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1498 uint8_t *pQbuffer = acb->rqbuffer; 1635 uint8_t *pQbuffer = acb->rqbuffer;
1636 if (!acb->fw_state) {
1637 pcmdmessagefld->cmdmessage.ReturnCode =
1638 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1639 goto message_out;
1640 }
1499 1641
1500 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1642 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1501 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1643 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1511,6 +1653,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1511 1653
1512 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 1654 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
1513 uint8_t *pQbuffer = acb->wqbuffer; 1655 uint8_t *pQbuffer = acb->wqbuffer;
1656 if (!acb->fw_state) {
1657 pcmdmessagefld->cmdmessage.ReturnCode =
1658 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1659 goto message_out;
1660 }
1514 1661
1515 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1662 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1516 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1663 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1529,6 +1676,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1529 1676
1530 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1677 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1531 uint8_t *pQbuffer; 1678 uint8_t *pQbuffer;
1679 if (!acb->fw_state) {
1680 pcmdmessagefld->cmdmessage.ReturnCode =
1681 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1682 goto message_out;
1683 }
1532 1684
1533 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1685 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1534 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1686 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1551,13 +1703,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1551 break; 1703 break;
1552 1704
1553 case ARCMSR_MESSAGE_RETURN_CODE_3F: { 1705 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1706 if (!acb->fw_state) {
1707 pcmdmessagefld->cmdmessage.ReturnCode =
1708 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1709 goto message_out;
1710 }
1554 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 1711 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1555 } 1712 }
1556 break; 1713 break;
1557 1714
1558 case ARCMSR_MESSAGE_SAY_HELLO: { 1715 case ARCMSR_MESSAGE_SAY_HELLO: {
1559 int8_t *hello_string = "Hello! I am ARCMSR"; 1716 int8_t *hello_string = "Hello! I am ARCMSR";
1560 1717 if (!acb->fw_state) {
1718 pcmdmessagefld->cmdmessage.ReturnCode =
1719 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1720 goto message_out;
1721 }
1561 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 1722 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1562 , (int16_t)strlen(hello_string)); 1723 , (int16_t)strlen(hello_string));
1563 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1724 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
@@ -1565,10 +1726,20 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1565 break; 1726 break;
1566 1727
1567 case ARCMSR_MESSAGE_SAY_GOODBYE: 1728 case ARCMSR_MESSAGE_SAY_GOODBYE:
1729 if (!acb->fw_state) {
1730 pcmdmessagefld->cmdmessage.ReturnCode =
1731 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1732 goto message_out;
1733 }
1568 arcmsr_iop_parking(acb); 1734 arcmsr_iop_parking(acb);
1569 break; 1735 break;
1570 1736
1571 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 1737 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1738 if (!acb->fw_state) {
1739 pcmdmessagefld->cmdmessage.ReturnCode =
1740 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1741 goto message_out;
1742 }
1572 arcmsr_flush_adapter_cache(acb); 1743 arcmsr_flush_adapter_cache(acb);
1573 break; 1744 break;
1574 1745
@@ -1651,16 +1822,57 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1651 struct CommandControlBlock *ccb; 1822 struct CommandControlBlock *ccb;
1652 int target = cmd->device->id; 1823 int target = cmd->device->id;
1653 int lun = cmd->device->lun; 1824 int lun = cmd->device->lun;
1654 1825 uint8_t scsicmd = cmd->cmnd[0];
1655 cmd->scsi_done = done; 1826 cmd->scsi_done = done;
1656 cmd->host_scribble = NULL; 1827 cmd->host_scribble = NULL;
1657 cmd->result = 0; 1828 cmd->result = 0;
1829
1830 if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
1831 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1832 cmd->result = (DID_NO_CONNECT << 16);
1833 }
1834 cmd->scsi_done(cmd);
1835 return 0;
1836 }
1837
1658 if (acb->acb_flags & ACB_F_BUS_RESET) { 1838 if (acb->acb_flags & ACB_F_BUS_RESET) {
1659 printk(KERN_NOTICE "arcmsr%d: bus reset" 1839 switch (acb->adapter_type) {
1660 " and return busy \n" 1840 case ACB_ADAPTER_TYPE_A: {
1661 , acb->host->host_no); 1841 struct MessageUnit_A __iomem *reg = acb->pmuA;
1842 uint32_t intmask_org, outbound_doorbell;
1843
1844 if ((readl(&reg->outbound_msgaddr1) &
1845 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
1846 printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n",
1847 acb->host->host_no);
1662 return SCSI_MLQUEUE_HOST_BUSY; 1848 return SCSI_MLQUEUE_HOST_BUSY;
1663 } 1849 }
1850
1851 acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
1852 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n",
1853 acb->host->host_no);
1854 /* disable all outbound interrupt */
1855 intmask_org = arcmsr_disable_outbound_ints(acb);
1856 arcmsr_get_firmware_spec(acb, 1);
1857 /*start background rebuild*/
1858 arcmsr_start_adapter_bgrb(acb);
1859 /* clear Qbuffer if door bell ringed */
1860 outbound_doorbell = readl(&reg->outbound_doorbell);
1861 /*clear interrupt */
1862 writel(outbound_doorbell, &reg->outbound_doorbell);
1863 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
1864 &reg->inbound_doorbell);
1865 /* enable outbound Post Queue,outbound doorbell Interrupt */
1866 arcmsr_enable_outbound_ints(acb, intmask_org);
1867 acb->acb_flags |= ACB_F_IOP_INITED;
1868 acb->acb_flags &= ~ACB_F_BUS_RESET;
1869 }
1870 break;
1871 case ACB_ADAPTER_TYPE_B: {
1872 }
1873 }
1874 }
1875
1664 if (target == 16) { 1876 if (target == 16) {
1665 /* virtual device for iop message transfer */ 1877 /* virtual device for iop message transfer */
1666 arcmsr_handle_virtual_command(acb, cmd); 1878 arcmsr_handle_virtual_command(acb, cmd);
@@ -1699,21 +1911,25 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1699 return 0; 1911 return 0;
1700} 1912}
1701 1913
1702static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) 1914static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode)
1703{ 1915{
1704 struct MessageUnit_A __iomem *reg = acb->pmuA; 1916 struct MessageUnit_A __iomem *reg = acb->pmuA;
1705 char *acb_firm_model = acb->firm_model; 1917 char *acb_firm_model = acb->firm_model;
1706 char *acb_firm_version = acb->firm_version; 1918 char *acb_firm_version = acb->firm_version;
1919 char *acb_device_map = acb->device_map;
1707 char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); 1920 char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
1708 char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); 1921 char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
1922 char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]);
1709 int count; 1923 int count;
1710 1924
1711 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 1925 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1712 if (arcmsr_hba_wait_msgint_ready(acb)) { 1926 if (arcmsr_hba_wait_msgint_ready(acb)) {
1713 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 1927 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
1714 miscellaneous data' timeout \n", acb->host->host_no); 1928 miscellaneous data' timeout \n", acb->host->host_no);
1929 return NULL;
1715 } 1930 }
1716 1931
1932 if (mode == 1) {
1717 count = 8; 1933 count = 8;
1718 while (count) { 1934 while (count) {
1719 *acb_firm_model = readb(iop_firm_model); 1935 *acb_firm_model = readb(iop_firm_model);
@@ -1730,34 +1946,48 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
1730 count--; 1946 count--;
1731 } 1947 }
1732 1948
1949 count = 16;
1950 while (count) {
1951 *acb_device_map = readb(iop_device_map);
1952 acb_device_map++;
1953 iop_device_map++;
1954 count--;
1955 }
1956
1733 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" 1957 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1734 , acb->host->host_no 1958 , acb->host->host_no
1735 , acb->firm_version); 1959 , acb->firm_version);
1736 1960 acb->signature = readl(&reg->message_rwbuffer[0]);
1737 acb->firm_request_len = readl(&reg->message_rwbuffer[1]); 1961 acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1738 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]); 1962 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1739 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]); 1963 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1740 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]); 1964 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1741} 1965}
1742 1966 return reg->message_rwbuffer;
1743static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) 1967}
1968static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode)
1744{ 1969{
1745 struct MessageUnit_B *reg = acb->pmuB; 1970 struct MessageUnit_B *reg = acb->pmuB;
1746 uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; 1971 uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
1747 char *acb_firm_model = acb->firm_model; 1972 char *acb_firm_model = acb->firm_model;
1748 char *acb_firm_version = acb->firm_version; 1973 char *acb_firm_version = acb->firm_version;
1974 char *acb_device_map = acb->device_map;
1749 char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); 1975 char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
1750 /*firm_model,15,60-67*/ 1976 /*firm_model,15,60-67*/
1751 char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); 1977 char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
1752 /*firm_version,17,68-83*/ 1978 /*firm_version,17,68-83*/
1979 char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]);
 1980 /*firm_device_map,21,84-99*/
1753 int count; 1981 int count;
1754 1982
1755 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); 1983 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
1756 if (arcmsr_hbb_wait_msgint_ready(acb)) { 1984 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1757 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 1985 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
1758 miscellaneous data' timeout \n", acb->host->host_no); 1986 miscellaneous data' timeout \n", acb->host->host_no);
1987 return NULL;
1759 } 1988 }
1760 1989
1990 if (mode == 1) {
1761 count = 8; 1991 count = 8;
1762 while (count) 1992 while (count)
1763 { 1993 {
@@ -1776,11 +2006,20 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
1776 count--; 2006 count--;
1777 } 2007 }
1778 2008
2009 count = 16;
2010 while (count) {
2011 *acb_device_map = readb(iop_device_map);
2012 acb_device_map++;
2013 iop_device_map++;
2014 count--;
2015 }
2016
1779 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", 2017 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
1780 acb->host->host_no, 2018 acb->host->host_no,
1781 acb->firm_version); 2019 acb->firm_version);
1782 2020
1783 lrwbuffer++; 2021 acb->signature = readl(lrwbuffer++);
2022 /*firm_signature,1,00-03*/
1784 acb->firm_request_len = readl(lrwbuffer++); 2023 acb->firm_request_len = readl(lrwbuffer++);
1785 /*firm_request_len,1,04-07*/ 2024 /*firm_request_len,1,04-07*/
1786 acb->firm_numbers_queue = readl(lrwbuffer++); 2025 acb->firm_numbers_queue = readl(lrwbuffer++);
@@ -1790,20 +2029,23 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
1790 acb->firm_hd_channels = readl(lrwbuffer); 2029 acb->firm_hd_channels = readl(lrwbuffer);
1791 /*firm_ide_channels,4,16-19*/ 2030 /*firm_ide_channels,4,16-19*/
1792} 2031}
1793 2032 return reg->msgcode_rwbuffer_reg;
1794static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) 2033}
2034static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode)
1795{ 2035{
2036 void *rtnval = 0;
1796 switch (acb->adapter_type) { 2037 switch (acb->adapter_type) {
1797 case ACB_ADAPTER_TYPE_A: { 2038 case ACB_ADAPTER_TYPE_A: {
1798 arcmsr_get_hba_config(acb); 2039 rtnval = arcmsr_get_hba_config(acb, mode);
1799 } 2040 }
1800 break; 2041 break;
1801 2042
1802 case ACB_ADAPTER_TYPE_B: { 2043 case ACB_ADAPTER_TYPE_B: {
1803 arcmsr_get_hbb_config(acb); 2044 rtnval = arcmsr_get_hbb_config(acb, mode);
1804 } 2045 }
1805 break; 2046 break;
1806 } 2047 }
2048 return rtnval;
1807} 2049}
1808 2050
1809static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, 2051static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
@@ -2043,6 +2285,66 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2043 } 2285 }
2044} 2286}
2045 2287
2288static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
2289{
2290 struct MessageUnit_A __iomem *reg = acb->pmuA;
2291
2292 if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
2293 acb->fw_state = false;
2294 } else {
 2295 /* prevent rq_map_token from being changed by another interrupt,
 2296 to avoid a deadlock */
2297 acb->fw_state = true;
2298 atomic_dec(&acb->rq_map_token);
2299 if (!(acb->fw_state) ||
2300 (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
2301 atomic_set(&acb->rq_map_token, 16);
2302 }
2303 acb->ante_token_value = atomic_read(&acb->rq_map_token);
2304 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2305 }
2306 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
2307 return;
2308}
2309
2310static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
2311{
2312 struct MessageUnit_B __iomem *reg = acb->pmuB;
2313
2314 if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
2315 acb->fw_state = false;
2316 } else {
 2317 /* prevent rq_map_token from being changed by another interrupt,
 2318 to avoid a deadlock */
2319 acb->fw_state = true;
2320 atomic_dec(&acb->rq_map_token);
2321 if (!(acb->fw_state) ||
2322 (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
2323 atomic_set(&acb->rq_map_token, 16);
2324 }
2325 acb->ante_token_value = atomic_read(&acb->rq_map_token);
2326 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
2327 }
2328 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
2329 return;
2330}
2331
2332static void arcmsr_request_device_map(unsigned long pacb)
2333{
2334 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
2335
2336 switch (acb->adapter_type) {
2337 case ACB_ADAPTER_TYPE_A: {
2338 arcmsr_request_hba_device_map(acb);
2339 }
2340 break;
2341 case ACB_ADAPTER_TYPE_B: {
2342 arcmsr_request_hbb_device_map(acb);
2343 }
2344 break;
2345 }
2346}
2347
2046static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) 2348static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2047{ 2349{
2048 struct MessageUnit_A __iomem *reg = acb->pmuA; 2350 struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -2121,6 +2423,60 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2121 return; 2423 return;
2122} 2424}
2123 2425
2426static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
2427{
2428 uint8_t value[64];
2429 int i;
2430
2431 /* backup pci config data */
2432 for (i = 0; i < 64; i++) {
2433 pci_read_config_byte(acb->pdev, i, &value[i]);
2434 }
2435 /* hardware reset signal */
2436 pci_write_config_byte(acb->pdev, 0x84, 0x20);
2437 msleep(1000);
2438 /* write back pci config data */
2439 for (i = 0; i < 64; i++) {
2440 pci_write_config_byte(acb->pdev, i, value[i]);
2441 }
2442 msleep(1000);
2443 return;
2444}
2445/*
2446****************************************************************************
2447****************************************************************************
2448*/
2449#ifdef CONFIG_SCSI_ARCMSR_RESET
2450 int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
2451 {
2452 struct Scsi_Host *shost = NULL;
2453 spinlock_t *host_lock = NULL;
2454 int i, isleep;
2455
2456 shost = cmd->device->host;
2457 host_lock = shost->host_lock;
2458
2459 printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n",
2460 shost->host_no, sleeptime, shost->host_busy, shost->can_queue);
2461 isleep = sleeptime / 10;
2462 spin_unlock_irq(host_lock);
2463 if (isleep > 0) {
2464 for (i = 0; i < isleep; i++) {
2465 msleep(10000);
2466 printk(KERN_NOTICE "^%d^\n", i);
2467 }
2468 }
2469
2470 isleep = sleeptime % 10;
2471 if (isleep > 0) {
2472 msleep(isleep * 1000);
2473 printk(KERN_NOTICE "^v^\n");
2474 }
2475 spin_lock_irq(host_lock);
2476 printk(KERN_NOTICE "***** wake up *****\n");
2477 return 0;
2478 }
2479#endif
2124static void arcmsr_iop_init(struct AdapterControlBlock *acb) 2480static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2125{ 2481{
2126 uint32_t intmask_org; 2482 uint32_t intmask_org;
@@ -2129,7 +2485,7 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2129 intmask_org = arcmsr_disable_outbound_ints(acb); 2485 intmask_org = arcmsr_disable_outbound_ints(acb);
2130 arcmsr_wait_firmware_ready(acb); 2486 arcmsr_wait_firmware_ready(acb);
2131 arcmsr_iop_confirm(acb); 2487 arcmsr_iop_confirm(acb);
2132 arcmsr_get_firmware_spec(acb); 2488 arcmsr_get_firmware_spec(acb, 1);
2133 /*start background rebuild*/ 2489 /*start background rebuild*/
2134 arcmsr_start_adapter_bgrb(acb); 2490 arcmsr_start_adapter_bgrb(acb);
2135 /* empty doorbell Qbuffer if door bell ringed */ 2491 /* empty doorbell Qbuffer if door bell ringed */
@@ -2140,51 +2496,110 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2140 acb->acb_flags |= ACB_F_IOP_INITED; 2496 acb->acb_flags |= ACB_F_IOP_INITED;
2141} 2497}
2142 2498
2143static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 2499static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
2144{ 2500{
2145 struct CommandControlBlock *ccb; 2501 struct CommandControlBlock *ccb;
2146 uint32_t intmask_org; 2502 uint32_t intmask_org;
2503 uint8_t rtnval = 0x00;
2147 int i = 0; 2504 int i = 0;
2148 2505
2149 if (atomic_read(&acb->ccboutstandingcount) != 0) { 2506 if (atomic_read(&acb->ccboutstandingcount) != 0) {
2507 /* disable all outbound interrupt */
2508 intmask_org = arcmsr_disable_outbound_ints(acb);
2150 /* talk to iop 331 outstanding command aborted */ 2509 /* talk to iop 331 outstanding command aborted */
2151 arcmsr_abort_allcmd(acb); 2510 rtnval = arcmsr_abort_allcmd(acb);
2152
2153 /* wait for 3 sec for all command aborted*/ 2511 /* wait for 3 sec for all command aborted*/
2154 ssleep(3); 2512 ssleep(3);
2155
2156 /* disable all outbound interrupt */
2157 intmask_org = arcmsr_disable_outbound_ints(acb);
2158 /* clear all outbound posted Q */ 2513 /* clear all outbound posted Q */
2159 arcmsr_done4abort_postqueue(acb); 2514 arcmsr_done4abort_postqueue(acb);
2160 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 2515 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2161 ccb = acb->pccb_pool[i]; 2516 ccb = acb->pccb_pool[i];
2162 if (ccb->startdone == ARCMSR_CCB_START) { 2517 if (ccb->startdone == ARCMSR_CCB_START) {
2163 ccb->startdone = ARCMSR_CCB_ABORTED;
2164 arcmsr_ccb_complete(ccb, 1); 2518 arcmsr_ccb_complete(ccb, 1);
2165 } 2519 }
2166 } 2520 }
2521 atomic_set(&acb->ccboutstandingcount, 0);
2167 /* enable all outbound interrupt */ 2522 /* enable all outbound interrupt */
2168 arcmsr_enable_outbound_ints(acb, intmask_org); 2523 arcmsr_enable_outbound_ints(acb, intmask_org);
2524 return rtnval;
2169 } 2525 }
2526 return rtnval;
2170} 2527}
2171 2528
2172static int arcmsr_bus_reset(struct scsi_cmnd *cmd) 2529static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2173{ 2530{
2174 struct AdapterControlBlock *acb = 2531 struct AdapterControlBlock *acb =
2175 (struct AdapterControlBlock *)cmd->device->host->hostdata; 2532 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2176 int i; 2533 int retry = 0;
2177 2534
2178 acb->num_resets++; 2535 if (acb->acb_flags & ACB_F_BUS_RESET)
2536 return SUCCESS;
2537
2538 printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index);
2179 acb->acb_flags |= ACB_F_BUS_RESET; 2539 acb->acb_flags |= ACB_F_BUS_RESET;
2180 for (i = 0; i < 400; i++) { 2540 acb->num_resets++;
2181 if (!atomic_read(&acb->ccboutstandingcount)) 2541 while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) {
2542 arcmsr_interrupt(acb);
2543 retry++;
2544 }
2545
2546 if (arcmsr_iop_reset(acb)) {
2547 switch (acb->adapter_type) {
2548 case ACB_ADAPTER_TYPE_A: {
2549 printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n",
2550 acb->adapter_index, acb->num_resets, acb->num_aborts);
2551 arcmsr_hardware_reset(acb);
2552 acb->acb_flags |= ACB_F_FIRMWARE_TRAP;
2553 acb->acb_flags &= ~ACB_F_IOP_INITED;
2554 #ifdef CONFIG_SCSI_ARCMSR_RESET
2555 struct MessageUnit_A __iomem *reg = acb->pmuA;
2556 uint32_t intmask_org, outbound_doorbell;
2557 int retry_count = 0;
2558sleep_again:
2559 arcmsr_sleep_for_bus_reset(cmd);
2560 if ((readl(&reg->outbound_msgaddr1) &
2561 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
2562 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n",
2563 acb->host->host_no, retry_count);
2564 if (retry_count > retrycount) {
2565 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n",
2566 acb->host->host_no);
2567 return SUCCESS;
2568 }
2569 retry_count++;
2570 goto sleep_again;
2571 }
2572 acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
2573 acb->acb_flags |= ACB_F_IOP_INITED;
2574 acb->acb_flags &= ~ACB_F_BUS_RESET;
2575 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n",
2576 acb->host->host_no);
2577 /* disable all outbound interrupt */
2578 intmask_org = arcmsr_disable_outbound_ints(acb);
2579 arcmsr_get_firmware_spec(acb, 1);
2580 /*start background rebuild*/
2581 arcmsr_start_adapter_bgrb(acb);
2582 /* clear Qbuffer if door bell ringed */
2583 outbound_doorbell = readl(&reg->outbound_doorbell);
2584 writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
2585 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2586 /* enable outbound Post Queue,outbound doorbell Interrupt */
2587 arcmsr_enable_outbound_ints(acb, intmask_org);
2588 atomic_set(&acb->rq_map_token, 16);
2589 init_timer(&acb->eternal_timer);
2590 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ);
2591 acb->eternal_timer.data = (unsigned long) acb;
2592 acb->eternal_timer.function = &arcmsr_request_device_map;
2593 add_timer(&acb->eternal_timer);
2594 #endif
2595 }
2182 break; 2596 break;
2183 arcmsr_interrupt(acb);/* FIXME: need spinlock */ 2597 case ACB_ADAPTER_TYPE_B: {
2184 msleep(25);
2185 } 2598 }
2186 arcmsr_iop_reset(acb); 2599 }
2600 } else {
2187 acb->acb_flags &= ~ACB_F_BUS_RESET; 2601 acb->acb_flags &= ~ACB_F_BUS_RESET;
2602 }
2188 return SUCCESS; 2603 return SUCCESS;
2189} 2604}
2190 2605
@@ -2277,98 +2692,3 @@ static const char *arcmsr_info(struct Scsi_Host *host)
2277 ARCMSR_DRIVER_VERSION); 2692 ARCMSR_DRIVER_VERSION);
2278 return buf; 2693 return buf;
2279} 2694}
2280#ifdef CONFIG_SCSI_ARCMSR_AER
2281static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
2282{
2283 struct Scsi_Host *host = pci_get_drvdata(pdev);
2284 struct AdapterControlBlock *acb =
2285 (struct AdapterControlBlock *) host->hostdata;
2286 uint32_t intmask_org;
2287 int i, j;
2288
2289 if (pci_enable_device(pdev)) {
2290 return PCI_ERS_RESULT_DISCONNECT;
2291 }
2292 pci_set_master(pdev);
2293 intmask_org = arcmsr_disable_outbound_ints(acb);
2294 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2295 ACB_F_MESSAGE_RQBUFFER_CLEARED |
2296 ACB_F_MESSAGE_WQBUFFER_READED);
2297 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2298 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
2299 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
2300 acb->devstate[i][j] = ARECA_RAID_GONE;
2301
2302 arcmsr_wait_firmware_ready(acb);
2303 arcmsr_iop_confirm(acb);
2304 /* disable all outbound interrupt */
2305 arcmsr_get_firmware_spec(acb);
2306 /*start background rebuild*/
2307 arcmsr_start_adapter_bgrb(acb);
2308 /* empty doorbell Qbuffer if door bell ringed */
2309 arcmsr_clear_doorbell_queue_buffer(acb);
2310 arcmsr_enable_eoi_mode(acb);
2311 /* enable outbound Post Queue,outbound doorbell Interrupt */
2312 arcmsr_enable_outbound_ints(acb, intmask_org);
2313 acb->acb_flags |= ACB_F_IOP_INITED;
2314
2315 pci_enable_pcie_error_reporting(pdev);
2316 return PCI_ERS_RESULT_RECOVERED;
2317}
2318
2319static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
2320{
2321 struct Scsi_Host *host = pci_get_drvdata(pdev);
2322 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
2323 struct CommandControlBlock *ccb;
2324 uint32_t intmask_org;
2325 int i = 0;
2326
2327 if (atomic_read(&acb->ccboutstandingcount) != 0) {
2328 /* talk to iop 331 outstanding command aborted */
2329 arcmsr_abort_allcmd(acb);
2330 /* wait for 3 sec for all command aborted*/
2331 ssleep(3);
2332 /* disable all outbound interrupt */
2333 intmask_org = arcmsr_disable_outbound_ints(acb);
2334 /* clear all outbound posted Q */
2335 arcmsr_done4abort_postqueue(acb);
2336 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2337 ccb = acb->pccb_pool[i];
2338 if (ccb->startdone == ARCMSR_CCB_START) {
2339 ccb->startdone = ARCMSR_CCB_ABORTED;
2340 arcmsr_ccb_complete(ccb, 1);
2341 }
2342 }
2343 /* enable all outbound interrupt */
2344 arcmsr_enable_outbound_ints(acb, intmask_org);
2345 }
2346 pci_disable_device(pdev);
2347}
2348
2349static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
2350{
2351 struct Scsi_Host *host = pci_get_drvdata(pdev);
2352 struct AdapterControlBlock *acb = \
2353 (struct AdapterControlBlock *)host->hostdata;
2354
2355 arcmsr_stop_adapter_bgrb(acb);
2356 arcmsr_flush_adapter_cache(acb);
2357}
2358
2359static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
2360 pci_channel_state_t state)
2361{
2362 switch (state) {
2363 case pci_channel_io_frozen:
2364 arcmsr_pci_ers_need_reset_forepart(pdev);
2365 return PCI_ERS_RESULT_NEED_RESET;
2366 case pci_channel_io_perm_failure:
2367 arcmsr_pci_ers_disconnect_forepart(pdev);
2368 return PCI_ERS_RESULT_DISCONNECT;
2369 break;
2370 default:
2371 return PCI_ERS_RESULT_NEED_RESET;
2372 }
2373}
2374#endif
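
Note on the arcmsr changes above: the driver now arms a 6-second eternal_timer (arcmsr_request_device_map) that decrements rq_map_token and re-issues a GET_CONFIG message; if the token ever drains to zero the firmware is treated as hung (fw_state = false), which is what makes the ioctl paths above return ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON. Below is a minimal userspace sketch of that token heartbeat; the struct and function names are illustrative only, not the driver's, and the refill that would normally come from the GET_CONFIG reply handler is deliberately omitted.

/* build: cc -std=c11 heartbeat.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct heartbeat {
	atomic_int token;   /* plays the role of rq_map_token     */
	int last_token;     /* plays the role of ante_token_value */
	bool fw_alive;      /* plays the role of fw_state         */
};

static void heartbeat_tick(struct heartbeat *hb)
{
	if (atomic_load(&hb->token) == 0) {
		hb->fw_alive = false;   /* firmware stopped answering */
		return;
	}
	hb->fw_alive = true;
	atomic_fetch_sub(&hb->token, 1);
	/* if the token did not move since the last tick, rearm it to 16 */
	if (hb->last_token == atomic_load(&hb->token))
		atomic_store(&hb->token, 16);
	hb->last_token = atomic_load(&hb->token);
	/* at this point the driver writes the GET_CONFIG doorbell message */
}

int main(void)
{
	struct heartbeat hb = { .last_token = 0, .fw_alive = true };

	atomic_init(&hb.token, 16);
	for (int i = 0; i < 20; i++)
		heartbeat_tick(&hb);
	printf("fw_alive=%d token=%d\n", hb.fw_alive, atomic_load(&hb.token));
	return 0;
}

With nothing refilling the token, the loop drains it in 16 ticks and the next tick flips fw_alive to false, mirroring how the driver decides to fail message-unit ioctls with BUS_HANG_ON.
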
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index e641922f20bc..350cbeaae160 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -167,10 +167,9 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
167 &nonemb_cmd.dma); 167 &nonemb_cmd.dma);
168 if (nonemb_cmd.va == NULL) { 168 if (nonemb_cmd.va == NULL) {
169 SE_DEBUG(DBG_LVL_1, 169 SE_DEBUG(DBG_LVL_1,
170 "Failed to allocate memory for" 170 "Failed to allocate memory for mgmt_invalidate_icds\n");
171 "mgmt_invalidate_icds \n");
172 spin_unlock(&ctrl->mbox_lock); 171 spin_unlock(&ctrl->mbox_lock);
173 return -1; 172 return 0;
174 } 173 }
175 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 174 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
176 req = nonemb_cmd.va; 175 req = nonemb_cmd.va;
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0c08e185a766..3a7b3f88932f 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -84,11 +84,32 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
84 for (i = 0; hal_mods[i]; i++) 84 for (i = 0; hal_mods[i]; i++)
85 hal_mods[i]->meminfo(cfg, &km_len, &dm_len); 85 hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
86 86
87 dm_len += bfa_port_meminfo();
87 88
88 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; 89 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
89 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 90 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
90} 91}
91 92
93static void
94bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
95{
96 struct bfa_port_s *port = &bfa->modules.port;
97 uint32_t dm_len;
98 uint8_t *dm_kva;
99 uint64_t dm_pa;
100
101 dm_len = bfa_port_meminfo();
102 dm_kva = bfa_meminfo_dma_virt(mi);
103 dm_pa = bfa_meminfo_dma_phys(mi);
104
105 memset(port, 0, sizeof(struct bfa_port_s));
106 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm);
107 bfa_port_mem_claim(port, dm_kva, dm_pa);
108
109 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
110 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
111}
112
92/** 113/**
93 * Use this function to do attach the driver instance with the BFA 114 * Use this function to do attach the driver instance with the BFA
94 * library. This function will not trigger any HW initialization 115 * library. This function will not trigger any HW initialization
@@ -140,6 +161,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
140 for (i = 0; hal_mods[i]; i++) 161 for (i = 0; hal_mods[i]; i++)
141 hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); 162 hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
142 163
164 bfa_com_port_attach(bfa, meminfo);
143} 165}
144 166
145/** 167/**
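
The bfa_core.c hunk reserves extra DMA space for the port module (dm_len += bfa_port_meminfo()) and then has bfa_com_port_attach() claim that space by advancing the meminfo virtual and physical cursors in lock-step. A minimal standalone sketch of that claim pattern, with illustrative names rather than the BFA helpers:

/* build: cc -std=c11 claim.c */
#include <stdint.h>
#include <stdio.h>

struct dma_region {
	uint8_t  *kva;  /* next free kernel-virtual address */
	uint64_t  pa;   /* matching physical address        */
};

static void *claim_dma(struct dma_region *r, uint32_t len, uint64_t *pa_out)
{
	void *kva = r->kva;

	*pa_out = r->pa;
	r->kva += len;  /* advance both cursors together */
	r->pa  += len;
	return kva;
}

int main(void)
{
	static uint8_t pool[4096];
	struct dma_region r = { .kva = pool, .pa = 0x80000000ull };
	uint64_t port_pa;
	void *port_kva = claim_dma(&r, 256, &port_pa);

	printf("claimed %p / 0x%llx, next free %p\n",
	       port_kva, (unsigned long long)port_pa, (void *)r.kva);
	return 0;
}
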
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 18b7102bb80e..2ce26eb7a1ec 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -1,36 +1,35 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/init.h> 2#include <linux/init.h>
6#include <linux/interrupt.h> 3#include <linux/interrupt.h>
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/spinlock.h>
7#include <linux/zorro.h>
7 8
8#include <asm/setup.h>
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/amigaints.h> 11#include <asm/amigaints.h>
12#include <asm/amigahw.h> 12#include <asm/amigahw.h>
13#include <linux/zorro.h>
14#include <asm/irq.h>
15#include <linux/spinlock.h>
16 13
17#include "scsi.h" 14#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 15#include "wd33c93.h"
20#include "gvp11.h" 16#include "gvp11.h"
21 17
22#include <linux/stat.h>
23 18
19#define CHECK_WD33C93
24 20
25#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base)) 21struct gvp11_hostdata {
22 struct WD33C93_hostdata wh;
23 struct gvp11_scsiregs *regs;
24};
26 25
27static irqreturn_t gvp11_intr(int irq, void *_instance) 26static irqreturn_t gvp11_intr(int irq, void *data)
28{ 27{
28 struct Scsi_Host *instance = data;
29 struct gvp11_hostdata *hdata = shost_priv(instance);
30 unsigned int status = hdata->regs->CNTR;
29 unsigned long flags; 31 unsigned long flags;
30 unsigned int status;
31 struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
32 32
33 status = DMA(instance)->CNTR;
34 if (!(status & GVP11_DMAC_INT_PENDING)) 33 if (!(status & GVP11_DMAC_INT_PENDING))
35 return IRQ_NONE; 34 return IRQ_NONE;
36 35
@@ -50,64 +49,66 @@ void gvp11_setup(char *str, int *ints)
50static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 49static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
51{ 50{
52 struct Scsi_Host *instance = cmd->device->host; 51 struct Scsi_Host *instance = cmd->device->host;
53 struct WD33C93_hostdata *hdata = shost_priv(instance); 52 struct gvp11_hostdata *hdata = shost_priv(instance);
53 struct WD33C93_hostdata *wh = &hdata->wh;
54 struct gvp11_scsiregs *regs = hdata->regs;
54 unsigned short cntr = GVP11_DMAC_INT_ENABLE; 55 unsigned short cntr = GVP11_DMAC_INT_ENABLE;
55 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 56 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
56 int bank_mask; 57 int bank_mask;
57 static int scsi_alloc_out_of_range = 0; 58 static int scsi_alloc_out_of_range = 0;
58 59
59 /* use bounce buffer if the physical address is bad */ 60 /* use bounce buffer if the physical address is bad */
60 if (addr & hdata->dma_xfer_mask) { 61 if (addr & wh->dma_xfer_mask) {
61 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 62 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
62 63
63 if (!scsi_alloc_out_of_range) { 64 if (!scsi_alloc_out_of_range) {
64 hdata->dma_bounce_buffer = 65 wh->dma_bounce_buffer =
65 kmalloc(hdata->dma_bounce_len, GFP_KERNEL); 66 kmalloc(wh->dma_bounce_len, GFP_KERNEL);
66 hdata->dma_buffer_pool = BUF_SCSI_ALLOCED; 67 wh->dma_buffer_pool = BUF_SCSI_ALLOCED;
67 } 68 }
68 69
69 if (scsi_alloc_out_of_range || 70 if (scsi_alloc_out_of_range ||
70 !hdata->dma_bounce_buffer) { 71 !wh->dma_bounce_buffer) {
71 hdata->dma_bounce_buffer = 72 wh->dma_bounce_buffer =
72 amiga_chip_alloc(hdata->dma_bounce_len, 73 amiga_chip_alloc(wh->dma_bounce_len,
73 "GVP II SCSI Bounce Buffer"); 74 "GVP II SCSI Bounce Buffer");
74 75
75 if (!hdata->dma_bounce_buffer) { 76 if (!wh->dma_bounce_buffer) {
76 hdata->dma_bounce_len = 0; 77 wh->dma_bounce_len = 0;
77 return 1; 78 return 1;
78 } 79 }
79 80
80 hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; 81 wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
81 } 82 }
82 83
83 /* check if the address of the bounce buffer is OK */ 84 /* check if the address of the bounce buffer is OK */
84 addr = virt_to_bus(hdata->dma_bounce_buffer); 85 addr = virt_to_bus(wh->dma_bounce_buffer);
85 86
86 if (addr & hdata->dma_xfer_mask) { 87 if (addr & wh->dma_xfer_mask) {
87 /* fall back to Chip RAM if address out of range */ 88 /* fall back to Chip RAM if address out of range */
88 if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) { 89 if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
89 kfree(hdata->dma_bounce_buffer); 90 kfree(wh->dma_bounce_buffer);
90 scsi_alloc_out_of_range = 1; 91 scsi_alloc_out_of_range = 1;
91 } else { 92 } else {
92 amiga_chip_free(hdata->dma_bounce_buffer); 93 amiga_chip_free(wh->dma_bounce_buffer);
93 } 94 }
94 95
95 hdata->dma_bounce_buffer = 96 wh->dma_bounce_buffer =
96 amiga_chip_alloc(hdata->dma_bounce_len, 97 amiga_chip_alloc(wh->dma_bounce_len,
97 "GVP II SCSI Bounce Buffer"); 98 "GVP II SCSI Bounce Buffer");
98 99
99 if (!hdata->dma_bounce_buffer) { 100 if (!wh->dma_bounce_buffer) {
100 hdata->dma_bounce_len = 0; 101 wh->dma_bounce_len = 0;
101 return 1; 102 return 1;
102 } 103 }
103 104
104 addr = virt_to_bus(hdata->dma_bounce_buffer); 105 addr = virt_to_bus(wh->dma_bounce_buffer);
105 hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; 106 wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
106 } 107 }
107 108
108 if (!dir_in) { 109 if (!dir_in) {
109 /* copy to bounce buffer for a write */ 110 /* copy to bounce buffer for a write */
110 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 111 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
111 cmd->SCp.this_residual); 112 cmd->SCp.this_residual);
112 } 113 }
113 } 114 }
@@ -116,11 +117,11 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
116 if (!dir_in) 117 if (!dir_in)
117 cntr |= GVP11_DMAC_DIR_WRITE; 118 cntr |= GVP11_DMAC_DIR_WRITE;
118 119
119 hdata->dma_dir = dir_in; 120 wh->dma_dir = dir_in;
120 DMA(cmd->device->host)->CNTR = cntr; 121 regs->CNTR = cntr;
121 122
122 /* setup DMA *physical* address */ 123 /* setup DMA *physical* address */
123 DMA(cmd->device->host)->ACR = addr; 124 regs->ACR = addr;
124 125
125 if (dir_in) { 126 if (dir_in) {
126 /* invalidate any cache */ 127 /* invalidate any cache */
@@ -130,12 +131,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
130 cache_push(addr, cmd->SCp.this_residual); 131 cache_push(addr, cmd->SCp.this_residual);
131 } 132 }
132 133
133 bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0; 134 bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
134 if (bank_mask) 135 if (bank_mask)
135 DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18); 136 regs->BANK = bank_mask & (addr >> 18);
136 137
137 /* start DMA */ 138 /* start DMA */
138 DMA(cmd->device->host)->ST_DMA = 1; 139 regs->ST_DMA = 1;
139 140
140 /* return success */ 141 /* return success */
141 return 0; 142 return 0;
@@ -144,236 +145,53 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
144static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 145static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
145 int status) 146 int status)
146{ 147{
147 struct WD33C93_hostdata *hdata = shost_priv(instance); 148 struct gvp11_hostdata *hdata = shost_priv(instance);
149 struct WD33C93_hostdata *wh = &hdata->wh;
150 struct gvp11_scsiregs *regs = hdata->regs;
148 151
149 /* stop DMA */ 152 /* stop DMA */
150 DMA(instance)->SP_DMA = 1; 153 regs->SP_DMA = 1;
151 /* remove write bit from CONTROL bits */ 154 /* remove write bit from CONTROL bits */
152 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; 155 regs->CNTR = GVP11_DMAC_INT_ENABLE;
153 156
154 /* copy from a bounce buffer, if necessary */ 157 /* copy from a bounce buffer, if necessary */
155 if (status && hdata->dma_bounce_buffer) { 158 if (status && wh->dma_bounce_buffer) {
156 if (hdata->dma_dir && SCpnt) 159 if (wh->dma_dir && SCpnt)
157 memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, 160 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
158 SCpnt->SCp.this_residual); 161 SCpnt->SCp.this_residual);
159 162
160 if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) 163 if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
161 kfree(hdata->dma_bounce_buffer); 164 kfree(wh->dma_bounce_buffer);
162 else
163 amiga_chip_free(hdata->dma_bounce_buffer);
164
165 hdata->dma_bounce_buffer = NULL;
166 hdata->dma_bounce_len = 0;
167 }
168}
169
170#define CHECK_WD33C93
171
172int __init gvp11_detect(struct scsi_host_template *tpnt)
173{
174 static unsigned char called = 0;
175 struct Scsi_Host *instance;
176 unsigned long address;
177 unsigned int epc;
178 struct zorro_dev *z = NULL;
179 unsigned int default_dma_xfer_mask;
180 struct WD33C93_hostdata *hdata;
181 wd33c93_regs regs;
182 int num_gvp11 = 0;
183#ifdef CHECK_WD33C93
184 volatile unsigned char *sasr_3393, *scmd_3393;
185 unsigned char save_sasr;
186 unsigned char q, qq;
187#endif
188
189 if (!MACH_IS_AMIGA || called)
190 return 0;
191 called = 1;
192
193 tpnt->proc_name = "GVP11";
194 tpnt->proc_info = &wd33c93_proc_info;
195
196 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
197 /*
198 * This should (hopefully) be the correct way to identify
199 * all the different GVP SCSI controllers (except for the
200 * SERIES I though).
201 */
202
203 if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
204 z->id == ZORRO_PROD_GVP_SERIES_II)
205 default_dma_xfer_mask = ~0x00ffffff;
206 else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
207 z->id == ZORRO_PROD_GVP_A530_SCSI ||
208 z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
209 default_dma_xfer_mask = ~0x01ffffff;
210 else if (z->id == ZORRO_PROD_GVP_A1291 ||
211 z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
212 default_dma_xfer_mask = ~0x07ffffff;
213 else 165 else
214 continue; 166 amiga_chip_free(wh->dma_bounce_buffer);
215
216 /*
217 * Rumors state that some GVP ram boards use the same product
218 * code as the SCSI controllers. Therefore if the board-size
219 * is not 64KB we asume it is a ram board and bail out.
220 */
221 if (z->resource.end - z->resource.start != 0xffff)
222 continue;
223 167
224 address = z->resource.start; 168 wh->dma_bounce_buffer = NULL;
225 if (!request_mem_region(address, 256, "wd33c93")) 169 wh->dma_bounce_len = 0;
226 continue;
227
228#ifdef CHECK_WD33C93
229
230 /*
231 * These darn GVP boards are a problem - it can be tough to tell
232 * whether or not they include a SCSI controller. This is the
233 * ultimate Yet-Another-GVP-Detection-Hack in that it actually
234 * probes for a WD33c93 chip: If we find one, it's extremely
235 * likely that this card supports SCSI, regardless of Product_
236 * Code, Board_Size, etc.
237 */
238
239 /* Get pointers to the presumed register locations and save contents */
240
241 sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
242 scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
243 save_sasr = *sasr_3393;
244
245 /* First test the AuxStatus Reg */
246
247 q = *sasr_3393; /* read it */
248 if (q & 0x08) /* bit 3 should always be clear */
249 goto release;
250 *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
251 if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
252 *sasr_3393 = save_sasr; /* Oops - restore this byte */
253 goto release;
254 }
255 if (*sasr_3393 != q) { /* should still read the same */
256 *sasr_3393 = save_sasr; /* Oops - restore this byte */
257 goto release;
258 }
259 if (*scmd_3393 != q) /* and so should the image at 0x1f */
260 goto release;
261
262 /*
263 * Ok, we probably have a wd33c93, but let's check a few other places
264 * for good measure. Make sure that this works for both 'A and 'B
265 * chip versions.
266 */
267
268 *sasr_3393 = WD_SCSI_STATUS;
269 q = *scmd_3393;
270 *sasr_3393 = WD_SCSI_STATUS;
271 *scmd_3393 = ~q;
272 *sasr_3393 = WD_SCSI_STATUS;
273 qq = *scmd_3393;
274 *sasr_3393 = WD_SCSI_STATUS;
275 *scmd_3393 = q;
276 if (qq != q) /* should be read only */
277 goto release;
278 *sasr_3393 = 0x1e; /* this register is unimplemented */
279 q = *scmd_3393;
280 *sasr_3393 = 0x1e;
281 *scmd_3393 = ~q;
282 *sasr_3393 = 0x1e;
283 qq = *scmd_3393;
284 *sasr_3393 = 0x1e;
285 *scmd_3393 = q;
286 if (qq != q || qq != 0xff) /* should be read only, all 1's */
287 goto release;
288 *sasr_3393 = WD_TIMEOUT_PERIOD;
289 q = *scmd_3393;
290 *sasr_3393 = WD_TIMEOUT_PERIOD;
291 *scmd_3393 = ~q;
292 *sasr_3393 = WD_TIMEOUT_PERIOD;
293 qq = *scmd_3393;
294 *sasr_3393 = WD_TIMEOUT_PERIOD;
295 *scmd_3393 = q;
296 if (qq != (~q & 0xff)) /* should be read/write */
297 goto release;
298#endif
299
300 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
301 if (instance == NULL)
302 goto release;
303 instance->base = ZTWO_VADDR(address);
304 instance->irq = IRQ_AMIGA_PORTS;
305 instance->unique_id = z->slotaddr;
306
307 hdata = shost_priv(instance);
308 if (gvp11_xfer_mask)
309 hdata->dma_xfer_mask = gvp11_xfer_mask;
310 else
311 hdata->dma_xfer_mask = default_dma_xfer_mask;
312
313 DMA(instance)->secret2 = 1;
314 DMA(instance)->secret1 = 0;
315 DMA(instance)->secret3 = 15;
316 while (DMA(instance)->CNTR & GVP11_DMAC_BUSY)
317 ;
318 DMA(instance)->CNTR = 0;
319
320 DMA(instance)->BANK = 0;
321
322 epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
323
324 /*
325 * Check for 14MHz SCSI clock
326 */
327 regs.SASR = &(DMA(instance)->SASR);
328 regs.SCMD = &(DMA(instance)->SCMD);
329 hdata->no_sync = 0xff;
330 hdata->fast = 0;
331 hdata->dma_mode = CTRL_DMA;
332 wd33c93_init(instance, regs, dma_setup, dma_stop,
333 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
334 : WD33C93_FS_12_15);
335
336 if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
337 "GVP11 SCSI", instance))
338 goto unregister;
339 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
340 num_gvp11++;
341 continue;
342
343unregister:
344 scsi_unregister(instance);
345release:
346 release_mem_region(address, 256);
347 } 170 }
348
349 return num_gvp11;
350} 171}
351 172
352static int gvp11_bus_reset(struct scsi_cmnd *cmd) 173static int gvp11_bus_reset(struct scsi_cmnd *cmd)
353{ 174{
175 struct Scsi_Host *instance = cmd->device->host;
176
354 /* FIXME perform bus-specific reset */ 177 /* FIXME perform bus-specific reset */
355 178
356 /* FIXME 2: shouldn't we no-op this function (return 179 /* FIXME 2: shouldn't we no-op this function (return
357 FAILED), and fall back to host reset function, 180 FAILED), and fall back to host reset function,
358 wd33c93_host_reset ? */ 181 wd33c93_host_reset ? */
359 182
360 spin_lock_irq(cmd->device->host->host_lock); 183 spin_lock_irq(instance->host_lock);
361 wd33c93_host_reset(cmd); 184 wd33c93_host_reset(cmd);
362 spin_unlock_irq(cmd->device->host->host_lock); 185 spin_unlock_irq(instance->host_lock);
363 186
364 return SUCCESS; 187 return SUCCESS;
365} 188}
366 189
367 190static struct scsi_host_template gvp11_scsi_template = {
368#define HOSTS_C 191 .module = THIS_MODULE,
369
370#include "gvp11.h"
371
372static struct scsi_host_template driver_template = {
373 .proc_name = "GVP11",
374 .name = "GVP Series II SCSI", 192 .name = "GVP Series II SCSI",
375 .detect = gvp11_detect, 193 .proc_info = wd33c93_proc_info,
376 .release = gvp11_release, 194 .proc_name = "GVP11",
377 .queuecommand = wd33c93_queuecommand, 195 .queuecommand = wd33c93_queuecommand,
378 .eh_abort_handler = wd33c93_abort, 196 .eh_abort_handler = wd33c93_abort,
379 .eh_bus_reset_handler = gvp11_bus_reset, 197 .eh_bus_reset_handler = gvp11_bus_reset,
@@ -385,17 +203,230 @@ static struct scsi_host_template driver_template = {
385 .use_clustering = DISABLE_CLUSTERING 203 .use_clustering = DISABLE_CLUSTERING
386}; 204};
387 205
206static int __devinit check_wd33c93(struct gvp11_scsiregs *regs)
207{
208#ifdef CHECK_WD33C93
209 volatile unsigned char *sasr_3393, *scmd_3393;
210 unsigned char save_sasr;
211 unsigned char q, qq;
388 212
389#include "scsi_module.c" 213 /*
214 * These darn GVP boards are a problem - it can be tough to tell
215 * whether or not they include a SCSI controller. This is the
216 * ultimate Yet-Another-GVP-Detection-Hack in that it actually
217 * probes for a WD33c93 chip: If we find one, it's extremely
218 * likely that this card supports SCSI, regardless of Product_
219 * Code, Board_Size, etc.
220 */
221
222 /* Get pointers to the presumed register locations and save contents */
223
224 sasr_3393 = &regs->SASR;
225 scmd_3393 = &regs->SCMD;
226 save_sasr = *sasr_3393;
227
228 /* First test the AuxStatus Reg */
229
230 q = *sasr_3393; /* read it */
231 if (q & 0x08) /* bit 3 should always be clear */
232 return -ENODEV;
233 *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
234 if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
235 *sasr_3393 = save_sasr; /* Oops - restore this byte */
236 return -ENODEV;
237 }
238 if (*sasr_3393 != q) { /* should still read the same */
239 *sasr_3393 = save_sasr; /* Oops - restore this byte */
240 return -ENODEV;
241 }
242 if (*scmd_3393 != q) /* and so should the image at 0x1f */
243 return -ENODEV;
244
245 /*
246 * Ok, we probably have a wd33c93, but let's check a few other places
247 * for good measure. Make sure that this works for both 'A and 'B
248 * chip versions.
249 */
250
251 *sasr_3393 = WD_SCSI_STATUS;
252 q = *scmd_3393;
253 *sasr_3393 = WD_SCSI_STATUS;
254 *scmd_3393 = ~q;
255 *sasr_3393 = WD_SCSI_STATUS;
256 qq = *scmd_3393;
257 *sasr_3393 = WD_SCSI_STATUS;
258 *scmd_3393 = q;
259 if (qq != q) /* should be read only */
260 return -ENODEV;
261 *sasr_3393 = 0x1e; /* this register is unimplemented */
262 q = *scmd_3393;
263 *sasr_3393 = 0x1e;
264 *scmd_3393 = ~q;
265 *sasr_3393 = 0x1e;
266 qq = *scmd_3393;
267 *sasr_3393 = 0x1e;
268 *scmd_3393 = q;
269 if (qq != q || qq != 0xff) /* should be read only, all 1's */
270 return -ENODEV;
271 *sasr_3393 = WD_TIMEOUT_PERIOD;
272 q = *scmd_3393;
273 *sasr_3393 = WD_TIMEOUT_PERIOD;
274 *scmd_3393 = ~q;
275 *sasr_3393 = WD_TIMEOUT_PERIOD;
276 qq = *scmd_3393;
277 *sasr_3393 = WD_TIMEOUT_PERIOD;
278 *scmd_3393 = q;
279 if (qq != (~q & 0xff)) /* should be read/write */
280 return -ENODEV;
281#endif /* CHECK_WD33C93 */
390 282
391int gvp11_release(struct Scsi_Host *instance) 283 return 0;
284}
285
286static int __devinit gvp11_probe(struct zorro_dev *z,
287 const struct zorro_device_id *ent)
392{ 288{
393#ifdef MODULE 289 struct Scsi_Host *instance;
394 DMA(instance)->CNTR = 0; 290 unsigned long address;
395 release_mem_region(ZTWO_PADDR(instance->base), 256); 291 int error;
292 unsigned int epc;
293 unsigned int default_dma_xfer_mask;
294 struct gvp11_hostdata *hdata;
295 struct gvp11_scsiregs *regs;
296 wd33c93_regs wdregs;
297
298 default_dma_xfer_mask = ent->driver_data;
299
300 /*
301 * Rumors state that some GVP ram boards use the same product
302 * code as the SCSI controllers. Therefore if the board-size
 303 * is not 64KB we assume it is a ram board and bail out.
304 */
305 if (zorro_resource_len(z) != 0x10000)
306 return -ENODEV;
307
308 address = z->resource.start;
309 if (!request_mem_region(address, 256, "wd33c93"))
310 return -EBUSY;
311
312 regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address));
313
314 error = check_wd33c93(regs);
315 if (error)
316 goto fail_check_or_alloc;
317
318 instance = scsi_host_alloc(&gvp11_scsi_template,
319 sizeof(struct gvp11_hostdata));
320 if (!instance) {
321 error = -ENOMEM;
322 goto fail_check_or_alloc;
323 }
324
325 instance->irq = IRQ_AMIGA_PORTS;
326 instance->unique_id = z->slotaddr;
327
328 regs->secret2 = 1;
329 regs->secret1 = 0;
330 regs->secret3 = 15;
331 while (regs->CNTR & GVP11_DMAC_BUSY)
332 ;
333 regs->CNTR = 0;
334 regs->BANK = 0;
335
336 wdregs.SASR = &regs->SASR;
337 wdregs.SCMD = &regs->SCMD;
338
339 hdata = shost_priv(instance);
340 if (gvp11_xfer_mask)
341 hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
342 else
343 hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
344
345 hdata->wh.no_sync = 0xff;
346 hdata->wh.fast = 0;
347 hdata->wh.dma_mode = CTRL_DMA;
348 hdata->regs = regs;
349
350 /*
351 * Check for 14MHz SCSI clock
352 */
353 epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
354 wd33c93_init(instance, wdregs, dma_setup, dma_stop,
355 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
356 : WD33C93_FS_12_15);
357
358 error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
359 "GVP11 SCSI", instance);
360 if (error)
361 goto fail_irq;
362
363 regs->CNTR = GVP11_DMAC_INT_ENABLE;
364
365 error = scsi_add_host(instance, NULL);
366 if (error)
367 goto fail_host;
368
369 zorro_set_drvdata(z, instance);
370 scsi_scan_host(instance);
371 return 0;
372
373fail_host:
396 free_irq(IRQ_AMIGA_PORTS, instance); 374 free_irq(IRQ_AMIGA_PORTS, instance);
397#endif 375fail_irq:
398 return 1; 376 scsi_host_put(instance);
377fail_check_or_alloc:
378 release_mem_region(address, 256);
379 return error;
380}
381
382static void __devexit gvp11_remove(struct zorro_dev *z)
383{
384 struct Scsi_Host *instance = zorro_get_drvdata(z);
385 struct gvp11_hostdata *hdata = shost_priv(instance);
386
387 hdata->regs->CNTR = 0;
388 scsi_remove_host(instance);
389 free_irq(IRQ_AMIGA_PORTS, instance);
390 scsi_host_put(instance);
391 release_mem_region(z->resource.start, 256);
392}
393
394 /*
395 * This should (hopefully) be the correct way to identify
396 * all the different GVP SCSI controllers (except for the
397 * SERIES I though).
398 */
399
400static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = {
401 { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff },
402 { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff },
403 { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff },
404 { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff },
405 { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff },
406 { ZORRO_PROD_GVP_A1291, ~0x07ffffff },
407 { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff },
408 { 0 }
409};
410MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl);
411
412static struct zorro_driver gvp11_driver = {
413 .name = "gvp11",
414 .id_table = gvp11_zorro_tbl,
415 .probe = gvp11_probe,
416 .remove = __devexit_p(gvp11_remove),
417};
418
419static int __init gvp11_init(void)
420{
421 return zorro_register_driver(&gvp11_driver);
422}
423module_init(gvp11_init);
424
425static void __exit gvp11_exit(void)
426{
427 zorro_unregister_driver(&gvp11_driver);
399} 428}
429module_exit(gvp11_exit);
400 430
431MODULE_DESCRIPTION("GVP Series II SCSI");
401MODULE_LICENSE("GPL"); 432MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index e2efdf9601ef..852913cde5dd 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -11,9 +11,6 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14int gvp11_detect(struct scsi_host_template *);
15int gvp11_release(struct Scsi_Host *);
16
17#ifndef CMD_PER_LUN 14#ifndef CMD_PER_LUN
18#define CMD_PER_LUN 2 15#define CMD_PER_LUN 2
19#endif 16#endif
@@ -22,15 +19,13 @@ int gvp11_release(struct Scsi_Host *);
22#define CAN_QUEUE 16 19#define CAN_QUEUE 16
23#endif 20#endif
24 21
25#ifndef HOSTS_C
26
27/* 22/*
28 * if the transfer address ANDed with this results in a non-zero 23 * if the transfer address ANDed with this results in a non-zero
29 * result, then we can't use DMA. 24 * result, then we can't use DMA.
30 */ 25 */
31#define GVP11_XFER_MASK (0xff000001) 26#define GVP11_XFER_MASK (0xff000001)
32 27
33typedef struct { 28struct gvp11_scsiregs {
34 unsigned char pad1[64]; 29 unsigned char pad1[64];
35 volatile unsigned short CNTR; 30 volatile unsigned short CNTR;
36 unsigned char pad2[31]; 31 unsigned char pad2[31];
@@ -46,7 +41,7 @@ typedef struct {
46 volatile unsigned short SP_DMA; 41 volatile unsigned short SP_DMA;
47 volatile unsigned short secret2; /* store 1 here */ 42 volatile unsigned short secret2; /* store 1 here */
48 volatile unsigned short secret3; /* store 15 here */ 43 volatile unsigned short secret3; /* store 15 here */
49} gvp11_scsiregs; 44};
50 45
51/* bits in CNTR */ 46/* bits in CNTR */
52#define GVP11_DMAC_BUSY (1<<0) 47#define GVP11_DMAC_BUSY (1<<0)
@@ -54,6 +49,4 @@ typedef struct {
54#define GVP11_DMAC_INT_ENABLE (1<<3) 49#define GVP11_DMAC_INT_ENABLE (1<<3)
55#define GVP11_DMAC_DIR_WRITE (1<<4) 50#define GVP11_DMAC_DIR_WRITE (1<<4)
56 51
57#endif /* else def HOSTS_C */
58
59#endif /* GVP11_H */ 52#endif /* GVP11_H */
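
The gvp11 conversion above replaces the old detect/release scan with a zorro bus driver: the id table carries each controller's default DMA transfer mask in driver_data, and gvp11_probe() reads it from the matched entry. A tiny standalone sketch of that id-table/driver_data lookup follows; the product ids below are made up for the demo, the real ones being the ZORRO_PROD_GVP_* entries in the table above.

/* build: cc -std=c11 idtable.c */
#include <stdint.h>
#include <stdio.h>

struct zorro_id {
	uint32_t id;
	uint32_t driver_data;   /* default dma_xfer_mask */
};

static const struct zorro_id table[] = {
	{ 0x07d10b00, ~0x00ffffffu },   /* hypothetical Series II id   */
	{ 0x07d10d00, ~0x01ffffffu },   /* hypothetical G-Force 030 id */
	{ 0 }
};

static const struct zorro_id *match(uint32_t id)
{
	for (const struct zorro_id *e = table; e->id; e++)
		if (e->id == id)
			return e;
	return NULL;
}

int main(void)
{
	const struct zorro_id *ent = match(0x07d10d00);

	if (ent)
		printf("probe: dma_xfer_mask = 0x%08x\n",
		       (unsigned)ent->driver_data);
	return 0;
}
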
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 6a6661c35b2f..82ea4a8226b0 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -567,7 +567,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
567static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) 567static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
568{ 568{
569 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 569 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
570 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 570 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
571 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
571 dma_addr_t dma_addr = ipr_cmd->dma_addr; 572 dma_addr_t dma_addr = ipr_cmd->dma_addr;
572 573
573 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 574 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
@@ -576,19 +577,19 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
576 ioarcb->ioadl_len = 0; 577 ioarcb->ioadl_len = 0;
577 ioarcb->read_ioadl_len = 0; 578 ioarcb->read_ioadl_len = 0;
578 579
579 if (ipr_cmd->ioa_cfg->sis64) 580 if (ipr_cmd->ioa_cfg->sis64) {
580 ioarcb->u.sis64_addr_data.data_ioadl_addr = 581 ioarcb->u.sis64_addr_data.data_ioadl_addr =
581 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 582 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
582 else { 583 ioasa64->u.gata.status = 0;
584 } else {
583 ioarcb->write_ioadl_addr = 585 ioarcb->write_ioadl_addr =
584 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 586 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
585 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 587 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
588 ioasa->u.gata.status = 0;
586 } 589 }
587 590
588 ioasa->ioasc = 0; 591 ioasa->hdr.ioasc = 0;
589 ioasa->residual_data_len = 0; 592 ioasa->hdr.residual_data_len = 0;
590 ioasa->u.gata.status = 0;
591
592 ipr_cmd->scsi_cmd = NULL; 593 ipr_cmd->scsi_cmd = NULL;
593 ipr_cmd->qc = NULL; 594 ipr_cmd->qc = NULL;
594 ipr_cmd->sense_buffer[0] = 0; 595 ipr_cmd->sense_buffer[0] = 0;
@@ -768,8 +769,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
768 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { 769 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
769 list_del(&ipr_cmd->queue); 770 list_del(&ipr_cmd->queue);
770 771
771 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); 772 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
772 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID); 773 ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
773 774
774 if (ipr_cmd->scsi_cmd) 775 if (ipr_cmd->scsi_cmd)
775 ipr_cmd->done = ipr_scsi_eh_done; 776 ipr_cmd->done = ipr_scsi_eh_done;
@@ -1040,7 +1041,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
1040 proto = cfgtew->u.cfgte64->proto; 1041 proto = cfgtew->u.cfgte64->proto;
1041 res->res_flags = cfgtew->u.cfgte64->res_flags; 1042 res->res_flags = cfgtew->u.cfgte64->res_flags;
1042 res->qmodel = IPR_QUEUEING_MODEL64(res); 1043 res->qmodel = IPR_QUEUEING_MODEL64(res);
1043 res->type = cfgtew->u.cfgte64->res_type & 0x0f; 1044 res->type = cfgtew->u.cfgte64->res_type;
1044 1045
1045 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, 1046 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1046 sizeof(res->res_path)); 1047 sizeof(res->res_path));
@@ -1319,7 +1320,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1319{ 1320{
1320 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 1321 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1321 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 1322 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1322 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 1323 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1323 1324
1324 list_del(&hostrcb->queue); 1325 list_del(&hostrcb->queue);
1325 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 1326 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -2354,7 +2355,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2354{ 2355{
2355 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2356 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2357 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2357 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 2358 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2358 u32 fd_ioasc; 2359 u32 fd_ioasc;
2359 2360
2360 if (ioa_cfg->sis64) 2361 if (ioa_cfg->sis64)
@@ -4509,11 +4510,16 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4509 } 4510 }
4510 4511
4511 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 4512 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4512 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4513 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4513 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4514 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4514 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) 4515 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4515 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata, 4516 if (ipr_cmd->ioa_cfg->sis64)
4516 sizeof(struct ipr_ioasa_gata)); 4517 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4518 sizeof(struct ipr_ioasa_gata));
4519 else
4520 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4521 sizeof(struct ipr_ioasa_gata));
4522 }
4517 4523
4518 LEAVE; 4524 LEAVE;
4519 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); 4525 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
@@ -4768,7 +4774,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4768 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", 4774 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4769 scsi_cmd->cmnd[0]); 4775 scsi_cmd->cmnd[0]);
4770 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); 4776 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4771 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4777 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4772 4778
4773 /* 4779 /*
4774 * If the abort task timed out and we sent a bus reset, we will get 4780 * If the abort task timed out and we sent a bus reset, we will get
@@ -4812,15 +4818,39 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4812/** 4818/**
4813 * ipr_handle_other_interrupt - Handle "other" interrupts 4819 * ipr_handle_other_interrupt - Handle "other" interrupts
4814 * @ioa_cfg: ioa config struct 4820 * @ioa_cfg: ioa config struct
4815 * @int_reg: interrupt register
4816 * 4821 *
4817 * Return value: 4822 * Return value:
4818 * IRQ_NONE / IRQ_HANDLED 4823 * IRQ_NONE / IRQ_HANDLED
4819 **/ 4824 **/
4820static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, 4825static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
4821 volatile u32 int_reg)
4822{ 4826{
4823 irqreturn_t rc = IRQ_HANDLED; 4827 irqreturn_t rc = IRQ_HANDLED;
4828 volatile u32 int_reg, int_mask_reg;
4829
4830 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4831 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4832
4833 /* If an interrupt on the adapter did not occur, ignore it.
4834 * Or in the case of SIS 64, check for a stage change interrupt.
4835 */
4836 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4837 if (ioa_cfg->sis64) {
4838 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4839 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4840 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4841
4842 /* clear stage change */
4843 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4844 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4845 list_del(&ioa_cfg->reset_cmd->queue);
4846 del_timer(&ioa_cfg->reset_cmd->timer);
4847 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4848 return IRQ_HANDLED;
4849 }
4850 }
4851
4852 return IRQ_NONE;
4853 }
4824 4854
4825 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 4855 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4826 /* Mask the interrupt */ 4856 /* Mask the interrupt */
@@ -4881,7 +4911,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4881{ 4911{
4882 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 4912 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4883 unsigned long lock_flags = 0; 4913 unsigned long lock_flags = 0;
4884 volatile u32 int_reg, int_mask_reg; 4914 volatile u32 int_reg;
4885 u32 ioasc; 4915 u32 ioasc;
4886 u16 cmd_index; 4916 u16 cmd_index;
4887 int num_hrrq = 0; 4917 int num_hrrq = 0;
@@ -4896,33 +4926,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4896 return IRQ_NONE; 4926 return IRQ_NONE;
4897 } 4927 }
4898 4928
4899 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4900 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4901
4902 /* If an interrupt on the adapter did not occur, ignore it.
4903 * Or in the case of SIS 64, check for a stage change interrupt.
4904 */
4905 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4906 if (ioa_cfg->sis64) {
4907 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4908 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4909 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4910
4911 /* clear stage change */
4912 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4913 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4914 list_del(&ioa_cfg->reset_cmd->queue);
4915 del_timer(&ioa_cfg->reset_cmd->timer);
4916 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4917 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918 return IRQ_HANDLED;
4919 }
4920 }
4921
4922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4923 return IRQ_NONE;
4924 }
4925
4926 while (1) { 4929 while (1) {
4927 ipr_cmd = NULL; 4930 ipr_cmd = NULL;
4928 4931
@@ -4940,7 +4943,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4940 4943
4941 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; 4944 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4942 4945
4943 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4946 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4944 4947
4945 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); 4948 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4946 4949
@@ -4962,7 +4965,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4962 /* Clear the PCI interrupt */ 4965 /* Clear the PCI interrupt */
4963 do { 4966 do {
4964 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 4967 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4965 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; 4968 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
4966 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 4969 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4967 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 4970 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4968 4971
@@ -4977,7 +4980,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4977 } 4980 }
4978 4981
4979 if (unlikely(rc == IRQ_NONE)) 4982 if (unlikely(rc == IRQ_NONE))
4980 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); 4983 rc = ipr_handle_other_interrupt(ioa_cfg);
4981 4984
4982 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4985 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4983 return rc; 4986 return rc;
@@ -5014,6 +5017,10 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5014 5017
5015 ipr_cmd->dma_use_sg = nseg; 5018 ipr_cmd->dma_use_sg = nseg;
5016 5019
5020 ioarcb->data_transfer_length = cpu_to_be32(length);
5021 ioarcb->ioadl_len =
5022 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5023
5017 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5024 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5018 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5025 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5019 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5026 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
@@ -5135,7 +5142,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5135 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5142 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5136 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5143 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5137 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5144 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5138 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5145 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5139 5146
5140 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5147 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5141 scsi_cmd->result |= (DID_ERROR << 16); 5148 scsi_cmd->result |= (DID_ERROR << 16);
@@ -5166,7 +5173,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5166static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) 5173static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5167{ 5174{
5168 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5175 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5169 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5176 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5170 dma_addr_t dma_addr = ipr_cmd->dma_addr; 5177 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5171 5178
5172 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 5179 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
@@ -5174,8 +5181,8 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5174 ioarcb->read_data_transfer_length = 0; 5181 ioarcb->read_data_transfer_length = 0;
5175 ioarcb->ioadl_len = 0; 5182 ioarcb->ioadl_len = 0;
5176 ioarcb->read_ioadl_len = 0; 5183 ioarcb->read_ioadl_len = 0;
5177 ioasa->ioasc = 0; 5184 ioasa->hdr.ioasc = 0;
5178 ioasa->residual_data_len = 0; 5185 ioasa->hdr.residual_data_len = 0;
5179 5186
5180 if (ipr_cmd->ioa_cfg->sis64) 5187 if (ipr_cmd->ioa_cfg->sis64)
5181 ioarcb->u.sis64_addr_data.data_ioadl_addr = 5188 ioarcb->u.sis64_addr_data.data_ioadl_addr =
@@ -5200,7 +5207,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5200static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 5207static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5201{ 5208{
5202 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5209 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5203 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5210 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5204 5211
5205 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5212 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5206 ipr_erp_done(ipr_cmd); 5213 ipr_erp_done(ipr_cmd);
@@ -5277,12 +5284,12 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5277 int i; 5284 int i;
5278 u16 data_len; 5285 u16 data_len;
5279 u32 ioasc, fd_ioasc; 5286 u32 ioasc, fd_ioasc;
5280 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5287 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5281 __be32 *ioasa_data = (__be32 *)ioasa; 5288 __be32 *ioasa_data = (__be32 *)ioasa;
5282 int error_index; 5289 int error_index;
5283 5290
5284 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; 5291 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5285 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK; 5292 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5286 5293
5287 if (0 == ioasc) 5294 if (0 == ioasc)
5288 return; 5295 return;
@@ -5297,7 +5304,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5297 5304
5298 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { 5305 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5299 /* Don't log an error if the IOA already logged one */ 5306 /* Don't log an error if the IOA already logged one */
5300 if (ioasa->ilid != 0) 5307 if (ioasa->hdr.ilid != 0)
5301 return; 5308 return;
5302 5309
5303 if (!ipr_is_gscsi(res)) 5310 if (!ipr_is_gscsi(res))
@@ -5309,10 +5316,11 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5309 5316
5310 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); 5317 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5311 5318
5312 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) 5319 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5320 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5321 data_len = sizeof(struct ipr_ioasa64);
5322 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5313 data_len = sizeof(struct ipr_ioasa); 5323 data_len = sizeof(struct ipr_ioasa);
5314 else
5315 data_len = be16_to_cpu(ioasa->ret_stat_len);
5316 5324
5317 ipr_err("IOASA Dump:\n"); 5325 ipr_err("IOASA Dump:\n");
5318 5326
@@ -5338,8 +5346,8 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5338 u32 failing_lba; 5346 u32 failing_lba;
5339 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; 5347 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5340 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; 5348 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5341 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5349 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5342 u32 ioasc = be32_to_cpu(ioasa->ioasc); 5350 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5343 5351
5344 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 5352 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5345 5353
@@ -5382,7 +5390,7 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5382 5390
5383 /* Illegal request */ 5391 /* Illegal request */
5384 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && 5392 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5385 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) { 5393 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5386 sense_buf[7] = 10; /* additional length */ 5394 sense_buf[7] = 10; /* additional length */
5387 5395
5388 /* IOARCB was in error */ 5396 /* IOARCB was in error */
@@ -5393,10 +5401,10 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5393 5401
5394 sense_buf[16] = 5402 sense_buf[16] =
5395 ((IPR_FIELD_POINTER_MASK & 5403 ((IPR_FIELD_POINTER_MASK &
5396 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff; 5404 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5397 sense_buf[17] = 5405 sense_buf[17] =
5398 (IPR_FIELD_POINTER_MASK & 5406 (IPR_FIELD_POINTER_MASK &
5399 be32_to_cpu(ioasa->ioasc_specific)) & 0xff; 5407 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5400 } else { 5408 } else {
5401 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { 5409 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5402 if (ipr_is_vset_device(res)) 5410 if (ipr_is_vset_device(res))
@@ -5428,14 +5436,20 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5428 **/ 5436 **/
5429static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) 5437static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5430{ 5438{
5431 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5439 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5440 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5432 5441
5433 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) 5442 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5434 return 0; 5443 return 0;
5435 5444
5436 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 5445 if (ipr_cmd->ioa_cfg->sis64)
5437 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), 5446 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5438 SCSI_SENSE_BUFFERSIZE)); 5447 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5448 SCSI_SENSE_BUFFERSIZE));
5449 else
5450 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5451 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5452 SCSI_SENSE_BUFFERSIZE));
5439 return 1; 5453 return 1;
5440} 5454}
5441 5455
@@ -5455,7 +5469,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5455{ 5469{
5456 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5470 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5457 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5471 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5458 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5472 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5459 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; 5473 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5460 5474
5461 if (!res) { 5475 if (!res) {
@@ -5547,9 +5561,9 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5547{ 5561{
5548 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5562 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5549 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5563 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5550 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5564 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5551 5565
5552 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len)); 5566 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5553 5567
5554 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 5568 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5555 scsi_dma_unmap(ipr_cmd->scsi_cmd); 5569 scsi_dma_unmap(ipr_cmd->scsi_cmd);
@@ -5839,19 +5853,23 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5839 struct ata_queued_cmd *qc = ipr_cmd->qc; 5853 struct ata_queued_cmd *qc = ipr_cmd->qc;
5840 struct ipr_sata_port *sata_port = qc->ap->private_data; 5854 struct ipr_sata_port *sata_port = qc->ap->private_data;
5841 struct ipr_resource_entry *res = sata_port->res; 5855 struct ipr_resource_entry *res = sata_port->res;
5842 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5856 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5843 5857
5844 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata, 5858 if (ipr_cmd->ioa_cfg->sis64)
5845 sizeof(struct ipr_ioasa_gata)); 5859 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5860 sizeof(struct ipr_ioasa_gata));
5861 else
5862 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5863 sizeof(struct ipr_ioasa_gata));
5846 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 5864 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5847 5865
5848 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 5866 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5849 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); 5867 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5850 5868
5851 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 5869 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5852 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5870 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
5853 else 5871 else
5854 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5872 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
5855 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5873 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5856 ata_qc_complete(qc); 5874 ata_qc_complete(qc);
5857} 5875}
@@ -6520,7 +6538,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6520static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) 6538static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6521{ 6539{
6522 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6540 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6523 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6541 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6524 6542
6525 dev_err(&ioa_cfg->pdev->dev, 6543 dev_err(&ioa_cfg->pdev->dev,
6526 "0x%02X failed with IOASC: 0x%08X\n", 6544 "0x%02X failed with IOASC: 0x%08X\n",
@@ -6544,7 +6562,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6544static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 6562static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6545{ 6563{
6546 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6564 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6547 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6565 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6548 6566
6549 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 6567 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6550 ipr_cmd->job_step = ipr_set_supported_devs; 6568 ipr_cmd->job_step = ipr_set_supported_devs;
@@ -6634,7 +6652,7 @@ static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6634 **/ 6652 **/
6635static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) 6653static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6636{ 6654{
6637 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6655 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6638 6656
6639 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 6657 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6640 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 6658 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
@@ -6706,7 +6724,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6706 list_move_tail(&res->queue, &old_res); 6724 list_move_tail(&res->queue, &old_res);
6707 6725
6708 if (ioa_cfg->sis64) 6726 if (ioa_cfg->sis64)
6709 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries; 6727 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6710 else 6728 else
6711 entries = ioa_cfg->u.cfg_table->hdr.num_entries; 6729 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6712 6730
@@ -6792,6 +6810,7 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6792 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6810 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6793 6811
6794 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 6812 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6813 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
6795 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; 6814 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6796 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; 6815 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6797 6816
@@ -7122,7 +7141,9 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7122 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); 7141 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7123 7142
7124 /* sanity check the stage_time value */ 7143 /* sanity check the stage_time value */
7125 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) 7144 if (stage_time == 0)
7145 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7146 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7126 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; 7147 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7127 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) 7148 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7128 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; 7149 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
@@ -7165,13 +7186,14 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7165{ 7186{
7166 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7167 volatile u32 int_reg; 7188 volatile u32 int_reg;
7189 volatile u64 maskval;
7168 7190
7169 ENTER; 7191 ENTER;
7170 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7192 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7171 ipr_init_ioa_mem(ioa_cfg); 7193 ipr_init_ioa_mem(ioa_cfg);
7172 7194
7173 ioa_cfg->allow_interrupts = 1; 7195 ioa_cfg->allow_interrupts = 1;
7174 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 7196 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7175 7197
7176 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7198 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7177 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 7199 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
@@ -7183,9 +7205,12 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7183 /* Enable destructive diagnostics on IOA */ 7205 /* Enable destructive diagnostics on IOA */
7184 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); 7206 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7185 7207
7186 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); 7208 if (ioa_cfg->sis64) {
7187 if (ioa_cfg->sis64) 7209 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7188 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg); 7210 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7211 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7212 } else
7213 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7189 7214
7190 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7215 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7191 7216
@@ -7332,12 +7357,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7332 rc = pci_restore_state(ioa_cfg->pdev); 7357 rc = pci_restore_state(ioa_cfg->pdev);
7333 7358
7334 if (rc != PCIBIOS_SUCCESSFUL) { 7359 if (rc != PCIBIOS_SUCCESSFUL) {
7335 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7360 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7336 return IPR_RC_JOB_CONTINUE; 7361 return IPR_RC_JOB_CONTINUE;
7337 } 7362 }
7338 7363
7339 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { 7364 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7340 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7365 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7341 return IPR_RC_JOB_CONTINUE; 7366 return IPR_RC_JOB_CONTINUE;
7342 } 7367 }
7343 7368
@@ -7364,7 +7389,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7364 } 7389 }
7365 } 7390 }
7366 7391
7367 ENTER; 7392 LEAVE;
7368 return IPR_RC_JOB_CONTINUE; 7393 return IPR_RC_JOB_CONTINUE;
7369} 7394}
7370 7395
@@ -7406,7 +7431,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7406 7431
7407 if (rc != PCIBIOS_SUCCESSFUL) { 7432 if (rc != PCIBIOS_SUCCESSFUL) {
7408 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); 7433 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7409 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7434 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7410 rc = IPR_RC_JOB_CONTINUE; 7435 rc = IPR_RC_JOB_CONTINUE;
7411 } else { 7436 } else {
7412 ipr_cmd->job_step = ipr_reset_bist_done; 7437 ipr_cmd->job_step = ipr_reset_bist_done;
@@ -7665,7 +7690,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7665 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7690 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7666 7691
7667 do { 7692 do {
7668 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 7693 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7669 7694
7670 if (ioa_cfg->reset_cmd != ipr_cmd) { 7695 if (ioa_cfg->reset_cmd != ipr_cmd) {
7671 /* 7696 /*
@@ -8048,13 +8073,13 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8048 ioarcb->u.sis64_addr_data.data_ioadl_addr = 8073 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8049 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 8074 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8050 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = 8075 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8051 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 8076 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8052 } else { 8077 } else {
8053 ioarcb->write_ioadl_addr = 8078 ioarcb->write_ioadl_addr =
8054 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 8079 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8055 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 8080 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8056 ioarcb->ioasa_host_pci_addr = 8081 ioarcb->ioasa_host_pci_addr =
8057 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 8082 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8058 } 8083 }
8059 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 8084 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8060 ipr_cmd->cmd_index = i; 8085 ipr_cmd->cmd_index = i;
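
The sis64 branch of ipr_reset_enable_ioa above now builds one 64-bit mask from the stage-change bit and the operational-interrupt bits and writes it with a single writeq. A minimal userspace sketch of that bit packing, with invented register and bit names (not the driver's), compiles standalone:

/*
 * Illustrative sketch only: compose a 64-bit mask from two 32-bit halves
 * before one 64-bit store, as the sis64 enable path does with writeq().
 */
#include <stdint.h>
#include <stdio.h>

#define STAGE_CHANGE_BIT  0x20000000u   /* hypothetical upper-word bit */
#define OPER_INTERRUPTS   0x00007000u   /* hypothetical lower-word bits */

static void write64(volatile uint64_t *reg, uint64_t val)
{
	*reg = val;	/* stand-in for writeq() */
}

int main(void)
{
	volatile uint64_t clr_mask_reg = 0;
	uint64_t maskval = STAGE_CHANGE_BIT;

	/* upper 32 bits carry the stage-change bit, lower 32 the operational bits */
	maskval = (maskval << 32) | OPER_INTERRUPTS;
	write64(&clr_mask_reg, maskval);

	printf("wrote 0x%016llx\n", (unsigned long long)clr_mask_reg);
	return 0;
}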
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4c267b5e0b96..9ecd2259eb39 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -244,6 +244,7 @@
244#define IPR_RUNTIME_RESET 0x40000000 244#define IPR_RUNTIME_RESET 0x40000000
245 245
246#define IPR_IPL_INIT_MIN_STAGE_TIME 5 246#define IPR_IPL_INIT_MIN_STAGE_TIME 5
247#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15
247#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 248#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
248#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 249#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
249#define IPR_IPL_INIT_STAGE_MASK 0xff000000 250#define IPR_IPL_INIT_STAGE_MASK 0xff000000
@@ -613,7 +614,7 @@ struct ipr_auto_sense {
613 __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; 614 __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)];
614}; 615};
615 616
616struct ipr_ioasa { 617struct ipr_ioasa_hdr {
617 __be32 ioasc; 618 __be32 ioasc;
618#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) 619#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
619#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) 620#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
@@ -645,6 +646,25 @@ struct ipr_ioasa {
645#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) 646#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
646#define IPR_FIELD_POINTER_MASK 0x0000ffff 647#define IPR_FIELD_POINTER_MASK 0x0000ffff
647 648
649}__attribute__((packed, aligned (4)));
650
651struct ipr_ioasa {
652 struct ipr_ioasa_hdr hdr;
653
654 union {
655 struct ipr_ioasa_vset vset;
656 struct ipr_ioasa_af_dasd dasd;
657 struct ipr_ioasa_gpdd gpdd;
658 struct ipr_ioasa_gata gata;
659 } u;
660
661 struct ipr_auto_sense auto_sense;
662}__attribute__((packed, aligned (4)));
663
664struct ipr_ioasa64 {
665 struct ipr_ioasa_hdr hdr;
666 u8 fd_res_path[8];
667
648 union { 668 union {
649 struct ipr_ioasa_vset vset; 669 struct ipr_ioasa_vset vset;
650 struct ipr_ioasa_af_dasd dasd; 670 struct ipr_ioasa_af_dasd dasd;
@@ -804,7 +824,7 @@ struct ipr_hostrcb_array_data_entry_enhanced {
804}__attribute__((packed, aligned (4))); 824}__attribute__((packed, aligned (4)));
805 825
806struct ipr_hostrcb_type_ff_error { 826struct ipr_hostrcb_type_ff_error {
807 __be32 ioa_data[502]; 827 __be32 ioa_data[758];
808}__attribute__((packed, aligned (4))); 828}__attribute__((packed, aligned (4)));
809 829
810struct ipr_hostrcb_type_01_error { 830struct ipr_hostrcb_type_01_error {
@@ -1181,7 +1201,7 @@ struct ipr_resource_entry {
1181 u8 flags; 1201 u8 flags;
1182 __be16 res_flags; 1202 __be16 res_flags;
1183 1203
1184 __be32 type; 1204 u8 type;
1185 1205
1186 u8 qmodel; 1206 u8 qmodel;
1187 struct ipr_std_inq_data std_inq_data; 1207 struct ipr_std_inq_data std_inq_data;
@@ -1464,7 +1484,10 @@ struct ipr_cmnd {
1464 struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; 1484 struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
1465 struct ipr_ata64_ioadl ata_ioadl; 1485 struct ipr_ata64_ioadl ata_ioadl;
1466 } i; 1486 } i;
1467 struct ipr_ioasa ioasa; 1487 union {
1488 struct ipr_ioasa ioasa;
1489 struct ipr_ioasa64 ioasa64;
1490 } s;
1468 struct list_head queue; 1491 struct list_head queue;
1469 struct scsi_cmnd *scsi_cmd; 1492 struct scsi_cmnd *scsi_cmd;
1470 struct ata_queued_cmd *qc; 1493 struct ata_queued_cmd *qc;
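
The ipr.h hunks above split the status area into a shared ipr_ioasa_hdr plus separate 32-bit and 64-bit bodies, and ipr_cmnd now keeps both in the union named s, which is why the .c changes switch between s.ioasa and s.ioasa64 on ioa_cfg->sis64. A rough self-contained sketch of that shape, with all type and field names invented for illustration:

/*
 * Illustrative sketch, not the driver's definitions: two status formats
 * sharing a common header inside one union, selected at run time by a
 * capability flag.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct status_hdr { uint32_t ioasc; uint32_t residual; };

struct status32 { struct status_hdr hdr; uint8_t sense[32]; };
struct status64 { struct status_hdr hdr; uint8_t res_path[8]; uint8_t sense[32]; };

struct cmd {
	int is64;			/* hypothetical stand-in for ioa_cfg->sis64 */
	union {
		struct status32 s32;
		struct status64 s64;
	} s;
};

/* The header sits at offset 0 in both formats (common initial sequence),
 * so shared fields can be read without checking the flag; the
 * format-specific tails cannot. */
static uint32_t cmd_ioasc(const struct cmd *c)
{
	return c->s.s32.hdr.ioasc;
}

int main(void)
{
	struct cmd c;

	memset(&c, 0, sizeof(c));
	c.is64 = 1;
	c.s.s64.hdr.ioasc = 0x05240000u;
	printf("ioasc=0x%08x\n", cmd_ioasc(&c));
	return 0;
}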
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index bf55d3057413..fec47de72535 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -601,10 +601,8 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
601 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 601 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
602 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); 602 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
603 603
604 if (sk_sleep(sock->sk)) { 604 sock->sk->sk_err = EIO;
605 sock->sk->sk_err = EIO; 605 wake_up_interruptible(sk_sleep(sock->sk));
606 wake_up_interruptible(sk_sleep(sock->sk));
607 }
608 606
609 iscsi_conn_stop(cls_conn, flag); 607 iscsi_conn_stop(cls_conn, flag);
610 iscsi_sw_tcp_release_conn(conn); 608 iscsi_sw_tcp_release_conn(conn);
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 716d1785cda7..c29d0dbb9660 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -16,12 +16,12 @@
16#include <linux/stat.h> 16#include <linux/stat.h>
17 17
18 18
19static struct Scsi_Host *mvme147_host = NULL; 19static irqreturn_t mvme147_intr(int irq, void *data)
20
21static irqreturn_t mvme147_intr(int irq, void *dummy)
22{ 20{
21 struct Scsi_Host *instance = data;
22
23 if (irq == MVME147_IRQ_SCSI_PORT) 23 if (irq == MVME147_IRQ_SCSI_PORT)
24 wd33c93_intr(mvme147_host); 24 wd33c93_intr(instance);
25 else 25 else
26 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ 26 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
27 return IRQ_HANDLED; 27 return IRQ_HANDLED;
@@ -29,7 +29,8 @@ static irqreturn_t mvme147_intr(int irq, void *dummy)
29 29
30static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 30static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
31{ 31{
32 struct WD33C93_hostdata *hdata = shost_priv(mvme147_host); 32 struct Scsi_Host *instance = cmd->device->host;
33 struct WD33C93_hostdata *hdata = shost_priv(instance);
33 unsigned char flags = 0x01; 34 unsigned char flags = 0x01;
34 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 35 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
35 36
@@ -66,6 +67,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
66int mvme147_detect(struct scsi_host_template *tpnt) 67int mvme147_detect(struct scsi_host_template *tpnt)
67{ 68{
68 static unsigned char called = 0; 69 static unsigned char called = 0;
70 struct Scsi_Host *instance;
69 wd33c93_regs regs; 71 wd33c93_regs regs;
70 struct WD33C93_hostdata *hdata; 72 struct WD33C93_hostdata *hdata;
71 73
@@ -76,25 +78,25 @@ int mvme147_detect(struct scsi_host_template *tpnt)
76 tpnt->proc_name = "MVME147"; 78 tpnt->proc_name = "MVME147";
77 tpnt->proc_info = &wd33c93_proc_info; 79 tpnt->proc_info = &wd33c93_proc_info;
78 80
79 mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); 81 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
80 if (!mvme147_host) 82 if (!instance)
81 goto err_out; 83 goto err_out;
82 84
83 mvme147_host->base = 0xfffe4000; 85 instance->base = 0xfffe4000;
84 mvme147_host->irq = MVME147_IRQ_SCSI_PORT; 86 instance->irq = MVME147_IRQ_SCSI_PORT;
85 regs.SASR = (volatile unsigned char *)0xfffe4000; 87 regs.SASR = (volatile unsigned char *)0xfffe4000;
86 regs.SCMD = (volatile unsigned char *)0xfffe4001; 88 regs.SCMD = (volatile unsigned char *)0xfffe4001;
87 hdata = shost_priv(mvme147_host); 89 hdata = shost_priv(instance);
88 hdata->no_sync = 0xff; 90 hdata->no_sync = 0xff;
89 hdata->fast = 0; 91 hdata->fast = 0;
90 hdata->dma_mode = CTRL_DMA; 92 hdata->dma_mode = CTRL_DMA;
91 wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); 93 wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
92 94
93 if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, 95 if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
94 "MVME147 SCSI PORT", mvme147_intr)) 96 "MVME147 SCSI PORT", instance))
95 goto err_unregister; 97 goto err_unregister;
96 if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, 98 if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
97 "MVME147 SCSI DMA", mvme147_intr)) 99 "MVME147 SCSI DMA", instance))
98 goto err_free_irq; 100 goto err_free_irq;
99#if 0 /* Disabled; causes problems booting */ 101#if 0 /* Disabled; causes problems booting */
100 m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ 102 m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
@@ -113,7 +115,7 @@ int mvme147_detect(struct scsi_host_template *tpnt)
113err_free_irq: 115err_free_irq:
114 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); 116 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
115err_unregister: 117err_unregister:
116 scsi_unregister(mvme147_host); 118 scsi_unregister(instance);
117err_out: 119err_out:
118 return 0; 120 return 0;
119} 121}
@@ -132,9 +134,6 @@ static int mvme147_bus_reset(struct scsi_cmnd *cmd)
132 return SUCCESS; 134 return SUCCESS;
133} 135}
134 136
135#define HOSTS_C
136
137#include "mvme147.h"
138 137
139static struct scsi_host_template driver_template = { 138static struct scsi_host_template driver_template = {
140 .proc_name = "MVME147", 139 .proc_name = "MVME147",
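
The mvme147 changes retire the file-scope mvme147_host pointer and instead hand the Scsi_Host to the interrupt handler through its data argument, so each request_irq registration carries its own context. A tiny standalone sketch of the same pattern, using made-up names:

/*
 * Userspace sketch: pass the device instance as the callback's context
 * argument instead of relying on a file-scope global.
 */
#include <stdio.h>

struct host { int irq; const char *name; };

/* handler receives the instance it was registered with */
static void handler(int irq, void *data)
{
	struct host *h = data;

	printf("irq %d handled for %s\n", irq, h->name);
}

/* stand-in for request_irq(): just invokes the handler once */
static int register_and_fire(int irq, void (*fn)(int, void *), void *data)
{
	fn(irq, data);
	return 0;
}

int main(void)
{
	struct host a = { .irq = 10, .name = "port" };
	struct host b = { .irq = 11, .name = "dma" };

	/* two registrations coexist because each carries its own data */
	register_and_fire(a.irq, handler, &a);
	register_and_fire(b.irq, handler, &b);
	return 0;
}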
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 8dbf1c3afb7b..d64b7178fa08 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -3587,7 +3587,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3587 if (i == (-ENOSPC)) { 3587 if (i == (-ENOSPC)) {
3588 transfer = STp->buffer->writing; /* FIXME -- check this logic */ 3588 transfer = STp->buffer->writing; /* FIXME -- check this logic */
3589 if (transfer <= do_count) { 3589 if (transfer <= do_count) {
3590 filp->f_pos += do_count - transfer; 3590 *ppos += do_count - transfer;
3591 count -= do_count - transfer; 3591 count -= do_count - transfer;
3592 if (STps->drv_block >= 0) { 3592 if (STps->drv_block >= 0) {
3593 STps->drv_block += (do_count - transfer) / STp->block_size; 3593 STps->drv_block += (do_count - transfer) / STp->block_size;
@@ -3625,7 +3625,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3625 goto out; 3625 goto out;
3626 } 3626 }
3627 3627
3628 filp->f_pos += do_count; 3628 *ppos += do_count;
3629 b_point += do_count; 3629 b_point += do_count;
3630 count -= do_count; 3630 count -= do_count;
3631 if (STps->drv_block >= 0) { 3631 if (STps->drv_block >= 0) {
@@ -3647,7 +3647,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3647 if (STps->drv_block >= 0) { 3647 if (STps->drv_block >= 0) {
3648 STps->drv_block += blks; 3648 STps->drv_block += blks;
3649 } 3649 }
3650 filp->f_pos += count; 3650 *ppos += count;
3651 count = 0; 3651 count = 0;
3652 } 3652 }
3653 3653
@@ -3823,7 +3823,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo
3823 } 3823 }
3824 STp->logical_blk_num += transfer / STp->block_size; 3824 STp->logical_blk_num += transfer / STp->block_size;
3825 STps->drv_block += transfer / STp->block_size; 3825 STps->drv_block += transfer / STp->block_size;
3826 filp->f_pos += transfer; 3826 *ppos += transfer;
3827 buf += transfer; 3827 buf += transfer;
3828 total += transfer; 3828 total += transfer;
3829 } 3829 }
@@ -5626,6 +5626,7 @@ static const struct file_operations osst_fops = {
5626 .open = os_scsi_tape_open, 5626 .open = os_scsi_tape_open,
5627 .flush = os_scsi_tape_flush, 5627 .flush = os_scsi_tape_flush,
5628 .release = os_scsi_tape_close, 5628 .release = os_scsi_tape_close,
5629 .llseek = noop_llseek,
5629}; 5630};
5630 5631
5631static int osst_supports(struct scsi_device * SDp) 5632static int osst_supports(struct scsi_device * SDp)
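
The osst hunks above stop touching filp->f_pos from the read and write paths and advance the *ppos argument instead (st.c gets the matching noop_llseek). A minimal userspace sketch of a handler that advances the position pointer it is handed, with invented names:

/*
 * Illustrative sketch: a write-style handler should advance the position
 * pointer passed to it (*ppos) rather than a file position cached
 * elsewhere, so positional callers stay consistent.
 */
#include <stdio.h>
#include <stddef.h>

typedef long long loff_t_sketch;

/* consumes 'count' bytes and advances *ppos, like the fixed handler */
static long fake_write(const char *buf, size_t count, loff_t_sketch *ppos)
{
	(void)buf;
	*ppos += (loff_t_sketch)count;
	return (long)count;
}

int main(void)
{
	loff_t_sketch pos = 0;

	fake_write("data", 4, &pos);
	fake_write("more", 4, &pos);
	printf("position now %lld\n", (long long)pos);
	return 0;
}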
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9798c2c06b93..1c027a97d8b9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -492,19 +492,20 @@ void scsi_target_reap(struct scsi_target *starget)
492 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 492 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
493 unsigned long flags; 493 unsigned long flags;
494 enum scsi_target_state state; 494 enum scsi_target_state state;
495 int empty; 495 int empty = 0;
496 496
497 spin_lock_irqsave(shost->host_lock, flags); 497 spin_lock_irqsave(shost->host_lock, flags);
498 state = starget->state; 498 state = starget->state;
499 empty = --starget->reap_ref == 0 && 499 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
500 list_empty(&starget->devices) ? 1 : 0; 500 empty = 1;
501 starget->state = STARGET_DEL;
502 }
501 spin_unlock_irqrestore(shost->host_lock, flags); 503 spin_unlock_irqrestore(shost->host_lock, flags);
502 504
503 if (!empty) 505 if (!empty)
504 return; 506 return;
505 507
506 BUG_ON(state == STARGET_DEL); 508 BUG_ON(state == STARGET_DEL);
507 starget->state = STARGET_DEL;
508 if (state == STARGET_CREATED) 509 if (state == STARGET_CREATED)
509 scsi_target_destroy(starget); 510 scsi_target_destroy(starget);
510 else 511 else
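
The scsi_target_reap change above moves the transition to STARGET_DEL inside the host lock, so the last-reference test and the state change become one atomic step. A small pthreads sketch of that check-and-mark pattern, with invented names:

/*
 * Illustrative sketch: the "last reference and empty" test and the
 * transition to the deleted state happen under the same lock, so a
 * concurrent caller cannot observe the stale state.
 */
#include <pthread.h>
#include <stdio.h>

struct target {
	pthread_mutex_t lock;
	int reap_ref;
	int deleted;
};

/* returns 1 if this caller must do the teardown work */
static int target_reap(struct target *t)
{
	int empty = 0;

	pthread_mutex_lock(&t->lock);
	if (--t->reap_ref == 0 && !t->deleted) {
		empty = 1;
		t->deleted = 1;	/* state change made while still holding the lock */
	}
	pthread_mutex_unlock(&t->lock);

	return empty;
}

int main(void)
{
	struct target t = { .lock = PTHREAD_MUTEX_INITIALIZER, .reap_ref = 2, .deleted = 0 };

	printf("first reap: %d\n", target_reap(&t));	/* 0: refs remain */
	printf("second reap: %d\n", target_reap(&t));	/* 1: last ref, marked deleted */
	return 0;
}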
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3ea1a713ef25..24211d0efa6d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3962,6 +3962,7 @@ static const struct file_operations st_fops =
3962 .open = st_open, 3962 .open = st_open,
3963 .flush = st_flush, 3963 .flush = st_flush,
3964 .release = st_release, 3964 .release = st_release,
3965 .llseek = noop_llseek,
3965}; 3966};
3966 3967
3967static int st_probe(struct device *dev) 3968static int st_probe(struct device *dev)
diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c
index 34aba30eb84b..f5b4ca581541 100644
--- a/drivers/sfi/sfi_acpi.c
+++ b/drivers/sfi/sfi_acpi.c
@@ -173,3 +173,44 @@ int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id,
173 sfi_acpi_put_table(table); 173 sfi_acpi_put_table(table);
174 return ret; 174 return ret;
175} 175}
176
177static ssize_t sfi_acpi_table_show(struct file *filp, struct kobject *kobj,
178 struct bin_attribute *bin_attr, char *buf,
179 loff_t offset, size_t count)
180{
181 struct sfi_table_attr *tbl_attr =
182 container_of(bin_attr, struct sfi_table_attr, attr);
183 struct acpi_table_header *th = NULL;
184 struct sfi_table_key key;
185 ssize_t cnt;
186
187 key.sig = tbl_attr->name;
188 key.oem_id = NULL;
189 key.oem_table_id = NULL;
190
191 th = sfi_acpi_get_table(&key);
192 if (!th)
193 return 0;
194
195 cnt = memory_read_from_buffer(buf, count, &offset,
196 th, th->length);
197 sfi_acpi_put_table(th);
198
199 return cnt;
200}
201
202
203void __init sfi_acpi_sysfs_init(void)
204{
205 u32 tbl_cnt, i;
206 struct sfi_table_attr *tbl_attr;
207
208 tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64);
209 for (i = 0; i < tbl_cnt; i++) {
210 tbl_attr =
211 sfi_sysfs_install_table(xsdt_va->table_offset_entry[i]);
212 tbl_attr->attr.read = sfi_acpi_table_show;
213 }
214
215 return;
216}
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index b204a0929139..005195958647 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -67,6 +67,7 @@
67#include <linux/acpi.h> 67#include <linux/acpi.h>
68#include <linux/init.h> 68#include <linux/init.h>
69#include <linux/sfi.h> 69#include <linux/sfi.h>
70#include <linux/slab.h>
70 71
71#include "sfi_core.h" 72#include "sfi_core.h"
72 73
@@ -382,6 +383,102 @@ static __init int sfi_find_syst(void)
382 return -1; 383 return -1;
383} 384}
384 385
386static struct kobject *sfi_kobj;
387static struct kobject *tables_kobj;
388
389static ssize_t sfi_table_show(struct file *filp, struct kobject *kobj,
390 struct bin_attribute *bin_attr, char *buf,
391 loff_t offset, size_t count)
392{
393 struct sfi_table_attr *tbl_attr =
394 container_of(bin_attr, struct sfi_table_attr, attr);
395 struct sfi_table_header *th = NULL;
396 struct sfi_table_key key;
397 ssize_t cnt;
398
399 key.sig = tbl_attr->name;
400 key.oem_id = NULL;
401 key.oem_table_id = NULL;
402
403 if (strncmp(SFI_SIG_SYST, tbl_attr->name, SFI_SIGNATURE_SIZE)) {
404 th = sfi_get_table(&key);
405 if (!th)
406 return 0;
407
408 cnt = memory_read_from_buffer(buf, count, &offset,
409 th, th->len);
410 sfi_put_table(th);
411 } else
412 cnt = memory_read_from_buffer(buf, count, &offset,
413 syst_va, syst_va->header.len);
414
415 return cnt;
416}
417
418struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa)
419{
420 struct sfi_table_attr *tbl_attr;
421 struct sfi_table_header *th;
422 int ret;
423
424 tbl_attr = kzalloc(sizeof(struct sfi_table_attr), GFP_KERNEL);
425 if (!tbl_attr)
426 return NULL;
427
428 th = sfi_map_table(pa);
429 if (!th || !th->sig[0]) {
430 kfree(tbl_attr);
431 return NULL;
432 }
433
434 sysfs_attr_init(&tbl_attr->attr.attr);
435 memcpy(tbl_attr->name, th->sig, SFI_SIGNATURE_SIZE);
436
437 tbl_attr->attr.size = 0;
438 tbl_attr->attr.read = sfi_table_show;
439 tbl_attr->attr.attr.name = tbl_attr->name;
440 tbl_attr->attr.attr.mode = 0400;
441
442 ret = sysfs_create_bin_file(tables_kobj,
443 &tbl_attr->attr);
444 if (ret)
445 kfree(tbl_attr);
446
447 sfi_unmap_table(th);
448 return tbl_attr;
449}
450
451static int __init sfi_sysfs_init(void)
452{
453 int tbl_cnt, i;
454
455 if (sfi_disabled)
456 return 0;
457
458 sfi_kobj = kobject_create_and_add("sfi", firmware_kobj);
459 if (!sfi_kobj)
460 return 0;
461
462 tables_kobj = kobject_create_and_add("tables", sfi_kobj);
463 if (!tables_kobj) {
464 kobject_put(sfi_kobj);
465 return 0;
466 }
467
468 sfi_sysfs_install_table(syst_pa);
469
470 tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
471
472 for (i = 0; i < tbl_cnt; i++)
473 sfi_sysfs_install_table(syst_va->pentry[i]);
474
475 sfi_acpi_sysfs_init();
476 kobject_uevent(sfi_kobj, KOBJ_ADD);
477 kobject_uevent(tables_kobj, KOBJ_ADD);
478 pr_info("SFI sysfs interfaces init success\n");
479 return 0;
480}
481
385void __init sfi_init(void) 482void __init sfi_init(void)
386{ 483{
387 if (!acpi_disabled) 484 if (!acpi_disabled)
@@ -390,7 +487,7 @@ void __init sfi_init(void)
390 if (sfi_disabled) 487 if (sfi_disabled)
391 return; 488 return;
392 489
393 pr_info("Simple Firmware Interface v0.7 http://simplefirmware.org\n"); 490 pr_info("Simple Firmware Interface v0.81 http://simplefirmware.org\n");
394 491
395 if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init()) 492 if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init())
396 disable_sfi(); 493 disable_sfi();
@@ -414,3 +511,9 @@ void __init sfi_init_late(void)
414 511
415 sfi_acpi_init(); 512 sfi_acpi_init();
416} 513}
514
515/*
516 * The reason we put it here is that we need to wait until /sys/firmware
517 * is set up, so our interface can be registered in /sys/firmware/sfi
518 */
519core_initcall(sfi_sysfs_init);
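
The new sfi_table_show and sfi_acpi_table_show callbacks above serve each table through a sysfs bin_attribute read, copying at most count bytes starting at the requested offset via memory_read_from_buffer. A simplified userspace stand-in for that offset-bounded copy, with invented names:

/*
 * Illustrative sketch: copy at most 'count' bytes of a fixed-size table
 * starting at *offset and advance the offset, returning 0 once past the
 * end. memory_read_from_buffer() does the equivalent in-kernel.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

static ptrdiff_t read_from_buffer(char *to, size_t count, long *offset,
				  const void *from, size_t available)
{
	size_t off = (size_t)*offset;
	size_t n;

	if (off >= available)
		return 0;		/* past the end: nothing to read */
	n = available - off;
	if (n > count)
		n = count;
	memcpy(to, (const char *)from + off, n);
	*offset += (long)n;
	return (ptrdiff_t)n;
}

int main(void)
{
	const char table[] = "SYST-table-bytes";
	char buf[8];
	long off = 0;
	ptrdiff_t n;

	while ((n = read_from_buffer(buf, sizeof(buf), &off, table, sizeof(table))) > 0)
		printf("read %td bytes, offset now %ld\n", n, off);
	return 0;
}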
diff --git a/drivers/sfi/sfi_core.h b/drivers/sfi/sfi_core.h
index da82d39e104d..b7cf220d44ec 100644
--- a/drivers/sfi/sfi_core.h
+++ b/drivers/sfi/sfi_core.h
@@ -61,6 +61,12 @@ struct sfi_table_key{
61 char *oem_table_id; 61 char *oem_table_id;
62}; 62};
63 63
64/* sysfs interface */
65struct sfi_table_attr {
66 struct bin_attribute attr;
67 char name[8];
68};
69
64#define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL } 70#define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL }
65 71
66extern int __init sfi_acpi_init(void); 72extern int __init sfi_acpi_init(void);
@@ -68,3 +74,5 @@ extern struct sfi_table_header *sfi_check_table(u64 paddr,
68 struct sfi_table_key *key); 74 struct sfi_table_key *key);
69struct sfi_table_header *sfi_get_table(struct sfi_table_key *key); 75struct sfi_table_header *sfi_get_table(struct sfi_table_key *key);
70extern void sfi_put_table(struct sfi_table_header *table); 76extern void sfi_put_table(struct sfi_table_header *table);
77extern struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa);
78extern void __init sfi_acpi_sysfs_init(void);
diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/go7007/saa7134-go7007.c
index 49f0d31c118a..cf7c34a99459 100644
--- a/drivers/staging/go7007/saa7134-go7007.c
+++ b/drivers/staging/go7007/saa7134-go7007.c
@@ -242,13 +242,13 @@ static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev,
242 printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n", 242 printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n",
243 (status >> 16) & 0x0f); 243 (status >> 16) & 0x0f);
244 if (status & 0x100000) { 244 if (status & 0x100000) {
245 dma_sync_single(&dev->pci->dev, 245 dma_sync_single_for_cpu(&dev->pci->dev,
246 saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); 246 saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
247 go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE); 247 go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE);
248 saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma)); 248 saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma));
249 } else { 249 } else {
250 dma_sync_single(&dev->pci->dev, 250 dma_sync_single_for_cpu(&dev->pci->dev,
251 saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); 251 saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
252 go7007_parse_video_stream(go, saa->top, PAGE_SIZE); 252 go7007_parse_video_stream(go, saa->top, PAGE_SIZE);
253 saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma)); 253 saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma));
254 } 254 }
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index e89304c72568..b53deee25d74 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -5879,20 +5879,13 @@ out:
5879static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp) 5879static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
5880{ 5880{
5881 IXJ_FILTER_CADENCE *lcp; 5881 IXJ_FILTER_CADENCE *lcp;
5882 lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL); 5882 lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
5883 if (lcp == NULL) { 5883 if (IS_ERR(lcp)) {
5884 if(ixjdebug & 0x0001) { 5884 if(ixjdebug & 0x0001) {
5885 printk(KERN_INFO "Could not allocate memory for cadence\n"); 5885 printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
5886 } 5886 }
5887 return -ENOMEM; 5887 return PTR_ERR(lcp);
5888 } 5888 }
5889 if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) {
5890 if(ixjdebug & 0x0001) {
5891 printk(KERN_INFO "Could not copy cadence to kernel\n");
5892 }
5893 kfree(lcp);
5894 return -EFAULT;
5895 }
5896 if (lcp->filter > 5) { 5889 if (lcp->filter > 5) {
5897 if(ixjdebug & 0x0001) { 5890 if(ixjdebug & 0x0001) {
5898 printk(KERN_INFO "Cadence out of range\n"); 5891 printk(KERN_INFO "Cadence out of range\n");
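
The ixj change above collapses kmalloc plus copy_from_user plus the error-path kfree into one memdup_user call that either returns a kernel copy or an ERR_PTR. A userspace sketch of the same duplicate-or-fail shape, with invented names:

/*
 * Illustrative sketch: replace "allocate, then copy, then free on error"
 * with a single duplicate-or-fail helper, as memdup_user() does in-kernel.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* returns a heap copy of src, or NULL with errno set */
static void *dup_buffer(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p) {
		errno = ENOMEM;
		return NULL;
	}
	memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char cadence[] = { 1, 2, 3, 4 };
	char *copy = dup_buffer(cadence, sizeof(cadence));

	if (!copy) {
		perror("dup_buffer");
		return 1;
	}
	printf("copied %zu bytes, first=%d\n", sizeof(cadence), copy[0]);
	free(copy);
	return 0;
}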
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 23b2a8c0dbfc..b020ba7f1cf2 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -501,7 +501,9 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
501 501
502static int __devinit bfin_bf54x_probe(struct platform_device *pdev) 502static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
503{ 503{
504#ifndef NO_BL_SUPPORT
504 struct backlight_properties props; 505 struct backlight_properties props;
506#endif
505 struct bfin_bf54xfb_info *info; 507 struct bfin_bf54xfb_info *info;
506 struct fb_info *fbinfo; 508 struct fb_info *fbinfo;
507 int ret; 509 int ret;
@@ -654,7 +656,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
654 printk(KERN_ERR DRIVER_NAME 656 printk(KERN_ERR DRIVER_NAME
655 ": unable to register backlight.\n"); 657 ": unable to register backlight.\n");
656 ret = -EINVAL; 658 ret = -EINVAL;
657 goto out9; 659 unregister_framebuffer(fbinfo);
660 goto out8;
658 } 661 }
659 662
660 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); 663 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
@@ -663,8 +666,6 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
663 666
664 return 0; 667 return 0;
665 668
666out9:
667 unregister_framebuffer(fbinfo);
668out8: 669out8:
669 free_irq(info->irq, info); 670 free_irq(info->irq, info);
670out7: 671out7:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index c2ec3dcd4e91..7a50272eaab9 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -420,7 +420,9 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
420 420
421static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) 421static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
422{ 422{
423#ifndef NO_BL_SUPPORT
423 struct backlight_properties props; 424 struct backlight_properties props;
425#endif
424 struct bfin_t350mcqbfb_info *info; 426 struct bfin_t350mcqbfb_info *info;
425 struct fb_info *fbinfo; 427 struct fb_info *fbinfo;
426 int ret; 428 int ret;
@@ -550,7 +552,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
550 printk(KERN_ERR DRIVER_NAME 552 printk(KERN_ERR DRIVER_NAME
551 ": unable to register backlight.\n"); 553 ": unable to register backlight.\n");
552 ret = -EINVAL; 554 ret = -EINVAL;
553 goto out9; 555 unregister_framebuffer(fbinfo);
556 goto out8;
554 } 557 }
555 558
556 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); 559 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops);
@@ -559,8 +562,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
559 562
560 return 0; 563 return 0;
561 564
562out9:
563 unregister_framebuffer(fbinfo);
564out8: 565out8:
565 free_irq(info->irq, info); 566 free_irq(info->irq, info);
566out7: 567out7:
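
Both framebuffer probes above drop the out9 label: when backlight registration fails, the framebuffer is unregistered on the spot and control falls back into the existing out8 unwind. A standalone sketch of that goto-based cleanup ordering, with invented resource names:

/*
 * Illustrative sketch: when a later registration fails, undo the earlier
 * one directly and reuse the existing unwind labels, which run in reverse
 * order of setup.
 */
#include <stdio.h>

static int register_fb(void)        { puts("fb registered");   return 0; }
static void unregister_fb(void)     { puts("fb unregistered"); }
static int register_backlight(void) { puts("backlight failed"); return -1; }
static int request_irq_line(void)   { puts("irq requested");   return 0; }
static void free_irq_line(void)     { puts("irq freed"); }

static int probe(void)
{
	int ret;

	ret = request_irq_line();
	if (ret)
		goto out;
	ret = register_fb();
	if (ret)
		goto out_irq;
	ret = register_backlight();
	if (ret) {
		/* undo the framebuffer here, then reuse the irq unwind label */
		unregister_fb();
		goto out_irq;
	}
	return 0;

out_irq:
	free_irq_line();
out:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}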
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d4471b4c0374..dce8c97b4333 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -71,7 +71,8 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
71 "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX", 71 "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX",
72 "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge", 72 "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge",
73 "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", 73 "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX",
74 "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P"}; 74 "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P",
75 "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"};
75 76
76#define CHIP_UNKNOWN 0x00 77#define CHIP_UNKNOWN 0x00
77#define CHIP_732_TRIO32 0x01 78#define CHIP_732_TRIO32 0x01
@@ -89,10 +90,14 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
89#define CHIP_356_VIRGE_GX2 0x0D 90#define CHIP_356_VIRGE_GX2 0x0D
90#define CHIP_357_VIRGE_GX2P 0x0E 91#define CHIP_357_VIRGE_GX2P 0x0E
91#define CHIP_359_VIRGE_GX2P 0x0F 92#define CHIP_359_VIRGE_GX2P 0x0F
93#define CHIP_360_TRIO3D_1X 0x10
94#define CHIP_362_TRIO3D_2X 0x11
95#define CHIP_368_TRIO3D_2X 0x12
92 96
93#define CHIP_XXX_TRIO 0x80 97#define CHIP_XXX_TRIO 0x80
94#define CHIP_XXX_TRIO64V2_DXGX 0x81 98#define CHIP_XXX_TRIO64V2_DXGX 0x81
95#define CHIP_XXX_VIRGE_DXGX 0x82 99#define CHIP_XXX_VIRGE_DXGX 0x82
100#define CHIP_36X_TRIO3D_1X_2X 0x83
96 101
97#define CHIP_UNDECIDED_FLAG 0x80 102#define CHIP_UNDECIDED_FLAG 0x80
98#define CHIP_MASK 0xFF 103#define CHIP_MASK 0xFF
@@ -324,6 +329,7 @@ static void s3fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
324 329
325static void s3_set_pixclock(struct fb_info *info, u32 pixclock) 330static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
326{ 331{
332 struct s3fb_info *par = info->par;
327 u16 m, n, r; 333 u16 m, n, r;
328 u8 regval; 334 u8 regval;
329 int rv; 335 int rv;
@@ -339,7 +345,13 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
339 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD); 345 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
340 346
341 /* Set S3 clock registers */ 347 /* Set S3 clock registers */
342 vga_wseq(NULL, 0x12, ((n - 2) | (r << 5))); 348 if (par->chip == CHIP_360_TRIO3D_1X ||
349 par->chip == CHIP_362_TRIO3D_2X ||
350 par->chip == CHIP_368_TRIO3D_2X) {
351 vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */
352 vga_wseq(NULL, 0x29, r >> 2); /* remaining highest bit of r */
353 } else
354 vga_wseq(NULL, 0x12, (n - 2) | (r << 5));
343 vga_wseq(NULL, 0x13, m - 2); 355 vga_wseq(NULL, 0x13, m - 2);
344 356
345 udelay(1000); 357 udelay(1000);
@@ -456,7 +468,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
456static int s3fb_set_par(struct fb_info *info) 468static int s3fb_set_par(struct fb_info *info)
457{ 469{
458 struct s3fb_info *par = info->par; 470 struct s3fb_info *par = info->par;
459 u32 value, mode, hmul, offset_value, screen_size, multiplex; 471 u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes;
460 u32 bpp = info->var.bits_per_pixel; 472 u32 bpp = info->var.bits_per_pixel;
461 473
462 if (bpp != 0) { 474 if (bpp != 0) {
@@ -518,7 +530,7 @@ static int s3fb_set_par(struct fb_info *info)
518 svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */ 530 svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */
519 svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */ 531 svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */
520 532
521 svga_wcrt_mask(0x5D, 0x00, 0x28); // Clear strange HSlen bits 533 svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */
522 534
523/* svga_wcrt_mask(0x58, 0x03, 0x03); */ 535/* svga_wcrt_mask(0x58, 0x03, 0x03); */
524 536
@@ -530,10 +542,14 @@ static int s3fb_set_par(struct fb_info *info)
 	pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
 	svga_wcrt_multi(s3_offset_regs, offset_value);
 
-	vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
-	vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
-	vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
-	vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
+	if (par->chip != CHIP_360_TRIO3D_1X &&
+	    par->chip != CHIP_362_TRIO3D_2X &&
+	    par->chip != CHIP_368_TRIO3D_2X) {
+		vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
+		vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
+		vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
+		vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
+	}
 
 	vga_wcrt(NULL, 0x3A, 0x35);
 	svga_wattr(0x33, 0x00);
@@ -570,6 +586,16 @@ static int s3fb_set_par(struct fb_info *info)
 		vga_wcrt(NULL, 0x66, 0x90);
 	}
 
+	if (par->chip == CHIP_360_TRIO3D_1X ||
+	    par->chip == CHIP_362_TRIO3D_2X ||
+	    par->chip == CHIP_368_TRIO3D_2X) {
+		dbytes = info->var.xres * ((bpp+7)/8);
+		vga_wcrt(NULL, 0x91, (dbytes + 7) / 8);
+		vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
+
+		vga_wcrt(NULL, 0x66, 0x81);
+	}
+
 	svga_wcrt_mask(0x31, 0x00, 0x40);
 	multiplex = 0;
 	hmul = 1;
@@ -615,11 +641,13 @@ static int s3fb_set_par(struct fb_info *info)
 		break;
 	case 3:
 		pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
-		if (info->var.pixclock > 20000) {
-			svga_wcrt_mask(0x50, 0x00, 0x30);
+		svga_wcrt_mask(0x50, 0x00, 0x30);
+		if (info->var.pixclock > 20000 ||
+		    par->chip == CHIP_360_TRIO3D_1X ||
+		    par->chip == CHIP_362_TRIO3D_2X ||
+		    par->chip == CHIP_368_TRIO3D_2X)
 			svga_wcrt_mask(0x67, 0x00, 0xF0);
-		} else {
-			svga_wcrt_mask(0x50, 0x00, 0x30);
+		else {
 			svga_wcrt_mask(0x67, 0x10, 0xF0);
 			multiplex = 1;
 		}
@@ -634,7 +662,10 @@ static int s3fb_set_par(struct fb_info *info)
 		} else {
 			svga_wcrt_mask(0x50, 0x10, 0x30);
 			svga_wcrt_mask(0x67, 0x30, 0xF0);
-			hmul = 2;
+			if (par->chip != CHIP_360_TRIO3D_1X &&
+			    par->chip != CHIP_362_TRIO3D_2X &&
+			    par->chip != CHIP_368_TRIO3D_2X)
+				hmul = 2;
 		}
 		break;
 	case 5:
@@ -647,7 +678,10 @@ static int s3fb_set_par(struct fb_info *info)
 		} else {
 			svga_wcrt_mask(0x50, 0x10, 0x30);
 			svga_wcrt_mask(0x67, 0x50, 0xF0);
-			hmul = 2;
+			if (par->chip != CHIP_360_TRIO3D_1X &&
+			    par->chip != CHIP_362_TRIO3D_2X &&
+			    par->chip != CHIP_368_TRIO3D_2X)
+				hmul = 2;
 		}
 		break;
 	case 6:
@@ -866,6 +900,17 @@ static int __devinit s3_identification(int chip)
 		return CHIP_385_VIRGE_GX;
 	}
 
+	if (chip == CHIP_36X_TRIO3D_1X_2X) {
+		switch (vga_rcrt(NULL, 0x2f)) {
+		case 0x00:
+			return CHIP_360_TRIO3D_1X;
+		case 0x01:
+			return CHIP_362_TRIO3D_2X;
+		case 0x02:
+			return CHIP_368_TRIO3D_2X;
+		}
+	}
+
 	return CHIP_UNKNOWN;
 }
 
@@ -930,17 +975,32 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	vga_wcrt(NULL, 0x38, 0x48);
 	vga_wcrt(NULL, 0x39, 0xA5);
 
-	/* Find how many physical memory there is on card */
-	/* 0x36 register is accessible even if other registers are locked */
-	regval = vga_rcrt(NULL, 0x36);
-	info->screen_size = s3_memsizes[regval >> 5] << 10;
-	info->fix.smem_len = info->screen_size;
-
+	/* Identify chip type */
 	par->chip = id->driver_data & CHIP_MASK;
 	par->rev = vga_rcrt(NULL, 0x2f);
 	if (par->chip & CHIP_UNDECIDED_FLAG)
 		par->chip = s3_identification(par->chip);
 
+	/* Find how many physical memory there is on card */
+	/* 0x36 register is accessible even if other registers are locked */
+	regval = vga_rcrt(NULL, 0x36);
+	if (par->chip == CHIP_360_TRIO3D_1X ||
+	    par->chip == CHIP_362_TRIO3D_2X ||
+	    par->chip == CHIP_368_TRIO3D_2X) {
+		switch ((regval & 0xE0) >> 5) {
+		case 0: /* 8MB -- only 4MB usable for display */
+		case 1: /* 4MB with 32-bit bus */
+		case 2: /* 4MB */
+			info->screen_size = 4 << 20;
+			break;
+		case 6: /* 2MB */
+			info->screen_size = 2 << 20;
+			break;
+		}
+	} else
+		info->screen_size = s3_memsizes[regval >> 5] << 10;
+	info->fix.smem_len = info->screen_size;
+
 	/* Find MCLK frequency */
 	regval = vga_rseq(NULL, 0x10);
 	par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2);
@@ -1131,6 +1191,7 @@ static struct pci_device_id s3_devices[] __devinitdata = {
 	{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2},
 	{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P},
 	{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P},
+	{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X},
 
 	{0, 0, 0, 0, 0, 0, 0}
 };
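Editor's note: the hunks above program the S3 PLL differently on the Trio3D parts, splitting the 'r' divider between sequencer registers SR12 and SR29 instead of packing n and r into SR12 alone. The following is a minimal, self-contained userspace sketch (not part of the patch; the divisor values are hypothetical) that only illustrates the bit packing used by the two paths.

	/* Illustrative only: reproduce the SR12/SR29 packing from the patch. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int m = 65, n = 14, r = 5;	/* hypothetical PLL divisors */

		/* Trio3D/1X/2X: SR12 = (n - 2) plus low two bits of r,
		 * SR29 = remaining high bit of r. */
		unsigned int sr12_trio3d = (n - 2) | ((r & 3) << 6);
		unsigned int sr29_trio3d = r >> 2;

		/* Older S3 chips: SR12 = (n - 2) plus all three bits of r. */
		unsigned int sr12_legacy = (n - 2) | (r << 5);

		printf("Trio3D: SR12=0x%02x SR29=0x%02x SR13=0x%02x\n",
		       sr12_trio3d, sr29_trio3d, m - 2);
		printf("Legacy: SR12=0x%02x SR13=0x%02x\n", sr12_legacy, m - 2);
		return 0;
	}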
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 2bc40e682f95..1082541358f0 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -578,14 +578,9 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
 		break;
 
 	case VIAFB_SET_GAMMA_LUT:
-		viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL);
-		if (!viafb_gamma_table)
-			return -ENOMEM;
-		if (copy_from_user(viafb_gamma_table, argp,
-				256 * sizeof(u32))) {
-			kfree(viafb_gamma_table);
-			return -EFAULT;
-		}
+		viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32));
+		if (IS_ERR(viafb_gamma_table))
+			return PTR_ERR(viafb_gamma_table);
 		viafb_set_gamma_table(viafb_bpp, viafb_gamma_table);
 		kfree(viafb_gamma_table);
 		break;