Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_throttling.c | 17
-rw-r--r--  drivers/base/cpu.c | 4
-rw-r--r--  drivers/char/nvram.c | 1
-rw-r--r--  drivers/char/tty_io.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 14
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 79
-rw-r--r--  drivers/dca/dca-core.c | 131
-rw-r--r--  drivers/dca/dca-sysfs.c | 3
-rw-r--r--  drivers/dma/Kconfig | 37
-rw-r--r--  drivers/dma/Makefile | 3
-rw-r--r--  drivers/dma/dmaengine.c | 35
-rw-r--r--  drivers/dma/dmatest.c | 444
-rw-r--r--  drivers/dma/dw_dmac.c | 1122
-rw-r--r--  drivers/dma/dw_dmac_regs.h | 225
-rw-r--r--  drivers/dma/fsldma.c | 38
-rw-r--r--  drivers/dma/ioat.c | 15
-rw-r--r--  drivers/dma/ioat_dca.c | 244
-rw-r--r--  drivers/dma/ioat_dma.c | 402
-rw-r--r--  drivers/dma/ioatdma.h | 28
-rw-r--r--  drivers/dma/ioatdma_hw.h | 1
-rw-r--r--  drivers/dma/ioatdma_registers.h | 20
-rw-r--r--  drivers/dma/iop-adma.c | 53
-rw-r--r--  drivers/dma/mv_xor.c | 1375
-rw-r--r--  drivers/dma/mv_xor.h | 183
-rw-r--r--  drivers/firmware/dcdbas.c | 3
-rw-r--r--  drivers/hid/hid-core.c | 10
-rw-r--r--  drivers/hid/hid-input-quirks.c | 40
-rw-r--r--  drivers/hid/hid-input.c | 3
-rw-r--r--  drivers/hid/hidraw.c | 48
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 22
-rw-r--r--  drivers/hid/usbhid/hiddev.c | 14
-rw-r--r--  drivers/hid/usbhid/usbkbd.c | 10
-rw-r--r--  drivers/hid/usbhid/usbmouse.c | 8
-rw-r--r--  drivers/ide/Kconfig | 2
-rw-r--r--  drivers/ide/arm/icside.c | 71
-rw-r--r--  drivers/ide/arm/ide_arm.c | 14
-rw-r--r--  drivers/ide/arm/palm_bk3710.c | 30
-rw-r--r--  drivers/ide/arm/rapide.c | 24
-rw-r--r--  drivers/ide/h8300/ide-h8300.c | 48
-rw-r--r--  drivers/ide/ide-atapi.c | 58
-rw-r--r--  drivers/ide/ide-cd.c | 157
-rw-r--r--  drivers/ide/ide-cd.h | 38
-rw-r--r--  drivers/ide/ide-cd_ioctl.c | 35
-rw-r--r--  drivers/ide/ide-disk.c | 2
-rw-r--r--  drivers/ide/ide-dma.c | 103
-rw-r--r--  drivers/ide/ide-floppy.c | 90
-rw-r--r--  drivers/ide/ide-generic.c | 73
-rw-r--r--  drivers/ide/ide-io.c | 42
-rw-r--r--  drivers/ide/ide-iops.c | 230
-rw-r--r--  drivers/ide/ide-lib.c | 17
-rw-r--r--  drivers/ide/ide-pnp.c | 29
-rw-r--r--  drivers/ide/ide-probe.c | 366
-rw-r--r--  drivers/ide/ide-proc.c | 4
-rw-r--r--  drivers/ide/ide-tape.c | 127
-rw-r--r--  drivers/ide/ide-taskfile.c | 38
-rw-r--r--  drivers/ide/ide.c | 49
-rw-r--r--  drivers/ide/legacy/buddha.c | 24
-rw-r--r--  drivers/ide/legacy/falconide.c | 56
-rw-r--r--  drivers/ide/legacy/gayle.c | 39
-rw-r--r--  drivers/ide/legacy/ide-4drives.c | 20
-rw-r--r--  drivers/ide/legacy/ide-cs.c | 54
-rw-r--r--  drivers/ide/legacy/ide_platform.c | 32
-rw-r--r--  drivers/ide/legacy/macide.c | 15
-rw-r--r--  drivers/ide/legacy/q40ide.c | 47
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c | 56
-rw-r--r--  drivers/ide/mips/swarm.c | 24
-rw-r--r--  drivers/ide/pci/aec62xx.c | 5
-rw-r--r--  drivers/ide/pci/alim15x3.c | 12
-rw-r--r--  drivers/ide/pci/amd74xx.c | 1
-rw-r--r--  drivers/ide/pci/cmd640.c | 29
-rw-r--r--  drivers/ide/pci/cmd64x.c | 12
-rw-r--r--  drivers/ide/pci/cs5520.c | 41
-rw-r--r--  drivers/ide/pci/cs5535.c | 3
-rw-r--r--  drivers/ide/pci/delkin_cb.c | 25
-rw-r--r--  drivers/ide/pci/hpt34x.c | 1
-rw-r--r--  drivers/ide/pci/hpt366.c | 23
-rw-r--r--  drivers/ide/pci/ns87415.c | 115
-rw-r--r--  drivers/ide/pci/pdc202xx_old.c | 3
-rw-r--r--  drivers/ide/pci/piix.c | 4
-rw-r--r--  drivers/ide/pci/scc_pata.c | 139
-rw-r--r--  drivers/ide/pci/serverworks.c | 4
-rw-r--r--  drivers/ide/pci/sgiioc4.c | 65
-rw-r--r--  drivers/ide/pci/siimage.c | 6
-rw-r--r--  drivers/ide/pci/sl82c105.c | 4
-rw-r--r--  drivers/ide/pci/tc86c001.c | 16
-rw-r--r--  drivers/ide/pci/via82cxxx.c | 1
-rw-r--r--  drivers/ide/ppc/pmac.c | 222
-rw-r--r--  drivers/ide/setup-pci.c | 109
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 4
-rw-r--r--  drivers/input/keyboard/tosakbd.c | 2
-rw-r--r--  drivers/mfd/Kconfig | 11
-rw-r--r--  drivers/mfd/Makefile | 4
-rw-r--r--  drivers/mfd/mfd-core.c | 114
-rw-r--r--  drivers/mfd/tc6393xb.c | 600
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c | 3
-rw-r--r--  drivers/mmc/card/mmc_test.c | 225
-rw-r--r--  drivers/mmc/card/queue.c | 97
-rw-r--r--  drivers/mmc/host/au1xmmc.c | 54
-rw-r--r--  drivers/mmc/host/pxamci.c | 2
-rw-r--r--  drivers/mmc/host/s3cmci.c | 50
-rw-r--r--  drivers/mmc/host/sdhci.c | 167
-rw-r--r--  drivers/mmc/host/sdhci.h | 7
-rw-r--r--  drivers/mtd/nand/cmx270_nand.c | 79
-rw-r--r--  drivers/net/smc91x.c | 94
-rw-r--r--  drivers/net/smc91x.h | 76
-rw-r--r--  drivers/pcmcia/Kconfig | 3
-rw-r--r--  drivers/pcmcia/Makefile | 1
-rw-r--r--  drivers/pcmcia/pxa2xx_cm_x270.c | 93
-rw-r--r--  drivers/pcmcia/pxa2xx_palmtx.c | 118
-rw-r--r--  drivers/power/Kconfig | 6
-rw-r--r--  drivers/power/Makefile | 1
-rw-r--r--  drivers/power/palmtx_battery.c | 198
-rw-r--r--  drivers/scsi/ide-scsi.c | 32
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_core.c | 95
-rw-r--r--  drivers/serial/mpsc.c | 148
-rw-r--r--  drivers/usb/gadget/pxa25x_udc.c | 6
-rw-r--r--  drivers/video/pxafb.c | 64
119 files changed, 7834 insertions(+), 2014 deletions(-)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0622ace05220..a2c3f9cfa549 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
 	cpumask_t saved_mask;
+	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret;
 
 	if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 	 * Migrate task to the cpu pointed by pr.
 	 */
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+	cpumask_of_cpu_ptr_next(new_mask, pr->id);
+	set_cpus_allowed_ptr(current, new_mask);
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
 	set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
 	cpumask_t saved_mask;
+	cpumask_of_cpu_ptr_declare(new_mask);
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu_mask(i, online_throttling_cpus) {
+	for_each_cpu_mask_nr(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 			&t_state);
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+		cpumask_of_cpu_ptr_next(new_mask, pr->id);
+		set_cpus_allowed_ptr(current, new_mask);
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 				t_state.target_state);
 	} else {
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu_mask(i, online_throttling_cpus) {
+		for_each_cpu_mask_nr(i, online_throttling_cpus) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 				continue;
 			}
 			t_state.cpu = i;
-			set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+			cpumask_of_cpu_ptr_next(new_mask, i);
+			set_cpus_allowed_ptr(current, new_mask);
 			ret = match_pr->throttling.
 					acpi_processor_set_throttling(
 					match_pr, t_state.target_state);
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
	 */
-	for_each_cpu_mask(i, online_throttling_cpus) {
+	for_each_cpu_mask_nr(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 			&t_state);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 20537d507909..64f5d54f7edc 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -121,14 +121,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
 { \
 	return print_cpus_map(buf, &cpu_##type##_map); \
 } \
-struct sysdev_class_attribute attr_##type##_map = \
+static struct sysdev_class_attribute attr_##type##_map = \
 	_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
 
 print_cpus_func(online);
 print_cpus_func(possible);
 print_cpus_func(present);
 
-struct sysdev_class_attribute *cpu_state_attr[] = {
+static struct sysdev_class_attribute *cpu_state_attr[] = {
 	&attr_online_map,
 	&attr_possible_map,
 	&attr_present_map,
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index a22662b6a1a5..39f6357e3b5d 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -107,7 +107,6 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/spinlock.h>
-#include <linux/smp_lock.h>
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 15e597d03002..fa48dba5ba5e 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -915,7 +915,7 @@ static void tty_reset_termios(struct tty_struct *tty)
  * do_tty_hangup	-	actual handler for hangup events
  * @work: tty device
  *
-k * This can be called by the "eventd" kernel thread.  That is process
+ * This can be called by the "eventd" kernel thread.  That is process
  * synchronous but doesn't hold any locks, so we need to make sure we
  * have the appropriate locks for what we're doing.
  *
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ee1df0d45e81..8d6a3ff02672 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
 	ssize_t i = 0;
 	unsigned int cpu;
 
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		if (i)
 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 #endif
 
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		if (cpu == j)
 			continue;
 
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/* symlink affected CPUs */
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		if (j == cpu)
 			continue;
 		if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask(j, policy->cpus)
+	for_each_cpu_mask_nr(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	 * the sysfs links afterwards.
 	 */
 	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask(j, data->cpus) {
+		for_each_cpu_mask_nr(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask(j, data->cpus) {
+		for_each_cpu_mask_nr(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2..fe565ee43757 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
-		for_each_cpu_mask(j, policy->cpus) {
+		for_each_cpu_mask_nr(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda382..33855cb3cf16 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 	/* Get Idle Time */
 	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		cputime64_t total_idle_ticks;
 		unsigned int tmp_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
-		for_each_cpu_mask(j, policy->cpus) {
+		for_each_cpu_mask_nr(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01a41a1..32244aa7cc0c 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -30,16 +30,18 @@
30/** 30/**
31 * A few values needed by the userspace governor 31 * A few values needed by the userspace governor
32 */ 32 */
33static unsigned int cpu_max_freq[NR_CPUS]; 33static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
34static unsigned int cpu_min_freq[NR_CPUS]; 34static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
35static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */ 35static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
36static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */ 36static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
37static unsigned int cpu_is_managed[NR_CPUS]; 37 userspace */
38static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
38 39
39static DEFINE_MUTEX (userspace_mutex); 40static DEFINE_MUTEX (userspace_mutex);
40static int cpus_using_userspace_governor; 41static int cpus_using_userspace_governor;
41 42
42#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) 43#define dprintk(msg...) \
44 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
43 45
44/* keep track of frequency transitions */ 46/* keep track of frequency transitions */
45static int 47static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
48{ 50{
49 struct cpufreq_freqs *freq = data; 51 struct cpufreq_freqs *freq = data;
50 52
51 if (!cpu_is_managed[freq->cpu]) 53 if (!per_cpu(cpu_is_managed, freq->cpu))
52 return 0; 54 return 0;
53 55
54 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", 56 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
55 freq->cpu, freq->new); 57 freq->cpu, freq->new);
56 cpu_cur_freq[freq->cpu] = freq->new; 58 per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
57 59
58 return 0; 60 return 0;
59} 61}
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
77 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 79 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
78 80
79 mutex_lock(&userspace_mutex); 81 mutex_lock(&userspace_mutex);
80 if (!cpu_is_managed[policy->cpu]) 82 if (!per_cpu(cpu_is_managed, policy->cpu))
81 goto err; 83 goto err;
82 84
83 cpu_set_freq[policy->cpu] = freq; 85 per_cpu(cpu_set_freq, policy->cpu) = freq;
84 86
85 if (freq < cpu_min_freq[policy->cpu]) 87 if (freq < per_cpu(cpu_min_freq, policy->cpu))
86 freq = cpu_min_freq[policy->cpu]; 88 freq = per_cpu(cpu_min_freq, policy->cpu);
87 if (freq > cpu_max_freq[policy->cpu]) 89 if (freq > per_cpu(cpu_max_freq, policy->cpu))
88 freq = cpu_max_freq[policy->cpu]; 90 freq = per_cpu(cpu_max_freq, policy->cpu);
89 91
90 /* 92 /*
91 * We're safe from concurrent calls to ->target() here 93 * We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
104 106
105static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) 107static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
106{ 108{
107 return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]); 109 return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
108} 110}
109 111
110static int cpufreq_governor_userspace(struct cpufreq_policy *policy, 112static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
127 } 129 }
128 cpus_using_userspace_governor++; 130 cpus_using_userspace_governor++;
129 131
130 cpu_is_managed[cpu] = 1; 132 per_cpu(cpu_is_managed, cpu) = 1;
131 cpu_min_freq[cpu] = policy->min; 133 per_cpu(cpu_min_freq, cpu) = policy->min;
132 cpu_max_freq[cpu] = policy->max; 134 per_cpu(cpu_max_freq, cpu) = policy->max;
133 cpu_cur_freq[cpu] = policy->cur; 135 per_cpu(cpu_cur_freq, cpu) = policy->cur;
134 cpu_set_freq[cpu] = policy->cur; 136 per_cpu(cpu_set_freq, cpu) = policy->cur;
135 dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]); 137 dprintk("managing cpu %u started "
138 "(%u - %u kHz, currently %u kHz)\n",
139 cpu,
140 per_cpu(cpu_min_freq, cpu),
141 per_cpu(cpu_max_freq, cpu),
142 per_cpu(cpu_cur_freq, cpu));
136 143
137 mutex_unlock(&userspace_mutex); 144 mutex_unlock(&userspace_mutex);
138 break; 145 break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
145 CPUFREQ_TRANSITION_NOTIFIER); 152 CPUFREQ_TRANSITION_NOTIFIER);
146 } 153 }
147 154
148 cpu_is_managed[cpu] = 0; 155 per_cpu(cpu_is_managed, cpu) = 0;
149 cpu_min_freq[cpu] = 0; 156 per_cpu(cpu_min_freq, cpu) = 0;
150 cpu_max_freq[cpu] = 0; 157 per_cpu(cpu_max_freq, cpu) = 0;
151 cpu_set_freq[cpu] = 0; 158 per_cpu(cpu_set_freq, cpu) = 0;
152 dprintk("managing cpu %u stopped\n", cpu); 159 dprintk("managing cpu %u stopped\n", cpu);
153 mutex_unlock(&userspace_mutex); 160 mutex_unlock(&userspace_mutex);
154 break; 161 break;
155 case CPUFREQ_GOV_LIMITS: 162 case CPUFREQ_GOV_LIMITS:
156 mutex_lock(&userspace_mutex); 163 mutex_lock(&userspace_mutex);
157 dprintk("limit event for cpu %u: %u - %u kHz," 164 dprintk("limit event for cpu %u: %u - %u kHz, "
158 "currently %u kHz, last set to %u kHz\n", 165 "currently %u kHz, last set to %u kHz\n",
159 cpu, policy->min, policy->max, 166 cpu, policy->min, policy->max,
160 cpu_cur_freq[cpu], cpu_set_freq[cpu]); 167 per_cpu(cpu_cur_freq, cpu),
161 if (policy->max < cpu_set_freq[cpu]) { 168 per_cpu(cpu_set_freq, cpu));
169 if (policy->max < per_cpu(cpu_set_freq, cpu)) {
162 __cpufreq_driver_target(policy, policy->max, 170 __cpufreq_driver_target(policy, policy->max,
163 CPUFREQ_RELATION_H); 171 CPUFREQ_RELATION_H);
164 } 172 } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
165 else if (policy->min > cpu_set_freq[cpu]) {
166 __cpufreq_driver_target(policy, policy->min, 173 __cpufreq_driver_target(policy, policy->min,
167 CPUFREQ_RELATION_L); 174 CPUFREQ_RELATION_L);
168 } 175 } else {
169 else { 176 __cpufreq_driver_target(policy,
170 __cpufreq_driver_target(policy, cpu_set_freq[cpu], 177 per_cpu(cpu_set_freq, cpu),
171 CPUFREQ_RELATION_L); 178 CPUFREQ_RELATION_L);
172 } 179 }
173 cpu_min_freq[cpu] = policy->min; 180 per_cpu(cpu_min_freq, cpu) = policy->min;
174 cpu_max_freq[cpu] = policy->max; 181 per_cpu(cpu_max_freq, cpu) = policy->max;
175 cpu_cur_freq[cpu] = policy->cur; 182 per_cpu(cpu_cur_freq, cpu) = policy->cur;
176 mutex_unlock(&userspace_mutex); 183 mutex_unlock(&userspace_mutex);
177 break; 184 break;
178 } 185 }
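
Note: the cpufreq_userspace change above is essentially a mechanical conversion from NR_CPUS-sized arrays to per-CPU variables. As a rough illustration (not part of the patch; the identifiers below are made up), the declaration and access pattern changes like this:

#include <linux/percpu.h>

/* old style: one array slot per possible CPU */
static unsigned int example_cur_freq_array[NR_CPUS];

/* new style: a true per-CPU variable */
static DEFINE_PER_CPU(unsigned int, example_cur_freq);

static void example_record_freq(unsigned int cpu, unsigned int khz)
{
	example_cur_freq_array[cpu] = khz;	/* old: plain array indexing */
	per_cpu(example_cur_freq, cpu) = khz;	/* new: per_cpu() accessor */
}
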
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bf5b92f86df7..ec249d2db633 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,13 +28,29 @@
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/dca.h> 29#include <linux/dca.h>
30 30
31MODULE_LICENSE("GPL"); 31#define DCA_VERSION "1.4"
32 32
33/* For now we're assuming a single, global, DCA provider for the system. */ 33MODULE_VERSION(DCA_VERSION);
34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Intel Corporation");
34 36
35static DEFINE_SPINLOCK(dca_lock); 37static DEFINE_SPINLOCK(dca_lock);
36 38
37static struct dca_provider *global_dca = NULL; 39static LIST_HEAD(dca_providers);
40
41static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
42{
43 struct dca_provider *dca, *ret = NULL;
44
45 list_for_each_entry(dca, &dca_providers, node) {
46 if ((!dev) || (dca->ops->dev_managed(dca, dev))) {
47 ret = dca;
48 break;
49 }
50 }
51
52 return ret;
53}
38 54
39/** 55/**
40 * dca_add_requester - add a dca client to the list 56 * dca_add_requester - add a dca client to the list
@@ -42,25 +58,39 @@ static struct dca_provider *global_dca = NULL;
42 */ 58 */
43int dca_add_requester(struct device *dev) 59int dca_add_requester(struct device *dev)
44{ 60{
45 int err, slot; 61 struct dca_provider *dca;
62 int err, slot = -ENODEV;
46 63
47 if (!global_dca) 64 if (!dev)
48 return -ENODEV; 65 return -EFAULT;
49 66
50 spin_lock(&dca_lock); 67 spin_lock(&dca_lock);
51 slot = global_dca->ops->add_requester(global_dca, dev); 68
52 spin_unlock(&dca_lock); 69 /* check if the requester has not been added already */
53 if (slot < 0) 70 dca = dca_find_provider_by_dev(dev);
71 if (dca) {
72 spin_unlock(&dca_lock);
73 return -EEXIST;
74 }
75
76 list_for_each_entry(dca, &dca_providers, node) {
77 slot = dca->ops->add_requester(dca, dev);
78 if (slot >= 0)
79 break;
80 }
81 if (slot < 0) {
82 spin_unlock(&dca_lock);
54 return slot; 83 return slot;
84 }
55 85
56 err = dca_sysfs_add_req(global_dca, dev, slot); 86 err = dca_sysfs_add_req(dca, dev, slot);
57 if (err) { 87 if (err) {
58 spin_lock(&dca_lock); 88 dca->ops->remove_requester(dca, dev);
59 global_dca->ops->remove_requester(global_dca, dev);
60 spin_unlock(&dca_lock); 89 spin_unlock(&dca_lock);
61 return err; 90 return err;
62 } 91 }
63 92
93 spin_unlock(&dca_lock);
64 return 0; 94 return 0;
65} 95}
66EXPORT_SYMBOL_GPL(dca_add_requester); 96EXPORT_SYMBOL_GPL(dca_add_requester);
@@ -71,30 +101,78 @@ EXPORT_SYMBOL_GPL(dca_add_requester);
71 */ 101 */
72int dca_remove_requester(struct device *dev) 102int dca_remove_requester(struct device *dev)
73{ 103{
104 struct dca_provider *dca;
74 int slot; 105 int slot;
75 if (!global_dca) 106
76 return -ENODEV; 107 if (!dev)
108 return -EFAULT;
77 109
78 spin_lock(&dca_lock); 110 spin_lock(&dca_lock);
79 slot = global_dca->ops->remove_requester(global_dca, dev); 111 dca = dca_find_provider_by_dev(dev);
80 spin_unlock(&dca_lock); 112 if (!dca) {
81 if (slot < 0) 113 spin_unlock(&dca_lock);
114 return -ENODEV;
115 }
116 slot = dca->ops->remove_requester(dca, dev);
117 if (slot < 0) {
118 spin_unlock(&dca_lock);
82 return slot; 119 return slot;
120 }
83 121
84 dca_sysfs_remove_req(global_dca, slot); 122 dca_sysfs_remove_req(dca, slot);
123
124 spin_unlock(&dca_lock);
85 return 0; 125 return 0;
86} 126}
87EXPORT_SYMBOL_GPL(dca_remove_requester); 127EXPORT_SYMBOL_GPL(dca_remove_requester);
88 128
89/** 129/**
90 * dca_get_tag - return the dca tag for the given cpu 130 * dca_common_get_tag - return the dca tag (serves both new and old api)
131 * @dev - the device that wants dca service
91 * @cpu - the cpuid as returned by get_cpu() 132 * @cpu - the cpuid as returned by get_cpu()
92 */ 133 */
93u8 dca_get_tag(int cpu) 134u8 dca_common_get_tag(struct device *dev, int cpu)
94{ 135{
95 if (!global_dca) 136 struct dca_provider *dca;
137 u8 tag;
138
139 spin_lock(&dca_lock);
140
141 dca = dca_find_provider_by_dev(dev);
142 if (!dca) {
143 spin_unlock(&dca_lock);
96 return -ENODEV; 144 return -ENODEV;
97 return global_dca->ops->get_tag(global_dca, cpu); 145 }
146 tag = dca->ops->get_tag(dca, dev, cpu);
147
148 spin_unlock(&dca_lock);
149 return tag;
150}
151
152/**
153 * dca3_get_tag - return the dca tag to the requester device
154 * for the given cpu (new api)
155 * @dev - the device that wants dca service
156 * @cpu - the cpuid as returned by get_cpu()
157 */
158u8 dca3_get_tag(struct device *dev, int cpu)
159{
160 if (!dev)
161 return -EFAULT;
162
163 return dca_common_get_tag(dev, cpu);
164}
165EXPORT_SYMBOL_GPL(dca3_get_tag);
166
167/**
168 * dca_get_tag - return the dca tag for the given cpu (old api)
169 * @cpu - the cpuid as returned by get_cpu()
170 */
171u8 dca_get_tag(int cpu)
172{
173 struct device *dev = NULL;
174
175 return dca_common_get_tag(dev, cpu);
98} 176}
99EXPORT_SYMBOL_GPL(dca_get_tag); 177EXPORT_SYMBOL_GPL(dca_get_tag);
100 178
@@ -140,12 +218,10 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
140{ 218{
141 int err; 219 int err;
142 220
143 if (global_dca)
144 return -EEXIST;
145 err = dca_sysfs_add_provider(dca, dev); 221 err = dca_sysfs_add_provider(dca, dev);
146 if (err) 222 if (err)
147 return err; 223 return err;
148 global_dca = dca; 224 list_add(&dca->node, &dca_providers);
149 blocking_notifier_call_chain(&dca_provider_chain, 225 blocking_notifier_call_chain(&dca_provider_chain,
150 DCA_PROVIDER_ADD, NULL); 226 DCA_PROVIDER_ADD, NULL);
151 return 0; 227 return 0;
@@ -158,11 +234,9 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
158 */ 234 */
159void unregister_dca_provider(struct dca_provider *dca) 235void unregister_dca_provider(struct dca_provider *dca)
160{ 236{
161 if (!global_dca)
162 return;
163 blocking_notifier_call_chain(&dca_provider_chain, 237 blocking_notifier_call_chain(&dca_provider_chain,
164 DCA_PROVIDER_REMOVE, NULL); 238 DCA_PROVIDER_REMOVE, NULL);
165 global_dca = NULL; 239 list_del(&dca->node);
166 dca_sysfs_remove_provider(dca); 240 dca_sysfs_remove_provider(dca);
167} 241}
168EXPORT_SYMBOL_GPL(unregister_dca_provider); 242EXPORT_SYMBOL_GPL(unregister_dca_provider);
@@ -187,6 +261,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);
187 261
188static int __init dca_init(void) 262static int __init dca_init(void)
189{ 263{
264 printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION);
190 return dca_sysfs_init(); 265 return dca_sysfs_init();
191} 266}
192 267
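
Note: with dca-core.c now keeping a list of providers and looking them up by device, a requester driver identifies itself by its struct device. A minimal, hypothetical requester-side sketch (not taken from the patch) using only the calls exported above:

#include <linux/dca.h>

static void example_setup_dca(struct device *dev, int cpu)
{
	u8 tag;

	if (dca_add_requester(dev) < 0)
		return;			/* no provider manages this device */

	/* new interface: the device selects which provider answers */
	tag = dca3_get_tag(dev, cpu);
	/* ... program 'tag' into the device's DCA hint register ... */

	/* the old single-provider dca_get_tag(cpu) remains available */

	dca_remove_requester(dev);
}
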
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 9a70377bfb34..7af4b403bd2d 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -13,10 +13,11 @@ static spinlock_t dca_idr_lock;
 int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
 {
 	struct device *cd;
+	static int req_count;
 
 	cd = device_create_drvdata(dca_class, dca->cd,
 				   MKDEV(0, slot + 1), NULL,
-				   "requester%d", slot);
+				   "requester%d", req_count++);
 	if (IS_ERR(cd))
 		return PTR_ERR(cd);
 	return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6239c3df30ac..cd303901eb5b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,13 +4,14 @@
4 4
5menuconfig DMADEVICES 5menuconfig DMADEVICES
6 bool "DMA Engine support" 6 bool "DMA Engine support"
7 depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC 7 depends on !HIGHMEM64G && HAS_DMA
8 depends on !HIGHMEM64G
9 help 8 help
10 DMA engines can do asynchronous data transfers without 9 DMA engines can do asynchronous data transfers without
11 involving the host CPU. Currently, this framework can be 10 involving the host CPU. Currently, this framework can be
12 used to offload memory copies in the network stack and 11 used to offload memory copies in the network stack and
13 RAID operations in the MD driver. 12 RAID operations in the MD driver. This menu only presents
13 DMA Device drivers supported by the configured arch, it may
14 be empty in some cases.
14 15
15if DMADEVICES 16if DMADEVICES
16 17
@@ -37,6 +38,15 @@ config INTEL_IOP_ADMA
37 help 38 help
38 Enable support for the Intel(R) IOP Series RAID engines. 39 Enable support for the Intel(R) IOP Series RAID engines.
39 40
41config DW_DMAC
42 tristate "Synopsys DesignWare AHB DMA support"
43 depends on AVR32
44 select DMA_ENGINE
45 default y if CPU_AT32AP7000
46 help
47 Support the Synopsys DesignWare AHB DMA controller. This
48 can be integrated in chips such as the Atmel AT32ap7000.
49
40config FSL_DMA 50config FSL_DMA
41 bool "Freescale MPC85xx/MPC83xx DMA support" 51 bool "Freescale MPC85xx/MPC83xx DMA support"
42 depends on PPC 52 depends on PPC
@@ -46,6 +56,14 @@ config FSL_DMA
46 MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. 56 MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
47 The MPC8349, MPC8360 is also supported. 57 The MPC8349, MPC8360 is also supported.
48 58
59config MV_XOR
60 bool "Marvell XOR engine support"
61 depends on PLAT_ORION
62 select ASYNC_CORE
63 select DMA_ENGINE
64 ---help---
65 Enable support for the Marvell XOR engine.
66
49config DMA_ENGINE 67config DMA_ENGINE
50 bool 68 bool
51 69
@@ -55,10 +73,19 @@ comment "DMA Clients"
55config NET_DMA 73config NET_DMA
56 bool "Network: TCP receive copy offload" 74 bool "Network: TCP receive copy offload"
57 depends on DMA_ENGINE && NET 75 depends on DMA_ENGINE && NET
76 default (INTEL_IOATDMA || FSL_DMA)
58 help 77 help
59 This enables the use of DMA engines in the network stack to 78 This enables the use of DMA engines in the network stack to
60 offload receive copy-to-user operations, freeing CPU cycles. 79 offload receive copy-to-user operations, freeing CPU cycles.
61 Since this is the main user of the DMA engine, it should be enabled; 80
62 say Y here. 81 Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
82 say N.
83
84config DMATEST
85 tristate "DMA Test client"
86 depends on DMA_ENGINE
87 help
88 Simple DMA test client. Say N unless you're debugging a
89 DMA Device driver.
63 90
64endif 91endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c8036d945902..14f59527d4f6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,6 +1,9 @@
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
+obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
 ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_DW_DMAC) += dw_dmac.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 97b329e76798..dc003a3a787d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client)
169 enum dma_state_client ack; 169 enum dma_state_client ack;
170 170
171 /* Find a channel */ 171 /* Find a channel */
172 list_for_each_entry(device, &dma_device_list, global_node) 172 list_for_each_entry(device, &dma_device_list, global_node) {
173 /* Does the client require a specific DMA controller? */
174 if (client->slave && client->slave->dma_dev
175 && client->slave->dma_dev != device->dev)
176 continue;
177
173 list_for_each_entry(chan, &device->channels, device_node) { 178 list_for_each_entry(chan, &device->channels, device_node) {
174 if (!dma_chan_satisfies_mask(chan, client->cap_mask)) 179 if (!dma_chan_satisfies_mask(chan, client->cap_mask))
175 continue; 180 continue;
176 181
177 desc = chan->device->device_alloc_chan_resources(chan); 182 desc = chan->device->device_alloc_chan_resources(
183 chan, client);
178 if (desc >= 0) { 184 if (desc >= 0) {
179 ack = client->event_callback(client, 185 ack = client->event_callback(client,
180 chan, 186 chan,
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client)
183 /* we are done once this client rejects 189 /* we are done once this client rejects
184 * an available resource 190 * an available resource
185 */ 191 */
186 if (ack == DMA_ACK) 192 if (ack == DMA_ACK) {
187 dma_chan_get(chan); 193 dma_chan_get(chan);
188 else if (ack == DMA_NAK) 194 chan->client_count++;
195 } else if (ack == DMA_NAK)
189 return; 196 return;
190 } 197 }
191 } 198 }
199 }
192} 200}
193 201
194enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 202enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
272 /* client was holding resources for this channel so 280 /* client was holding resources for this channel so
273 * free it 281 * free it
274 */ 282 */
275 if (ack == DMA_ACK) 283 if (ack == DMA_ACK) {
276 dma_chan_put(chan); 284 dma_chan_put(chan);
285 chan->client_count--;
286 }
277 } 287 }
278 288
279 mutex_unlock(&dma_list_mutex); 289 mutex_unlock(&dma_list_mutex);
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
285 */ 295 */
286void dma_async_client_register(struct dma_client *client) 296void dma_async_client_register(struct dma_client *client)
287{ 297{
298 /* validate client data */
299 BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
300 !client->slave);
301
288 mutex_lock(&dma_list_mutex); 302 mutex_lock(&dma_list_mutex);
289 list_add_tail(&client->global_node, &dma_client_list); 303 list_add_tail(&client->global_node, &dma_client_list);
290 mutex_unlock(&dma_list_mutex); 304 mutex_unlock(&dma_list_mutex);
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client)
313 ack = client->event_callback(client, chan, 327 ack = client->event_callback(client, chan,
314 DMA_RESOURCE_REMOVED); 328 DMA_RESOURCE_REMOVED);
315 329
316 if (ack == DMA_ACK) 330 if (ack == DMA_ACK) {
317 dma_chan_put(chan); 331 dma_chan_put(chan);
332 chan->client_count--;
333 }
318 } 334 }
319 335
320 list_del(&client->global_node); 336 list_del(&client->global_node);
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device)
359 !device->device_prep_dma_memset); 375 !device->device_prep_dma_memset);
360 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 376 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
361 !device->device_prep_dma_interrupt); 377 !device->device_prep_dma_interrupt);
378 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
379 !device->device_prep_slave_sg);
380 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
381 !device->device_terminate_all);
362 382
363 BUG_ON(!device->device_alloc_chan_resources); 383 BUG_ON(!device->device_alloc_chan_resources);
364 BUG_ON(!device->device_free_chan_resources); 384 BUG_ON(!device->device_free_chan_resources);
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device)
378 398
379 chan->chan_id = chancnt++; 399 chan->chan_id = chancnt++;
380 chan->dev.class = &dma_devclass; 400 chan->dev.class = &dma_devclass;
381 chan->dev.parent = NULL; 401 chan->dev.parent = device->dev;
382 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", 402 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
383 device->dev_id, chan->chan_id); 403 device->dev_id, chan->chan_id);
384 404
@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device)
394 kref_get(&device->refcount); 414 kref_get(&device->refcount);
395 kref_get(&device->refcount); 415 kref_get(&device->refcount);
396 kref_init(&chan->refcount); 416 kref_init(&chan->refcount);
417 chan->client_count = 0;
397 chan->slow_ref = 0; 418 chan->slow_ref = 0;
398 INIT_RCU_HEAD(&chan->rcu); 419 INIT_RCU_HEAD(&chan->rcu);
399 } 420 }
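
Note: the dmaengine.c hunks above add DMA_SLAVE validation, per-channel client counting, and an optional per-client controller restriction via client->slave->dma_dev. A hypothetical client-side sketch (not from the patch; names are illustrative) showing how those pieces fit together:

#include <linux/dmaengine.h>

/* accept any channel the core offers, and acknowledge its later removal */
static enum dma_state_client example_event(struct dma_client *client,
		struct dma_chan *chan, enum dma_state state)
{
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
	case DMA_RESOURCE_REMOVED:
		return DMA_ACK;
	default:
		return DMA_DUP;
	}
}

static struct dma_slave example_slave = {
	/* setting .dma_dev here would pin this client to one controller,
	 * as checked in dma_client_chan_alloc() above */
};

static struct dma_client example_client = {
	.event_callback	= example_event,
	.slave		= &example_slave,
};

static int __init example_init(void)
{
	/* DMA_SLAVE in cap_mask now requires ->slave, see the new BUG_ON() */
	dma_cap_set(DMA_SLAVE, example_client.cap_mask);
	dma_async_client_register(&example_client);
	dma_async_client_chan_request(&example_client);
	return 0;
}
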
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
new file mode 100644
index 000000000000..a08d19704743
--- /dev/null
+++ b/drivers/dma/dmatest.c
@@ -0,0 +1,444 @@
1/*
2 * DMA Engine test module
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/delay.h>
11#include <linux/dmaengine.h>
12#include <linux/init.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/random.h>
17#include <linux/wait.h>
18
19static unsigned int test_buf_size = 16384;
20module_param(test_buf_size, uint, S_IRUGO);
21MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
22
23static char test_channel[BUS_ID_SIZE];
24module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
25MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
26
27static char test_device[BUS_ID_SIZE];
28module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
29MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
30
31static unsigned int threads_per_chan = 1;
32module_param(threads_per_chan, uint, S_IRUGO);
33MODULE_PARM_DESC(threads_per_chan,
34 "Number of threads to start per channel (default: 1)");
35
36static unsigned int max_channels;
37module_param(max_channels, uint, S_IRUGO);
38MODULE_PARM_DESC(nr_channels,
39 "Maximum number of channels to use (default: all)");
40
41/*
42 * Initialization patterns. All bytes in the source buffer has bit 7
43 * set, all bytes in the destination buffer has bit 7 cleared.
44 *
45 * Bit 6 is set for all bytes which are to be copied by the DMA
46 * engine. Bit 5 is set for all bytes which are to be overwritten by
47 * the DMA engine.
48 *
49 * The remaining bits are the inverse of a counter which increments by
50 * one for each byte address.
51 */
52#define PATTERN_SRC 0x80
53#define PATTERN_DST 0x00
54#define PATTERN_COPY 0x40
55#define PATTERN_OVERWRITE 0x20
56#define PATTERN_COUNT_MASK 0x1f
57
58struct dmatest_thread {
59 struct list_head node;
60 struct task_struct *task;
61 struct dma_chan *chan;
62 u8 *srcbuf;
63 u8 *dstbuf;
64};
65
66struct dmatest_chan {
67 struct list_head node;
68 struct dma_chan *chan;
69 struct list_head threads;
70};
71
72/*
73 * These are protected by dma_list_mutex since they're only used by
74 * the DMA client event callback
75 */
76static LIST_HEAD(dmatest_channels);
77static unsigned int nr_channels;
78
79static bool dmatest_match_channel(struct dma_chan *chan)
80{
81 if (test_channel[0] == '\0')
82 return true;
83 return strcmp(chan->dev.bus_id, test_channel) == 0;
84}
85
86static bool dmatest_match_device(struct dma_device *device)
87{
88 if (test_device[0] == '\0')
89 return true;
90 return strcmp(device->dev->bus_id, test_device) == 0;
91}
92
93static unsigned long dmatest_random(void)
94{
95 unsigned long buf;
96
97 get_random_bytes(&buf, sizeof(buf));
98 return buf;
99}
100
101static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
102{
103 unsigned int i;
104
105 for (i = 0; i < start; i++)
106 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
107 for ( ; i < start + len; i++)
108 buf[i] = PATTERN_SRC | PATTERN_COPY
109 | (~i & PATTERN_COUNT_MASK);;
110 for ( ; i < test_buf_size; i++)
111 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
112}
113
114static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
115{
116 unsigned int i;
117
118 for (i = 0; i < start; i++)
119 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
120 for ( ; i < start + len; i++)
121 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
122 | (~i & PATTERN_COUNT_MASK);
123 for ( ; i < test_buf_size; i++)
124 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
125}
126
127static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
128 unsigned int counter, bool is_srcbuf)
129{
130 u8 diff = actual ^ pattern;
131 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
132 const char *thread_name = current->comm;
133
134 if (is_srcbuf)
135 pr_warning("%s: srcbuf[0x%x] overwritten!"
136 " Expected %02x, got %02x\n",
137 thread_name, index, expected, actual);
138 else if ((pattern & PATTERN_COPY)
139 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
140 pr_warning("%s: dstbuf[0x%x] not copied!"
141 " Expected %02x, got %02x\n",
142 thread_name, index, expected, actual);
143 else if (diff & PATTERN_SRC)
144 pr_warning("%s: dstbuf[0x%x] was copied!"
145 " Expected %02x, got %02x\n",
146 thread_name, index, expected, actual);
147 else
148 pr_warning("%s: dstbuf[0x%x] mismatch!"
149 " Expected %02x, got %02x\n",
150 thread_name, index, expected, actual);
151}
152
153static unsigned int dmatest_verify(u8 *buf, unsigned int start,
154 unsigned int end, unsigned int counter, u8 pattern,
155 bool is_srcbuf)
156{
157 unsigned int i;
158 unsigned int error_count = 0;
159 u8 actual;
160
161 for (i = start; i < end; i++) {
162 actual = buf[i];
163 if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
164 if (error_count < 32)
165 dmatest_mismatch(actual, pattern, i, counter,
166 is_srcbuf);
167 error_count++;
168 }
169 counter++;
170 }
171
172 if (error_count > 32)
173 pr_warning("%s: %u errors suppressed\n",
174 current->comm, error_count - 32);
175
176 return error_count;
177}
178
179/*
180 * This function repeatedly tests DMA transfers of various lengths and
181 * offsets until it is told to exit by kthread_stop(). There may be
182 * multiple threads running this function in parallel for a single
183 * channel, and there may be multiple channels being tested in
184 * parallel.
185 *
186 * Before each test, the source and destination buffer is initialized
187 * with a known pattern. This pattern is different depending on
188 * whether it's in an area which is supposed to be copied or
189 * overwritten, and different in the source and destination buffers.
190 * So if the DMA engine doesn't copy exactly what we tell it to copy,
191 * we'll notice.
192 */
193static int dmatest_func(void *data)
194{
195 struct dmatest_thread *thread = data;
196 struct dma_chan *chan;
197 const char *thread_name;
198 unsigned int src_off, dst_off, len;
199 unsigned int error_count;
200 unsigned int failed_tests = 0;
201 unsigned int total_tests = 0;
202 dma_cookie_t cookie;
203 enum dma_status status;
204 int ret;
205
206 thread_name = current->comm;
207
208 ret = -ENOMEM;
209 thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
210 if (!thread->srcbuf)
211 goto err_srcbuf;
212 thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
213 if (!thread->dstbuf)
214 goto err_dstbuf;
215
216 smp_rmb();
217 chan = thread->chan;
218 dma_chan_get(chan);
219
220 while (!kthread_should_stop()) {
221 total_tests++;
222
223 len = dmatest_random() % test_buf_size + 1;
224 src_off = dmatest_random() % (test_buf_size - len + 1);
225 dst_off = dmatest_random() % (test_buf_size - len + 1);
226
227 dmatest_init_srcbuf(thread->srcbuf, src_off, len);
228 dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
229
230 cookie = dma_async_memcpy_buf_to_buf(chan,
231 thread->dstbuf + dst_off,
232 thread->srcbuf + src_off,
233 len);
234 if (dma_submit_error(cookie)) {
235 pr_warning("%s: #%u: submit error %d with src_off=0x%x "
236 "dst_off=0x%x len=0x%x\n",
237 thread_name, total_tests - 1, cookie,
238 src_off, dst_off, len);
239 msleep(100);
240 failed_tests++;
241 continue;
242 }
243 dma_async_memcpy_issue_pending(chan);
244
245 do {
246 msleep(1);
247 status = dma_async_memcpy_complete(
248 chan, cookie, NULL, NULL);
249 } while (status == DMA_IN_PROGRESS);
250
251 if (status == DMA_ERROR) {
252 pr_warning("%s: #%u: error during copy\n",
253 thread_name, total_tests - 1);
254 failed_tests++;
255 continue;
256 }
257
258 error_count = 0;
259
260 pr_debug("%s: verifying source buffer...\n", thread_name);
261 error_count += dmatest_verify(thread->srcbuf, 0, src_off,
262 0, PATTERN_SRC, true);
263 error_count += dmatest_verify(thread->srcbuf, src_off,
264 src_off + len, src_off,
265 PATTERN_SRC | PATTERN_COPY, true);
266 error_count += dmatest_verify(thread->srcbuf, src_off + len,
267 test_buf_size, src_off + len,
268 PATTERN_SRC, true);
269
270 pr_debug("%s: verifying dest buffer...\n",
271 thread->task->comm);
272 error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
273 0, PATTERN_DST, false);
274 error_count += dmatest_verify(thread->dstbuf, dst_off,
275 dst_off + len, src_off,
276 PATTERN_SRC | PATTERN_COPY, false);
277 error_count += dmatest_verify(thread->dstbuf, dst_off + len,
278 test_buf_size, dst_off + len,
279 PATTERN_DST, false);
280
281 if (error_count) {
282 pr_warning("%s: #%u: %u errors with "
283 "src_off=0x%x dst_off=0x%x len=0x%x\n",
284 thread_name, total_tests - 1, error_count,
285 src_off, dst_off, len);
286 failed_tests++;
287 } else {
288 pr_debug("%s: #%u: No errors with "
289 "src_off=0x%x dst_off=0x%x len=0x%x\n",
290 thread_name, total_tests - 1,
291 src_off, dst_off, len);
292 }
293 }
294
295 ret = 0;
296 dma_chan_put(chan);
297 kfree(thread->dstbuf);
298err_dstbuf:
299 kfree(thread->srcbuf);
300err_srcbuf:
301 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
302 thread_name, total_tests, failed_tests, ret);
303 return ret;
304}
305
306static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
307{
308 struct dmatest_thread *thread;
309 struct dmatest_thread *_thread;
310 int ret;
311
312 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
313 ret = kthread_stop(thread->task);
314 pr_debug("dmatest: thread %s exited with status %d\n",
315 thread->task->comm, ret);
316 list_del(&thread->node);
317 kfree(thread);
318 }
319 kfree(dtc);
320}
321
322static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
323{
324 struct dmatest_chan *dtc;
325 struct dmatest_thread *thread;
326 unsigned int i;
327
328 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
329 if (!dtc) {
330 pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
331 return DMA_NAK;
332 }
333
334 dtc->chan = chan;
335 INIT_LIST_HEAD(&dtc->threads);
336
337 for (i = 0; i < threads_per_chan; i++) {
338 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
339 if (!thread) {
340 pr_warning("dmatest: No memory for %s-test%u\n",
341 chan->dev.bus_id, i);
342 break;
343 }
344 thread->chan = dtc->chan;
345 smp_wmb();
346 thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
347 chan->dev.bus_id, i);
348 if (IS_ERR(thread->task)) {
349 pr_warning("dmatest: Failed to run thread %s-test%u\n",
350 chan->dev.bus_id, i);
351 kfree(thread);
352 break;
353 }
354
355 /* srcbuf and dstbuf are allocated by the thread itself */
356
357 list_add_tail(&thread->node, &dtc->threads);
358 }
359
360 pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);
361
362 list_add_tail(&dtc->node, &dmatest_channels);
363 nr_channels++;
364
365 return DMA_ACK;
366}
367
368static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
369{
370 struct dmatest_chan *dtc, *_dtc;
371
372 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
373 if (dtc->chan == chan) {
374 list_del(&dtc->node);
375 dmatest_cleanup_channel(dtc);
376 pr_debug("dmatest: lost channel %s\n",
377 chan->dev.bus_id);
378 return DMA_ACK;
379 }
380 }
381
382 return DMA_DUP;
383}
384
385/*
386 * Start testing threads as new channels are assigned to us, and kill
387 * them when the channels go away.
388 *
389 * When we unregister the client, all channels are removed so this
390 * will also take care of cleaning things up when the module is
391 * unloaded.
392 */
393static enum dma_state_client
394dmatest_event(struct dma_client *client, struct dma_chan *chan,
395 enum dma_state state)
396{
397 enum dma_state_client ack = DMA_NAK;
398
399 switch (state) {
400 case DMA_RESOURCE_AVAILABLE:
401 if (!dmatest_match_channel(chan)
402 || !dmatest_match_device(chan->device))
403 ack = DMA_DUP;
404 else if (max_channels && nr_channels >= max_channels)
405 ack = DMA_NAK;
406 else
407 ack = dmatest_add_channel(chan);
408 break;
409
410 case DMA_RESOURCE_REMOVED:
411 ack = dmatest_remove_channel(chan);
412 break;
413
414 default:
415 pr_info("dmatest: Unhandled event %u (%s)\n",
416 state, chan->dev.bus_id);
417 break;
418 }
419
420 return ack;
421}
422
423static struct dma_client dmatest_client = {
424 .event_callback = dmatest_event,
425};
426
427static int __init dmatest_init(void)
428{
429 dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
430 dma_async_client_register(&dmatest_client);
431 dma_async_client_chan_request(&dmatest_client);
432
433 return 0;
434}
435module_init(dmatest_init);
436
437static void __exit dmatest_exit(void)
438{
439 dma_async_client_unregister(&dmatest_client);
440}
441module_exit(dmatest_exit);
442
443MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
444MODULE_LICENSE("GPL v2");
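
Note: dmatest above is a self-contained client: once loaded it claims memcpy-capable channels (named dmaNchanM by dmaengine.c) and spawns threads_per_chan kthreads per channel, each looping over randomized copy lengths and offsets and verifying the pattern bytes until the module is removed. As a usage hint (not part of the patch), it would typically be driven with the parameters declared above, e.g. "modprobe dmatest channel=dma0chan0 threads_per_chan=2"; unloading the module stops the test threads via dma_async_client_unregister().
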
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
new file mode 100644
index 000000000000..94df91771243
--- /dev/null
+++ b/drivers/dma/dw_dmac.c
@@ -0,0 +1,1122 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
3 * AVR32 systems.)
4 *
5 * Copyright (C) 2007-2008 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/clk.h>
12#include <linux/delay.h>
13#include <linux/dmaengine.h>
14#include <linux/dma-mapping.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22
23#include "dw_dmac_regs.h"
24
25/*
26 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
27 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
28 * of which use ARM any more). See the "Databook" from Synopsys for
29 * information beyond what licensees probably provide.
30 *
31 * The driver has currently been tested only with the Atmel AT32AP7000,
32 * which does not support descriptor writeback.
33 */
34
35/* NOTE: DMS+SMS is system-specific. We should get this information
36 * from the platform code somehow.
37 */
38#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
39 | DWC_CTLL_SRC_MSIZE(0) \
40 | DWC_CTLL_DMS(0) \
41 | DWC_CTLL_SMS(1) \
42 | DWC_CTLL_LLP_D_EN \
43 | DWC_CTLL_LLP_S_EN)
44
45/*
46 * This is configuration-dependent and usually a funny size like 4095.
47 * Let's round it down to the nearest power of two.
48 *
49 * Note that this is a transfer count, i.e. if we transfer 32-bit
50 * words, we can do 8192 bytes per descriptor.
51 *
52 * This parameter is also system-specific.
53 */
54#define DWC_MAX_COUNT 2048U
55
56/*
57 * Number of descriptors to allocate for each channel. This should be
58 * made configurable somehow; preferably, the clients (at least the
59 * ones using slave transfers) should be able to give us a hint.
60 */
61#define NR_DESCS_PER_CHANNEL 64
62
63/*----------------------------------------------------------------------*/
64
65/*
66 * Because we're not relying on writeback from the controller (it may not
67 * even be configured into the core!) we don't need to use dma_pool. These
68 * descriptors -- and associated data -- are cacheable. We do need to make
69 * sure their dcache entries are written back before handing them off to
70 * the controller, though.
71 */
72
73static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
74{
75 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
76}
77
78static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
79{
80 return list_entry(dwc->queue.next, struct dw_desc, desc_node);
81}
82
83static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
84{
85 struct dw_desc *desc, *_desc;
86 struct dw_desc *ret = NULL;
87 unsigned int i = 0;
88
89 spin_lock_bh(&dwc->lock);
90 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
91 if (async_tx_test_ack(&desc->txd)) {
92 list_del(&desc->desc_node);
93 ret = desc;
94 break;
95 }
96 dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
97 i++;
98 }
99 spin_unlock_bh(&dwc->lock);
100
101 dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
102
103 return ret;
104}
105
106static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
107{
108 struct dw_desc *child;
109
110 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
111 dma_sync_single_for_cpu(dwc->chan.dev.parent,
112 child->txd.phys, sizeof(child->lli),
113 DMA_TO_DEVICE);
114 dma_sync_single_for_cpu(dwc->chan.dev.parent,
115 desc->txd.phys, sizeof(desc->lli),
116 DMA_TO_DEVICE);
117}
118
119/*
120 * Move a descriptor, including any children, to the free list.
121 * `desc' must not be on any lists.
122 */
123static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124{
125 if (desc) {
126 struct dw_desc *child;
127
128 dwc_sync_desc_for_cpu(dwc, desc);
129
130 spin_lock_bh(&dwc->lock);
131 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
132 dev_vdbg(&dwc->chan.dev,
133 "moving child desc %p to freelist\n",
134 child);
135 list_splice_init(&desc->txd.tx_list, &dwc->free_list);
136 dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
137 list_add(&desc->desc_node, &dwc->free_list);
138 spin_unlock_bh(&dwc->lock);
139 }
140}
141
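/*
 * Cookie numbering example for the helper below: cookies count
 * 1, 2, 3, ... per channel; once the signed counter wraps past INT_MAX
 * it goes negative and is reset to 1, so a valid cookie is always
 * positive.
 */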
142/* Called with dwc->lock held and bh disabled */
143static dma_cookie_t
144dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
145{
146 dma_cookie_t cookie = dwc->chan.cookie;
147
148 if (++cookie < 0)
149 cookie = 1;
150
151 dwc->chan.cookie = cookie;
152 desc->txd.cookie = cookie;
153
154 return cookie;
155}
156
157/*----------------------------------------------------------------------*/
158
159/* Called with dwc->lock held and bh disabled */
160static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
161{
162 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
163
164 /* ASSERT: channel is idle */
165 if (dma_readl(dw, CH_EN) & dwc->mask) {
166 dev_err(&dwc->chan.dev,
167 "BUG: Attempted to start non-idle channel\n");
168 dev_err(&dwc->chan.dev,
169 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
170 channel_readl(dwc, SAR),
171 channel_readl(dwc, DAR),
172 channel_readl(dwc, LLP),
173 channel_readl(dwc, CTL_HI),
174 channel_readl(dwc, CTL_LO));
175
176 /* The tasklet will hopefully advance the queue... */
177 return;
178 }
179
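	/*
	 * Only LLP (the DMA address of the first dw_lli), the LLP-enable
	 * bits in CTL and the channel-enable bit need programming here;
	 * the controller fetches each block's SAR/DAR/CTL by walking the
	 * lli chain itself.
	 */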
180 channel_writel(dwc, LLP, first->txd.phys);
181 channel_writel(dwc, CTL_LO,
182 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
183 channel_writel(dwc, CTL_HI, 0);
184 channel_set_bit(dw, CH_EN, dwc->mask);
185}
186
187/*----------------------------------------------------------------------*/
188
189static void
190dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
191{
192 dma_async_tx_callback callback;
193 void *param;
194 struct dma_async_tx_descriptor *txd = &desc->txd;
195
196 dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
197
198 dwc->completed = txd->cookie;
199 callback = txd->callback;
200 param = txd->callback_param;
201
202 dwc_sync_desc_for_cpu(dwc, desc);
203 list_splice_init(&txd->tx_list, &dwc->free_list);
204 list_move(&desc->desc_node, &dwc->free_list);
205
206 /*
207 * We use dma_unmap_page() regardless of how the buffers were
208 * mapped before they were submitted...
209 */
210 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
211 dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
212 DMA_FROM_DEVICE);
213 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
214 dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
215 DMA_TO_DEVICE);
216
217 /*
218 * The API requires that no submissions are done from a
219 * callback, so we don't need to drop the lock here
220 */
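	/*
	 * Note: when this point is reached from the tasklet or from a
	 * *_bh-locked caller, the callback runs with bottom halves
	 * disabled and dwc->lock held, so it must not sleep.
	 */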
221 if (callback)
222 callback(param);
223}
224
225static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
226{
227 struct dw_desc *desc, *_desc;
228 LIST_HEAD(list);
229
230 if (dma_readl(dw, CH_EN) & dwc->mask) {
231 dev_err(&dwc->chan.dev,
232 "BUG: XFER bit set, but channel not idle!\n");
233
234 /* Try to continue after resetting the channel... */
235 channel_clear_bit(dw, CH_EN, dwc->mask);
236 while (dma_readl(dw, CH_EN) & dwc->mask)
237 cpu_relax();
238 }
239
240 /*
241 * Submit queued descriptors ASAP, i.e. before we go through
242 * the completed ones.
243 */
244 if (!list_empty(&dwc->queue))
245 dwc_dostart(dwc, dwc_first_queued(dwc));
246 list_splice_init(&dwc->active_list, &list);
247 list_splice_init(&dwc->queue, &dwc->active_list);
248
249 list_for_each_entry_safe(desc, _desc, &list, desc_node)
250 dwc_descriptor_complete(dwc, desc);
251}
252
253static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
254{
255 dma_addr_t llp;
256 struct dw_desc *desc, *_desc;
257 struct dw_desc *child;
258 u32 status_xfer;
259
260 /*
261 * Clear block interrupt flag before scanning so that we don't
262 * miss any, and read LLP before RAW_XFER to ensure it is
263 * valid if we decide to scan the list.
264 */
265 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
266 llp = channel_readl(dwc, LLP);
267 status_xfer = dma_readl(dw, RAW.XFER);
268
269 if (status_xfer & dwc->mask) {
270 /* Everything we've submitted is done */
271 dma_writel(dw, CLEAR.XFER, dwc->mask);
272 dwc_complete_all(dw, dwc);
273 return;
274 }
275
276 dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
277
278 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
279 if (desc->lli.llp == llp)
280 /* This one is currently in progress */
281 return;
282
283 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
284 if (child->lli.llp == llp)
285 /* Currently in progress */
286 return;
287
288 /*
289 * No descriptors so far seem to be in progress, i.e.
290 * this one must be done.
291 */
292 dwc_descriptor_complete(dwc, desc);
293 }
294
295 dev_err(&dwc->chan.dev,
296 "BUG: All descriptors done, but channel not idle!\n");
297
298 /* Try to continue after resetting the channel... */
299 channel_clear_bit(dw, CH_EN, dwc->mask);
300 while (dma_readl(dw, CH_EN) & dwc->mask)
301 cpu_relax();
302
303 if (!list_empty(&dwc->queue)) {
304 dwc_dostart(dwc, dwc_first_queued(dwc));
305 list_splice_init(&dwc->queue, &dwc->active_list);
306 }
307}
308
309static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
310{
311 dev_printk(KERN_CRIT, &dwc->chan.dev,
312 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
313 lli->sar, lli->dar, lli->llp,
314 lli->ctlhi, lli->ctllo);
315}
316
317static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
318{
319 struct dw_desc *bad_desc;
320 struct dw_desc *child;
321
322 dwc_scan_descriptors(dw, dwc);
323
324 /*
325 * The descriptor currently at the head of the active list is
326 * borked. Since we don't have any way to report errors, we'll
327 * just have to scream loudly and try to carry on.
328 */
329 bad_desc = dwc_first_active(dwc);
330 list_del_init(&bad_desc->desc_node);
331 list_splice_init(&dwc->queue, dwc->active_list.prev);
332
333 /* Clear the error flag and try to restart the controller */
334 dma_writel(dw, CLEAR.ERROR, dwc->mask);
335 if (!list_empty(&dwc->active_list))
336 dwc_dostart(dwc, dwc_first_active(dwc));
337
338 /*
339 * KERN_CRIT may seem harsh, but since this only happens
340 * when someone submits a bad physical address in a
341 * descriptor, we should consider ourselves lucky that the
342 * controller flagged an error instead of scribbling over
343 * random memory locations.
344 */
345 dev_printk(KERN_CRIT, &dwc->chan.dev,
346 "Bad descriptor submitted for DMA!\n");
347 dev_printk(KERN_CRIT, &dwc->chan.dev,
348 " cookie: %d\n", bad_desc->txd.cookie);
349 dwc_dump_lli(dwc, &bad_desc->lli);
350 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
351 dwc_dump_lli(dwc, &child->lli);
352
353 /* Pretend the descriptor completed successfully */
354 dwc_descriptor_complete(dwc, bad_desc);
355}
356
357static void dw_dma_tasklet(unsigned long data)
358{
359 struct dw_dma *dw = (struct dw_dma *)data;
360 struct dw_dma_chan *dwc;
361 u32 status_block;
362 u32 status_xfer;
363 u32 status_err;
364 int i;
365
366 status_block = dma_readl(dw, RAW.BLOCK);
367 status_xfer = dma_readl(dw, RAW.XFER);
368 status_err = dma_readl(dw, RAW.ERROR);
369
370 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
371 status_block, status_err);
372
373 for (i = 0; i < dw->dma.chancnt; i++) {
374 dwc = &dw->chan[i];
375 spin_lock(&dwc->lock);
376 if (status_err & (1 << i))
377 dwc_handle_error(dw, dwc);
378 else if ((status_block | status_xfer) & (1 << i))
379 dwc_scan_descriptors(dw, dwc);
380 spin_unlock(&dwc->lock);
381 }
382
383 /*
384 * Re-enable interrupts. Block Complete interrupts are only
385 * enabled if the INT_EN bit in the descriptor is set. This
386 * will trigger a scan before the whole list is done.
387 */
388 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
389 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
390 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
391}
392
393static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
394{
395 struct dw_dma *dw = dev_id;
396 u32 status;
397
398 dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
399 dma_readl(dw, STATUS_INT));
400
401 /*
402 * Just disable the interrupts. We'll turn them back on in the
403 * softirq handler.
404 */
405 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
406 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
407 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
408
409 status = dma_readl(dw, STATUS_INT);
410 if (status) {
411 dev_err(dw->dma.dev,
412 "BUG: Unexpected interrupts pending: 0x%x\n",
413 status);
414
415 /* Try to recover */
416 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
417 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
418 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
419 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
420 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
421 }
422
423 tasklet_schedule(&dw->tasklet);
424
425 return IRQ_HANDLED;
426}
427
428/*----------------------------------------------------------------------*/
429
430static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
431{
432 struct dw_desc *desc = txd_to_dw_desc(tx);
433 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
434 dma_cookie_t cookie;
435
436 spin_lock_bh(&dwc->lock);
437 cookie = dwc_assign_cookie(dwc, desc);
438
439 /*
440 * REVISIT: We should attempt to chain as many descriptors as
441 * possible, perhaps even appending to those already submitted
442 * for DMA. But this is hard to do in a race-free manner.
443 */
444 if (list_empty(&dwc->active_list)) {
445 dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
446 desc->txd.cookie);
447 dwc_dostart(dwc, desc);
448 list_add_tail(&desc->desc_node, &dwc->active_list);
449 } else {
450 dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
451 desc->txd.cookie);
452
453 list_add_tail(&desc->desc_node, &dwc->queue);
454 }
455
456 spin_unlock_bh(&dwc->lock);
457
458 return cookie;
459}
460
461static struct dma_async_tx_descriptor *
462dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
463 size_t len, unsigned long flags)
464{
465 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
466 struct dw_desc *desc;
467 struct dw_desc *first;
468 struct dw_desc *prev;
469 size_t xfer_count;
470 size_t offset;
471 unsigned int src_width;
472 unsigned int dst_width;
473 u32 ctllo;
474
475 dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
476 dest, src, len, flags);
477
478 if (unlikely(!len)) {
479 dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
480 return NULL;
481 }
482
483 /*
484 * We can be a lot more clever here, but this should take care
485 * of the most common optimization.
486 */
487 if (!((src | dest | len) & 3))
488 src_width = dst_width = 2;
489 else if (!((src | dest | len) & 1))
490 src_width = dst_width = 1;
491 else
492 src_width = dst_width = 0;
493
494 ctllo = DWC_DEFAULT_CTLLO
495 | DWC_CTLL_DST_WIDTH(dst_width)
496 | DWC_CTLL_SRC_WIDTH(src_width)
497 | DWC_CTLL_DST_INC
498 | DWC_CTLL_SRC_INC
499 | DWC_CTLL_FC_M2M;
500 prev = first = NULL;
501
502 for (offset = 0; offset < len; offset += xfer_count << src_width) {
503 xfer_count = min_t(size_t, (len - offset) >> src_width,
504 DWC_MAX_COUNT);
505
506 desc = dwc_desc_get(dwc);
507 if (!desc)
508 goto err_desc_get;
509
510 desc->lli.sar = src + offset;
511 desc->lli.dar = dest + offset;
512 desc->lli.ctllo = ctllo;
513 desc->lli.ctlhi = xfer_count;
514
515 if (!first) {
516 first = desc;
517 } else {
518 prev->lli.llp = desc->txd.phys;
519 dma_sync_single_for_device(chan->dev.parent,
520 prev->txd.phys, sizeof(prev->lli),
521 DMA_TO_DEVICE);
522 list_add_tail(&desc->desc_node,
523 &first->txd.tx_list);
524 }
525 prev = desc;
526 }
527
528
529 if (flags & DMA_PREP_INTERRUPT)
530 /* Trigger interrupt after last block */
531 prev->lli.ctllo |= DWC_CTLL_INT_EN;
532
533 prev->lli.llp = 0;
534 dma_sync_single_for_device(chan->dev.parent,
535 prev->txd.phys, sizeof(prev->lli),
536 DMA_TO_DEVICE);
537
538 first->txd.flags = flags;
539 first->len = len;
540
541 return &first->txd;
542
543err_desc_get:
544 dwc_desc_put(dwc, first);
545 return NULL;
546}
547
548static struct dma_async_tx_descriptor *
549dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
550 unsigned int sg_len, enum dma_data_direction direction,
551 unsigned long flags)
552{
553 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
554 struct dw_dma_slave *dws = dwc->dws;
555 struct dw_desc *prev;
556 struct dw_desc *first;
557 u32 ctllo;
558 dma_addr_t reg;
559 unsigned int reg_width;
560 unsigned int mem_width;
561 unsigned int i;
562 struct scatterlist *sg;
563 size_t total_len = 0;
564
565 dev_vdbg(&chan->dev, "prep_dma_slave\n");
566
567 if (unlikely(!dws || !sg_len))
568 return NULL;
569
570 reg_width = dws->slave.reg_width;
571 prev = first = NULL;
572
573 sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
574
575 switch (direction) {
576 case DMA_TO_DEVICE:
577 ctllo = (DWC_DEFAULT_CTLLO
578 | DWC_CTLL_DST_WIDTH(reg_width)
579 | DWC_CTLL_DST_FIX
580 | DWC_CTLL_SRC_INC
581 | DWC_CTLL_FC_M2P);
582 reg = dws->slave.tx_reg;
583 for_each_sg(sgl, sg, sg_len, i) {
584 struct dw_desc *desc;
585 u32 len;
586 u32 mem;
587
588 desc = dwc_desc_get(dwc);
589 if (!desc) {
590 dev_err(&chan->dev,
591 "not enough descriptors available\n");
592 goto err_desc_get;
593 }
594
595 mem = sg_phys(sg);
596 len = sg_dma_len(sg);
597 mem_width = 2;
598 if (unlikely(mem & 3 || len & 3))
599 mem_width = 0;
600
601 desc->lli.sar = mem;
602 desc->lli.dar = reg;
603 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
604 desc->lli.ctlhi = len >> mem_width;
605
606 if (!first) {
607 first = desc;
608 } else {
609 prev->lli.llp = desc->txd.phys;
610 dma_sync_single_for_device(chan->dev.parent,
611 prev->txd.phys,
612 sizeof(prev->lli),
613 DMA_TO_DEVICE);
614 list_add_tail(&desc->desc_node,
615 &first->txd.tx_list);
616 }
617 prev = desc;
618 total_len += len;
619 }
620 break;
621 case DMA_FROM_DEVICE:
622 ctllo = (DWC_DEFAULT_CTLLO
623 | DWC_CTLL_SRC_WIDTH(reg_width)
624 | DWC_CTLL_DST_INC
625 | DWC_CTLL_SRC_FIX
626 | DWC_CTLL_FC_P2M);
627
628 reg = dws->slave.rx_reg;
629 for_each_sg(sgl, sg, sg_len, i) {
630 struct dw_desc *desc;
631 u32 len;
632 u32 mem;
633
634 desc = dwc_desc_get(dwc);
635 if (!desc) {
636 dev_err(&chan->dev,
637 "not enough descriptors available\n");
638 goto err_desc_get;
639 }
640
641 mem = sg_phys(sg);
642 len = sg_dma_len(sg);
643 mem_width = 2;
644 if (unlikely(mem & 3 || len & 3))
645 mem_width = 0;
646
647 desc->lli.sar = reg;
648 desc->lli.dar = mem;
649 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
650 desc->lli.ctlhi = len >> reg_width;
651
652 if (!first) {
653 first = desc;
654 } else {
655 prev->lli.llp = desc->txd.phys;
656 dma_sync_single_for_device(chan->dev.parent,
657 prev->txd.phys,
658 sizeof(prev->lli),
659 DMA_TO_DEVICE);
660 list_add_tail(&desc->desc_node,
661 &first->txd.tx_list);
662 }
663 prev = desc;
664 total_len += len;
665 }
666 break;
667 default:
668 return NULL;
669 }
670
671 if (flags & DMA_PREP_INTERRUPT)
672 /* Trigger interrupt after last block */
673 prev->lli.ctllo |= DWC_CTLL_INT_EN;
674
675 prev->lli.llp = 0;
676 dma_sync_single_for_device(chan->dev.parent,
677 prev->txd.phys, sizeof(prev->lli),
678 DMA_TO_DEVICE);
679
680 first->len = total_len;
681
682 return &first->txd;
683
684err_desc_get:
685 dwc_desc_put(dwc, first);
686 return NULL;
687}
688
689static void dwc_terminate_all(struct dma_chan *chan)
690{
691 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
692 struct dw_dma *dw = to_dw_dma(chan->device);
693 struct dw_desc *desc, *_desc;
694 LIST_HEAD(list);
695
696 /*
697 * This is only called when something went wrong elsewhere, so
698 * we don't really care about the data. Just disable the
699 * channel. We still have to poll the channel enable bit due
700 * to AHB/HSB limitations.
701 */
702 spin_lock_bh(&dwc->lock);
703
704 channel_clear_bit(dw, CH_EN, dwc->mask);
705
706 while (dma_readl(dw, CH_EN) & dwc->mask)
707 cpu_relax();
708
709 /* active_list entries will end up before queued entries */
710 list_splice_init(&dwc->queue, &list);
711 list_splice_init(&dwc->active_list, &list);
712
713 spin_unlock_bh(&dwc->lock);
714
715 /* Flush all pending and queued descriptors */
716 list_for_each_entry_safe(desc, _desc, &list, desc_node)
717 dwc_descriptor_complete(dwc, desc);
718}
719
720static enum dma_status
721dwc_is_tx_complete(struct dma_chan *chan,
722 dma_cookie_t cookie,
723 dma_cookie_t *done, dma_cookie_t *used)
724{
725 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
726 dma_cookie_t last_used;
727 dma_cookie_t last_complete;
728 int ret;
729
730 last_complete = dwc->completed;
731 last_used = chan->cookie;
732
733 ret = dma_async_is_complete(cookie, last_complete, last_used);
734 if (ret != DMA_SUCCESS) {
735 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
736
737 last_complete = dwc->completed;
738 last_used = chan->cookie;
739
740 ret = dma_async_is_complete(cookie, last_complete, last_used);
741 }
742
743 if (done)
744 *done = last_complete;
745 if (used)
746 *used = last_used;
747
748 return ret;
749}
750
751static void dwc_issue_pending(struct dma_chan *chan)
752{
753 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
754
755 spin_lock_bh(&dwc->lock);
756 if (!list_empty(&dwc->queue))
757 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
758 spin_unlock_bh(&dwc->lock);
759}
760
761static int dwc_alloc_chan_resources(struct dma_chan *chan,
762 struct dma_client *client)
763{
764 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
765 struct dw_dma *dw = to_dw_dma(chan->device);
766 struct dw_desc *desc;
767 struct dma_slave *slave;
768 struct dw_dma_slave *dws;
769 int i;
770 u32 cfghi;
771 u32 cfglo;
772
773 dev_vdbg(&chan->dev, "alloc_chan_resources\n");
774
775 /* Channels doing slave DMA can only handle one client. */
776 if (dwc->dws || client->slave) {
777 if (chan->client_count)
778 return -EBUSY;
779 }
780
781 /* ASSERT: channel is idle */
782 if (dma_readl(dw, CH_EN) & dwc->mask) {
783 dev_dbg(&chan->dev, "DMA channel not idle?\n");
784 return -EIO;
785 }
786
787 dwc->completed = chan->cookie = 1;
788
789 cfghi = DWC_CFGH_FIFO_MODE;
790 cfglo = 0;
791
792 slave = client->slave;
793 if (slave) {
794 /*
795 * We need controller-specific data to set up slave
796 * transfers.
797 */
798 BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
799
800 dws = container_of(slave, struct dw_dma_slave, slave);
801
802 dwc->dws = dws;
803 cfghi = dws->cfg_hi;
804 cfglo = dws->cfg_lo;
805 } else {
806 dwc->dws = NULL;
807 }
808
809 channel_writel(dwc, CFG_LO, cfglo);
810 channel_writel(dwc, CFG_HI, cfghi);
811
812 /*
813 * NOTE: some controllers may have additional features that we
814 * need to initialize here, like "scatter-gather" (which
815 * doesn't mean what you think it means), and status writeback.
816 */
817
818 spin_lock_bh(&dwc->lock);
819 i = dwc->descs_allocated;
820 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
821 spin_unlock_bh(&dwc->lock);
822
823 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
824 if (!desc) {
825 dev_info(&chan->dev,
826 "only allocated %d descriptors\n", i);
827 spin_lock_bh(&dwc->lock);
828 break;
829 }
830
831 dma_async_tx_descriptor_init(&desc->txd, chan);
832 desc->txd.tx_submit = dwc_tx_submit;
833 desc->txd.flags = DMA_CTRL_ACK;
834 INIT_LIST_HEAD(&desc->txd.tx_list);
835 desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
836 sizeof(desc->lli), DMA_TO_DEVICE);
837 dwc_desc_put(dwc, desc);
838
839 spin_lock_bh(&dwc->lock);
840 i = ++dwc->descs_allocated;
841 }
842
843 /* Enable interrupts */
844 channel_set_bit(dw, MASK.XFER, dwc->mask);
845 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
846 channel_set_bit(dw, MASK.ERROR, dwc->mask);
847
848 spin_unlock_bh(&dwc->lock);
849
850 dev_dbg(&chan->dev,
851 "alloc_chan_resources allocated %d descriptors\n", i);
852
853 return i;
854}
855
856static void dwc_free_chan_resources(struct dma_chan *chan)
857{
858 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
859 struct dw_dma *dw = to_dw_dma(chan->device);
860 struct dw_desc *desc, *_desc;
861 LIST_HEAD(list);
862
863 dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
864 dwc->descs_allocated);
865
866 /* ASSERT: channel is idle */
867 BUG_ON(!list_empty(&dwc->active_list));
868 BUG_ON(!list_empty(&dwc->queue));
869 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
870
871 spin_lock_bh(&dwc->lock);
872 list_splice_init(&dwc->free_list, &list);
873 dwc->descs_allocated = 0;
874 dwc->dws = NULL;
875
876 /* Disable interrupts */
877 channel_clear_bit(dw, MASK.XFER, dwc->mask);
878 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
879 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
880
881 spin_unlock_bh(&dwc->lock);
882
883 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
884 dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
885 dma_unmap_single(chan->dev.parent, desc->txd.phys,
886 sizeof(desc->lli), DMA_TO_DEVICE);
887 kfree(desc);
888 }
889
890 dev_vdbg(&chan->dev, "free_chan_resources done\n");
891}
892
893/*----------------------------------------------------------------------*/
894
895static void dw_dma_off(struct dw_dma *dw)
896{
897 dma_writel(dw, CFG, 0);
898
899 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
900 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
901 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
902 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
903 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
904
905 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
906 cpu_relax();
907}
908
909static int __init dw_probe(struct platform_device *pdev)
910{
911 struct dw_dma_platform_data *pdata;
912 struct resource *io;
913 struct dw_dma *dw;
914 size_t size;
915 int irq;
916 int err;
917 int i;
918
919 pdata = pdev->dev.platform_data;
920 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
921 return -EINVAL;
922
923 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
924 if (!io)
925 return -EINVAL;
926
927 irq = platform_get_irq(pdev, 0);
928 if (irq < 0)
929 return irq;
930
931 size = sizeof(struct dw_dma);
932 size += pdata->nr_channels * sizeof(struct dw_dma_chan);
933 dw = kzalloc(size, GFP_KERNEL);
934 if (!dw)
935 return -ENOMEM;
936
937 if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
938 err = -EBUSY;
939 goto err_kfree;
940 }
941
942 memset(dw, 0, sizeof *dw);
943
944 dw->regs = ioremap(io->start, DW_REGLEN);
945 if (!dw->regs) {
946 err = -ENOMEM;
947 goto err_release_r;
948 }
949
950 dw->clk = clk_get(&pdev->dev, "hclk");
951 if (IS_ERR(dw->clk)) {
952 err = PTR_ERR(dw->clk);
953 goto err_clk;
954 }
955 clk_enable(dw->clk);
956
957 /* force dma off, just in case */
958 dw_dma_off(dw);
959
960 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
961 if (err)
962 goto err_irq;
963
964 platform_set_drvdata(pdev, dw);
965
966 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
967
968 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
969
970 INIT_LIST_HEAD(&dw->dma.channels);
971 for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
972 struct dw_dma_chan *dwc = &dw->chan[i];
973
974 dwc->chan.device = &dw->dma;
975 dwc->chan.cookie = dwc->completed = 1;
976 dwc->chan.chan_id = i;
977 list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
978
979 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
980 spin_lock_init(&dwc->lock);
981 dwc->mask = 1 << i;
982
983 INIT_LIST_HEAD(&dwc->active_list);
984 INIT_LIST_HEAD(&dwc->queue);
985 INIT_LIST_HEAD(&dwc->free_list);
986
987 channel_clear_bit(dw, CH_EN, dwc->mask);
988 }
989
990 /* Clear/disable all interrupts on all channels. */
991 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
992 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
993 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
994 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
995 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
996
997 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
998 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
999 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1000 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1001 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1002
1003 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1004 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1005 dw->dma.dev = &pdev->dev;
1006 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1007 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1008
1009 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1010
1011 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1012 dw->dma.device_terminate_all = dwc_terminate_all;
1013
1014 dw->dma.device_is_tx_complete = dwc_is_tx_complete;
1015 dw->dma.device_issue_pending = dwc_issue_pending;
1016
1017 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1018
1019 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
1020 pdev->dev.bus_id, dw->dma.chancnt);
1021
1022 dma_async_device_register(&dw->dma);
1023
1024 return 0;
1025
1026err_irq:
1027 clk_disable(dw->clk);
1028 clk_put(dw->clk);
1029err_clk:
1030 iounmap(dw->regs);
1031 dw->regs = NULL;
1032err_release_r:
1033 release_mem_region(io->start, DW_REGLEN);
1034err_kfree:
1035 kfree(dw);
1036 return err;
1037}
1038
1039static int __exit dw_remove(struct platform_device *pdev)
1040{
1041 struct dw_dma *dw = platform_get_drvdata(pdev);
1042 struct dw_dma_chan *dwc, *_dwc;
1043 struct resource *io;
1044
1045 dw_dma_off(dw);
1046 dma_async_device_unregister(&dw->dma);
1047
1048 free_irq(platform_get_irq(pdev, 0), dw);
1049 tasklet_kill(&dw->tasklet);
1050
1051 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1052 chan.device_node) {
1053 list_del(&dwc->chan.device_node);
1054 channel_clear_bit(dw, CH_EN, dwc->mask);
1055 }
1056
1057 clk_disable(dw->clk);
1058 clk_put(dw->clk);
1059
1060 iounmap(dw->regs);
1061 dw->regs = NULL;
1062
1063 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1064 release_mem_region(io->start, DW_REGLEN);
1065
1066 kfree(dw);
1067
1068 return 0;
1069}
1070
1071static void dw_shutdown(struct platform_device *pdev)
1072{
1073 struct dw_dma *dw = platform_get_drvdata(pdev);
1074
1075 dw_dma_off(platform_get_drvdata(pdev));
1076 clk_disable(dw->clk);
1077}
1078
1079static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
1080{
1081 struct dw_dma *dw = platform_get_drvdata(pdev);
1082
1083 dw_dma_off(platform_get_drvdata(pdev));
1084 clk_disable(dw->clk);
1085 return 0;
1086}
1087
1088static int dw_resume_early(struct platform_device *pdev)
1089{
1090 struct dw_dma *dw = platform_get_drvdata(pdev);
1091
1092 clk_enable(dw->clk);
1093 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1094 return 0;
1095
1096}
1097
1098static struct platform_driver dw_driver = {
1099 .remove = __exit_p(dw_remove),
1100 .shutdown = dw_shutdown,
1101 .suspend_late = dw_suspend_late,
1102 .resume_early = dw_resume_early,
1103 .driver = {
1104 .name = "dw_dmac",
1105 },
1106};
1107
1108static int __init dw_init(void)
1109{
1110 return platform_driver_probe(&dw_driver, dw_probe);
1111}
1112module_init(dw_init);
1113
1114static void __exit dw_exit(void)
1115{
1116 platform_driver_unregister(&dw_driver);
1117}
1118module_exit(dw_exit);
1119
1120MODULE_LICENSE("GPL v2");
1121MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1122MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
new file mode 100644
index 000000000000..00fdd187bb0c
--- /dev/null
+++ b/drivers/dma/dw_dmac_regs.h
@@ -0,0 +1,225 @@
1/*
2 * Driver for the Synopsys DesignWare AHB DMA Controller
3 *
4 * Copyright (C) 2005-2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/dw_dmac.h>
12
13#define DW_DMA_MAX_NR_CHANNELS 8
14
15/*
16 * Redefine this macro to handle differences between 32- and 64-bit
17 * addressing, big vs. little endian, etc.
18 */
19#define DW_REG(name) u32 name; u32 __pad_##name
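/*
 * Purely hypothetical illustration of such a redefinition (not used by
 * this driver): a 64-bit little-endian port could collapse the pair
 * into a single slot, e.g.
 *	#define DW_REG(name)	u64 name
 * keeping every register 8 bytes apart without an explicit pad word.
 */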
20
21/* Hardware register definitions. */
22struct dw_dma_chan_regs {
23 DW_REG(SAR); /* Source Address Register */
24 DW_REG(DAR); /* Destination Address Register */
25 DW_REG(LLP); /* Linked List Pointer */
26 u32 CTL_LO; /* Control Register Low */
27 u32 CTL_HI; /* Control Register High */
28 DW_REG(SSTAT);
29 DW_REG(DSTAT);
30 DW_REG(SSTATAR);
31 DW_REG(DSTATAR);
32 u32 CFG_LO; /* Configuration Register Low */
33 u32 CFG_HI; /* Configuration Register High */
34 DW_REG(SGR);
35 DW_REG(DSR);
36};
37
38struct dw_dma_irq_regs {
39 DW_REG(XFER);
40 DW_REG(BLOCK);
41 DW_REG(SRC_TRAN);
42 DW_REG(DST_TRAN);
43 DW_REG(ERROR);
44};
45
46struct dw_dma_regs {
47 /* per-channel registers */
48 struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
49
50 /* irq handling */
51 struct dw_dma_irq_regs RAW; /* r */
52 struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
53 struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
54 struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
55
56 DW_REG(STATUS_INT); /* r */
57
58 /* software handshaking */
59 DW_REG(REQ_SRC);
60 DW_REG(REQ_DST);
61 DW_REG(SGL_REQ_SRC);
62 DW_REG(SGL_REQ_DST);
63 DW_REG(LAST_SRC);
64 DW_REG(LAST_DST);
65
66 /* miscellaneous */
67 DW_REG(CFG);
68 DW_REG(CH_EN);
69 DW_REG(ID);
70 DW_REG(TEST);
71
72 /* optional encoded params, 0x3c8..0x3 */
73};
74
75/* Bitfields in CTL_LO */
76#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
77#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
78#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
79#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
80#define DWC_CTLL_DST_DEC (1<<7)
81#define DWC_CTLL_DST_FIX (2<<7)
82#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */
83#define DWC_CTLL_SRC_DEC (1<<9)
84#define DWC_CTLL_SRC_FIX (2<<9)
85#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
90#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
91#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
92#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
93/* plus 4 transfer types for peripheral-as-flow-controller */
94#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
95#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
96#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
97#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
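/*
 * Illustrative composition (this mirrors dwc_prep_dma_memcpy() in
 * dw_dmac.c): a word-aligned memory-to-memory copy programs
 *	DWC_CTLL_DST_WIDTH(2) | DWC_CTLL_SRC_WIDTH(2)
 *	| DWC_CTLL_DST_INC | DWC_CTLL_SRC_INC | DWC_CTLL_FC_M2M
 * on top of the driver's DWC_DEFAULT_CTLLO burst/master/LLP settings.
 */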
98
99/* Bitfields in CTL_HI */
100#define DWC_CTLH_DONE 0x00001000
101#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
102
103/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
104#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
105#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* fifo drained? (status) */
106#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
107#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
108#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
109#define DWC_CFGL_RELOAD_SAR (1 << 30)
110#define DWC_CFGL_RELOAD_DAR (1 << 31)
111
112/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
113#define DWC_CFGH_DS_UPD_EN (1 << 5)
114#define DWC_CFGH_SS_UPD_EN (1 << 6)
115
116/* Bitfields in SGR */
117#define DWC_SGR_SGI(x) ((x) << 0)
118#define DWC_SGR_SGC(x) ((x) << 20)
119
120/* Bitfields in DSR */
121#define DWC_DSR_DSI(x) ((x) << 0)
122#define DWC_DSR_DSC(x) ((x) << 20)
123
124/* Bitfields in CFG */
125#define DW_CFG_DMA_EN (1 << 0)
126
127#define DW_REGLEN 0x400
128
129struct dw_dma_chan {
130 struct dma_chan chan;
131 void __iomem *ch_regs;
132 u8 mask;
133
134 spinlock_t lock;
135
136 /* these other elements are all protected by lock */
137 dma_cookie_t completed;
138 struct list_head active_list;
139 struct list_head queue;
140 struct list_head free_list;
141
142 struct dw_dma_slave *dws;
143
144 unsigned int descs_allocated;
145};
146
147static inline struct dw_dma_chan_regs __iomem *
148__dwc_regs(struct dw_dma_chan *dwc)
149{
150 return dwc->ch_regs;
151}
152
153#define channel_readl(dwc, name) \
154 __raw_readl(&(__dwc_regs(dwc)->name))
155#define channel_writel(dwc, name, val) \
156 __raw_writel((val), &(__dwc_regs(dwc)->name))
157
158static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
159{
160 return container_of(chan, struct dw_dma_chan, chan);
161}
162
163
164struct dw_dma {
165 struct dma_device dma;
166 void __iomem *regs;
167 struct tasklet_struct tasklet;
168 struct clk *clk;
169
170 u8 all_chan_mask;
171
172 struct dw_dma_chan chan[0];
173};
174
175static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
176{
177 return dw->regs;
178}
179
180#define dma_readl(dw, name) \
181 __raw_readl(&(__dw_regs(dw)->name))
182#define dma_writel(dw, name, val) \
183 __raw_writel((val), &(__dw_regs(dw)->name))
184
185#define channel_set_bit(dw, reg, mask) \
186 dma_writel(dw, reg, ((mask) << 8) | (mask))
187#define channel_clear_bit(dw, reg, mask) \
188 dma_writel(dw, reg, ((mask) << 8) | 0)
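/*
 * Example: channel_set_bit(dw, CH_EN, 1 << 0) writes the value 0x0101;
 * the high byte acts as a per-channel write-enable mask, so only
 * channel 0's enable bit is changed and the other channels' bits are
 * left untouched.
 */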
189
190static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
191{
192 return container_of(ddev, struct dw_dma, dma);
193}
194
195/* LLI == Linked List Item; a.k.a. DMA block descriptor */
196struct dw_lli {
197 /* values that are not changed by hardware */
198 dma_addr_t sar;
199 dma_addr_t dar;
200 dma_addr_t llp; /* chain to next lli */
201 u32 ctllo;
202 /* values that may get written back: */
203 u32 ctlhi;
204 /* sstat and dstat can snapshot peripheral register state.
205 * silicon config may discard either or both...
206 */
207 u32 sstat;
208 u32 dstat;
209};
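/*
 * The driver builds multi-block transfers as a chain of these: each
 * lli.llp is set to the DMA address (txd.phys) of the next dw_lli, and
 * the last block's llp is written as 0 (see dwc_prep_dma_memcpy() and
 * dwc_prep_slave_sg()).
 */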
210
211struct dw_desc {
212 /* FIRST values the hardware uses */
213 struct dw_lli lli;
214
215 /* THEN values for driver housekeeping */
216 struct list_head desc_node;
217 struct dma_async_tx_descriptor txd;
218 size_t len;
219};
220
221static inline struct dw_desc *
222txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
223{
224 return container_of(txd, struct dw_desc, txd);
225}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 054eabffc185..c0059ca58340 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
366 * 366 *
367 * Return - The number of descriptors allocated. 367 * Return - The number of descriptors allocated.
368 */ 368 */
369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) 369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
370 struct dma_client *client)
370{ 371{
371 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 372 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
372 LIST_HEAD(tmp_list); 373 LIST_HEAD(tmp_list);
@@ -809,8 +810,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
809 if (!src) { 810 if (!src) {
810 dev_err(fsl_chan->dev, 811 dev_err(fsl_chan->dev,
811 "selftest: Cannot alloc memory for test!\n"); 812 "selftest: Cannot alloc memory for test!\n");
812 err = -ENOMEM; 813 return -ENOMEM;
813 goto out;
814 } 814 }
815 815
816 dest = src + test_size; 816 dest = src + test_size;
@@ -820,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
820 820
821 chan = &fsl_chan->common; 821 chan = &fsl_chan->common;
822 822
823 if (fsl_dma_alloc_chan_resources(chan) < 1) { 823 if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
824 dev_err(fsl_chan->dev, 824 dev_err(fsl_chan->dev,
825 "selftest: Cannot alloc resources for DMA\n"); 825 "selftest: Cannot alloc resources for DMA\n");
826 err = -ENODEV; 826 err = -ENODEV;
@@ -842,13 +842,13 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
842 if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { 842 if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
843 dev_err(fsl_chan->dev, "selftest: Time out!\n"); 843 dev_err(fsl_chan->dev, "selftest: Time out!\n");
844 err = -ENODEV; 844 err = -ENODEV;
845 goto out; 845 goto free_resources;
846 } 846 }
847 847
848 /* Test free and re-alloc channel resources */ 848 /* Test free and re-alloc channel resources */
849 fsl_dma_free_chan_resources(chan); 849 fsl_dma_free_chan_resources(chan);
850 850
851 if (fsl_dma_alloc_chan_resources(chan) < 1) { 851 if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
852 dev_err(fsl_chan->dev, 852 dev_err(fsl_chan->dev,
853 "selftest: Cannot alloc resources for DMA\n"); 853 "selftest: Cannot alloc resources for DMA\n");
854 err = -ENODEV; 854 err = -ENODEV;
@@ -927,8 +927,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
927 if (!new_fsl_chan) { 927 if (!new_fsl_chan) {
928 dev_err(&dev->dev, "No free memory for allocating " 928 dev_err(&dev->dev, "No free memory for allocating "
929 "dma channels!\n"); 929 "dma channels!\n");
930 err = -ENOMEM; 930 return -ENOMEM;
931 goto err;
932 } 931 }
933 932
934 /* get dma channel register base */ 933 /* get dma channel register base */
@@ -936,7 +935,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
936 if (err) { 935 if (err) {
937 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 936 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
938 dev->node->full_name); 937 dev->node->full_name);
939 goto err; 938 goto err_no_reg;
940 } 939 }
941 940
942 new_fsl_chan->feature = *(u32 *)match->data; 941 new_fsl_chan->feature = *(u32 *)match->data;
@@ -958,7 +957,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
958 dev_err(&dev->dev, "There is no %d channel!\n", 957 dev_err(&dev->dev, "There is no %d channel!\n",
959 new_fsl_chan->id); 958 new_fsl_chan->id);
960 err = -EINVAL; 959 err = -EINVAL;
961 goto err; 960 goto err_no_chan;
962 } 961 }
963 fdev->chan[new_fsl_chan->id] = new_fsl_chan; 962 fdev->chan[new_fsl_chan->id] = new_fsl_chan;
964 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, 963 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
@@ -997,23 +996,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
997 if (err) { 996 if (err) {
998 dev_err(&dev->dev, "DMA channel %s request_irq error " 997 dev_err(&dev->dev, "DMA channel %s request_irq error "
999 "with return %d\n", dev->node->full_name, err); 998 "with return %d\n", dev->node->full_name, err);
1000 goto err; 999 goto err_no_irq;
1001 } 1000 }
1002 } 1001 }
1003 1002
1004 err = fsl_dma_self_test(new_fsl_chan); 1003 err = fsl_dma_self_test(new_fsl_chan);
1005 if (err) 1004 if (err)
1006 goto err; 1005 goto err_self_test;
1007 1006
1008 dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, 1007 dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
1009 match->compatible, new_fsl_chan->irq); 1008 match->compatible, new_fsl_chan->irq);
1010 1009
1011 return 0; 1010 return 0;
1012err: 1011
1013 dma_halt(new_fsl_chan); 1012err_self_test:
1014 iounmap(new_fsl_chan->reg_base);
1015 free_irq(new_fsl_chan->irq, new_fsl_chan); 1013 free_irq(new_fsl_chan->irq, new_fsl_chan);
1014err_no_irq:
1016 list_del(&new_fsl_chan->common.device_node); 1015 list_del(&new_fsl_chan->common.device_node);
1016err_no_chan:
1017 iounmap(new_fsl_chan->reg_base);
1018err_no_reg:
1017 kfree(new_fsl_chan); 1019 kfree(new_fsl_chan);
1018 return err; 1020 return err;
1019} 1021}
@@ -1054,8 +1056,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1054 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); 1056 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
1055 if (!fdev) { 1057 if (!fdev) {
1056 dev_err(&dev->dev, "No enough memory for 'priv'\n"); 1058 dev_err(&dev->dev, "No enough memory for 'priv'\n");
1057 err = -ENOMEM; 1059 return -ENOMEM;
1058 goto err;
1059 } 1060 }
1060 fdev->dev = &dev->dev; 1061 fdev->dev = &dev->dev;
1061 INIT_LIST_HEAD(&fdev->common.channels); 1062 INIT_LIST_HEAD(&fdev->common.channels);
@@ -1065,7 +1066,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1065 if (err) { 1066 if (err) {
1066 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 1067 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
1067 dev->node->full_name); 1068 dev->node->full_name);
1068 goto err; 1069 goto err_no_reg;
1069 } 1070 }
1070 1071
1071 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 1072 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
@@ -1103,6 +1104,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1103 1104
1104err: 1105err:
1105 iounmap(fdev->reg_base); 1106 iounmap(fdev->reg_base);
1107err_no_reg:
1106 kfree(fdev); 1108 kfree(fdev);
1107 return err; 1109 return err;
1108} 1110}
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 16e0fd8facfb..9b16a3af9a0a 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -47,6 +47,16 @@ static struct pci_device_id ioat_pci_tbl[] = {
47 47
48 /* I/OAT v2 platforms */ 48 /* I/OAT v2 platforms */
49 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, 49 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
50
51 /* I/OAT v3 platforms */
52 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
53 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
54 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
55 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
56 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
57 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
58 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
59 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
50 { 0, } 60 { 0, }
51}; 61};
52 62
@@ -83,6 +93,11 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
83 if (device->dma && ioat_dca_enabled) 93 if (device->dma && ioat_dca_enabled)
84 device->dca = ioat2_dca_init(pdev, iobase); 94 device->dca = ioat2_dca_init(pdev, iobase);
85 break; 95 break;
96 case IOAT_VER_3_0:
97 device->dma = ioat_dma_probe(pdev, iobase);
98 if (device->dma && ioat_dca_enabled)
99 device->dca = ioat3_dca_init(pdev, iobase);
100 break;
86 default: 101 default:
87 err = -ENODEV; 102 err = -ENODEV;
88 break; 103 break;
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 9e922760b7ff..6cf622da0286 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -37,12 +37,18 @@
37#include "ioatdma_registers.h" 37#include "ioatdma_registers.h"
38 38
39/* 39/*
40 * Bit 16 of a tag map entry is the "valid" bit, if it is set then bits 0:15 40 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
41 * contain the bit number of the APIC ID to map into the DCA tag. If the valid 41 * contain the bit number of the APIC ID to map into the DCA tag. If the valid
42 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. 42 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
43 */ 43 */
44#define DCA_TAG_MAP_VALID 0x80 44#define DCA_TAG_MAP_VALID 0x80
45 45
46#define DCA3_TAG_MAP_BIT_TO_INV 0x80
47#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
48#define DCA3_TAG_MAP_LITERAL_VAL 0x1
49
50#define DCA_TAG_MAP_MASK 0xDF
51
46/* 52/*
47 * "Legacy" DCA systems do not implement the DCA register set in the 53 * "Legacy" DCA systems do not implement the DCA register set in the
48 * I/OAT device. Software needs direct support for their tag mappings. 54 * I/OAT device. Software needs direct support for their tag mappings.
@@ -95,6 +101,7 @@ struct ioat_dca_slot {
95}; 101};
96 102
97#define IOAT_DCA_MAX_REQ 6 103#define IOAT_DCA_MAX_REQ 6
104#define IOAT3_DCA_MAX_REQ 2
98 105
99struct ioat_dca_priv { 106struct ioat_dca_priv {
100 void __iomem *iobase; 107 void __iomem *iobase;
@@ -171,7 +178,9 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
171 return -ENODEV; 178 return -ENODEV;
172} 179}
173 180
174static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu) 181static u8 ioat_dca_get_tag(struct dca_provider *dca,
182 struct device *dev,
183 int cpu)
175{ 184{
176 struct ioat_dca_priv *ioatdca = dca_priv(dca); 185 struct ioat_dca_priv *ioatdca = dca_priv(dca);
177 int i, apic_id, bit, value; 186 int i, apic_id, bit, value;
@@ -193,10 +202,26 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
193 return tag; 202 return tag;
194} 203}
195 204
205static int ioat_dca_dev_managed(struct dca_provider *dca,
206 struct device *dev)
207{
208 struct ioat_dca_priv *ioatdca = dca_priv(dca);
209 struct pci_dev *pdev;
210 int i;
211
212 pdev = to_pci_dev(dev);
213 for (i = 0; i < ioatdca->max_requesters; i++) {
214 if (ioatdca->req_slots[i].pdev == pdev)
215 return 1;
216 }
217 return 0;
218}
219
196static struct dca_ops ioat_dca_ops = { 220static struct dca_ops ioat_dca_ops = {
197 .add_requester = ioat_dca_add_requester, 221 .add_requester = ioat_dca_add_requester,
198 .remove_requester = ioat_dca_remove_requester, 222 .remove_requester = ioat_dca_remove_requester,
199 .get_tag = ioat_dca_get_tag, 223 .get_tag = ioat_dca_get_tag,
224 .dev_managed = ioat_dca_dev_managed,
200}; 225};
201 226
202 227
@@ -207,6 +232,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
207 u8 *tag_map = NULL; 232 u8 *tag_map = NULL;
208 int i; 233 int i;
209 int err; 234 int err;
235 u8 version;
236 u8 max_requesters;
210 237
211 if (!system_has_dca_enabled(pdev)) 238 if (!system_has_dca_enabled(pdev))
212 return NULL; 239 return NULL;
@@ -237,15 +264,20 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
237 if (tag_map == NULL) 264 if (tag_map == NULL)
238 return NULL; 265 return NULL;
239 266
267 version = readb(iobase + IOAT_VER_OFFSET);
268 if (version == IOAT_VER_3_0)
269 max_requesters = IOAT3_DCA_MAX_REQ;
270 else
271 max_requesters = IOAT_DCA_MAX_REQ;
272
240 dca = alloc_dca_provider(&ioat_dca_ops, 273 dca = alloc_dca_provider(&ioat_dca_ops,
241 sizeof(*ioatdca) + 274 sizeof(*ioatdca) +
242 (sizeof(struct ioat_dca_slot) * IOAT_DCA_MAX_REQ)); 275 (sizeof(struct ioat_dca_slot) * max_requesters));
243 if (!dca) 276 if (!dca)
244 return NULL; 277 return NULL;
245 278
246 ioatdca = dca_priv(dca); 279 ioatdca = dca_priv(dca);
247 ioatdca->max_requesters = IOAT_DCA_MAX_REQ; 280 ioatdca->max_requesters = max_requesters;
248
249 ioatdca->dca_base = iobase + 0x54; 281 ioatdca->dca_base = iobase + 0x54;
250 282
251 /* copy over the APIC ID to DCA tag mapping */ 283 /* copy over the APIC ID to DCA tag mapping */
@@ -323,11 +355,13 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
323 return -ENODEV; 355 return -ENODEV;
324} 356}
325 357
326static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu) 358static u8 ioat2_dca_get_tag(struct dca_provider *dca,
359 struct device *dev,
360 int cpu)
327{ 361{
328 u8 tag; 362 u8 tag;
329 363
330 tag = ioat_dca_get_tag(dca, cpu); 364 tag = ioat_dca_get_tag(dca, dev, cpu);
331 tag = (~tag) & 0x1F; 365 tag = (~tag) & 0x1F;
332 return tag; 366 return tag;
333} 367}
@@ -336,6 +370,7 @@ static struct dca_ops ioat2_dca_ops = {
336 .add_requester = ioat2_dca_add_requester, 370 .add_requester = ioat2_dca_add_requester,
337 .remove_requester = ioat2_dca_remove_requester, 371 .remove_requester = ioat2_dca_remove_requester,
338 .get_tag = ioat2_dca_get_tag, 372 .get_tag = ioat2_dca_get_tag,
373 .dev_managed = ioat_dca_dev_managed,
339}; 374};
340 375
341static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) 376static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
@@ -425,3 +460,198 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
425 460
426 return dca; 461 return dca;
427} 462}
463
464static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
465{
466 struct ioat_dca_priv *ioatdca = dca_priv(dca);
467 struct pci_dev *pdev;
468 int i;
469 u16 id;
470 u16 global_req_table;
471
472 /* This implementation only supports PCI-Express */
473 if (dev->bus != &pci_bus_type)
474 return -ENODEV;
475 pdev = to_pci_dev(dev);
476 id = dcaid_from_pcidev(pdev);
477
478 if (ioatdca->requester_count == ioatdca->max_requesters)
479 return -ENODEV;
480
481 for (i = 0; i < ioatdca->max_requesters; i++) {
482 if (ioatdca->req_slots[i].pdev == NULL) {
483 /* found an empty slot */
484 ioatdca->requester_count++;
485 ioatdca->req_slots[i].pdev = pdev;
486 ioatdca->req_slots[i].rid = id;
487 global_req_table =
488 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
489 writel(id | IOAT_DCA_GREQID_VALID,
490 ioatdca->iobase + global_req_table + (i * 4));
491 return i;
492 }
493 }
494 /* Error, ioatdca->requester_count is out of whack */
495 return -EFAULT;
496}
497
498static int ioat3_dca_remove_requester(struct dca_provider *dca,
499 struct device *dev)
500{
501 struct ioat_dca_priv *ioatdca = dca_priv(dca);
502 struct pci_dev *pdev;
503 int i;
504 u16 global_req_table;
505
506 /* This implementation only supports PCI-Express */
507 if (dev->bus != &pci_bus_type)
508 return -ENODEV;
509 pdev = to_pci_dev(dev);
510
511 for (i = 0; i < ioatdca->max_requesters; i++) {
512 if (ioatdca->req_slots[i].pdev == pdev) {
513 global_req_table =
514 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
515 writel(0, ioatdca->iobase + global_req_table + (i * 4));
516 ioatdca->req_slots[i].pdev = NULL;
517 ioatdca->req_slots[i].rid = 0;
518 ioatdca->requester_count--;
519 return i;
520 }
521 }
522 return -ENODEV;
523}
524
525static u8 ioat3_dca_get_tag(struct dca_provider *dca,
526 struct device *dev,
527 int cpu)
528{
529 u8 tag;
530
531 struct ioat_dca_priv *ioatdca = dca_priv(dca);
532 int i, apic_id, bit, value;
533 u8 entry;
534
535 tag = 0;
536 apic_id = cpu_physical_id(cpu);
537
538 for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
539 entry = ioatdca->tag_map[i];
540 if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
541 bit = entry &
542 ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
543 value = (apic_id & (1 << bit)) ? 1 : 0;
544 } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
545 bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
546 value = (apic_id & (1 << bit)) ? 0 : 1;
547 } else {
548 value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
549 }
550 tag |= (value << i);
551 }
552
553 return tag;
554}
555
556static struct dca_ops ioat3_dca_ops = {
557 .add_requester = ioat3_dca_add_requester,
558 .remove_requester = ioat3_dca_remove_requester,
559 .get_tag = ioat3_dca_get_tag,
560 .dev_managed = ioat_dca_dev_managed,
561};
562
563static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
564{
565 int slots = 0;
566 u32 req;
567 u16 global_req_table;
568
569 global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
570 if (global_req_table == 0)
571 return 0;
572
573 do {
574 req = readl(iobase + global_req_table + (slots * sizeof(u32)));
575 slots++;
576 } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
577
578 return slots;
579}
580
581struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
582{
583 struct dca_provider *dca;
584 struct ioat_dca_priv *ioatdca;
585 int slots;
586 int i;
587 int err;
588 u16 dca_offset;
589 u16 csi_fsb_control;
590 u16 pcie_control;
591 u8 bit;
592
593 union {
594 u64 full;
595 struct {
596 u32 low;
597 u32 high;
598 };
599 } tag_map;
600
601 if (!system_has_dca_enabled(pdev))
602 return NULL;
603
604 dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
605 if (dca_offset == 0)
606 return NULL;
607
608 slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
609 if (slots == 0)
610 return NULL;
611
612 dca = alloc_dca_provider(&ioat3_dca_ops,
613 sizeof(*ioatdca)
614 + (sizeof(struct ioat_dca_slot) * slots));
615 if (!dca)
616 return NULL;
617
618 ioatdca = dca_priv(dca);
619 ioatdca->iobase = iobase;
620 ioatdca->dca_base = iobase + dca_offset;
621 ioatdca->max_requesters = slots;
622
623 /* some bios might not know to turn these on */
624 csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
625 if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
626 csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
627 writew(csi_fsb_control,
628 ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
629 }
630 pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
631 if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
632 pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
633 writew(pcie_control,
634 ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
635 }
636
637
638 /* TODO version, compatibility and configuration checks */
639
640 /* copy out the APIC to DCA tag map */
641 tag_map.low =
642 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
643 tag_map.high =
644 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
645 for (i = 0; i < 8; i++) {
646 bit = tag_map.full >> (8 * i);
647 ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
648 }
649
650 err = register_dca_provider(dca, &pdev->dev);
651 if (err) {
652 free_dca_provider(dca);
653 return NULL;
654 }
655
656 return dca;
657}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 318e8a22d814..a52156e56886 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -32,6 +32,7 @@
32#include <linux/dmaengine.h> 32#include <linux/dmaengine.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/workqueue.h>
35#include "ioatdma.h" 36#include "ioatdma.h"
36#include "ioatdma_registers.h" 37#include "ioatdma_registers.h"
37#include "ioatdma_hw.h" 38#include "ioatdma_hw.h"
@@ -41,11 +42,23 @@
41#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) 42#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
42#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) 43#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
43 44
45#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
44static int ioat_pending_level = 4; 46static int ioat_pending_level = 4;
45module_param(ioat_pending_level, int, 0644); 47module_param(ioat_pending_level, int, 0644);
46MODULE_PARM_DESC(ioat_pending_level, 48MODULE_PARM_DESC(ioat_pending_level,
47 "high-water mark for pushing ioat descriptors (default: 4)"); 49 "high-water mark for pushing ioat descriptors (default: 4)");
48 50
51#define RESET_DELAY msecs_to_jiffies(100)
52#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000))
53static void ioat_dma_chan_reset_part2(struct work_struct *work);
54static void ioat_dma_chan_watchdog(struct work_struct *work);
55
56/*
57 * workaround for IOAT ver.3.0 null descriptor issue
58 * (channel returns error when size is 0)
59 */
60#define NULL_DESC_BUFFER_SIZE 1
61
49/* internal functions */ 62/* internal functions */
50static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); 63static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); 64static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
@@ -122,6 +135,38 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
122 int i; 135 int i;
123 struct ioat_dma_chan *ioat_chan; 136 struct ioat_dma_chan *ioat_chan;
124 137
138 /*
139 * IOAT ver.3 workarounds
140 */
141 if (device->version == IOAT_VER_3_0) {
142 u32 chan_err_mask;
143 u16 dev_id;
144 u32 dmauncerrsts;
145
146 /*
147 * Write CHANERRMSK_INT with 3E07h to mask out the errors
148 * that can cause stability issues for IOAT ver.3
149 */
150 chan_err_mask = 0x3E07;
151 pci_write_config_dword(device->pdev,
152 IOAT_PCI_CHANERRMASK_INT_OFFSET,
153 chan_err_mask);
154
155 /*
156 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
157 * (workaround for spurious config parity error after restart)
158 */
159 pci_read_config_word(device->pdev,
160 IOAT_PCI_DEVICE_ID_OFFSET,
161 &dev_id);
162 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
163 dmauncerrsts = 0x10;
164 pci_write_config_dword(device->pdev,
165 IOAT_PCI_DMAUNCERRSTS_OFFSET,
166 dmauncerrsts);
167 }
168 }
169
125 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); 170 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); 171 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
127 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); 172 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
@@ -137,6 +182,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
137 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); 182 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138 ioat_chan->xfercap = xfercap; 183 ioat_chan->xfercap = xfercap;
139 ioat_chan->desccount = 0; 184 ioat_chan->desccount = 0;
185 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
140 if (ioat_chan->device->version != IOAT_VER_1_2) { 186 if (ioat_chan->device->version != IOAT_VER_1_2) {
141 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE 187 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142 | IOAT_DMA_DCA_ANY_CPU, 188 | IOAT_DMA_DCA_ANY_CPU,
@@ -175,7 +221,7 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
175{ 221{
176 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 222 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
177 223
178 if (ioat_chan->pending != 0) { 224 if (ioat_chan->pending > 0) {
179 spin_lock_bh(&ioat_chan->desc_lock); 225 spin_lock_bh(&ioat_chan->desc_lock);
180 __ioat1_dma_memcpy_issue_pending(ioat_chan); 226 __ioat1_dma_memcpy_issue_pending(ioat_chan);
181 spin_unlock_bh(&ioat_chan->desc_lock); 227 spin_unlock_bh(&ioat_chan->desc_lock);
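The test change from "pending != 0" to "pending > 0" here (and in the ioat2 path in the next hunk) is deliberate: the reset logic added later in this patch parks the pending count at INT_MIN while a channel reset is in flight, so a positive-only test turns any concurrent issue_pending call into a no-op. A minimal sketch of that interaction, using only names that appear in this patch:

	/* reset side (see ioat_dma_reset_channel further down) */
	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->pending = INT_MIN;		/* reset in flight */
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* a concurrent caller of issue_pending now does nothing */
	if (ioat_chan->pending > 0)		/* false while pending == INT_MIN */
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
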
@@ -194,13 +240,228 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
194{ 240{
195 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 241 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
196 242
197 if (ioat_chan->pending != 0) { 243 if (ioat_chan->pending > 0) {
198 spin_lock_bh(&ioat_chan->desc_lock); 244 spin_lock_bh(&ioat_chan->desc_lock);
199 __ioat2_dma_memcpy_issue_pending(ioat_chan); 245 __ioat2_dma_memcpy_issue_pending(ioat_chan);
200 spin_unlock_bh(&ioat_chan->desc_lock); 246 spin_unlock_bh(&ioat_chan->desc_lock);
201 } 247 }
202} 248}
203 249
250
251/**
252 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
253 */
254static void ioat_dma_chan_reset_part2(struct work_struct *work)
255{
256 struct ioat_dma_chan *ioat_chan =
257 container_of(work, struct ioat_dma_chan, work.work);
258 struct ioat_desc_sw *desc;
259
260 spin_lock_bh(&ioat_chan->cleanup_lock);
261 spin_lock_bh(&ioat_chan->desc_lock);
262
263 ioat_chan->completion_virt->low = 0;
264 ioat_chan->completion_virt->high = 0;
265 ioat_chan->pending = 0;
266
267 /*
268 * count the descriptors waiting, and be sure to do it
269 * right for both the CB1 line and the CB2 ring
270 */
271 ioat_chan->dmacount = 0;
272 if (ioat_chan->used_desc.prev) {
273 desc = to_ioat_desc(ioat_chan->used_desc.prev);
274 do {
275 ioat_chan->dmacount++;
276 desc = to_ioat_desc(desc->node.next);
277 } while (&desc->node != ioat_chan->used_desc.next);
278 }
279
280 /*
281 * write the new starting descriptor address
282 * this puts channel engine into ARMED state
283 */
284 desc = to_ioat_desc(ioat_chan->used_desc.prev);
285 switch (ioat_chan->device->version) {
286 case IOAT_VER_1_2:
287 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
288 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
289 writel(((u64) desc->async_tx.phys) >> 32,
290 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
291
292 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
293 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
294 break;
295 case IOAT_VER_2_0:
296 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
297 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
298 writel(((u64) desc->async_tx.phys) >> 32,
299 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
300
301 /* tell the engine to go with what's left to be done */
302 writew(ioat_chan->dmacount,
303 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
304
305 break;
306 }
307 dev_err(&ioat_chan->device->pdev->dev,
308 "chan%d reset - %d descs waiting, %d total desc\n",
309 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
310
311 spin_unlock_bh(&ioat_chan->desc_lock);
312 spin_unlock_bh(&ioat_chan->cleanup_lock);
313}
314
315/**
316 * ioat_dma_reset_channel - restart a channel
317 * @ioat_chan: IOAT DMA channel handle
318 */
319static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
320{
321 u32 chansts, chanerr;
322
323 if (!ioat_chan->used_desc.prev)
324 return;
325
326 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
327 chansts = (ioat_chan->completion_virt->low
328 & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
329 if (chanerr) {
330 dev_err(&ioat_chan->device->pdev->dev,
331 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
332 chan_num(ioat_chan), chansts, chanerr);
333 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
334 }
335
336 /*
337 * whack it upside the head with a reset
338 * and wait for things to settle out.
339 * force the pending count to a really big negative
340 * to make sure no one forces an issue_pending
341 * while we're waiting.
342 */
343
344 spin_lock_bh(&ioat_chan->desc_lock);
345 ioat_chan->pending = INT_MIN;
346 writeb(IOAT_CHANCMD_RESET,
347 ioat_chan->reg_base
348 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
349 spin_unlock_bh(&ioat_chan->desc_lock);
350
351 /* schedule the 2nd half instead of sleeping a long time */
352 schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
353}
354
355/**
356 * ioat_dma_chan_watchdog - watch for stuck channels
357 */
358static void ioat_dma_chan_watchdog(struct work_struct *work)
359{
360 struct ioatdma_device *device =
361 container_of(work, struct ioatdma_device, work.work);
362 struct ioat_dma_chan *ioat_chan;
363 int i;
364
365 union {
366 u64 full;
367 struct {
368 u32 low;
369 u32 high;
370 };
371 } completion_hw;
372 unsigned long compl_desc_addr_hw;
373
374 for (i = 0; i < device->common.chancnt; i++) {
375 ioat_chan = ioat_lookup_chan_by_index(device, i);
376
377 if (ioat_chan->device->version == IOAT_VER_1_2
378 /* have we started processing anything yet */
379 && ioat_chan->last_completion
380 /* have we completed any since last watchdog cycle? */
381 && (ioat_chan->last_completion ==
382 ioat_chan->watchdog_completion)
 383 /* has TCP been stuck on one cookie since the last watchdog? */
384 && (ioat_chan->watchdog_tcp_cookie ==
385 ioat_chan->watchdog_last_tcp_cookie)
386 && (ioat_chan->watchdog_tcp_cookie !=
387 ioat_chan->completed_cookie)
388 /* is there something in the chain to be processed? */
389 /* CB1 chain always has at least the last one processed */
390 && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
391 && ioat_chan->pending == 0) {
392
 393 /*
 394  * check the CHANSTS register for the completed
 395  * descriptor address:
 396  * if it differs from the completion writeback,
 397  * is non-zero,
 398  * and has changed since the last watchdog pass,
 399  *    assume the channel itself
 400  *    is still working correctly
 401  *    and only the completion writeback is stale;
 402  *    update the completion writeback
 403  *    with the actual CHANSTS value.
 404  * otherwise,
 405  *    try resetting the channel.
 406  */
407
408 completion_hw.low = readl(ioat_chan->reg_base +
409 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
410 completion_hw.high = readl(ioat_chan->reg_base +
411 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
412#if (BITS_PER_LONG == 64)
413 compl_desc_addr_hw =
414 completion_hw.full
415 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
416#else
417 compl_desc_addr_hw =
418 completion_hw.low & IOAT_LOW_COMPLETION_MASK;
419#endif
420
421 if ((compl_desc_addr_hw != 0)
422 && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
423 && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
424 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
425 ioat_chan->completion_virt->low = completion_hw.low;
426 ioat_chan->completion_virt->high = completion_hw.high;
427 } else {
428 ioat_dma_reset_channel(ioat_chan);
429 ioat_chan->watchdog_completion = 0;
430 ioat_chan->last_compl_desc_addr_hw = 0;
431 }
432
 433 /*
 434  * for version 2.0: if there are descriptors still waiting to be processed
 435  * and the last completion hasn't changed since the last watchdog pass,
 436  *    then, if they haven't reached the pending level,
 437  *    issue the pending descriptors to push them through;
 438  *    otherwise,
 439  *    try resetting the channel
 440  */
441 } else if (ioat_chan->device->version == IOAT_VER_2_0
442 && ioat_chan->used_desc.prev
443 && ioat_chan->last_completion
444 && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
445
446 if (ioat_chan->pending < ioat_pending_level)
447 ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
448 else {
449 ioat_dma_reset_channel(ioat_chan);
450 ioat_chan->watchdog_completion = 0;
451 }
452 } else {
453 ioat_chan->last_compl_desc_addr_hw = 0;
454 ioat_chan->watchdog_completion
455 = ioat_chan->last_completion;
456 }
457
458 ioat_chan->watchdog_last_tcp_cookie =
459 ioat_chan->watchdog_tcp_cookie;
460 }
461
462 schedule_delayed_work(&device->work, WATCHDOG_DELAY);
463}
464
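For orientation, the watchdog above is driven by the standard delayed-work pattern: it is armed once at probe time, re-arms itself at the end of every pass (the schedule_delayed_work() call just above), and is cancelled at remove time; the probe and remove hunks appear later in this patch. A condensed sketch, where the example_* wrappers are hypothetical and only collect the calls this patch actually adds:

	#include <linux/workqueue.h>

	/* probe: arm the watchdog (skipped for IOAT ver.3) */
	static void example_arm_watchdog(struct ioatdma_device *device)
	{
		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
		schedule_delayed_work(&device->work, WATCHDOG_DELAY);
	}

	/* remove: make sure no further watchdog passes run */
	static void example_stop_watchdog(struct ioatdma_device *device)
	{
		cancel_delayed_work(&device->work);
	}
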
204static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) 465static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
205{ 466{
206 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); 467 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -250,6 +511,13 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
250 prev = new; 511 prev = new;
251 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); 512 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
252 513
514 if (!new) {
515 dev_err(&ioat_chan->device->pdev->dev,
516 "tx submit failed\n");
517 spin_unlock_bh(&ioat_chan->desc_lock);
518 return -ENOMEM;
519 }
520
253 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 521 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
254 if (new->async_tx.callback) { 522 if (new->async_tx.callback) {
255 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 523 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
@@ -335,7 +603,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
335 desc_count++; 603 desc_count++;
336 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); 604 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
337 605
338 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 606 if (!new) {
607 dev_err(&ioat_chan->device->pdev->dev,
608 "tx submit failed\n");
609 spin_unlock_bh(&ioat_chan->desc_lock);
610 return -ENOMEM;
611 }
612
613 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
339 if (new->async_tx.callback) { 614 if (new->async_tx.callback) {
340 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 615 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
341 if (first != new) { 616 if (first != new) {
@@ -406,6 +681,7 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
406 desc_sw->async_tx.tx_submit = ioat1_tx_submit; 681 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
407 break; 682 break;
408 case IOAT_VER_2_0: 683 case IOAT_VER_2_0:
684 case IOAT_VER_3_0:
409 desc_sw->async_tx.tx_submit = ioat2_tx_submit; 685 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
410 break; 686 break;
411 } 687 }
@@ -452,7 +728,8 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
452 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors 728 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
453 * @chan: the channel to be filled out 729 * @chan: the channel to be filled out
454 */ 730 */
455static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) 731static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
732 struct dma_client *client)
456{ 733{
457 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 734 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
458 struct ioat_desc_sw *desc; 735 struct ioat_desc_sw *desc;
@@ -555,6 +832,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
555 } 832 }
556 break; 833 break;
557 case IOAT_VER_2_0: 834 case IOAT_VER_2_0:
835 case IOAT_VER_3_0:
558 list_for_each_entry_safe(desc, _desc, 836 list_for_each_entry_safe(desc, _desc,
559 ioat_chan->free_desc.next, node) { 837 ioat_chan->free_desc.next, node) {
560 list_del(&desc->node); 838 list_del(&desc->node);
@@ -585,6 +863,10 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
585 ioat_chan->last_completion = ioat_chan->completion_addr = 0; 863 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
586 ioat_chan->pending = 0; 864 ioat_chan->pending = 0;
587 ioat_chan->dmacount = 0; 865 ioat_chan->dmacount = 0;
866 ioat_chan->watchdog_completion = 0;
867 ioat_chan->last_compl_desc_addr_hw = 0;
868 ioat_chan->watchdog_tcp_cookie =
869 ioat_chan->watchdog_last_tcp_cookie = 0;
588} 870}
589 871
590/** 872/**
@@ -640,7 +922,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
640 922
641 /* set up the noop descriptor */ 923 /* set up the noop descriptor */
642 noop_desc = to_ioat_desc(ioat_chan->used_desc.next); 924 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
643 noop_desc->hw->size = 0; 925 /* set size to non-zero value (channel returns error when size is 0) */
926 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
644 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; 927 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
645 noop_desc->hw->src_addr = 0; 928 noop_desc->hw->src_addr = 0;
646 noop_desc->hw->dst_addr = 0; 929 noop_desc->hw->dst_addr = 0;
@@ -690,6 +973,7 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
690 return ioat1_dma_get_next_descriptor(ioat_chan); 973 return ioat1_dma_get_next_descriptor(ioat_chan);
691 break; 974 break;
692 case IOAT_VER_2_0: 975 case IOAT_VER_2_0:
976 case IOAT_VER_3_0:
693 return ioat2_dma_get_next_descriptor(ioat_chan); 977 return ioat2_dma_get_next_descriptor(ioat_chan);
694 break; 978 break;
695 } 979 }
@@ -716,8 +1000,12 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
716 new->src = dma_src; 1000 new->src = dma_src;
717 new->async_tx.flags = flags; 1001 new->async_tx.flags = flags;
718 return &new->async_tx; 1002 return &new->async_tx;
719 } else 1003 } else {
1004 dev_err(&ioat_chan->device->pdev->dev,
1005 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1006 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
720 return NULL; 1007 return NULL;
1008 }
721} 1009}
722 1010
723static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( 1011static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -744,8 +1032,13 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
744 new->src = dma_src; 1032 new->src = dma_src;
745 new->async_tx.flags = flags; 1033 new->async_tx.flags = flags;
746 return &new->async_tx; 1034 return &new->async_tx;
747 } else 1035 } else {
1036 spin_unlock_bh(&ioat_chan->desc_lock);
1037 dev_err(&ioat_chan->device->pdev->dev,
1038 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1039 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
748 return NULL; 1040 return NULL;
1041 }
749} 1042}
750 1043
751static void ioat_dma_cleanup_tasklet(unsigned long data) 1044static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -756,6 +1049,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
756 chan->reg_base + IOAT_CHANCTRL_OFFSET); 1049 chan->reg_base + IOAT_CHANCTRL_OFFSET);
757} 1050}
758 1051
1052static void
1053ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
1054{
1055 /*
1056 * yes we are unmapping both _page and _single
1057 * alloc'd regions with unmap_page. Is this
1058 * *really* that bad?
1059 */
1060 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
1061 pci_unmap_page(ioat_chan->device->pdev,
1062 pci_unmap_addr(desc, dst),
1063 pci_unmap_len(desc, len),
1064 PCI_DMA_FROMDEVICE);
1065
1066 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
1067 pci_unmap_page(ioat_chan->device->pdev,
1068 pci_unmap_addr(desc, src),
1069 pci_unmap_len(desc, len),
1070 PCI_DMA_TODEVICE);
1071}
1072
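ioat_dma_unmap() consults the DMA_COMPL_SKIP_DEST_UNMAP and DMA_COMPL_SKIP_SRC_UNMAP control flags, so a client that needs its mappings to survive completion can opt out of the driver-side unmap. A hedged client-side sketch; example_submit_copy_no_unmap() is a hypothetical helper, not part of this patch:

	#include <linux/dmaengine.h>

	/* submit a copy but keep both mappings alive; the caller is then
	 * responsible for unmapping dma_dest and dma_src itself */
	static dma_cookie_t example_submit_copy_no_unmap(struct dma_chan *chan,
			dma_addr_t dma_dest, dma_addr_t dma_src, size_t len)
	{
		unsigned long flags = DMA_COMPL_SKIP_DEST_UNMAP |
				      DMA_COMPL_SKIP_SRC_UNMAP;
		struct dma_async_tx_descriptor *tx;

		tx = chan->device->device_prep_dma_memcpy(chan, dma_dest,
							  dma_src, len, flags);
		if (!tx)
			return -ENOMEM;

		return tx->tx_submit(tx);
	}
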
759/** 1073/**
 760 * ioat_dma_memcpy_cleanup - clean up finished descriptors 1074
761 * @chan: ioat channel to be cleaned up 1075 * @chan: ioat channel to be cleaned up
@@ -799,11 +1113,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
799 1113
800 if (phys_complete == ioat_chan->last_completion) { 1114 if (phys_complete == ioat_chan->last_completion) {
801 spin_unlock_bh(&ioat_chan->cleanup_lock); 1115 spin_unlock_bh(&ioat_chan->cleanup_lock);
1116 /*
1117 * perhaps we're stuck so hard that the watchdog can't go off?
1118 * try to catch it after 2 seconds
1119 */
1120 if (ioat_chan->device->version != IOAT_VER_3_0) {
1121 if (time_after(jiffies,
1122 ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
1123 ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
1124 ioat_chan->last_completion_time = jiffies;
1125 }
1126 }
802 return; 1127 return;
803 } 1128 }
1129 ioat_chan->last_completion_time = jiffies;
804 1130
805 cookie = 0; 1131 cookie = 0;
806 spin_lock_bh(&ioat_chan->desc_lock); 1132 if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
1133 spin_unlock_bh(&ioat_chan->cleanup_lock);
1134 return;
1135 }
1136
807 switch (ioat_chan->device->version) { 1137 switch (ioat_chan->device->version) {
808 case IOAT_VER_1_2: 1138 case IOAT_VER_1_2:
809 list_for_each_entry_safe(desc, _desc, 1139 list_for_each_entry_safe(desc, _desc,
@@ -816,21 +1146,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
816 */ 1146 */
817 if (desc->async_tx.cookie) { 1147 if (desc->async_tx.cookie) {
818 cookie = desc->async_tx.cookie; 1148 cookie = desc->async_tx.cookie;
819 1149 ioat_dma_unmap(ioat_chan, desc);
820 /*
821 * yes we are unmapping both _page and _single
822 * alloc'd regions with unmap_page. Is this
823 * *really* that bad?
824 */
825 pci_unmap_page(ioat_chan->device->pdev,
826 pci_unmap_addr(desc, dst),
827 pci_unmap_len(desc, len),
828 PCI_DMA_FROMDEVICE);
829 pci_unmap_page(ioat_chan->device->pdev,
830 pci_unmap_addr(desc, src),
831 pci_unmap_len(desc, len),
832 PCI_DMA_TODEVICE);
833
834 if (desc->async_tx.callback) { 1150 if (desc->async_tx.callback) {
835 desc->async_tx.callback(desc->async_tx.callback_param); 1151 desc->async_tx.callback(desc->async_tx.callback_param);
836 desc->async_tx.callback = NULL; 1152 desc->async_tx.callback = NULL;
@@ -862,6 +1178,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
862 } 1178 }
863 break; 1179 break;
864 case IOAT_VER_2_0: 1180 case IOAT_VER_2_0:
1181 case IOAT_VER_3_0:
 865 /* has some other thread already cleaned up? */ 1182
866 if (ioat_chan->used_desc.prev == NULL) 1183 if (ioat_chan->used_desc.prev == NULL)
867 break; 1184 break;
@@ -889,16 +1206,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
889 if (desc->async_tx.cookie) { 1206 if (desc->async_tx.cookie) {
890 cookie = desc->async_tx.cookie; 1207 cookie = desc->async_tx.cookie;
891 desc->async_tx.cookie = 0; 1208 desc->async_tx.cookie = 0;
892 1209 ioat_dma_unmap(ioat_chan, desc);
893 pci_unmap_page(ioat_chan->device->pdev,
894 pci_unmap_addr(desc, dst),
895 pci_unmap_len(desc, len),
896 PCI_DMA_FROMDEVICE);
897 pci_unmap_page(ioat_chan->device->pdev,
898 pci_unmap_addr(desc, src),
899 pci_unmap_len(desc, len),
900 PCI_DMA_TODEVICE);
901
902 if (desc->async_tx.callback) { 1210 if (desc->async_tx.callback) {
903 desc->async_tx.callback(desc->async_tx.callback_param); 1211 desc->async_tx.callback(desc->async_tx.callback_param);
904 desc->async_tx.callback = NULL; 1212 desc->async_tx.callback = NULL;
@@ -943,6 +1251,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
943 1251
944 last_used = chan->cookie; 1252 last_used = chan->cookie;
945 last_complete = ioat_chan->completed_cookie; 1253 last_complete = ioat_chan->completed_cookie;
1254 ioat_chan->watchdog_tcp_cookie = cookie;
946 1255
947 if (done) 1256 if (done)
948 *done = last_complete; 1257 *done = last_complete;
@@ -973,10 +1282,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
973 spin_lock_bh(&ioat_chan->desc_lock); 1282 spin_lock_bh(&ioat_chan->desc_lock);
974 1283
975 desc = ioat_dma_get_next_descriptor(ioat_chan); 1284 desc = ioat_dma_get_next_descriptor(ioat_chan);
1285
1286 if (!desc) {
1287 dev_err(&ioat_chan->device->pdev->dev,
1288 "Unable to start null desc - get next desc failed\n");
1289 spin_unlock_bh(&ioat_chan->desc_lock);
1290 return;
1291 }
1292
976 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL 1293 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
977 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN 1294 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
978 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 1295 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
979 desc->hw->size = 0; 1296 /* set size to non-zero value (channel returns error when size is 0) */
1297 desc->hw->size = NULL_DESC_BUFFER_SIZE;
980 desc->hw->src_addr = 0; 1298 desc->hw->src_addr = 0;
981 desc->hw->dst_addr = 0; 1299 desc->hw->dst_addr = 0;
982 async_tx_ack(&desc->async_tx); 1300 async_tx_ack(&desc->async_tx);
@@ -994,6 +1312,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
994 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 1312 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
995 break; 1313 break;
996 case IOAT_VER_2_0: 1314 case IOAT_VER_2_0:
1315 case IOAT_VER_3_0:
997 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, 1316 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
998 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 1317 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
999 writel(((u64) desc->async_tx.phys) >> 32, 1318 writel(((u64) desc->async_tx.phys) >> 32,
@@ -1049,7 +1368,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1049 dma_chan = container_of(device->common.channels.next, 1368 dma_chan = container_of(device->common.channels.next,
1050 struct dma_chan, 1369 struct dma_chan,
1051 device_node); 1370 device_node);
1052 if (device->common.device_alloc_chan_resources(dma_chan) < 1) { 1371 if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
1053 dev_err(&device->pdev->dev, 1372 dev_err(&device->pdev->dev,
1054 "selftest cannot allocate chan resource\n"); 1373 "selftest cannot allocate chan resource\n");
1055 err = -ENODEV; 1374 err = -ENODEV;
@@ -1312,6 +1631,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1312 ioat1_dma_memcpy_issue_pending; 1631 ioat1_dma_memcpy_issue_pending;
1313 break; 1632 break;
1314 case IOAT_VER_2_0: 1633 case IOAT_VER_2_0:
1634 case IOAT_VER_3_0:
1315 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; 1635 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1316 device->common.device_issue_pending = 1636 device->common.device_issue_pending =
1317 ioat2_dma_memcpy_issue_pending; 1637 ioat2_dma_memcpy_issue_pending;
@@ -1331,8 +1651,16 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1331 if (err) 1651 if (err)
1332 goto err_self_test; 1652 goto err_self_test;
1333 1653
1654 ioat_set_tcp_copy_break(device);
1655
1334 dma_async_device_register(&device->common); 1656 dma_async_device_register(&device->common);
1335 1657
1658 if (device->version != IOAT_VER_3_0) {
1659 INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1660 schedule_delayed_work(&device->work,
1661 WATCHDOG_DELAY);
1662 }
1663
1336 return device; 1664 return device;
1337 1665
1338err_self_test: 1666err_self_test:
@@ -1365,6 +1693,10 @@ void ioat_dma_remove(struct ioatdma_device *device)
1365 pci_release_regions(device->pdev); 1693 pci_release_regions(device->pdev);
1366 pci_disable_device(device->pdev); 1694 pci_disable_device(device->pdev);
1367 1695
1696 if (device->version != IOAT_VER_3_0) {
1697 cancel_delayed_work(&device->work);
1698 }
1699
1368 list_for_each_entry_safe(chan, _chan, 1700 list_for_each_entry_safe(chan, _chan,
1369 &device->common.channels, device_node) { 1701 &device->common.channels, device_node) {
1370 ioat_chan = to_ioat_chan(chan); 1702 ioat_chan = to_ioat_chan(chan);
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index f2c7fedbf009..a3306d0e1372 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -27,8 +27,9 @@
27#include <linux/dmapool.h> 27#include <linux/dmapool.h>
28#include <linux/cache.h> 28#include <linux/cache.h>
29#include <linux/pci_ids.h> 29#include <linux/pci_ids.h>
30#include <net/tcp.h>
30 31
31#define IOAT_DMA_VERSION "2.04" 32#define IOAT_DMA_VERSION "3.30"
32 33
33enum ioat_interrupt { 34enum ioat_interrupt {
34 none = 0, 35 none = 0,
@@ -40,6 +41,7 @@ enum ioat_interrupt {
40 41
41#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 42#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
42#define IOAT_DMA_DCA_ANY_CPU ~0 43#define IOAT_DMA_DCA_ANY_CPU ~0
44#define IOAT_WATCHDOG_PERIOD (2 * HZ)
43 45
44 46
45/** 47/**
@@ -62,6 +64,7 @@ struct ioatdma_device {
62 struct dma_device common; 64 struct dma_device common;
63 u8 version; 65 u8 version;
64 enum ioat_interrupt irq_mode; 66 enum ioat_interrupt irq_mode;
67 struct delayed_work work;
65 struct msix_entry msix_entries[4]; 68 struct msix_entry msix_entries[4];
66 struct ioat_dma_chan *idx[4]; 69 struct ioat_dma_chan *idx[4];
67}; 70};
@@ -75,6 +78,7 @@ struct ioat_dma_chan {
75 78
76 dma_cookie_t completed_cookie; 79 dma_cookie_t completed_cookie;
77 unsigned long last_completion; 80 unsigned long last_completion;
81 unsigned long last_completion_time;
78 82
79 size_t xfercap; /* XFERCAP register value expanded out */ 83 size_t xfercap; /* XFERCAP register value expanded out */
80 84
@@ -82,6 +86,10 @@ struct ioat_dma_chan {
82 spinlock_t desc_lock; 86 spinlock_t desc_lock;
83 struct list_head free_desc; 87 struct list_head free_desc;
84 struct list_head used_desc; 88 struct list_head used_desc;
89 unsigned long watchdog_completion;
90 int watchdog_tcp_cookie;
91 u32 watchdog_last_tcp_cookie;
92 struct delayed_work work;
85 93
86 int pending; 94 int pending;
87 int dmacount; 95 int dmacount;
@@ -98,6 +106,7 @@ struct ioat_dma_chan {
98 u32 high; 106 u32 high;
99 }; 107 };
100 } *completion_virt; 108 } *completion_virt;
109 unsigned long last_compl_desc_addr_hw;
101 struct tasklet_struct cleanup_task; 110 struct tasklet_struct cleanup_task;
102}; 111};
103 112
@@ -121,17 +130,34 @@ struct ioat_desc_sw {
121 struct dma_async_tx_descriptor async_tx; 130 struct dma_async_tx_descriptor async_tx;
122}; 131};
123 132
133static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
134{
135 #ifdef CONFIG_NET_DMA
136 switch (dev->version) {
137 case IOAT_VER_1_2:
138 case IOAT_VER_3_0:
139 sysctl_tcp_dma_copybreak = 4096;
140 break;
141 case IOAT_VER_2_0:
142 sysctl_tcp_dma_copybreak = 2048;
143 break;
144 }
145 #endif
146}
147
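ioat_set_tcp_copy_break() above tunes sysctl_tcp_dma_copybreak, the byte threshold the CONFIG_NET_DMA receive path uses to decide whether a copy is large enough to be worth offloading to the DMA engine; the higher 4096-byte setting for IOAT 1.2 and 3.0 keeps more of the small receives on the CPU. A rough, simplified illustration of the consumer side (the real check lives in the TCP receive path; the helper names here are placeholders, not real kernel functions):

	/* illustrative only -- not the actual tcp_recvmsg() logic */
	if (len > sysctl_tcp_dma_copybreak)
		offload_copy_to_ioat();		/* big enough to amortize descriptor setup */
	else
		copy_to_user_on_cpu();		/* small copy: a CPU memcpy is cheaper */
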
124#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) 148#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
125struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, 149struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
126 void __iomem *iobase); 150 void __iomem *iobase);
127void ioat_dma_remove(struct ioatdma_device *device); 151void ioat_dma_remove(struct ioatdma_device *device);
128struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); 152struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
129struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 153struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
154struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
130#else 155#else
131#define ioat_dma_probe(pdev, iobase) NULL 156#define ioat_dma_probe(pdev, iobase) NULL
132#define ioat_dma_remove(device) do { } while (0) 157#define ioat_dma_remove(device) do { } while (0)
133#define ioat_dca_init(pdev, iobase) NULL 158#define ioat_dca_init(pdev, iobase) NULL
134#define ioat2_dca_init(pdev, iobase) NULL 159#define ioat2_dca_init(pdev, iobase) NULL
160#define ioat3_dca_init(pdev, iobase) NULL
135#endif 161#endif
136 162
137#endif /* IOATDMA_H */ 163#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index dd470fa91d86..f1ae2c776f74 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -35,6 +35,7 @@
35#define IOAT_PCI_SID 0x8086 35#define IOAT_PCI_SID 0x8086
36#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 36#define IOAT_VER_1_2 0x12 /* Version 1.2 */
37#define IOAT_VER_2_0 0x20 /* Version 2.0 */ 37#define IOAT_VER_2_0 0x20 /* Version 2.0 */
38#define IOAT_VER_3_0 0x30 /* Version 3.0 */
38 39
39struct ioat_dma_descriptor { 40struct ioat_dma_descriptor {
40 uint32_t size; 41 uint32_t size;
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index 9832d7ebd931..827cb503cac6 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -25,6 +25,10 @@
25#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001 25#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001
26#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002 26#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002
27 27
28#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
29#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
30#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
31
28/* MMIO Device Registers */ 32/* MMIO Device Registers */
29#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ 33#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */
30 34
@@ -149,7 +153,23 @@
149#define IOAT_DCA_GREQID_VALID 0x20000000 153#define IOAT_DCA_GREQID_VALID 0x20000000
150#define IOAT_DCA_GREQID_LASTID 0x80000000 154#define IOAT_DCA_GREQID_LASTID 0x80000000
151 155
156#define IOAT3_CSI_CAPABILITY_OFFSET 0x08
157#define IOAT3_CSI_CAPABILITY_PREFETCH 0x1
158
159#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A
160#define IOAT3_PCI_CAPABILITY_MEMWR 0x1
161
162#define IOAT3_CSI_CONTROL_OFFSET 0x0C
163#define IOAT3_CSI_CONTROL_PREFETCH 0x1
164
165#define IOAT3_PCI_CONTROL_OFFSET 0x0E
166#define IOAT3_PCI_CONTROL_MEMWR 0x1
167
168#define IOAT3_APICID_TAG_MAP_OFFSET 0x10
169#define IOAT3_APICID_TAG_MAP_OFFSET_LOW 0x10
170#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14
152 171
172#define IOAT3_DCA_GREQID_OFFSET 0x02
153 173
154#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ 174#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
155#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ 175#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 0ec0f431e6a1..85bfeba4d85e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
82 struct device *dev = 82 struct device *dev =
83 &iop_chan->device->pdev->dev; 83 &iop_chan->device->pdev->dev;
84 u32 len = unmap->unmap_len; 84 u32 len = unmap->unmap_len;
85 u32 src_cnt = unmap->unmap_src_cnt; 85 enum dma_ctrl_flags flags = desc->async_tx.flags;
86 dma_addr_t addr = iop_desc_get_dest_addr(unmap, 86 u32 src_cnt;
87 iop_chan); 87 dma_addr_t addr;
88 88
89 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); 89 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
90 while (src_cnt--) { 90 addr = iop_desc_get_dest_addr(unmap, iop_chan);
91 addr = iop_desc_get_src_addr(unmap, 91 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
92 iop_chan, 92 }
93 src_cnt); 93
94 dma_unmap_page(dev, addr, len, 94 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
95 DMA_TO_DEVICE); 95 src_cnt = unmap->unmap_src_cnt;
96 while (src_cnt--) {
97 addr = iop_desc_get_src_addr(unmap,
98 iop_chan,
99 src_cnt);
100 dma_unmap_page(dev, addr, len,
101 DMA_TO_DEVICE);
102 }
96 } 103 }
97 desc->group_head = NULL; 104 desc->group_head = NULL;
98 } 105 }
@@ -366,8 +373,8 @@ retry:
366 if (!retry++) 373 if (!retry++)
367 goto retry; 374 goto retry;
368 375
369 /* try to free some slots if the allocation fails */ 376 /* perform direct reclaim if the allocation fails */
370 tasklet_schedule(&iop_chan->irq_tasklet); 377 __iop_adma_slot_cleanup(iop_chan);
371 378
372 return NULL; 379 return NULL;
373} 380}
@@ -443,8 +450,18 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
443static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); 450static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
444static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); 451static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
445 452
446/* returns the number of allocated descriptors */ 453/**
447static int iop_adma_alloc_chan_resources(struct dma_chan *chan) 454 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
455 * @chan - allocate descriptor resources for this channel
456 * @client - current client requesting the channel be ready for requests
457 *
458 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
 459 * avoid deadlock via async_xor, num_descs_in_pool must at a minimum be
 460 * greater than 2x the number of slots needed to satisfy a device->max_xor
 461 * request.
 462 */
463static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
464 struct dma_client *client)
448{ 465{
449 char *hw_desc; 466 char *hw_desc;
450 int idx; 467 int idx;
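To make the kernel-doc note above concrete: if a single device->max_xor request needs S descriptor slots, the pool carved out of plat_data->pool_size has to yield strictly more than 2*S slots, so that a dependent chain of operations built up by async_xor can always make forward progress. The figures below are purely hypothetical:

	/*
	 * hypothetical sizing, for illustration only:
	 *   slots consumed by one max_xor request : S = 16 (assumed)
	 *   minimum safe pool                     : num_descs_in_pool > 2 * S,
	 *                                           i.e. at least 33 slots
	 */
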
@@ -838,7 +855,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
838 dma_chan = container_of(device->common.channels.next, 855 dma_chan = container_of(device->common.channels.next,
839 struct dma_chan, 856 struct dma_chan,
840 device_node); 857 device_node);
841 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 858 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
842 err = -ENODEV; 859 err = -ENODEV;
843 goto out; 860 goto out;
844 } 861 }
@@ -936,7 +953,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
936 dma_chan = container_of(device->common.channels.next, 953 dma_chan = container_of(device->common.channels.next,
937 struct dma_chan, 954 struct dma_chan,
938 device_node); 955 device_node);
939 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 956 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
940 err = -ENODEV; 957 err = -ENODEV;
941 goto out; 958 goto out;
942 } 959 }
@@ -1387,6 +1404,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1387 spin_unlock_bh(&iop_chan->lock); 1404 spin_unlock_bh(&iop_chan->lock);
1388} 1405}
1389 1406
1407MODULE_ALIAS("platform:iop-adma");
1408
1390static struct platform_driver iop_adma_driver = { 1409static struct platform_driver iop_adma_driver = {
1391 .probe = iop_adma_probe, 1410 .probe = iop_adma_probe,
1392 .remove = iop_adma_remove, 1411 .remove = iop_adma_remove,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
new file mode 100644
index 000000000000..a4e4494663bf
--- /dev/null
+++ b/drivers/dma/mv_xor.c
@@ -0,0 +1,1375 @@
1/*
2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/async_tx.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/spinlock.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/memory.h>
28#include <asm/plat-orion/mv_xor.h>
29#include "mv_xor.h"
30
31static void mv_xor_issue_pending(struct dma_chan *chan);
32
33#define to_mv_xor_chan(chan) \
34 container_of(chan, struct mv_xor_chan, common)
35
36#define to_mv_xor_device(dev) \
37 container_of(dev, struct mv_xor_device, common)
38
39#define to_mv_xor_slot(tx) \
40 container_of(tx, struct mv_xor_desc_slot, async_tx)
41
42static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
43{
44 struct mv_xor_desc *hw_desc = desc->hw_desc;
45
46 hw_desc->status = (1 << 31);
47 hw_desc->phy_next_desc = 0;
48 hw_desc->desc_command = (1 << 31);
49}
50
51static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
52{
53 struct mv_xor_desc *hw_desc = desc->hw_desc;
54 return hw_desc->phy_dest_addr;
55}
56
57static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
58 int src_idx)
59{
60 struct mv_xor_desc *hw_desc = desc->hw_desc;
61 return hw_desc->phy_src_addr[src_idx];
62}
63
64
65static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
66 u32 byte_count)
67{
68 struct mv_xor_desc *hw_desc = desc->hw_desc;
69 hw_desc->byte_count = byte_count;
70}
71
72static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
73 u32 next_desc_addr)
74{
75 struct mv_xor_desc *hw_desc = desc->hw_desc;
76 BUG_ON(hw_desc->phy_next_desc);
77 hw_desc->phy_next_desc = next_desc_addr;
78}
79
80static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
81{
82 struct mv_xor_desc *hw_desc = desc->hw_desc;
83 hw_desc->phy_next_desc = 0;
84}
85
86static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
87{
88 desc->value = val;
89}
90
91static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
92 dma_addr_t addr)
93{
94 struct mv_xor_desc *hw_desc = desc->hw_desc;
95 hw_desc->phy_dest_addr = addr;
96}
97
98static int mv_chan_memset_slot_count(size_t len)
99{
100 return 1;
101}
102
103#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
104
105static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
106 int index, dma_addr_t addr)
107{
108 struct mv_xor_desc *hw_desc = desc->hw_desc;
109 hw_desc->phy_src_addr[index] = addr;
110 if (desc->type == DMA_XOR)
111 hw_desc->desc_command |= (1 << index);
112}
113
114static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
115{
116 return __raw_readl(XOR_CURR_DESC(chan));
117}
118
119static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
120 u32 next_desc_addr)
121{
122 __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
123}
124
125static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
126{
127 __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
128}
129
130static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
131{
132 __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
133}
134
135static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
136{
137 __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
138 __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
139}
140
141static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
142{
143 u32 val = __raw_readl(XOR_INTR_MASK(chan));
144 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
145 __raw_writel(val, XOR_INTR_MASK(chan));
146}
147
148static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
149{
150 u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
151 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
152 return intr_cause;
153}
154
155static int mv_is_err_intr(u32 intr_cause)
156{
157 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
158 return 1;
159
160 return 0;
161}
162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{
165 u32 val = (1 << (1 + (chan->idx * 16)));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168}
169
170static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
171{
172 u32 val = 0xFFFF0000 >> (chan->idx * 16);
173 __raw_writel(val, XOR_INTR_CAUSE(chan));
174}
175
176static int mv_can_chain(struct mv_xor_desc_slot *desc)
177{
178 struct mv_xor_desc_slot *chain_old_tail = list_entry(
179 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
180
181 if (chain_old_tail->type != desc->type)
182 return 0;
183 if (desc->type == DMA_MEMSET)
184 return 0;
185
186 return 1;
187}
188
189static void mv_set_mode(struct mv_xor_chan *chan,
190 enum dma_transaction_type type)
191{
192 u32 op_mode;
193 u32 config = __raw_readl(XOR_CONFIG(chan));
194
195 switch (type) {
196 case DMA_XOR:
197 op_mode = XOR_OPERATION_MODE_XOR;
198 break;
199 case DMA_MEMCPY:
200 op_mode = XOR_OPERATION_MODE_MEMCPY;
201 break;
202 case DMA_MEMSET:
203 op_mode = XOR_OPERATION_MODE_MEMSET;
204 break;
205 default:
206 dev_printk(KERN_ERR, chan->device->common.dev,
207 "error: unsupported operation %d.\n",
208 type);
209 BUG();
210 return;
211 }
212
213 config &= ~0x7;
214 config |= op_mode;
215 __raw_writel(config, XOR_CONFIG(chan));
216 chan->current_type = type;
217}
218
219static void mv_chan_activate(struct mv_xor_chan *chan)
220{
221 u32 activation;
222
223 dev_dbg(chan->device->common.dev, " activate chan.\n");
224 activation = __raw_readl(XOR_ACTIVATION(chan));
225 activation |= 0x1;
226 __raw_writel(activation, XOR_ACTIVATION(chan));
227}
228
229static char mv_chan_is_busy(struct mv_xor_chan *chan)
230{
231 u32 state = __raw_readl(XOR_ACTIVATION(chan));
232
233 state = (state >> 4) & 0x3;
234
235 return (state == 1) ? 1 : 0;
236}
237
238static int mv_chan_xor_slot_count(size_t len, int src_cnt)
239{
240 return 1;
241}
242
243/**
244 * mv_xor_free_slots - flags descriptor slots for reuse
245 * @slot: Slot to free
246 * Caller must hold &mv_chan->lock while calling this function
247 */
248static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
249 struct mv_xor_desc_slot *slot)
250{
251 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
252 __func__, __LINE__, slot);
253
254 slot->slots_per_op = 0;
255
256}
257
258/*
259 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
260 * sw_desc
261 * Caller must hold &mv_chan->lock while calling this function
262 */
263static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
264 struct mv_xor_desc_slot *sw_desc)
265{
266 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
267 __func__, __LINE__, sw_desc);
268 if (sw_desc->type != mv_chan->current_type)
269 mv_set_mode(mv_chan, sw_desc->type);
270
271 if (sw_desc->type == DMA_MEMSET) {
272 /* for memset requests we need to program the engine, no
273 * descriptors used.
274 */
275 struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
276 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
277 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
278 mv_chan_set_value(mv_chan, sw_desc->value);
279 } else {
280 /* set the hardware chain */
281 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
282 }
283 mv_chan->pending += sw_desc->slot_cnt;
284 mv_xor_issue_pending(&mv_chan->common);
285}
286
287static dma_cookie_t
288mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
289 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
290{
291 BUG_ON(desc->async_tx.cookie < 0);
292
293 if (desc->async_tx.cookie > 0) {
294 cookie = desc->async_tx.cookie;
295
296 /* call the callback (must not sleep or submit new
297 * operations to this channel)
298 */
299 if (desc->async_tx.callback)
300 desc->async_tx.callback(
301 desc->async_tx.callback_param);
302
303 /* unmap dma addresses
304 * (unmap_single vs unmap_page?)
305 */
306 if (desc->group_head && desc->unmap_len) {
307 struct mv_xor_desc_slot *unmap = desc->group_head;
308 struct device *dev =
309 &mv_chan->device->pdev->dev;
310 u32 len = unmap->unmap_len;
311 enum dma_ctrl_flags flags = desc->async_tx.flags;
312 u32 src_cnt;
313 dma_addr_t addr;
314
315 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
316 addr = mv_desc_get_dest_addr(unmap);
317 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
318 }
319
320 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
321 src_cnt = unmap->unmap_src_cnt;
322 while (src_cnt--) {
323 addr = mv_desc_get_src_addr(unmap,
324 src_cnt);
325 dma_unmap_page(dev, addr, len,
326 DMA_TO_DEVICE);
327 }
328 }
329 desc->group_head = NULL;
330 }
331 }
332
333 /* run dependent operations */
334 async_tx_run_dependencies(&desc->async_tx);
335
336 return cookie;
337}
338
339static int
340mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
341{
342 struct mv_xor_desc_slot *iter, *_iter;
343
344 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
345 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
346 completed_node) {
347
348 if (async_tx_test_ack(&iter->async_tx)) {
349 list_del(&iter->completed_node);
350 mv_xor_free_slots(mv_chan, iter);
351 }
352 }
353 return 0;
354}
355
356static int
357mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
358 struct mv_xor_chan *mv_chan)
359{
360 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
361 __func__, __LINE__, desc, desc->async_tx.flags);
362 list_del(&desc->chain_node);
363 /* the client is allowed to attach dependent operations
364 * until 'ack' is set
365 */
366 if (!async_tx_test_ack(&desc->async_tx)) {
367 /* move this slot to the completed_slots */
368 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
369 return 0;
370 }
371
372 mv_xor_free_slots(mv_chan, desc);
373 return 0;
374}
375
376static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
377{
378 struct mv_xor_desc_slot *iter, *_iter;
379 dma_cookie_t cookie = 0;
380 int busy = mv_chan_is_busy(mv_chan);
381 u32 current_desc = mv_chan_get_current_desc(mv_chan);
382 int seen_current = 0;
383
384 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
385 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
386 mv_xor_clean_completed_slots(mv_chan);
387
388 /* free completed slots from the chain starting with
389 * the oldest descriptor
390 */
391
392 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
393 chain_node) {
394 prefetch(_iter);
395 prefetch(&_iter->async_tx);
396
397 /* do not advance past the current descriptor loaded into the
398 * hardware channel, subsequent descriptors are either in
399 * process or have not been submitted
400 */
401 if (seen_current)
402 break;
403
404 /* stop the search if we reach the current descriptor and the
405 * channel is busy
406 */
407 if (iter->async_tx.phys == current_desc) {
408 seen_current = 1;
409 if (busy)
410 break;
411 }
412
413 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
414
415 if (mv_xor_clean_slot(iter, mv_chan))
416 break;
417 }
418
419 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
420 struct mv_xor_desc_slot *chain_head;
421 chain_head = list_entry(mv_chan->chain.next,
422 struct mv_xor_desc_slot,
423 chain_node);
424
425 mv_xor_start_new_chain(mv_chan, chain_head);
426 }
427
428 if (cookie > 0)
429 mv_chan->completed_cookie = cookie;
430}
431
432static void
433mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
434{
435 spin_lock_bh(&mv_chan->lock);
436 __mv_xor_slot_cleanup(mv_chan);
437 spin_unlock_bh(&mv_chan->lock);
438}
439
440static void mv_xor_tasklet(unsigned long data)
441{
442 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
443 __mv_xor_slot_cleanup(chan);
444}
445
446static struct mv_xor_desc_slot *
447mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
448 int slots_per_op)
449{
450 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
451 LIST_HEAD(chain);
452 int slots_found, retry = 0;
453
 454 /* start the search from the last allocated descriptor;
 455  * if a contiguous allocation cannot be found, start searching
456 * from the beginning of the list
457 */
458retry:
459 slots_found = 0;
460 if (retry == 0)
461 iter = mv_chan->last_used;
462 else
463 iter = list_entry(&mv_chan->all_slots,
464 struct mv_xor_desc_slot,
465 slot_node);
466
467 list_for_each_entry_safe_continue(
468 iter, _iter, &mv_chan->all_slots, slot_node) {
469 prefetch(_iter);
470 prefetch(&_iter->async_tx);
471 if (iter->slots_per_op) {
472 /* give up after finding the first busy slot
473 * on the second pass through the list
474 */
475 if (retry)
476 break;
477
478 slots_found = 0;
479 continue;
480 }
481
482 /* start the allocation if the slot is correctly aligned */
483 if (!slots_found++)
484 alloc_start = iter;
485
486 if (slots_found == num_slots) {
487 struct mv_xor_desc_slot *alloc_tail = NULL;
488 struct mv_xor_desc_slot *last_used = NULL;
489 iter = alloc_start;
490 while (num_slots) {
491 int i;
492
493 /* pre-ack all but the last descriptor */
494 async_tx_ack(&iter->async_tx);
495
496 list_add_tail(&iter->chain_node, &chain);
497 alloc_tail = iter;
498 iter->async_tx.cookie = 0;
499 iter->slot_cnt = num_slots;
500 iter->xor_check_result = NULL;
501 for (i = 0; i < slots_per_op; i++) {
502 iter->slots_per_op = slots_per_op - i;
503 last_used = iter;
504 iter = list_entry(iter->slot_node.next,
505 struct mv_xor_desc_slot,
506 slot_node);
507 }
508 num_slots -= slots_per_op;
509 }
510 alloc_tail->group_head = alloc_start;
511 alloc_tail->async_tx.cookie = -EBUSY;
512 list_splice(&chain, &alloc_tail->async_tx.tx_list);
513 mv_chan->last_used = last_used;
514 mv_desc_clear_next_desc(alloc_start);
515 mv_desc_clear_next_desc(alloc_tail);
516 return alloc_tail;
517 }
518 }
519 if (!retry++)
520 goto retry;
521
522 /* try to free some slots if the allocation fails */
523 tasklet_schedule(&mv_chan->irq_tasklet);
524
525 return NULL;
526}
527
528static dma_cookie_t
529mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
530 struct mv_xor_desc_slot *desc)
531{
532 dma_cookie_t cookie = mv_chan->common.cookie;
533
534 if (++cookie < 0)
535 cookie = 1;
536 mv_chan->common.cookie = desc->async_tx.cookie = cookie;
537 return cookie;
538}
539
540/************************ DMA engine API functions ****************************/
541static dma_cookie_t
542mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
543{
544 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
545 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
546 struct mv_xor_desc_slot *grp_start, *old_chain_tail;
547 dma_cookie_t cookie;
548 int new_hw_chain = 1;
549
550 dev_dbg(mv_chan->device->common.dev,
551 "%s sw_desc %p: async_tx %p\n",
552 __func__, sw_desc, &sw_desc->async_tx);
553
554 grp_start = sw_desc->group_head;
555
556 spin_lock_bh(&mv_chan->lock);
557 cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
558
559 if (list_empty(&mv_chan->chain))
560 list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
561 else {
562 new_hw_chain = 0;
563
564 old_chain_tail = list_entry(mv_chan->chain.prev,
565 struct mv_xor_desc_slot,
566 chain_node);
567 list_splice_init(&grp_start->async_tx.tx_list,
568 &old_chain_tail->chain_node);
569
570 if (!mv_can_chain(grp_start))
571 goto submit_done;
572
573 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
574 old_chain_tail->async_tx.phys);
575
576 /* fix up the hardware chain */
577 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
578
579 /* if the channel is not busy */
580 if (!mv_chan_is_busy(mv_chan)) {
581 u32 current_desc = mv_chan_get_current_desc(mv_chan);
582 /*
 583 * and the current desc is the end of the chain before
584 * the append, then we need to start the channel
585 */
586 if (current_desc == old_chain_tail->async_tx.phys)
587 new_hw_chain = 1;
588 }
589 }
590
591 if (new_hw_chain)
592 mv_xor_start_new_chain(mv_chan, grp_start);
593
594submit_done:
595 spin_unlock_bh(&mv_chan->lock);
596
597 return cookie;
598}
599
600/* returns the number of allocated descriptors */
601static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
602 struct dma_client *client)
603{
604 char *hw_desc;
605 int idx;
606 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
607 struct mv_xor_desc_slot *slot = NULL;
608 struct mv_xor_platform_data *plat_data =
609 mv_chan->device->pdev->dev.platform_data;
610 int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
611
612 /* Allocate descriptor slots */
613 idx = mv_chan->slots_allocated;
614 while (idx < num_descs_in_pool) {
615 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
616 if (!slot) {
617 printk(KERN_INFO "MV XOR Channel only initialized"
618 " %d descriptor slots", idx);
619 break;
620 }
621 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
622 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
623
624 dma_async_tx_descriptor_init(&slot->async_tx, chan);
625 slot->async_tx.tx_submit = mv_xor_tx_submit;
626 INIT_LIST_HEAD(&slot->chain_node);
627 INIT_LIST_HEAD(&slot->slot_node);
628 INIT_LIST_HEAD(&slot->async_tx.tx_list);
629 hw_desc = (char *) mv_chan->device->dma_desc_pool;
630 slot->async_tx.phys =
631 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
632 slot->idx = idx++;
633
634 spin_lock_bh(&mv_chan->lock);
635 mv_chan->slots_allocated = idx;
636 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
637 spin_unlock_bh(&mv_chan->lock);
638 }
639
640 if (mv_chan->slots_allocated && !mv_chan->last_used)
641 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
642 struct mv_xor_desc_slot,
643 slot_node);
644
645 dev_dbg(mv_chan->device->common.dev,
646 "allocated %d descriptor slots last_used: %p\n",
647 mv_chan->slots_allocated, mv_chan->last_used);
648
649 return mv_chan->slots_allocated ? : -ENOMEM;
650}
651
652static struct dma_async_tx_descriptor *
653mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
654 size_t len, unsigned long flags)
655{
656 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
657 struct mv_xor_desc_slot *sw_desc, *grp_start;
658 int slot_cnt;
659
660 dev_dbg(mv_chan->device->common.dev,
661 "%s dest: %x src %x len: %u flags: %ld\n",
662 __func__, dest, src, len, flags);
663 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
664 return NULL;
665
666 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
667
668 spin_lock_bh(&mv_chan->lock);
669 slot_cnt = mv_chan_memcpy_slot_count(len);
670 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
671 if (sw_desc) {
672 sw_desc->type = DMA_MEMCPY;
673 sw_desc->async_tx.flags = flags;
674 grp_start = sw_desc->group_head;
675 mv_desc_init(grp_start, flags);
676 mv_desc_set_byte_count(grp_start, len);
677 mv_desc_set_dest_addr(sw_desc->group_head, dest);
678 mv_desc_set_src_addr(grp_start, 0, src);
679 sw_desc->unmap_src_cnt = 1;
680 sw_desc->unmap_len = len;
681 }
682 spin_unlock_bh(&mv_chan->lock);
683
684 dev_dbg(mv_chan->device->common.dev,
685 "%s sw_desc %p async_tx %p\n",
686 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
687
688 return sw_desc ? &sw_desc->async_tx : NULL;
689}
690
691static struct dma_async_tx_descriptor *
692mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
693 size_t len, unsigned long flags)
694{
695 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
696 struct mv_xor_desc_slot *sw_desc, *grp_start;
697 int slot_cnt;
698
699 dev_dbg(mv_chan->device->common.dev,
700 "%s dest: %x len: %u flags: %ld\n",
701 __func__, dest, len, flags);
702 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
703 return NULL;
704
705 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
706
707 spin_lock_bh(&mv_chan->lock);
708 slot_cnt = mv_chan_memset_slot_count(len);
709 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
710 if (sw_desc) {
711 sw_desc->type = DMA_MEMSET;
712 sw_desc->async_tx.flags = flags;
713 grp_start = sw_desc->group_head;
714 mv_desc_init(grp_start, flags);
715 mv_desc_set_byte_count(grp_start, len);
716 mv_desc_set_dest_addr(sw_desc->group_head, dest);
717 mv_desc_set_block_fill_val(grp_start, value);
718 sw_desc->unmap_src_cnt = 1;
719 sw_desc->unmap_len = len;
720 }
721 spin_unlock_bh(&mv_chan->lock);
722 dev_dbg(mv_chan->device->common.dev,
723 "%s sw_desc %p async_tx %p \n",
724 __func__, sw_desc, &sw_desc->async_tx);
725 return sw_desc ? &sw_desc->async_tx : NULL;
726}
727
728static struct dma_async_tx_descriptor *
729mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
730 unsigned int src_cnt, size_t len, unsigned long flags)
731{
732 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
733 struct mv_xor_desc_slot *sw_desc, *grp_start;
734 int slot_cnt;
735
736 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
737 return NULL;
738
739 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
740
741 dev_dbg(mv_chan->device->common.dev,
742 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
743 __func__, src_cnt, len, dest, flags);
744
745 spin_lock_bh(&mv_chan->lock);
746 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
747 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
748 if (sw_desc) {
749 sw_desc->type = DMA_XOR;
750 sw_desc->async_tx.flags = flags;
751 grp_start = sw_desc->group_head;
752 mv_desc_init(grp_start, flags);
753 /* the byte count field is the same as in memcpy desc*/
754 mv_desc_set_byte_count(grp_start, len);
755 mv_desc_set_dest_addr(sw_desc->group_head, dest);
756 sw_desc->unmap_src_cnt = src_cnt;
757 sw_desc->unmap_len = len;
758 while (src_cnt--)
759 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
760 }
761 spin_unlock_bh(&mv_chan->lock);
762 dev_dbg(mv_chan->device->common.dev,
763		"%s sw_desc %p async_tx %p\n",
764		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
765 return sw_desc ? &sw_desc->async_tx : NULL;
766}
767
768static void mv_xor_free_chan_resources(struct dma_chan *chan)
769{
770 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
771 struct mv_xor_desc_slot *iter, *_iter;
772 int in_use_descs = 0;
773
774 mv_xor_slot_cleanup(mv_chan);
775
776 spin_lock_bh(&mv_chan->lock);
777 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
778 chain_node) {
779 in_use_descs++;
780 list_del(&iter->chain_node);
781 }
782 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
783 completed_node) {
784 in_use_descs++;
785 list_del(&iter->completed_node);
786 }
787 list_for_each_entry_safe_reverse(
788 iter, _iter, &mv_chan->all_slots, slot_node) {
789 list_del(&iter->slot_node);
790 kfree(iter);
791 mv_chan->slots_allocated--;
792 }
793 mv_chan->last_used = NULL;
794
795 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
796 __func__, mv_chan->slots_allocated);
797 spin_unlock_bh(&mv_chan->lock);
798
799 if (in_use_descs)
800 dev_err(mv_chan->device->common.dev,
801 "freeing %d in use descriptors!\n", in_use_descs);
802}
803
804/**
805 * mv_xor_is_complete - poll the status of an XOR transaction
806 * @chan: XOR channel handle
807 * @cookie: XOR transaction identifier
808 */
809static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
810 dma_cookie_t cookie,
811 dma_cookie_t *done,
812 dma_cookie_t *used)
813{
814 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
815 dma_cookie_t last_used;
816 dma_cookie_t last_complete;
817 enum dma_status ret;
818
819 last_used = chan->cookie;
820 last_complete = mv_chan->completed_cookie;
821 mv_chan->is_complete_cookie = cookie;
822 if (done)
823 *done = last_complete;
824 if (used)
825 *used = last_used;
826
827 ret = dma_async_is_complete(cookie, last_complete, last_used);
828 if (ret == DMA_SUCCESS) {
829 mv_xor_clean_completed_slots(mv_chan);
830 return ret;
831 }
832 mv_xor_slot_cleanup(mv_chan);
833
834 last_used = chan->cookie;
835 last_complete = mv_chan->completed_cookie;
836
837 if (done)
838 *done = last_complete;
839 if (used)
840 *used = last_used;
841
842 return dma_async_is_complete(cookie, last_complete, last_used);
843}
844
845static void mv_dump_xor_regs(struct mv_xor_chan *chan)
846{
847 u32 val;
848
849 val = __raw_readl(XOR_CONFIG(chan));
850 dev_printk(KERN_ERR, chan->device->common.dev,
851 "config 0x%08x.\n", val);
852
853 val = __raw_readl(XOR_ACTIVATION(chan));
854 dev_printk(KERN_ERR, chan->device->common.dev,
855 "activation 0x%08x.\n", val);
856
857 val = __raw_readl(XOR_INTR_CAUSE(chan));
858 dev_printk(KERN_ERR, chan->device->common.dev,
859 "intr cause 0x%08x.\n", val);
860
861 val = __raw_readl(XOR_INTR_MASK(chan));
862 dev_printk(KERN_ERR, chan->device->common.dev,
863 "intr mask 0x%08x.\n", val);
864
865 val = __raw_readl(XOR_ERROR_CAUSE(chan));
866 dev_printk(KERN_ERR, chan->device->common.dev,
867 "error cause 0x%08x.\n", val);
868
869 val = __raw_readl(XOR_ERROR_ADDR(chan));
870 dev_printk(KERN_ERR, chan->device->common.dev,
871 "error addr 0x%08x.\n", val);
872}
873
874static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
875 u32 intr_cause)
876{
877 if (intr_cause & (1 << 4)) {
878 dev_dbg(chan->device->common.dev,
879 "ignore this error\n");
880 return;
881 }
882
883 dev_printk(KERN_ERR, chan->device->common.dev,
884 "error on chan %d. intr cause 0x%08x.\n",
885 chan->idx, intr_cause);
886
887 mv_dump_xor_regs(chan);
888 BUG();
889}
890
891static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
892{
893 struct mv_xor_chan *chan = data;
894 u32 intr_cause = mv_chan_get_intr_cause(chan);
895
896 dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
897
898 if (mv_is_err_intr(intr_cause))
899 mv_xor_err_interrupt_handler(chan, intr_cause);
900
901 tasklet_schedule(&chan->irq_tasklet);
902
903 mv_xor_device_clear_eoc_cause(chan);
904
905 return IRQ_HANDLED;
906}
907
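/*
 * MV_XOR_THRESHOLD is defined as 1 in mv_xor.h, so in practice every
 * ->issue_pending() call that finds batched work activates the channel;
 * a larger threshold would batch more submissions per hardware activation.
 */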
908static void mv_xor_issue_pending(struct dma_chan *chan)
909{
910 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
911
912 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
913 mv_chan->pending = 0;
914 mv_chan_activate(mv_chan);
915 }
916}
917
918/*
919 * Perform a transaction to verify the HW works.
920 */
921#define MV_XOR_TEST_SIZE 2000
922
923static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
924{
925 int i;
926 void *src, *dest;
927 dma_addr_t src_dma, dest_dma;
928 struct dma_chan *dma_chan;
929 dma_cookie_t cookie;
930 struct dma_async_tx_descriptor *tx;
931 int err = 0;
932 struct mv_xor_chan *mv_chan;
933
934 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
935 if (!src)
936 return -ENOMEM;
937
938 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
939 if (!dest) {
940 kfree(src);
941 return -ENOMEM;
942 }
943
944 /* Fill in src buffer */
945 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
946 ((u8 *) src)[i] = (u8)i;
947
948 /* Start copy, using first DMA channel */
949 dma_chan = container_of(device->common.channels.next,
950 struct dma_chan,
951 device_node);
952 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
953 err = -ENODEV;
954 goto out;
955 }
956
957 dest_dma = dma_map_single(dma_chan->device->dev, dest,
958 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
959
960 src_dma = dma_map_single(dma_chan->device->dev, src,
961 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
962
963 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
964 MV_XOR_TEST_SIZE, 0);
965 cookie = mv_xor_tx_submit(tx);
966 mv_xor_issue_pending(dma_chan);
967 async_tx_ack(tx);
968 msleep(1);
969
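	/*
	 * 1 ms is ample for a 2000-byte copy; anything still not reporting
	 * DMA_SUCCESS at this point is treated as a timeout.
	 */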
970 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
971 DMA_SUCCESS) {
972 dev_printk(KERN_ERR, dma_chan->device->dev,
973 "Self-test copy timed out, disabling\n");
974 err = -ENODEV;
975 goto free_resources;
976 }
977
978 mv_chan = to_mv_xor_chan(dma_chan);
979 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
980 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
981 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
982 dev_printk(KERN_ERR, dma_chan->device->dev,
983 "Self-test copy failed compare, disabling\n");
984 err = -ENODEV;
985 goto free_resources;
986 }
987
988free_resources:
989 mv_xor_free_chan_resources(dma_chan);
990out:
991 kfree(src);
992 kfree(dest);
993 return err;
994}
995
996#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
997static int __devinit
998mv_xor_xor_self_test(struct mv_xor_device *device)
999{
1000 int i, src_idx;
1001 struct page *dest;
1002 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
1003 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
1004 dma_addr_t dest_dma;
1005 struct dma_async_tx_descriptor *tx;
1006 struct dma_chan *dma_chan;
1007 dma_cookie_t cookie;
1008 u8 cmp_byte = 0;
1009 u32 cmp_word;
1010 int err = 0;
1011 struct mv_xor_chan *mv_chan;
1012
1013 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1014 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1015		if (!xor_srcs[src_idx]) {
1016			while (src_idx--)
1017				__free_page(xor_srcs[src_idx]);
1018			return -ENOMEM;
1019		}
1020 }
1021
1022 dest = alloc_page(GFP_KERNEL);
1023	if (!dest) {
1024		while (src_idx--)
1025			__free_page(xor_srcs[src_idx]);
1026		return -ENOMEM;
1027	}
1028
1029 /* Fill in src buffers */
1030 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1031 u8 *ptr = page_address(xor_srcs[src_idx]);
1032 for (i = 0; i < PAGE_SIZE; i++)
1033 ptr[i] = (1 << src_idx);
1034 }
1035
1036 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1037 cmp_byte ^= (u8) (1 << src_idx);
1038
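	/*
	 * cmp_byte is the XOR of all source fill patterns; replicate it into
	 * each byte of a 32-bit word so the destination page can be verified
	 * with word-sized compares below.
	 */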
1039 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1040 (cmp_byte << 8) | cmp_byte;
1041
1042 memset(page_address(dest), 0, PAGE_SIZE);
1043
1044 dma_chan = container_of(device->common.channels.next,
1045 struct dma_chan,
1046 device_node);
1047 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
1048 err = -ENODEV;
1049 goto out;
1050 }
1051
1052 /* test xor */
1053 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1054 DMA_FROM_DEVICE);
1055
1056 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1057 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1058 0, PAGE_SIZE, DMA_TO_DEVICE);
1059
1060 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1061 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1062
1063 cookie = mv_xor_tx_submit(tx);
1064 mv_xor_issue_pending(dma_chan);
1065 async_tx_ack(tx);
1066 msleep(8);
1067
1068 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
1069 DMA_SUCCESS) {
1070 dev_printk(KERN_ERR, dma_chan->device->dev,
1071 "Self-test xor timed out, disabling\n");
1072 err = -ENODEV;
1073 goto free_resources;
1074 }
1075
1076 mv_chan = to_mv_xor_chan(dma_chan);
1077 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1078 PAGE_SIZE, DMA_FROM_DEVICE);
1079 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1080 u32 *ptr = page_address(dest);
1081 if (ptr[i] != cmp_word) {
1082 dev_printk(KERN_ERR, dma_chan->device->dev,
1083 "Self-test xor failed compare, disabling."
1084 " index %d, data %x, expected %x\n", i,
1085 ptr[i], cmp_word);
1086 err = -ENODEV;
1087 goto free_resources;
1088 }
1089 }
1090
1091free_resources:
1092 mv_xor_free_chan_resources(dma_chan);
1093out:
1094 src_idx = MV_XOR_NUM_SRC_TEST;
1095 while (src_idx--)
1096 __free_page(xor_srcs[src_idx]);
1097 __free_page(dest);
1098 return err;
1099}
1100
1101static int __devexit mv_xor_remove(struct platform_device *dev)
1102{
1103 struct mv_xor_device *device = platform_get_drvdata(dev);
1104 struct dma_chan *chan, *_chan;
1105 struct mv_xor_chan *mv_chan;
1106 struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
1107
1108 dma_async_device_unregister(&device->common);
1109
1110 dma_free_coherent(&dev->dev, plat_data->pool_size,
1111 device->dma_desc_pool_virt, device->dma_desc_pool);
1112
1113 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1114 device_node) {
1115 mv_chan = to_mv_xor_chan(chan);
1116 list_del(&chan->device_node);
1117 }
1118
1119 return 0;
1120}
1121
1122static int __devinit mv_xor_probe(struct platform_device *pdev)
1123{
1124 int ret = 0;
1125 int irq;
1126 struct mv_xor_device *adev;
1127 struct mv_xor_chan *mv_chan;
1128 struct dma_device *dma_dev;
1129 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1130
1131
1132 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1133 if (!adev)
1134 return -ENOMEM;
1135
1136 dma_dev = &adev->common;
1137
1138 /* allocate coherent memory for hardware descriptors
1139 * note: writecombine gives slightly better performance, but
1140 * requires that we explicitly flush the writes
1141 */
1142 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1143 plat_data->pool_size,
1144 &adev->dma_desc_pool,
1145 GFP_KERNEL);
1146 if (!adev->dma_desc_pool_virt)
1147 return -ENOMEM;
1148
1149 adev->id = plat_data->hw_id;
1150
1151	/* discover transaction capabilities from the platform data */
1152 dma_dev->cap_mask = plat_data->cap_mask;
1153 adev->pdev = pdev;
1154 platform_set_drvdata(pdev, adev);
1155
1156 adev->shared = platform_get_drvdata(plat_data->shared);
1157
1158 INIT_LIST_HEAD(&dma_dev->channels);
1159
1160 /* set base routines */
1161 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1162 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1163 dma_dev->device_is_tx_complete = mv_xor_is_complete;
1164 dma_dev->device_issue_pending = mv_xor_issue_pending;
1165 dma_dev->dev = &pdev->dev;
1166
1167 /* set prep routines based on capability */
1168 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1169 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1170 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1171 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1172 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1173		dma_dev->max_xor = 8;
1174 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1175 }
1176
1177 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1178 if (!mv_chan) {
1179 ret = -ENOMEM;
1180 goto err_free_dma;
1181 }
1182 mv_chan->device = adev;
1183 mv_chan->idx = plat_data->hw_id;
1184 mv_chan->mmr_base = adev->shared->xor_base;
1185
1186 if (!mv_chan->mmr_base) {
1187 ret = -ENOMEM;
1188 goto err_free_dma;
1189 }
1190	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet,
1191		     (unsigned long)mv_chan);
1192
1193 /* clear errors before enabling interrupts */
1194 mv_xor_device_clear_err_status(mv_chan);
1195
1196 irq = platform_get_irq(pdev, 0);
1197 if (irq < 0) {
1198 ret = irq;
1199 goto err_free_dma;
1200 }
1201 ret = devm_request_irq(&pdev->dev, irq,
1202 mv_xor_interrupt_handler,
1203 0, dev_name(&pdev->dev), mv_chan);
1204 if (ret)
1205 goto err_free_dma;
1206
1207 mv_chan_unmask_interrupts(mv_chan);
1208
1209 mv_set_mode(mv_chan, DMA_MEMCPY);
1210
1211 spin_lock_init(&mv_chan->lock);
1212 INIT_LIST_HEAD(&mv_chan->chain);
1213 INIT_LIST_HEAD(&mv_chan->completed_slots);
1214 INIT_LIST_HEAD(&mv_chan->all_slots);
1215 INIT_RCU_HEAD(&mv_chan->common.rcu);
1216 mv_chan->common.device = dma_dev;
1217
1218 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1219
1220 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1221 ret = mv_xor_memcpy_self_test(adev);
1222 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1223 if (ret)
1224 goto err_free_dma;
1225 }
1226
1227 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1228 ret = mv_xor_xor_self_test(adev);
1229 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1230 if (ret)
1231 goto err_free_dma;
1232 }
1233
1234 dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
1235 "( %s%s%s%s)\n",
1236 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1237 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1238 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1239 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1240
1241 dma_async_device_register(dma_dev);
1242 goto out;
1243
1244 err_free_dma:
1245 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1246 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1247 out:
1248 return ret;
1249}
1250
1251static void
1252mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
1253 struct mbus_dram_target_info *dram)
1254{
1255 void __iomem *base = msp->xor_base;
1256 u32 win_enable = 0;
1257 int i;
1258
1259 for (i = 0; i < 8; i++) {
1260 writel(0, base + WINDOW_BASE(i));
1261 writel(0, base + WINDOW_SIZE(i));
1262 if (i < 4)
1263 writel(0, base + WINDOW_REMAP_HIGH(i));
1264 }
1265
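	/*
	 * Program one address window per DRAM chip select.  Bit i of
	 * win_enable enables window i; the pair of bits at 16 + 2*i
	 * presumably sets that window's access protection to full
	 * read/write access.
	 */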
1266 for (i = 0; i < dram->num_cs; i++) {
1267 struct mbus_dram_window *cs = dram->cs + i;
1268
1269 writel((cs->base & 0xffff0000) |
1270 (cs->mbus_attr << 8) |
1271 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1272 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1273
1274 win_enable |= (1 << i);
1275 win_enable |= 3 << (16 + (2 * i));
1276 }
1277
1278 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1279 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1280}
1281
1282static struct platform_driver mv_xor_driver = {
1283 .probe = mv_xor_probe,
1284 .remove = mv_xor_remove,
1285 .driver = {
1286 .owner = THIS_MODULE,
1287 .name = MV_XOR_NAME,
1288 },
1289};
1290
1291static int mv_xor_shared_probe(struct platform_device *pdev)
1292{
1293 struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
1294 struct mv_xor_shared_private *msp;
1295 struct resource *res;
1296
1297 dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
1298
1299 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
1300 if (!msp)
1301 return -ENOMEM;
1302
1303 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1304 if (!res)
1305 return -ENODEV;
1306
1307 msp->xor_base = devm_ioremap(&pdev->dev, res->start,
1308 res->end - res->start + 1);
1309 if (!msp->xor_base)
1310 return -EBUSY;
1311
1312 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1313 if (!res)
1314 return -ENODEV;
1315
1316 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1317 res->end - res->start + 1);
1318 if (!msp->xor_high_base)
1319 return -EBUSY;
1320
1321 platform_set_drvdata(pdev, msp);
1322
1323 /*
1324 * (Re-)program MBUS remapping windows if we are asked to.
1325 */
1326 if (msd != NULL && msd->dram != NULL)
1327 mv_xor_conf_mbus_windows(msp, msd->dram);
1328
1329 return 0;
1330}
1331
1332static int mv_xor_shared_remove(struct platform_device *pdev)
1333{
1334 return 0;
1335}
1336
1337static struct platform_driver mv_xor_shared_driver = {
1338 .probe = mv_xor_shared_probe,
1339 .remove = mv_xor_shared_remove,
1340 .driver = {
1341 .owner = THIS_MODULE,
1342 .name = MV_XOR_SHARED_NAME,
1343 },
1344};
1345
1346
1347static int __init mv_xor_init(void)
1348{
1349 int rc;
1350
1351 rc = platform_driver_register(&mv_xor_shared_driver);
1352 if (!rc) {
1353 rc = platform_driver_register(&mv_xor_driver);
1354 if (rc)
1355 platform_driver_unregister(&mv_xor_shared_driver);
1356 }
1357 return rc;
1358}
1359module_init(mv_xor_init);
1360
1361/* it's currently unsafe to unload this module */
1362#if 0
1363static void __exit mv_xor_exit(void)
1364{
1365 platform_driver_unregister(&mv_xor_driver);
1366 platform_driver_unregister(&mv_xor_shared_driver);
1367 return;
1368}
1369
1370module_exit(mv_xor_exit);
1371#endif
1372
1373MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1374MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1375MODULE_LICENSE("GPL");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
new file mode 100644
index 000000000000..06cafe1ef521
--- /dev/null
+++ b/drivers/dma/mv_xor.h
@@ -0,0 +1,183 @@
1/*
2 * Copyright (C) 2007, 2008, Marvell International Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18#ifndef MV_XOR_H
19#define MV_XOR_H
20
21#include <linux/types.h>
22#include <linux/io.h>
23#include <linux/dmaengine.h>
24#include <linux/interrupt.h>
25
26#define USE_TIMER
27#define MV_XOR_SLOT_SIZE 64
28#define MV_XOR_THRESHOLD 1
29
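/*
 * Values presumably written into the operation-mode field of the channel
 * XOR_CONFIG register by mv_set_mode() to select the engine's function.
 */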
30#define XOR_OPERATION_MODE_XOR 0
31#define XOR_OPERATION_MODE_MEMCPY 2
32#define XOR_OPERATION_MODE_MEMSET 4
33
34#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
35#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
36#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
37#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
38#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
39#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
40#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
41
42#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
43#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
44#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30)
45#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
46#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
47#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
48#define XOR_INTR_MASK_VALUE 0x3F5
49
50#define WINDOW_BASE(w) (0x250 + ((w) << 2))
51#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
52#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
53#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
54
55struct mv_xor_shared_private {
56 void __iomem *xor_base;
57 void __iomem *xor_high_base;
58};
59
60
61/**
62 * struct mv_xor_device - internal representation of a XOR device
63 * @pdev: Platform device
64 * @id: HW XOR Device selector
65 * @dma_desc_pool: base of DMA descriptor region (DMA address)
66 * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
67 * @common: embedded struct dma_device
68 */
69struct mv_xor_device {
70 struct platform_device *pdev;
71 int id;
72 dma_addr_t dma_desc_pool;
73 void *dma_desc_pool_virt;
74 struct dma_device common;
75 struct mv_xor_shared_private *shared;
76};
77
78/**
79 * struct mv_xor_chan - internal representation of a XOR channel
80 * @pending: allows batching of hardware operations
81 * @completed_cookie: identifier for the most recently completed operation
82 * @lock: serializes enqueue/dequeue operations to the descriptors pool
83 * @mmr_base: memory mapped register base
84 * @idx: the index of the xor channel
85 * @chain: device chain view of the descriptors
86 * @completed_slots: slots completed by HW that still need to be acked
87 * @device: parent device
88 * @common: common dmaengine channel object members
89 * @last_used: placeholder for allocation to continue from where it left off
90 * @all_slots: complete domain of slots usable by the channel
91 * @slots_allocated: records the actual size of the descriptor slot pool
92 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
93 */
94struct mv_xor_chan {
95 int pending;
96 dma_cookie_t completed_cookie;
97 spinlock_t lock; /* protects the descriptor slot pool */
98 void __iomem *mmr_base;
99 unsigned int idx;
100 enum dma_transaction_type current_type;
101 struct list_head chain;
102 struct list_head completed_slots;
103 struct mv_xor_device *device;
104 struct dma_chan common;
105 struct mv_xor_desc_slot *last_used;
106 struct list_head all_slots;
107 int slots_allocated;
108 struct tasklet_struct irq_tasklet;
109#ifdef USE_TIMER
110 unsigned long cleanup_time;
111 u32 current_on_last_cleanup;
112 dma_cookie_t is_complete_cookie;
113#endif
114};
115
116/**
117 * struct mv_xor_desc_slot - software descriptor
118 * @slot_node: node on the mv_xor_chan.all_slots list
119 * @chain_node: node on the mv_xor_chan.chain list
120 * @completed_node: node on the mv_xor_chan.completed_slots list
121 * @hw_desc: virtual address of the hardware descriptor chain
122 * @phys: hardware address of the hardware descriptor chain
123 * @group_head: first operation in a transaction
124 * @slot_cnt: total slots used in a transaction (group of operations)
125 * @slots_per_op: number of slots per operation
126 * @idx: pool index
127 * @unmap_src_cnt: number of xor sources
128 * @unmap_len: transaction bytecount
129 * @async_tx: support for the async_tx api
130 * @group_list: list of slots that make up a multi-descriptor transaction
131 *	for example for transfer lengths larger than the supported HW maximum
132 * @xor_check_result: result of the zero-sum check
133 * @crc32_result: result of the CRC-32 calculation
134 */
135struct mv_xor_desc_slot {
136 struct list_head slot_node;
137 struct list_head chain_node;
138 struct list_head completed_node;
139 enum dma_transaction_type type;
140 void *hw_desc;
141 struct mv_xor_desc_slot *group_head;
142 u16 slot_cnt;
143 u16 slots_per_op;
144 u16 idx;
145 u16 unmap_src_cnt;
146 u32 value;
147 size_t unmap_len;
148 struct dma_async_tx_descriptor async_tx;
149 union {
150 u32 *xor_check_result;
151 u32 *crc32_result;
152 };
153#ifdef USE_TIMER
154 unsigned long arrival_time;
155 struct timer_list timeout;
156#endif
157};
158
159/* This structure describes the hardware XOR descriptor (64 bytes) */
160struct mv_xor_desc {
161 u32 status; /* descriptor execution status */
162 u32 crc32_result; /* result of CRC-32 calculation */
163 u32 desc_command; /* type of operation to be carried out */
164 u32 phy_next_desc; /* next descriptor address pointer */
165 u32 byte_count; /* size of src/dst blocks in bytes */
166 u32 phy_dest_addr; /* destination block address */
167 u32 phy_src_addr[8]; /* source block addresses */
168 u32 reserved0;
169 u32 reserved1;
170};
171
172#define to_mv_sw_desc(addr_hw_desc) \
173 container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
174
175#define mv_hw_desc_slot_idx(hw_desc, idx) \
176 ((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
177
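/*
 * The prep routines reject transfers shorter than 128 bytes; the maximum
 * matches what appears to be a 24-bit hardware byte-count field (16 MB - 1).
 */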
178#define MV_XOR_MIN_BYTE_COUNT (128)
179#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1)
180#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT
181
182
183#endif
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7dfd0f..0b624e927a6f 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
254static int smi_request(struct smi_cmd *smi_cmd) 254static int smi_request(struct smi_cmd *smi_cmd)
255{ 255{
256 cpumask_t old_mask; 256 cpumask_t old_mask;
257 cpumask_of_cpu_ptr(new_mask, 0);
257 int ret = 0; 258 int ret = 0;
258 259
259 if (smi_cmd->magic != SMI_CMD_MAGIC) { 260 if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
264 265
265 /* SMI requires CPU 0 */ 266 /* SMI requires CPU 0 */
266 old_mask = current->cpus_allowed; 267 old_mask = current->cpus_allowed;
267 set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); 268 set_cpus_allowed_ptr(current, new_mask);
268 if (smp_processor_id() != 0) { 269 if (smp_processor_id() != 0) {
269 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", 270 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
270 __func__); 271 __func__);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f43d6d3cf2fa..426ac5add585 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -780,7 +780,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
780 */ 780 */
781static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value) 781static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value)
782{ 782{
783 __le64 x; 783 u64 x;
784 u64 m = (1ULL << n) - 1; 784 u64 m = (1ULL << n) - 1;
785 785
786 if (n > 32) 786 if (n > 32)
@@ -796,10 +796,10 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3
796 report += offset >> 3; 796 report += offset >> 3;
797 offset &= 7; 797 offset &= 7;
798 798
799 x = get_unaligned((__le64 *)report); 799 x = get_unaligned_le64(report);
800 x &= cpu_to_le64(~(m << offset)); 800 x &= ~(m << offset);
801 x |= cpu_to_le64(((u64) value) << offset); 801 x |= ((u64)value) << offset;
802 put_unaligned(x, (__le64 *) report); 802 put_unaligned_le64(x, report);
803} 803}
804 804
805/* 805/*
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index 4c2052c658f1..16feea014494 100644
--- a/drivers/hid/hid-input-quirks.c
+++ b/drivers/hid/hid-input-quirks.c
@@ -89,6 +89,29 @@ static int quirk_logitech_ultrax_remote(struct hid_usage *usage, struct input_de
89 return 1; 89 return 1;
90} 90}
91 91
92static int quirk_gyration_remote(struct hid_usage *usage, struct input_dev *input,
93 unsigned long **bit, int *max)
94{
95 if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
96 return 0;
97
98 set_bit(EV_REP, input->evbit);
99 switch(usage->hid & HID_USAGE) {
100 /* Reported on Gyration MCE Remote */
101 case 0x00d: map_key_clear(KEY_HOME); break;
102 case 0x024: map_key_clear(KEY_DVD); break;
103 case 0x025: map_key_clear(KEY_PVR); break;
104 case 0x046: map_key_clear(KEY_MEDIA); break;
105 case 0x047: map_key_clear(KEY_MP3); break;
106 case 0x049: map_key_clear(KEY_CAMERA); break;
107 case 0x04a: map_key_clear(KEY_VIDEO); break;
108
109 default:
110 return 0;
111 }
112 return 1;
113}
114
92static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input, 115static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input,
93 unsigned long **bit, int *max) 116 unsigned long **bit, int *max)
94{ 117{
@@ -303,6 +326,9 @@ static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *inp
303#define VENDOR_ID_EZKEY 0x0518 326#define VENDOR_ID_EZKEY 0x0518
304#define DEVICE_ID_BTC_8193 0x0002 327#define DEVICE_ID_BTC_8193 0x0002
305 328
329#define VENDOR_ID_GYRATION 0x0c16
330#define DEVICE_ID_GYRATION_REMOTE 0x0002
331
306#define VENDOR_ID_LOGITECH 0x046d 332#define VENDOR_ID_LOGITECH 0x046d
307#define DEVICE_ID_LOGITECH_RECEIVER 0xc101 333#define DEVICE_ID_LOGITECH_RECEIVER 0xc101
308#define DEVICE_ID_S510_RECEIVER 0xc50c 334#define DEVICE_ID_S510_RECEIVER 0xc50c
@@ -337,6 +363,8 @@ static const struct hid_input_blacklist {
337 363
338 { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 }, 364 { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 },
339 365
366 { VENDOR_ID_GYRATION, DEVICE_ID_GYRATION_REMOTE, quirk_gyration_remote },
367
340 { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote }, 368 { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote },
341 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless }, 369 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless },
342 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless }, 370 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless },
@@ -438,6 +466,18 @@ int hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struc
438 input_event(input, usage->type, REL_WHEEL, -value); 466 input_event(input, usage->type, REL_WHEEL, -value);
439 return 1; 467 return 1;
440 } 468 }
469
470 /* Gyration MCE remote "Sleep" key */
471 if (hid->vendor == VENDOR_ID_GYRATION &&
472 hid->product == DEVICE_ID_GYRATION_REMOTE &&
473 (usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
474 (usage->hid & 0xff) == 0x82) {
475 input_event(input, usage->type, usage->code, 1);
476 input_sync(input);
477 input_event(input, usage->type, usage->code, 0);
478 input_sync(input);
479 return 1;
480 }
441 return 0; 481 return 0;
442} 482}
443 483
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 5c52a20ad344..1b2e8dc3398d 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -100,6 +100,8 @@ static struct hidinput_key_translation apple_fn_keys[] = {
100 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, 100 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
101 { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */ 101 { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */
102 { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */ 102 { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */
103 { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY },
104 { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY },
103 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY }, 105 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
104 { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY }, 106 { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
105 { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY }, 107 { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY },
@@ -612,6 +614,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
612 case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break; 614 case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break;
613 case 0x0b7: map_key_clear(KEY_STOPCD); break; 615 case 0x0b7: map_key_clear(KEY_STOPCD); break;
614 case 0x0b8: map_key_clear(KEY_EJECTCD); break; 616 case 0x0b8: map_key_clear(KEY_EJECTCD); break;
617 case 0x0bc: map_key_clear(KEY_MEDIA_REPEAT); break;
615 618
616 case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; 619 case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
617 case 0x0e0: map_abs_clear(ABS_VOLUME); break; 620 case 0x0e0: map_abs_clear(ABS_VOLUME); break;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 0c6b4d4e7e27..c40f0403edaf 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -105,6 +105,7 @@ out:
105static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) 105static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
106{ 106{
107 unsigned int minor = iminor(file->f_path.dentry->d_inode); 107 unsigned int minor = iminor(file->f_path.dentry->d_inode);
108 /* FIXME: What stops hidraw_table going NULL */
108 struct hid_device *dev = hidraw_table[minor]->hid; 109 struct hid_device *dev = hidraw_table[minor]->hid;
109 __u8 *buf; 110 __u8 *buf;
110 int ret = 0; 111 int ret = 0;
@@ -211,38 +212,43 @@ static int hidraw_release(struct inode * inode, struct file * file)
211 kfree(list->hidraw); 212 kfree(list->hidraw);
212 } 213 }
213 214
215 kfree(list);
216
214 return 0; 217 return 0;
215} 218}
216 219
217static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 220static long hidraw_ioctl(struct file *file, unsigned int cmd,
221 unsigned long arg)
218{ 222{
223 struct inode *inode = file->f_path.dentry->d_inode;
219 unsigned int minor = iminor(inode); 224 unsigned int minor = iminor(inode);
225 long ret = 0;
226 /* FIXME: What stops hidraw_table going NULL */
220 struct hidraw *dev = hidraw_table[minor]; 227 struct hidraw *dev = hidraw_table[minor];
221 void __user *user_arg = (void __user*) arg; 228 void __user *user_arg = (void __user*) arg;
222 229
230 lock_kernel();
223 switch (cmd) { 231 switch (cmd) {
224 case HIDIOCGRDESCSIZE: 232 case HIDIOCGRDESCSIZE:
225 if (put_user(dev->hid->rsize, (int __user *)arg)) 233 if (put_user(dev->hid->rsize, (int __user *)arg))
226 return -EFAULT; 234 ret = -EFAULT;
227 return 0; 235 break;
228 236
229 case HIDIOCGRDESC: 237 case HIDIOCGRDESC:
230 { 238 {
231 __u32 len; 239 __u32 len;
232 240
233 if (get_user(len, (int __user *)arg)) 241 if (get_user(len, (int __user *)arg))
234 return -EFAULT; 242 ret = -EFAULT;
235 243 else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
236 if (len > HID_MAX_DESCRIPTOR_SIZE - 1) 244 ret = -EINVAL;
237 return -EINVAL; 245 else if (copy_to_user(user_arg + offsetof(
238 246 struct hidraw_report_descriptor,
239 if (copy_to_user(user_arg + offsetof( 247 value[0]),
240 struct hidraw_report_descriptor, 248 dev->hid->rdesc,
241 value[0]), 249 min(dev->hid->rsize, len)))
242 dev->hid->rdesc, 250 ret = -EFAULT;
243 min(dev->hid->rsize, len))) 251 break;
244 return -EFAULT;
245 return 0;
246 } 252 }
247 case HIDIOCGRAWINFO: 253 case HIDIOCGRAWINFO:
248 { 254 {
@@ -252,15 +258,13 @@ static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd
252 dinfo.vendor = dev->hid->vendor; 258 dinfo.vendor = dev->hid->vendor;
253 dinfo.product = dev->hid->product; 259 dinfo.product = dev->hid->product;
254 if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) 260 if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
255 return -EFAULT; 261 ret = -EFAULT;
256 262 break;
257 return 0;
258 } 263 }
259 default: 264 default:
260 printk(KERN_EMERG "hidraw: unsupported ioctl() %x\n", 265 ret = -ENOTTY;
261 cmd);
262 } 266 }
263 return -EINVAL; 267 return ret;
264} 268}
265 269
266static const struct file_operations hidraw_ops = { 270static const struct file_operations hidraw_ops = {
@@ -270,7 +274,7 @@ static const struct file_operations hidraw_ops = {
270 .poll = hidraw_poll, 274 .poll = hidraw_poll,
271 .open = hidraw_open, 275 .open = hidraw_open,
272 .release = hidraw_release, 276 .release = hidraw_release,
273 .ioctl = hidraw_ioctl, 277 .unlocked_ioctl = hidraw_ioctl,
274}; 278};
275 279
276void hidraw_report_event(struct hid_device *hid, u8 *data, int len) 280void hidraw_report_event(struct hid_device *hid, u8 *data, int len)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1df832a8fcbc..61e78a4369b9 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -69,12 +69,18 @@
69#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220 69#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220
70#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221 70#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221
71#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222 71#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222
72#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
73#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
74#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
72#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229 75#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229
73#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a 76#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a
74#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b 77#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b
75#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c 78#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c
76#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d 79#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d
77#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e 80#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e
81#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
82#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
83#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
78#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 84#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
79#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 85#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
80#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 86#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
@@ -241,6 +247,8 @@
241#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 247#define USB_DEVICE_ID_LD_MACHINETEST 0x2040
242 248
243#define USB_VENDOR_ID_LOGITECH 0x046d 249#define USB_VENDOR_ID_LOGITECH 0x046d
250#define USB_DEVICE_ID_LOGITECH_LX3 0xc044
251#define USB_DEVICE_ID_LOGITECH_V150 0xc047
244#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 252#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
245#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110 253#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110
246#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111 254#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111
@@ -314,6 +322,7 @@
314#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 322#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
315#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 323#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
316#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 324#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
325#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
317#define USB_DEVICE_ID_DINOVO_EDGE 0xc714 326#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
318#define USB_DEVICE_ID_DINOVO_MINI 0xc71f 327#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
319 328
@@ -443,7 +452,8 @@ static const struct hid_blacklist {
443 { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD }, 452 { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
444 { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD }, 453 { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
445 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, 454 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
446 455
456 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP, HID_QUIRK_DUPLICATE_USAGES },
447 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES }, 457 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
448 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES }, 458 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES },
449 459
@@ -593,6 +603,8 @@ static const struct hid_blacklist {
593 603
594 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, 604 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
595 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, 605 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
606 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_LX3, HID_QUIRK_INVERT_HWHEEL },
607 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_V150, HID_QUIRK_INVERT_HWHEEL },
596 608
597 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS }, 609 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS },
598 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS }, 610 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS },
@@ -642,6 +654,12 @@ static const struct hid_blacklist {
642 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, 654 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
643 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, 655 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
644 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, 656 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
657 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN },
658 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
659 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN },
660 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN },
661 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
662 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN },
645 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 663 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
646 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 664 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
647 665
@@ -1128,7 +1146,7 @@ static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize)
1128 && rdesc[557] == 0x19 1146 && rdesc[557] == 0x19
1129 && rdesc[559] == 0x29) { 1147 && rdesc[559] == 0x29) {
1130 printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); 1148 printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
1131 rdesc[284] = rdesc[304] = rdesc[558] = 0x35; 1149 rdesc[284] = rdesc[304] = rdesc[557] = 0x35;
1132 rdesc[352] = 0x36; 1150 rdesc[352] = 0x36;
1133 rdesc[286] = rdesc[355] = 0x46; 1151 rdesc[286] = rdesc[355] = 0x46;
1134 rdesc[306] = rdesc[559] = 0x45; 1152 rdesc[306] = rdesc[559] = 0x45;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 95cc192bc7af..842e9edb888e 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -406,6 +406,7 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
406 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); 406 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
407 if (!uref_multi) 407 if (!uref_multi)
408 return -ENOMEM; 408 return -ENOMEM;
409 lock_kernel();
409 uref = &uref_multi->uref; 410 uref = &uref_multi->uref;
410 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { 411 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
411 if (copy_from_user(uref_multi, user_arg, 412 if (copy_from_user(uref_multi, user_arg,
@@ -501,12 +502,15 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
501 } 502 }
502 503
503goodreturn: 504goodreturn:
505 unlock_kernel();
504 kfree(uref_multi); 506 kfree(uref_multi);
505 return 0; 507 return 0;
506fault: 508fault:
509 unlock_kernel();
507 kfree(uref_multi); 510 kfree(uref_multi);
508 return -EFAULT; 511 return -EFAULT;
509inval: 512inval:
513 unlock_kernel();
510 kfree(uref_multi); 514 kfree(uref_multi);
511 return -EINVAL; 515 return -EINVAL;
512 } 516 }
@@ -540,7 +544,7 @@ static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd,
540 return len; 544 return len;
541} 545}
542 546
543static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 547static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
544{ 548{
545 struct hiddev_list *list = file->private_data; 549 struct hiddev_list *list = file->private_data;
546 struct hiddev *hiddev = list->hiddev; 550 struct hiddev *hiddev = list->hiddev;
@@ -555,7 +559,10 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
555 struct usbhid_device *usbhid = hid->driver_data; 559 struct usbhid_device *usbhid = hid->driver_data;
556 void __user *user_arg = (void __user *)arg; 560 void __user *user_arg = (void __user *)arg;
557 int i; 561 int i;
562
563 /* Called without BKL by compat methods so no BKL taken */
558 564
565 /* FIXME: Who or what stop this racing with a disconnect ?? */
559 if (!hiddev->exist) 566 if (!hiddev->exist)
560 return -EIO; 567 return -EIO;
561 568
@@ -756,8 +763,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
756#ifdef CONFIG_COMPAT 763#ifdef CONFIG_COMPAT
757static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 764static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
758{ 765{
759 struct inode *inode = file->f_path.dentry->d_inode; 766 return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
760 return hiddev_ioctl(inode, file, cmd, (unsigned long)compat_ptr(arg));
761} 767}
762#endif 768#endif
763 769
@@ -768,7 +774,7 @@ static const struct file_operations hiddev_fops = {
768 .poll = hiddev_poll, 774 .poll = hiddev_poll,
769 .open = hiddev_open, 775 .open = hiddev_open,
770 .release = hiddev_release, 776 .release = hiddev_release,
771 .ioctl = hiddev_ioctl, 777 .unlocked_ioctl = hiddev_ioctl,
772 .fasync = hiddev_fasync, 778 .fasync = hiddev_fasync,
773#ifdef CONFIG_COMPAT 779#ifdef CONFIG_COMPAT
774 .compat_ioctl = hiddev_compat_ioctl, 780 .compat_ioctl = hiddev_compat_ioctl,
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 3cd46d2e53c1..0caaafe01843 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -43,7 +43,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
43MODULE_DESCRIPTION(DRIVER_DESC); 43MODULE_DESCRIPTION(DRIVER_DESC);
44MODULE_LICENSE(DRIVER_LICENSE); 44MODULE_LICENSE(DRIVER_LICENSE);
45 45
46static unsigned char usb_kbd_keycode[256] = { 46static const unsigned char usb_kbd_keycode[256] = {
47 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 47 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
48 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 48 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
49 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 49 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
@@ -233,14 +233,6 @@ static int usb_kbd_probe(struct usb_interface *iface,
233 if (!usb_endpoint_is_int_in(endpoint)) 233 if (!usb_endpoint_is_int_in(endpoint))
234 return -ENODEV; 234 return -ENODEV;
235 235
236#ifdef CONFIG_USB_HID
237 if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
238 le16_to_cpu(dev->descriptor.idProduct))
239 & HID_QUIRK_IGNORE) {
240 return -ENODEV;
241 }
242#endif
243
244 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); 236 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
245 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); 237 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
246 238
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 703e9d0e8714..35689ef172cc 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -129,14 +129,6 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
129 if (!usb_endpoint_is_int_in(endpoint)) 129 if (!usb_endpoint_is_int_in(endpoint))
130 return -ENODEV; 130 return -ENODEV;
131 131
132#ifdef CONFIG_USB_HID
133 if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
134 le16_to_cpu(dev->descriptor.idProduct))
135 & (HID_QUIRK_IGNORE|HID_QUIRK_IGNORE_MOUSE)) {
136 return -ENODEV;
137 }
138#endif
139
140 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); 132 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
141 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); 133 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
142 134
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 15b09b89588a..04d9c4d459d0 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -510,6 +510,7 @@ config BLK_DEV_TRIFLEX
510 510
511config BLK_DEV_CY82C693 511config BLK_DEV_CY82C693
512 tristate "CY82C693 chipset support" 512 tristate "CY82C693 chipset support"
513 depends on ALPHA
513 select IDE_TIMINGS 514 select IDE_TIMINGS
514 select BLK_DEV_IDEDMA_PCI 515 select BLK_DEV_IDEDMA_PCI
515 help 516 help
@@ -548,6 +549,7 @@ config BLK_DEV_CS5535
548 549
549config BLK_DEV_HPT34X 550config BLK_DEV_HPT34X
550 tristate "HPT34X chipset support" 551 tristate "HPT34X chipset support"
552 depends on BROKEN
551 select BLK_DEV_IDEDMA_PCI 553 select BLK_DEV_IDEDMA_PCI
552 help 554 help
553 This driver adds up to 4 more EIDE devices sharing a single 555 This driver adds up to 4 more EIDE devices sharing a single
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 52f58c885783..f575e8341aec 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -72,7 +72,7 @@ struct icside_state {
72 void __iomem *ioc_base; 72 void __iomem *ioc_base;
73 unsigned int sel; 73 unsigned int sel;
74 unsigned int type; 74 unsigned int type;
75 ide_hwif_t *hwif[2]; 75 struct ide_host *host;
76}; 76};
77 77
78#define ICS_TYPE_A3IN 0 78#define ICS_TYPE_A3IN 0
@@ -375,12 +375,14 @@ static int icside_dma_test_irq(ide_drive_t *drive)
375 375
376static void icside_dma_timeout(ide_drive_t *drive) 376static void icside_dma_timeout(ide_drive_t *drive)
377{ 377{
378 ide_hwif_t *hwif = drive->hwif;
379
378 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); 380 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
379 381
380 if (icside_dma_test_irq(drive)) 382 if (icside_dma_test_irq(drive))
381 return; 383 return;
382 384
383 ide_dump_status(drive, "DMA timeout", ide_read_status(drive)); 385 ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
384 386
385 icside_dma_end(drive); 387 icside_dma_end(drive);
386} 388}
@@ -440,10 +442,10 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
440static int __init 442static int __init
441icside_register_v5(struct icside_state *state, struct expansion_card *ec) 443icside_register_v5(struct icside_state *state, struct expansion_card *ec)
442{ 444{
443 ide_hwif_t *hwif;
444 void __iomem *base; 445 void __iomem *base;
445 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 446 struct ide_host *host;
446 hw_regs_t hw; 447 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
448 int ret;
447 449
448 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); 450 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
449 if (!base) 451 if (!base)
@@ -463,22 +465,23 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
463 465
464 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); 466 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
465 467
466 hwif = ide_find_port(); 468 host = ide_host_alloc(NULL, hws);
467 if (!hwif) 469 if (host == NULL)
468 return -ENODEV; 470 return -ENODEV;
469 471
470 ide_init_port_hw(hwif, &hw); 472 state->host = host;
471 default_hwif_mmiops(hwif);
472
473 state->hwif[0] = hwif;
474 473
475 ecard_set_drvdata(ec, state); 474 ecard_set_drvdata(ec, state);
476 475
477 idx[0] = hwif->index; 476 ret = ide_host_register(host, NULL, hws);
478 477 if (ret)
479 ide_device_add(idx, NULL); 478 goto err_free;
480 479
481 return 0; 480 return 0;
481err_free:
482 ide_host_free(host);
483 ecard_set_drvdata(ec, NULL);
484 return ret;
482} 485}
483 486
484static const struct ide_port_info icside_v6_port_info __initdata = { 487static const struct ide_port_info icside_v6_port_info __initdata = {
@@ -493,13 +496,12 @@ static const struct ide_port_info icside_v6_port_info __initdata = {
493static int __init 496static int __init
494icside_register_v6(struct icside_state *state, struct expansion_card *ec) 497icside_register_v6(struct icside_state *state, struct expansion_card *ec)
495{ 498{
496 ide_hwif_t *hwif, *mate;
497 void __iomem *ioc_base, *easi_base; 499 void __iomem *ioc_base, *easi_base;
500 struct ide_host *host;
498 unsigned int sel = 0; 501 unsigned int sel = 0;
499 int ret; 502 int ret;
500 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 503 hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL };
501 struct ide_port_info d = icside_v6_port_info; 504 struct ide_port_info d = icside_v6_port_info;
502 hw_regs_t hw[2];
503 505
504 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); 506 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
505 if (!ioc_base) { 507 if (!ioc_base) {
@@ -538,28 +540,11 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
538 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); 540 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
539 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); 541 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
540 542
541 /* 543 host = ide_host_alloc(&d, hws);
542 * Find and register the interfaces. 544 if (host == NULL)
543 */
544 hwif = ide_find_port();
545 if (hwif == NULL)
546 return -ENODEV; 545 return -ENODEV;
547 546
548 ide_init_port_hw(hwif, &hw[0]); 547 state->host = host;
549 default_hwif_mmiops(hwif);
550
551 idx[0] = hwif->index;
552
553 mate = ide_find_port();
554 if (mate) {
555 ide_init_port_hw(mate, &hw[1]);
556 default_hwif_mmiops(mate);
557
558 idx[1] = mate->index;
559 }
560
561 state->hwif[0] = hwif;
562 state->hwif[1] = mate;
563 548
564 ecard_set_drvdata(ec, state); 549 ecard_set_drvdata(ec, state);
565 550
@@ -569,11 +554,17 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
569 d.dma_ops = NULL; 554 d.dma_ops = NULL;
570 } 555 }
571 556
572 ide_device_add(idx, &d); 557 ret = ide_host_register(host, NULL, hws);
558 if (ret)
559 goto err_free;
573 560
574 return 0; 561 return 0;
575 562err_free:
576 out: 563 ide_host_free(host);
564 if (d.dma_ops)
565 free_dma(ec->dma);
566 ecard_set_drvdata(ec, NULL);
567out:
577 return ret; 568 return ret;
578} 569}
579 570
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index 2f311da4c963..176532ffae0e 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -28,10 +28,8 @@
28 28
29static int __init ide_arm_init(void) 29static int __init ide_arm_init(void)
30{ 30{
31 ide_hwif_t *hwif;
32 hw_regs_t hw;
33 unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206; 31 unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
34 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 32 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 33
36 if (!request_region(base, 8, DRV_NAME)) { 34 if (!request_region(base, 8, DRV_NAME)) {
37 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", 35 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -51,15 +49,7 @@ static int __init ide_arm_init(void)
51 hw.irq = IDE_ARM_IRQ; 49 hw.irq = IDE_ARM_IRQ;
52 hw.chipset = ide_generic; 50 hw.chipset = ide_generic;
53 51
54 hwif = ide_find_port(); 52 return ide_host_add(NULL, hws, NULL);
55 if (hwif) {
56 ide_init_port_hw(hwif, &hw);
57 idx[0] = hwif->index;
58
59 ide_device_add(idx, NULL);
60 }
61
62 return 0;
63} 53}
64 54
65module_init(ide_arm_init); 55module_init(ide_arm_init);
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index c79b85b6e4a3..65bb4b8fd570 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -316,15 +316,14 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
316static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif, 316static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
317 const struct ide_port_info *d) 317 const struct ide_port_info *d)
318{ 318{
319 unsigned long base =
320 hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
321
322 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name); 319 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
323 320
324 if (ide_allocate_dma_engine(hwif)) 321 if (ide_allocate_dma_engine(hwif))
325 return -1; 322 return -1;
326 323
327 ide_setup_dma(hwif, base); 324 hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
325
326 hwif->dma_ops = &sff_dma_ops;
328 327
329 return 0; 328 return 0;
330} 329}
@@ -348,11 +347,10 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
348{ 347{
349 struct clk *clk; 348 struct clk *clk;
350 struct resource *mem, *irq; 349 struct resource *mem, *irq;
351 ide_hwif_t *hwif; 350 struct ide_host *host;
352 unsigned long base, rate; 351 unsigned long base, rate;
353 int i; 352 int i, rc;
354 hw_regs_t hw; 353 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
355 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
356 354
357 clk = clk_get(NULL, "IDECLK"); 355 clk = clk_get(NULL, "IDECLK");
358 if (IS_ERR(clk)) 356 if (IS_ERR(clk))
@@ -394,24 +392,14 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
394 hw.irq = irq->start; 392 hw.irq = irq->start;
395 hw.chipset = ide_palm3710; 393 hw.chipset = ide_palm3710;
396 394
397 hwif = ide_find_port(); 395 rc = ide_host_add(&palm_bk3710_port_info, hws, NULL);
398 if (hwif == NULL) 396 if (rc)
399 goto out; 397 goto out;
400 398
401 i = hwif->index;
402
403 ide_init_port_hw(hwif, &hw);
404
405 default_hwif_mmiops(hwif);
406
407 idx[0] = i;
408
409 ide_device_add(idx, &palm_bk3710_port_info);
410
411 return 0; 399 return 0;
412out: 400out:
413 printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n"); 401 printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
414 return -ENODEV; 402 return rc;
415} 403}
416 404
417/* work with hotplug and coldplug */ 405/* work with hotplug and coldplug */
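Together with the ide_setup_dma() removal further down in the ide-dma.c hunks, the palm_bk3710 change shows the new division of labour: an init_dma method fills in hwif->dma_base itself and points hwif->dma_ops at the now-exported sff_dma_ops. A hedged sketch of such a hook for a hypothetical controller whose bus-master registers sit at a fixed offset from the data register (EXAMPLE_DMA_OFFSET is invented):

#include <linux/init.h>
#include <linux/ide.h>

#define EXAMPLE_DMA_OFFSET	0x40	/* hypothetical BMIDE offset */

static int __devinit example_init_dma(ide_hwif_t *hwif,
				      const struct ide_port_info *d)
{
	if (ide_allocate_dma_engine(hwif))
		return -1;

	/* the driver now owns the register layout ... */
	hwif->dma_base = hwif->io_ports.data_addr + EXAMPLE_DMA_OFFSET;
	/* ... and simply reuses the generic SFF-8038i operations */
	hwif->dma_ops = &sff_dma_ops;

	return 0;
}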
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index 43057e0303c8..2bdd8b734afb 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -32,11 +32,10 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
32static int __devinit 32static int __devinit
33rapide_probe(struct expansion_card *ec, const struct ecard_id *id) 33rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
34{ 34{
35 ide_hwif_t *hwif;
36 void __iomem *base; 35 void __iomem *base;
36 struct ide_host *host;
37 int ret; 37 int ret;
38 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 38 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
39 hw_regs_t hw;
40 39
41 ret = ecard_request_resources(ec); 40 ret = ecard_request_resources(ec);
42 if (ret) 41 if (ret)
@@ -53,20 +52,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
53 hw.chipset = ide_generic; 52 hw.chipset = ide_generic;
54 hw.dev = &ec->dev; 53 hw.dev = &ec->dev;
55 54
56 hwif = ide_find_port(); 55 ret = ide_host_add(&rapide_port_info, hws, &host);
57 if (hwif == NULL) { 56 if (ret)
58 ret = -ENOENT;
59 goto release; 57 goto release;
60 }
61
62 ide_init_port_hw(hwif, &hw);
63 default_hwif_mmiops(hwif);
64
65 idx[0] = hwif->index;
66
67 ide_device_add(idx, &rapide_port_info);
68 58
69 ecard_set_drvdata(ec, hwif); 59 ecard_set_drvdata(ec, host);
70 goto out; 60 goto out;
71 61
72 release: 62 release:
@@ -77,11 +67,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
77 67
78static void __devexit rapide_remove(struct expansion_card *ec) 68static void __devexit rapide_remove(struct expansion_card *ec)
79{ 69{
80 ide_hwif_t *hwif = ecard_get_drvdata(ec); 70 struct ide_host *host = ecard_get_drvdata(ec);
81 71
82 ecard_set_drvdata(ec, NULL); 72 ecard_set_drvdata(ec, NULL);
83 73
84 ide_unregister(hwif); 74 ide_host_remove(host);
85 75
86 ecard_release_resources(ec); 76 ecard_release_resources(ec);
87} 77}
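rapide and icside now keep the struct ide_host (rather than an ide_hwif_t) in the card's driver data, which reduces the remove path to a single ide_host_remove() call. A sketch of the equivalent probe/remove pairing for a generic device; the dev_set_drvdata()/dev_get_drvdata() glue is illustrative and not part of this diff:

#include <linux/device.h>
#include <linux/ide.h>

/* Sketch: register one port and keep the host in driver data so the
 * remove path can unwind everything with ide_host_remove(). */
static int example_probe(struct device *dev, hw_regs_t *hw)
{
	hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
	struct ide_host *host;
	int ret;

	ret = ide_host_add(NULL, hws, &host);
	if (ret)
		return ret;

	dev_set_drvdata(dev, host);
	return 0;
}

static void example_remove(struct device *dev)
{
	struct ide_host *host = dev_get_drvdata(dev);

	dev_set_drvdata(dev, NULL);
	ide_host_remove(host);	/* unregisters and frees all ports */
}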
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 20fad6d542cc..bde7a585f198 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -100,6 +100,8 @@ static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task)
100 /* be sure we're looking at the low order bits */ 100 /* be sure we're looking at the low order bits */
101 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 101 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
102 102
103 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
104 tf->feature = inb(io_ports->feature_addr);
103 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 105 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
104 tf->nsect = inb(io_ports->nsect_addr); 106 tf->nsect = inb(io_ports->nsect_addr);
105 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 107 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -153,6 +155,21 @@ static void h8300_output_data(ide_drive_t *drive, struct request *rq,
153 mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2); 155 mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
154} 156}
155 157
158static const struct ide_tp_ops h8300_tp_ops = {
159 .exec_command = ide_exec_command,
160 .read_status = ide_read_status,
161 .read_altstatus = ide_read_altstatus,
162 .read_sff_dma_status = ide_read_sff_dma_status,
163
164 .set_irq = ide_set_irq,
165
166 .tf_load = h8300_tf_load,
167 .tf_read = h8300_tf_read,
168
169 .input_data = h8300_input_data,
170 .output_data = h8300_output_data,
171};
172
156#define H8300_IDE_GAP (2) 173#define H8300_IDE_GAP (2)
157 174
158static inline void hw_setup(hw_regs_t *hw) 175static inline void hw_setup(hw_regs_t *hw)
@@ -167,27 +184,14 @@ static inline void hw_setup(hw_regs_t *hw)
167 hw->chipset = ide_generic; 184 hw->chipset = ide_generic;
168} 185}
169 186
170static inline void hwif_setup(ide_hwif_t *hwif)
171{
172 default_hwif_iops(hwif);
173
174 hwif->tf_load = h8300_tf_load;
175 hwif->tf_read = h8300_tf_read;
176
177 hwif->input_data = h8300_input_data;
178 hwif->output_data = h8300_output_data;
179}
180
181static const struct ide_port_info h8300_port_info = { 187static const struct ide_port_info h8300_port_info = {
188 .tp_ops = &h8300_tp_ops,
182 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, 189 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
183}; 190};
184 191
185static int __init h8300_ide_init(void) 192static int __init h8300_ide_init(void)
186{ 193{
187 hw_regs_t hw; 194 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
188 ide_hwif_t *hwif;
189 int index;
190 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
191 195
192 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); 196 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
193 197
@@ -200,19 +204,7 @@ static int __init h8300_ide_init(void)
200 204
201 hw_setup(&hw); 205 hw_setup(&hw);
202 206
203 hwif = ide_find_port_slot(&h8300_port_info); 207 return ide_host_add(&h8300_port_info, hws, NULL);
204 if (hwif == NULL)
205 return -ENOENT;
206
207 index = hwif->index;
208 ide_init_port_hw(hwif, &hw);
209 hwif_setup(hwif);
210
211 idx[0] = index;
212
213 ide_device_add(idx, &h8300_port_info);
214
215 return 0;
216 208
217out_busy: 209out_busy:
218 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); 210 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
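The h8300 hunk is the template for the new struct ide_tp_ops: instead of patching function pointers into the hwif at probe time, a driver declares one const ops table, reuses the generic helpers for whatever it does not override, and hangs it off ide_port_info.tp_ops. A sketch of a driver that only overrides the data-transfer methods; ide_tf_load()/ide_tf_read() are assumed to be the exported generic helpers used elsewhere in this series, and the insw()/outsw() bodies are purely illustrative:

#include <linux/ide.h>
#include <asm/io.h>

/* Hypothetical 16-bit PIO data transfers; everything else is generic. */
static void example_input_data(ide_drive_t *drive, struct request *rq,
			       void *buf, unsigned int len)
{
	insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}

static void example_output_data(ide_drive_t *drive, struct request *rq,
				void *buf, unsigned int len)
{
	outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}

static const struct ide_tp_ops example_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.read_sff_dma_status	= ide_read_sff_dma_status,

	.set_irq		= ide_set_irq,

	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= example_input_data,
	.output_data		= example_output_data,
};

static const struct ide_port_info example_port_info = {
	.tp_ops		= &example_tp_ops,
	.host_flags	= IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
};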
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2802031de670..adf04f99cdeb 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -22,6 +22,8 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
22 void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int)) 22 void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int))
23{ 23{
24 ide_hwif_t *hwif = drive->hwif; 24 ide_hwif_t *hwif = drive->hwif;
25 struct request *rq = hwif->hwgroup->rq;
26 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
25 xfer_func_t *xferfunc; 27 xfer_func_t *xferfunc;
26 unsigned int temp; 28 unsigned int temp;
27 u16 bcount; 29 u16 bcount;
@@ -30,12 +32,12 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
30 debug_log("Enter %s - interrupt handler\n", __func__); 32 debug_log("Enter %s - interrupt handler\n", __func__);
31 33
32 if (pc->flags & PC_FLAG_TIMEDOUT) { 34 if (pc->flags & PC_FLAG_TIMEDOUT) {
33 pc->callback(drive); 35 drive->pc_callback(drive);
34 return ide_stopped; 36 return ide_stopped;
35 } 37 }
36 38
37 /* Clear the interrupt */ 39 /* Clear the interrupt */
38 stat = ide_read_status(drive); 40 stat = tp_ops->read_status(hwif);
39 41
40 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { 42 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
41 if (hwif->dma_ops->dma_end(drive) || 43 if (hwif->dma_ops->dma_end(drive) ||
@@ -63,8 +65,9 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
63 local_irq_enable_in_hardirq(); 65 local_irq_enable_in_hardirq();
64 66
65 if (drive->media == ide_tape && !scsi && 67 if (drive->media == ide_tape && !scsi &&
66 (stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE) 68 (stat & ERR_STAT) && rq->cmd[0] == REQUEST_SENSE)
67 stat &= ~ERR_STAT; 69 stat &= ~ERR_STAT;
70
68 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) { 71 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
69 /* Error detected */ 72 /* Error detected */
70 debug_log("%s: I/O error\n", drive->name); 73 debug_log("%s: I/O error\n", drive->name);
@@ -75,16 +78,17 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
75 goto cmd_finished; 78 goto cmd_finished;
76 } 79 }
77 80
78 if (pc->c[0] == REQUEST_SENSE) { 81 if (rq->cmd[0] == REQUEST_SENSE) {
79 printk(KERN_ERR "%s: I/O error in request sense" 82 printk(KERN_ERR "%s: I/O error in request sense"
80 " command\n", drive->name); 83 " command\n", drive->name);
81 return ide_do_reset(drive); 84 return ide_do_reset(drive);
82 } 85 }
83 86
84 debug_log("[cmd %x]: check condition\n", pc->c[0]); 87 debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
85 88
86 /* Retry operation */ 89 /* Retry operation */
87 retry_pc(drive); 90 retry_pc(drive);
91
88 /* queued, but not started */ 92 /* queued, but not started */
89 return ide_stopped; 93 return ide_stopped;
90 } 94 }
@@ -95,8 +99,10 @@ cmd_finished:
95 dsc_handle(drive); 99 dsc_handle(drive);
96 return ide_stopped; 100 return ide_stopped;
97 } 101 }
102
98 /* Command finished - Call the callback function */ 103 /* Command finished - Call the callback function */
99 pc->callback(drive); 104 drive->pc_callback(drive);
105
100 return ide_stopped; 106 return ide_stopped;
101 } 107 }
102 108
@@ -107,16 +113,15 @@ cmd_finished:
107 ide_dma_off(drive); 113 ide_dma_off(drive);
108 return ide_do_reset(drive); 114 return ide_do_reset(drive);
109 } 115 }
110 /* Get the number of bytes to transfer on this interrupt. */
111 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
112 hwif->INB(hwif->io_ports.lbam_addr);
113 116
114 ireason = hwif->INB(hwif->io_ports.nsect_addr); 117 /* Get the number of bytes to transfer on this interrupt. */
118 ide_read_bcount_and_ireason(drive, &bcount, &ireason);
115 119
116 if (ireason & CD) { 120 if (ireason & CD) {
117 printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__); 121 printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__);
118 return ide_do_reset(drive); 122 return ide_do_reset(drive);
119 } 123 }
124
120 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) { 125 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
121 /* Hopefully, we will never get here */ 126 /* Hopefully, we will never get here */
122 printk(KERN_ERR "%s: We wanted to %s, but the device wants us " 127 printk(KERN_ERR "%s: We wanted to %s, but the device wants us "
@@ -125,6 +130,7 @@ cmd_finished:
125 (ireason & IO) ? "Read" : "Write"); 130 (ireason & IO) ? "Read" : "Write");
126 return ide_do_reset(drive); 131 return ide_do_reset(drive);
127 } 132 }
133
128 if (!(pc->flags & PC_FLAG_WRITING)) { 134 if (!(pc->flags & PC_FLAG_WRITING)) {
129 /* Reading - Check that we have enough space */ 135 /* Reading - Check that we have enough space */
130 temp = pc->xferred + bcount; 136 temp = pc->xferred + bcount;
@@ -142,7 +148,7 @@ cmd_finished:
142 if (pc->sg) 148 if (pc->sg)
143 io_buffers(drive, pc, temp, 0); 149 io_buffers(drive, pc, temp, 0);
144 else 150 else
145 hwif->input_data(drive, NULL, 151 tp_ops->input_data(drive, NULL,
146 pc->cur_pos, temp); 152 pc->cur_pos, temp);
147 printk(KERN_ERR "%s: transferred %d of " 153 printk(KERN_ERR "%s: transferred %d of "
148 "%d bytes\n", 154 "%d bytes\n",
@@ -159,9 +165,9 @@ cmd_finished:
159 debug_log("The device wants to send us more data than " 165 debug_log("The device wants to send us more data than "
160 "expected - allowing transfer\n"); 166 "expected - allowing transfer\n");
161 } 167 }
162 xferfunc = hwif->input_data; 168 xferfunc = tp_ops->input_data;
163 } else 169 } else
164 xferfunc = hwif->output_data; 170 xferfunc = tp_ops->output_data;
165 171
166 if ((drive->media == ide_floppy && !scsi && !pc->buf) || 172 if ((drive->media == ide_floppy && !scsi && !pc->buf) ||
167 (drive->media == ide_tape && !scsi && pc->bh) || 173 (drive->media == ide_tape && !scsi && pc->bh) ||
@@ -175,7 +181,7 @@ cmd_finished:
175 pc->cur_pos += bcount; 181 pc->cur_pos += bcount;
176 182
177 debug_log("[cmd %x] transferred %d bytes on that intr.\n", 183 debug_log("[cmd %x] transferred %d bytes on that intr.\n",
178 pc->c[0], bcount); 184 rq->cmd[0], bcount);
179 185
180 /* And set the interrupt handler again */ 186 /* And set the interrupt handler again */
181 ide_set_handler(drive, handler, timeout, expiry); 187 ide_set_handler(drive, handler, timeout, expiry);
@@ -183,16 +189,27 @@ cmd_finished:
183} 189}
184EXPORT_SYMBOL_GPL(ide_pc_intr); 190EXPORT_SYMBOL_GPL(ide_pc_intr);
185 191
192static u8 ide_read_ireason(ide_drive_t *drive)
193{
194 ide_task_t task;
195
196 memset(&task, 0, sizeof(task));
197 task.tf_flags = IDE_TFLAG_IN_NSECT;
198
199 drive->hwif->tp_ops->tf_read(drive, &task);
200
201 return task.tf.nsect & 3;
202}
203
186static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) 204static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
187{ 205{
188 ide_hwif_t *hwif = drive->hwif;
189 int retries = 100; 206 int retries = 100;
190 207
191 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { 208 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
192 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing " 209 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
193 "a packet command, retrying\n", drive->name); 210 "a packet command, retrying\n", drive->name);
194 udelay(100); 211 udelay(100);
195 ireason = hwif->INB(hwif->io_ports.nsect_addr); 212 ireason = ide_read_ireason(drive);
196 if (retries == 0) { 213 if (retries == 0) {
197 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing " 214 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
198 "a packet command, ignoring\n", 215 "a packet command, ignoring\n",
@@ -210,6 +227,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
210 ide_expiry_t *expiry) 227 ide_expiry_t *expiry)
211{ 228{
212 ide_hwif_t *hwif = drive->hwif; 229 ide_hwif_t *hwif = drive->hwif;
230 struct request *rq = hwif->hwgroup->rq;
213 ide_startstop_t startstop; 231 ide_startstop_t startstop;
214 u8 ireason; 232 u8 ireason;
215 233
@@ -219,7 +237,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
219 return startstop; 237 return startstop;
220 } 238 }
221 239
222 ireason = hwif->INB(hwif->io_ports.nsect_addr); 240 ireason = ide_read_ireason(drive);
223 if (drive->media == ide_tape && !drive->scsi) 241 if (drive->media == ide_tape && !drive->scsi)
224 ireason = ide_wait_ireason(drive, ireason); 242 ireason = ide_wait_ireason(drive, ireason);
225 243
@@ -239,8 +257,8 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
239 } 257 }
240 258
241 /* Send the actual packet */ 259 /* Send the actual packet */
242 if ((pc->flags & PC_FLAG_ZIP_DRIVE) == 0) 260 if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
243 hwif->output_data(drive, NULL, pc->c, 12); 261 hwif->tp_ops->output_data(drive, NULL, rq->cmd, 12);
244 262
245 return ide_started; 263 return ide_started;
246} 264}
@@ -284,7 +302,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
284 bcount, dma); 302 bcount, dma);
285 303
286 /* Issue the packet command */ 304 /* Issue the packet command */
287 if (pc->flags & PC_FLAG_DRQ_INTERRUPT) { 305 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
288 ide_execute_command(drive, WIN_PACKETCMD, handler, 306 ide_execute_command(drive, WIN_PACKETCMD, handler,
289 timeout, NULL); 307 timeout, NULL);
290 return ide_started; 308 return ide_started;
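All of the ide-atapi.c register reads now go through tp_ops: status via read_status(), data via input_data()/output_data(), and the interrupt reason via the ide_read_ireason() helper added above, which fetches the sector-count register with tf_read(). The byte count is obtained the same way through ide_read_bcount_and_ireason(); its body is not part of this hunk, so the following is only an illustration of how such a helper can be built on tf_read(), not the actual implementation:

#include <linux/string.h>
#include <linux/ide.h>

/* Illustration: bcount from LBA mid/high, ireason from the sector
 * count register, all read through the taskfile ops. */
static void example_read_bcount_and_ireason(ide_drive_t *drive,
					    u16 *bcount, u8 *ireason)
{
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
			IDE_TFLAG_IN_NSECT;

	drive->hwif->tp_ops->tf_read(drive, &task);

	*bcount  = (task.tf.lbah << 8) | task.tf.lbam;
	*ireason = task.tf.nsect & 3;
}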
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e29dd532090..4e73aeee4053 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -85,10 +85,8 @@ static void ide_cd_put(struct cdrom_info *cd)
85/* Mark that we've seen a media change and invalidate our internal buffers. */ 85/* Mark that we've seen a media change and invalidate our internal buffers. */
86static void cdrom_saw_media_change(ide_drive_t *drive) 86static void cdrom_saw_media_change(ide_drive_t *drive)
87{ 87{
88 struct cdrom_info *cd = drive->driver_data; 88 drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
89 89 drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
90 cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED;
91 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
92} 90}
93 91
94static int cdrom_log_sense(ide_drive_t *drive, struct request *rq, 92static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
@@ -280,11 +278,12 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
280 */ 278 */
281static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 279static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
282{ 280{
283 struct request *rq = HWGROUP(drive)->rq; 281 ide_hwif_t *hwif = drive->hwif;
282 struct request *rq = hwif->hwgroup->rq;
284 int stat, err, sense_key; 283 int stat, err, sense_key;
285 284
286 /* check for errors */ 285 /* check for errors */
287 stat = ide_read_status(drive); 286 stat = hwif->tp_ops->read_status(hwif);
288 287
289 if (stat_ret) 288 if (stat_ret)
290 *stat_ret = stat; 289 *stat_ret = stat;
@@ -528,7 +527,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
528 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL, 527 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL,
529 xferlen, info->dma); 528 xferlen, info->dma);
530 529
531 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { 530 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
532 /* waiting for CDB interrupt, not DMA yet. */ 531 /* waiting for CDB interrupt, not DMA yet. */
533 if (info->dma) 532 if (info->dma)
534 drive->waiting_for_dma = 0; 533 drive->waiting_for_dma = 0;
@@ -560,7 +559,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
560 struct cdrom_info *info = drive->driver_data; 559 struct cdrom_info *info = drive->driver_data;
561 ide_startstop_t startstop; 560 ide_startstop_t startstop;
562 561
563 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { 562 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
564 /* 563 /*
565 * Here we should have been called after receiving an interrupt 564 * Here we should have been called after receiving an interrupt
566 * from the device. DRQ should now be set. 565 * from the device. DRQ should now be set.
@@ -589,7 +588,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
589 cmd_len = ATAPI_MIN_CDB_BYTES; 588 cmd_len = ATAPI_MIN_CDB_BYTES;
590 589
591 /* send the command to the device */ 590 /* send the command to the device */
592 hwif->output_data(drive, NULL, rq->cmd, cmd_len); 591 hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
593 592
594 /* start the DMA if need be */ 593 /* start the DMA if need be */
595 if (info->dma) 594 if (info->dma)
@@ -606,6 +605,8 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
606static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, 605static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
607 int len, int ireason, int rw) 606 int len, int ireason, int rw)
608{ 607{
608 ide_hwif_t *hwif = drive->hwif;
609
609 /* 610 /*
610 * ireason == 0: the drive wants to receive data from us 611 * ireason == 0: the drive wants to receive data from us
611 * ireason == 2: the drive is expecting to transfer data to us 612 * ireason == 2: the drive is expecting to transfer data to us
@@ -624,7 +625,7 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
624 * Some drives (ASUS) seem to tell us that status info is 625 * Some drives (ASUS) seem to tell us that status info is
625 * available. Just get it and ignore. 626 * available. Just get it and ignore.
626 */ 627 */
627 (void)ide_read_status(drive); 628 (void)hwif->tp_ops->read_status(hwif);
628 return 0; 629 return 0;
629 } else { 630 } else {
630 /* drive wants a command packet, or invalid ireason... */ 631 /* drive wants a command packet, or invalid ireason... */
@@ -645,20 +646,18 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
645 */ 646 */
646static int ide_cd_check_transfer_size(ide_drive_t *drive, int len) 647static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
647{ 648{
648 struct cdrom_info *cd = drive->driver_data;
649
650 if ((len % SECTOR_SIZE) == 0) 649 if ((len % SECTOR_SIZE) == 0)
651 return 0; 650 return 0;
652 651
653 printk(KERN_ERR "%s: %s: Bad transfer size %d\n", 652 printk(KERN_ERR "%s: %s: Bad transfer size %d\n",
654 drive->name, __func__, len); 653 drive->name, __func__, len);
655 654
656 if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES) 655 if (drive->atapi_flags & IDE_AFLAG_LIMIT_NFRAMES)
657 printk(KERN_ERR " This drive is not supported by " 656 printk(KERN_ERR " This drive is not supported by "
658 "this version of the driver\n"); 657 "this version of the driver\n");
659 else { 658 else {
660 printk(KERN_ERR " Trying to limit transfer sizes\n"); 659 printk(KERN_ERR " Trying to limit transfer sizes\n");
661 cd->cd_flags |= IDE_CD_FLAG_LIMIT_NFRAMES; 660 drive->atapi_flags |= IDE_AFLAG_LIMIT_NFRAMES;
662 } 661 }
663 662
664 return 1; 663 return 1;
@@ -735,7 +734,7 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
735 if (cdrom_decode_status(drive, 0, &stat)) 734 if (cdrom_decode_status(drive, 0, &stat))
736 return ide_stopped; 735 return ide_stopped;
737 736
738 info->cd_flags |= IDE_CD_FLAG_SEEKING; 737 drive->atapi_flags |= IDE_AFLAG_SEEKING;
739 738
740 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) { 739 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
741 if (--retry == 0) 740 if (--retry == 0)
@@ -892,10 +891,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
892 struct request *rq = HWGROUP(drive)->rq; 891 struct request *rq = HWGROUP(drive)->rq;
893 xfer_func_t *xferfunc; 892 xfer_func_t *xferfunc;
894 ide_expiry_t *expiry = NULL; 893 ide_expiry_t *expiry = NULL;
895 int dma_error = 0, dma, stat, ireason, len, thislen, uptodate = 0; 894 int dma_error = 0, dma, stat, thislen, uptodate = 0;
896 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0; 895 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0;
897 unsigned int timeout; 896 unsigned int timeout;
898 u8 lowcyl, highcyl; 897 u16 len;
898 u8 ireason;
899 899
900 /* check for errors */ 900 /* check for errors */
901 dma = info->dma; 901 dma = info->dma;
@@ -923,12 +923,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
923 goto end_request; 923 goto end_request;
924 } 924 }
925 925
926 /* ok we fall to pio :/ */ 926 ide_read_bcount_and_ireason(drive, &len, &ireason);
927 ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
928 lowcyl = hwif->INB(hwif->io_ports.lbam_addr);
929 highcyl = hwif->INB(hwif->io_ports.lbah_addr);
930
931 len = lowcyl + (256 * highcyl);
932 927
933 thislen = blk_fs_request(rq) ? len : rq->data_len; 928 thislen = blk_fs_request(rq) ? len : rq->data_len;
934 if (thislen > len) 929 if (thislen > len)
@@ -991,10 +986,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
991 986
992 if (ireason == 0) { 987 if (ireason == 0) {
993 write = 1; 988 write = 1;
994 xferfunc = hwif->output_data; 989 xferfunc = hwif->tp_ops->output_data;
995 } else { 990 } else {
996 write = 0; 991 write = 0;
997 xferfunc = hwif->input_data; 992 xferfunc = hwif->tp_ops->input_data;
998 } 993 }
999 994
1000 /* transfer data */ 995 /* transfer data */
@@ -1198,9 +1193,10 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1198 int xferlen; 1193 int xferlen;
1199 1194
1200 if (blk_fs_request(rq)) { 1195 if (blk_fs_request(rq)) {
1201 if (info->cd_flags & IDE_CD_FLAG_SEEKING) { 1196 if (drive->atapi_flags & IDE_AFLAG_SEEKING) {
1197 ide_hwif_t *hwif = drive->hwif;
1202 unsigned long elapsed = jiffies - info->start_seek; 1198 unsigned long elapsed = jiffies - info->start_seek;
1203 int stat = ide_read_status(drive); 1199 int stat = hwif->tp_ops->read_status(hwif);
1204 1200
1205 if ((stat & SEEK_STAT) != SEEK_STAT) { 1201 if ((stat & SEEK_STAT) != SEEK_STAT) {
1206 if (elapsed < IDECD_SEEK_TIMEOUT) { 1202 if (elapsed < IDECD_SEEK_TIMEOUT) {
@@ -1211,7 +1207,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1211 printk(KERN_ERR "%s: DSC timeout\n", 1207 printk(KERN_ERR "%s: DSC timeout\n",
1212 drive->name); 1208 drive->name);
1213 } 1209 }
1214 info->cd_flags &= ~IDE_CD_FLAG_SEEKING; 1210 drive->atapi_flags &= ~IDE_AFLAG_SEEKING;
1215 } 1211 }
1216 if (rq_data_dir(rq) == READ && 1212 if (rq_data_dir(rq) == READ &&
1217 IDE_LARGE_SEEK(info->last_block, block, 1213 IDE_LARGE_SEEK(info->last_block, block,
@@ -1288,7 +1284,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
1288 */ 1284 */
1289 cmd[7] = cdi->sanyo_slot % 3; 1285 cmd[7] = cdi->sanyo_slot % 3;
1290 1286
1291 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, REQ_QUIET); 1287 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
1292} 1288}
1293 1289
1294static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, 1290static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -1296,8 +1292,8 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
1296 struct request_sense *sense) 1292 struct request_sense *sense)
1297{ 1293{
1298 struct { 1294 struct {
1299 __u32 lba; 1295 __be32 lba;
1300 __u32 blocklen; 1296 __be32 blocklen;
1301 } capbuf; 1297 } capbuf;
1302 1298
1303 int stat; 1299 int stat;
@@ -1369,7 +1365,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1369 */ 1365 */
1370 (void) cdrom_check_status(drive, sense); 1366 (void) cdrom_check_status(drive, sense);
1371 1367
1372 if (info->cd_flags & IDE_CD_FLAG_TOC_VALID) 1368 if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
1373 return 0; 1369 return 0;
1374 1370
1375 /* try to get the total cdrom capacity and sector size */ 1371 /* try to get the total cdrom capacity and sector size */
@@ -1391,7 +1387,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1391 if (stat) 1387 if (stat)
1392 return stat; 1388 return stat;
1393 1389
1394 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1390 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1395 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1391 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
1396 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1392 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
1397 } 1393 }
@@ -1432,7 +1428,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1432 if (stat) 1428 if (stat)
1433 return stat; 1429 return stat;
1434 1430
1435 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1431 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1436 toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT); 1432 toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT);
1437 toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT); 1433 toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT);
1438 } else { 1434 } else {
@@ -1446,14 +1442,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1446 1442
1447 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); 1443 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
1448 1444
1449 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1445 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1450 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1446 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
1451 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1447 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
1452 } 1448 }
1453 1449
1454 for (i = 0; i <= ntracks; i++) { 1450 for (i = 0; i <= ntracks; i++) {
1455 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { 1451 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
1456 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) 1452 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
1457 toc->ent[i].track = BCD2BIN(toc->ent[i].track); 1453 toc->ent[i].track = BCD2BIN(toc->ent[i].track);
1458 msf_from_bcd(&toc->ent[i].addr.msf); 1454 msf_from_bcd(&toc->ent[i].addr.msf);
1459 } 1455 }
@@ -1476,7 +1472,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1476 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ 1472 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
1477 } 1473 }
1478 1474
1479 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { 1475 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
1480 /* re-read multisession information using MSF format */ 1476 /* re-read multisession information using MSF format */
1481 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, 1477 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
1482 sizeof(ms_tmp), sense); 1478 sizeof(ms_tmp), sense);
@@ -1500,7 +1496,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1500 } 1496 }
1501 1497
1502 /* Remember that we've read this stuff. */ 1498 /* Remember that we've read this stuff. */
1503 info->cd_flags |= IDE_CD_FLAG_TOC_VALID; 1499 drive->atapi_flags |= IDE_AFLAG_TOC_VALID;
1504 1500
1505 return 0; 1501 return 0;
1506} 1502}
@@ -1512,7 +1508,7 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
1512 struct packet_command cgc; 1508 struct packet_command cgc;
1513 int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE; 1509 int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE;
1514 1510
1515 if ((info->cd_flags & IDE_CD_FLAG_FULL_CAPS_PAGE) == 0) 1511 if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0)
1516 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; 1512 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
1517 1513
1518 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); 1514 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
@@ -1530,15 +1526,12 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
1530 struct cdrom_info *cd = drive->driver_data; 1526 struct cdrom_info *cd = drive->driver_data;
1531 u16 curspeed, maxspeed; 1527 u16 curspeed, maxspeed;
1532 1528
1533 curspeed = *(u16 *)&buf[8 + 14]; 1529 if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) {
1534 maxspeed = *(u16 *)&buf[8 + 8]; 1530 curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]);
1535 1531 maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]);
1536 if (cd->cd_flags & IDE_CD_FLAG_LE_SPEED_FIELDS) {
1537 curspeed = le16_to_cpu(curspeed);
1538 maxspeed = le16_to_cpu(maxspeed);
1539 } else { 1532 } else {
1540 curspeed = be16_to_cpu(curspeed); 1533 curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]);
1541 maxspeed = be16_to_cpu(maxspeed); 1534 maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]);
1542 } 1535 }
1543 1536
1544 cd->current_speed = (curspeed + (176/2)) / 176; 1537 cd->current_speed = (curspeed + (176/2)) / 176;
@@ -1579,7 +1572,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots)
1579 devinfo->handle = drive; 1572 devinfo->handle = drive;
1580 strcpy(devinfo->name, drive->name); 1573 strcpy(devinfo->name, drive->name);
1581 1574
1582 if (info->cd_flags & IDE_CD_FLAG_NO_SPEED_SELECT) 1575 if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
1583 devinfo->mask |= CDC_SELECT_SPEED; 1576 devinfo->mask |= CDC_SELECT_SPEED;
1584 1577
1585 devinfo->disk = info->disk; 1578 devinfo->disk = info->disk;
@@ -1605,8 +1598,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1605 return nslots; 1598 return nslots;
1606 } 1599 }
1607 1600
1608 if (cd->cd_flags & IDE_CD_FLAG_PRE_ATAPI12) { 1601 if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) {
1609 cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT; 1602 drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
1610 cdi->mask &= ~CDC_PLAY_AUDIO; 1603 cdi->mask &= ~CDC_PLAY_AUDIO;
1611 return nslots; 1604 return nslots;
1612 } 1605 }
@@ -1624,9 +1617,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1624 return 0; 1617 return 0;
1625 1618
1626 if ((buf[8 + 6] & 0x01) == 0) 1619 if ((buf[8 + 6] & 0x01) == 0)
1627 cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK; 1620 drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
1628 if (buf[8 + 6] & 0x08) 1621 if (buf[8 + 6] & 0x08)
1629 cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT; 1622 drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
1630 if (buf[8 + 3] & 0x01) 1623 if (buf[8 + 3] & 0x01)
1631 cdi->mask &= ~CDC_CD_R; 1624 cdi->mask &= ~CDC_CD_R;
1632 if (buf[8 + 3] & 0x02) 1625 if (buf[8 + 3] & 0x02)
@@ -1637,7 +1630,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1637 cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM); 1630 cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM);
1638 if (buf[8 + 3] & 0x10) 1631 if (buf[8 + 3] & 0x10)
1639 cdi->mask &= ~CDC_DVD_R; 1632 cdi->mask &= ~CDC_DVD_R;
1640 if ((buf[8 + 4] & 0x01) || (cd->cd_flags & IDE_CD_FLAG_PLAY_AUDIO_OK)) 1633 if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK))
1641 cdi->mask &= ~CDC_PLAY_AUDIO; 1634 cdi->mask &= ~CDC_PLAY_AUDIO;
1642 1635
1643 mechtype = buf[8 + 6] >> 5; 1636 mechtype = buf[8 + 6] >> 5;
@@ -1679,7 +1672,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1679 else 1672 else
1680 printk(KERN_CONT " drive"); 1673 printk(KERN_CONT " drive");
1681 1674
1682 printk(KERN_CONT ", %dkB Cache\n", be16_to_cpu(*(u16 *)&buf[8 + 12])); 1675 printk(KERN_CONT ", %dkB Cache\n", be16_to_cpup((__be16 *)&buf[8 + 12]));
1683 1676
1684 return nslots; 1677 return nslots;
1685} 1678}
@@ -1802,43 +1795,43 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
1802 1795
1803static const struct cd_list_entry ide_cd_quirks_list[] = { 1796static const struct cd_list_entry ide_cd_quirks_list[] = {
1804 /* Limit transfer size per interrupt. */ 1797 /* Limit transfer size per interrupt. */
1805 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, 1798 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_AFLAG_LIMIT_NFRAMES },
1806 { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, 1799 { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_AFLAG_LIMIT_NFRAMES },
1807 /* SCR-3231 doesn't support the SET_CD_SPEED command. */ 1800 /* SCR-3231 doesn't support the SET_CD_SPEED command. */
1808 { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_CD_FLAG_NO_SPEED_SELECT }, 1801 { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT },
1809 /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */ 1802 /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */
1810 { "NEC CD-ROM DRIVE:260", "1.01", IDE_CD_FLAG_TOCADDR_AS_BCD | 1803 { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD |
1811 IDE_CD_FLAG_PRE_ATAPI12, }, 1804 IDE_AFLAG_PRE_ATAPI12, },
1812 /* Vertos 300, some versions of this drive like to talk BCD. */ 1805 /* Vertos 300, some versions of this drive like to talk BCD. */
1813 { "V003S0DS", NULL, IDE_CD_FLAG_VERTOS_300_SSD, }, 1806 { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, },
1814 /* Vertos 600 ESD. */ 1807 /* Vertos 600 ESD. */
1815 { "V006E0DS", NULL, IDE_CD_FLAG_VERTOS_600_ESD, }, 1808 { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, },
1816 /* 1809 /*
1817 * Sanyo 3 CD changer uses a non-standard command for CD changing 1810 * Sanyo 3 CD changer uses a non-standard command for CD changing
1818 * (by default standard ATAPI support for CD changers is used). 1811 * (by default standard ATAPI support for CD changers is used).
1819 */ 1812 */
1820 { "CD-ROM CDR-C3 G", NULL, IDE_CD_FLAG_SANYO_3CD }, 1813 { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD },
1821 { "CD-ROM CDR-C3G", NULL, IDE_CD_FLAG_SANYO_3CD }, 1814 { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD },
1822 { "CD-ROM CDR_C36", NULL, IDE_CD_FLAG_SANYO_3CD }, 1815 { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD },
1823 /* Stingray 8X CD-ROM. */ 1816 /* Stingray 8X CD-ROM. */
1824 { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_CD_FLAG_PRE_ATAPI12}, 1817 { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 },
1825 /* 1818 /*
1826 * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length 1819 * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length
1827 * mode sense page capabilities size, but older drives break. 1820 * mode sense page capabilities size, but older drives break.
1828 */ 1821 */
1829 { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE }, 1822 { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
1830 { "WPI CDS-32X", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE }, 1823 { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
1831 /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */ 1824 /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */
1832 { "", "241N", IDE_CD_FLAG_LE_SPEED_FIELDS }, 1825 { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS },
1833 /* 1826 /*
1834 * Some drives used by Apple don't advertise audio play 1827 * Some drives used by Apple don't advertise audio play
1835 * but they do support reading TOC & audio data. 1828 * but they do support reading TOC & audio data.
1836 */ 1829 */
1837 { "MATSHITADVD-ROM SR-8187", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1830 { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1838 { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1831 { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1839 { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1832 { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1840 { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1833 { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1841 { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1834 { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1842 { NULL, NULL, 0 } 1835 { NULL, NULL, 0 }
1843}; 1836};
1844 1837
@@ -1873,20 +1866,20 @@ static int ide_cdrom_setup(ide_drive_t *drive)
1873 1866
1874 drive->special.all = 0; 1867 drive->special.all = 0;
1875 1868
1876 cd->cd_flags = IDE_CD_FLAG_MEDIA_CHANGED | IDE_CD_FLAG_NO_EJECT | 1869 drive->atapi_flags = IDE_AFLAG_MEDIA_CHANGED | IDE_AFLAG_NO_EJECT |
1877 ide_cd_flags(id); 1870 ide_cd_flags(id);
1878 1871
1879 if ((id->config & 0x0060) == 0x20) 1872 if ((id->config & 0x0060) == 0x20)
1880 cd->cd_flags |= IDE_CD_FLAG_DRQ_INTERRUPT; 1873 drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
1881 1874
1882 if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_300_SSD) && 1875 if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) &&
1883 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') 1876 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
1884 cd->cd_flags |= (IDE_CD_FLAG_TOCTRACKS_AS_BCD | 1877 drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD |
1885 IDE_CD_FLAG_TOCADDR_AS_BCD); 1878 IDE_AFLAG_TOCADDR_AS_BCD);
1886 else if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_600_ESD) && 1879 else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) &&
1887 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') 1880 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
1888 cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD; 1881 drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD;
1889 else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD) 1882 else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD)
1890 /* 3 => use CD in slot 0 */ 1883 /* 3 => use CD in slot 0 */
1891 cdi->sanyo_slot = 3; 1884 cdi->sanyo_slot = 3;
1892 1885
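Besides moving every IDE_CD_FLAG_* bit into the per-drive drive->atapi_flags (as IDE_AFLAG_*), the ide-cd.c hunks tighten the endianness handling: capbuf is declared with __be32 fields and the speed fields are read with le16_to_cpup()/be16_to_cpup() directly from the buffer, so sparse can verify the conversions. A tiny sketch of that idiom with an invented offset:

#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_OFFSET	12	/* hypothetical, not an ide-cd offset */

/* Illustration: pull a big-endian 16-bit field out of a raw buffer;
 * the __be16 cast documents the wire format for sparse and
 * be16_to_cpup() swaps it on little-endian hosts. */
static u16 example_read_be16_field(const u8 *buf)
{
	return be16_to_cpup((const __be16 *)&buf[EXAMPLE_OFFSET]);
}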
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index fe0ea36e4124..61a4599b77db 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -27,42 +27,6 @@
27#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20) 27#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20)
28#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4 28#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4
29 29
30enum {
31 /* Device sends an interrupt when ready for a packet command. */
32 IDE_CD_FLAG_DRQ_INTERRUPT = (1 << 0),
33 /* Drive cannot lock the door. */
34 IDE_CD_FLAG_NO_DOORLOCK = (1 << 1),
35 /* Drive cannot eject the disc. */
36 IDE_CD_FLAG_NO_EJECT = (1 << 2),
37 /* Drive is a pre ATAPI 1.2 drive. */
38 IDE_CD_FLAG_PRE_ATAPI12 = (1 << 3),
39 /* TOC addresses are in BCD. */
40 IDE_CD_FLAG_TOCADDR_AS_BCD = (1 << 4),
41 /* TOC track numbers are in BCD. */
42 IDE_CD_FLAG_TOCTRACKS_AS_BCD = (1 << 5),
43 /*
44 * Drive does not provide data in multiples of SECTOR_SIZE
45 * when more than one interrupt is needed.
46 */
47 IDE_CD_FLAG_LIMIT_NFRAMES = (1 << 6),
48 /* Seeking in progress. */
49 IDE_CD_FLAG_SEEKING = (1 << 7),
50 /* Driver has noticed a media change. */
51 IDE_CD_FLAG_MEDIA_CHANGED = (1 << 8),
52 /* Saved TOC information is current. */
53 IDE_CD_FLAG_TOC_VALID = (1 << 9),
54 /* We think that the drive door is locked. */
55 IDE_CD_FLAG_DOOR_LOCKED = (1 << 10),
56 /* SET_CD_SPEED command is unsupported. */
57 IDE_CD_FLAG_NO_SPEED_SELECT = (1 << 11),
58 IDE_CD_FLAG_VERTOS_300_SSD = (1 << 12),
59 IDE_CD_FLAG_VERTOS_600_ESD = (1 << 13),
60 IDE_CD_FLAG_SANYO_3CD = (1 << 14),
61 IDE_CD_FLAG_FULL_CAPS_PAGE = (1 << 15),
62 IDE_CD_FLAG_PLAY_AUDIO_OK = (1 << 16),
63 IDE_CD_FLAG_LE_SPEED_FIELDS = (1 << 17),
64};
65
66/* Structure of a MSF cdrom address. */ 30/* Structure of a MSF cdrom address. */
67struct atapi_msf { 31struct atapi_msf {
68 byte reserved; 32 byte reserved;
@@ -128,8 +92,6 @@ struct cdrom_info {
128 unsigned long last_block; 92 unsigned long last_block;
129 unsigned long start_seek; 93 unsigned long start_seek;
130 94
131 unsigned int cd_flags;
132
133 u8 max_speed; /* Max speed of the drive. */ 95 u8 max_speed; /* Max speed of the drive. */
134 u8 current_speed; /* Current speed of the drive. */ 96 u8 current_speed; /* Current speed of the drive. */
135 97
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 24d002addf73..74231b41f611 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -27,10 +27,9 @@ int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
27void ide_cdrom_release_real(struct cdrom_device_info *cdi) 27void ide_cdrom_release_real(struct cdrom_device_info *cdi)
28{ 28{
29 ide_drive_t *drive = cdi->handle; 29 ide_drive_t *drive = cdi->handle;
30 struct cdrom_info *cd = drive->driver_data;
31 30
32 if (!cdi->use_count) 31 if (!cdi->use_count)
33 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID; 32 drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
34} 33}
35 34
36/* 35/*
@@ -83,13 +82,12 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi,
83 int slot_nr) 82 int slot_nr)
84{ 83{
85 ide_drive_t *drive = cdi->handle; 84 ide_drive_t *drive = cdi->handle;
86 struct cdrom_info *cd = drive->driver_data;
87 int retval; 85 int retval;
88 86
89 if (slot_nr == CDSL_CURRENT) { 87 if (slot_nr == CDSL_CURRENT) {
90 (void) cdrom_check_status(drive, NULL); 88 (void) cdrom_check_status(drive, NULL);
91 retval = (cd->cd_flags & IDE_CD_FLAG_MEDIA_CHANGED) ? 1 : 0; 89 retval = (drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED) ? 1 : 0;
92 cd->cd_flags &= ~IDE_CD_FLAG_MEDIA_CHANGED; 90 drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
93 return retval; 91 return retval;
94 } else { 92 } else {
95 return -EINVAL; 93 return -EINVAL;
@@ -107,11 +105,11 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
107 char loej = 0x02; 105 char loej = 0x02;
108 unsigned char cmd[BLK_MAX_CDB]; 106 unsigned char cmd[BLK_MAX_CDB];
109 107
110 if ((cd->cd_flags & IDE_CD_FLAG_NO_EJECT) && !ejectflag) 108 if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
111 return -EDRIVE_CANT_DO_THIS; 109 return -EDRIVE_CANT_DO_THIS;
112 110
113 /* reload fails on some drives, if the tray is locked */ 111 /* reload fails on some drives, if the tray is locked */
114 if ((cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) && ejectflag) 112 if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
115 return 0; 113 return 0;
116 114
117 /* only tell drive to close tray if open, if it can do that */ 115 /* only tell drive to close tray if open, if it can do that */
@@ -123,7 +121,7 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
123 cmd[0] = GPCMD_START_STOP_UNIT; 121 cmd[0] = GPCMD_START_STOP_UNIT;
124 cmd[4] = loej | (ejectflag != 0); 122 cmd[4] = loej | (ejectflag != 0);
125 123
126 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, 0); 124 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
127} 125}
128 126
129/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */ 127/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
@@ -131,7 +129,6 @@ static
131int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, 129int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
132 struct request_sense *sense) 130 struct request_sense *sense)
133{ 131{
134 struct cdrom_info *cd = drive->driver_data;
135 struct request_sense my_sense; 132 struct request_sense my_sense;
136 int stat; 133 int stat;
137 134
@@ -139,7 +136,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
139 sense = &my_sense; 136 sense = &my_sense;
140 137
141 /* If the drive cannot lock the door, just pretend. */ 138 /* If the drive cannot lock the door, just pretend. */
142 if (cd->cd_flags & IDE_CD_FLAG_NO_DOORLOCK) { 139 if (drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK) {
143 stat = 0; 140 stat = 0;
144 } else { 141 } else {
145 unsigned char cmd[BLK_MAX_CDB]; 142 unsigned char cmd[BLK_MAX_CDB];
@@ -149,7 +146,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
149 cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; 146 cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
150 cmd[4] = lockflag ? 1 : 0; 147 cmd[4] = lockflag ? 1 : 0;
151 148
152 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, 149 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
153 sense, 0, 0); 150 sense, 0, 0);
154 } 151 }
155 152
@@ -160,7 +157,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
160 (sense->asc == 0x24 || sense->asc == 0x20)) { 157 (sense->asc == 0x24 || sense->asc == 0x20)) {
161 printk(KERN_ERR "%s: door locking not supported\n", 158 printk(KERN_ERR "%s: door locking not supported\n",
162 drive->name); 159 drive->name);
163 cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK; 160 drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
164 stat = 0; 161 stat = 0;
165 } 162 }
166 163
@@ -170,9 +167,9 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
170 167
171 if (stat == 0) { 168 if (stat == 0) {
172 if (lockflag) 169 if (lockflag)
173 cd->cd_flags |= IDE_CD_FLAG_DOOR_LOCKED; 170 drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
174 else 171 else
175 cd->cd_flags &= ~IDE_CD_FLAG_DOOR_LOCKED; 172 drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
176 } 173 }
177 174
178 return stat; 175 return stat;
@@ -231,7 +228,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
231 cmd[5] = speed & 0xff; 228 cmd[5] = speed & 0xff;
232 } 229 }
233 230
234 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0); 231 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
235 232
236 if (!ide_cdrom_get_capabilities(drive, buf)) { 233 if (!ide_cdrom_get_capabilities(drive, buf)) {
237 ide_cdrom_update_speed(drive, buf); 234 ide_cdrom_update_speed(drive, buf);
@@ -250,7 +247,7 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
250 struct request_sense sense; 247 struct request_sense sense;
251 int ret; 248 int ret;
252 249
253 if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0 || !info->toc) { 250 if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
254 ret = ide_cd_read_toc(drive, &sense); 251 ret = ide_cd_read_toc(drive, &sense);
255 if (ret) 252 if (ret)
256 return ret; 253 return ret;
@@ -308,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
308 * A reset will unlock the door. If it was previously locked, 305 * A reset will unlock the door. If it was previously locked,
309 * lock it again. 306 * lock it again.
310 */ 307 */
311 if (cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) 308 if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
312 (void)ide_cd_lockdoor(drive, 1, &sense); 309 (void)ide_cd_lockdoor(drive, 1, &sense);
313 310
314 return ret; 311 return ret;
@@ -324,7 +321,7 @@ static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
324 /* 321 /*
325 * don't serve cached data, if the toc isn't valid 322 * don't serve cached data, if the toc isn't valid
326 */ 323 */
327 if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0) 324 if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
328 return -EINVAL; 325 return -EINVAL;
329 326
330 /* Check validity of requested track number. */ 327 /* Check validity of requested track number. */
@@ -374,7 +371,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
374 lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]); 371 lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
375 lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]); 372 lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
376 373
377 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0); 374 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
378} 375}
379 376
380static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg) 377static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3a2e80237c10..df5fe5756871 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -158,7 +158,7 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
158 write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0; 158 write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
159 159
160 if (dma) 160 if (dma)
161 index = drive->vdma ? 4 : 8; 161 index = 8;
162 else 162 else
163 index = drive->mult_count ? 0 : 4; 163 index = drive->mult_count ? 0 : 4;
164 164
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 7ee44f86bc54..be99d463dcc7 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -100,10 +100,11 @@ static const struct drive_list_entry drive_blacklist [] = {
100 100
101ide_startstop_t ide_dma_intr (ide_drive_t *drive) 101ide_startstop_t ide_dma_intr (ide_drive_t *drive)
102{ 102{
103 ide_hwif_t *hwif = drive->hwif;
103 u8 stat = 0, dma_stat = 0; 104 u8 stat = 0, dma_stat = 0;
104 105
105 dma_stat = drive->hwif->dma_ops->dma_end(drive); 106 dma_stat = hwif->dma_ops->dma_end(drive);
106 stat = ide_read_status(drive); 107 stat = hwif->tp_ops->read_status(hwif);
107 108
108 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { 109 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
109 if (!dma_stat) { 110 if (!dma_stat) {
@@ -334,7 +335,7 @@ static int config_drive_for_dma (ide_drive_t *drive)
334static int dma_timer_expiry (ide_drive_t *drive) 335static int dma_timer_expiry (ide_drive_t *drive)
335{ 336{
336 ide_hwif_t *hwif = HWIF(drive); 337 ide_hwif_t *hwif = HWIF(drive);
337 u8 dma_stat = hwif->INB(hwif->dma_status); 338 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
338 339
339 printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n", 340 printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
340 drive->name, dma_stat); 341 drive->name, dma_stat);
@@ -369,14 +370,18 @@ void ide_dma_host_set(ide_drive_t *drive, int on)
369{ 370{
370 ide_hwif_t *hwif = HWIF(drive); 371 ide_hwif_t *hwif = HWIF(drive);
371 u8 unit = (drive->select.b.unit & 0x01); 372 u8 unit = (drive->select.b.unit & 0x01);
372 u8 dma_stat = hwif->INB(hwif->dma_status); 373 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
373 374
374 if (on) 375 if (on)
375 dma_stat |= (1 << (5 + unit)); 376 dma_stat |= (1 << (5 + unit));
376 else 377 else
377 dma_stat &= ~(1 << (5 + unit)); 378 dma_stat &= ~(1 << (5 + unit));
378 379
379 hwif->OUTB(dma_stat, hwif->dma_status); 380 if (hwif->host_flags & IDE_HFLAG_MMIO)
381 writeb(dma_stat,
382 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
383 else
384 outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
380} 385}
381 386
382EXPORT_SYMBOL_GPL(ide_dma_host_set); 387EXPORT_SYMBOL_GPL(ide_dma_host_set);
@@ -449,6 +454,7 @@ int ide_dma_setup(ide_drive_t *drive)
449 ide_hwif_t *hwif = drive->hwif; 454 ide_hwif_t *hwif = drive->hwif;
450 struct request *rq = HWGROUP(drive)->rq; 455 struct request *rq = HWGROUP(drive)->rq;
451 unsigned int reading; 456 unsigned int reading;
457 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
452 u8 dma_stat; 458 u8 dma_stat;
453 459
454 if (rq_data_dir(rq)) 460 if (rq_data_dir(rq))
@@ -470,13 +476,21 @@ int ide_dma_setup(ide_drive_t *drive)
470 outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS); 476 outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
471 477
472 /* specify r/w */ 478 /* specify r/w */
473 hwif->OUTB(reading, hwif->dma_command); 479 if (mmio)
480 writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
481 else
482 outb(reading, hwif->dma_base + ATA_DMA_CMD);
474 483
475 /* read dma_status for INTR & ERROR flags */ 484 /* read DMA status for INTR & ERROR flags */
476 dma_stat = hwif->INB(hwif->dma_status); 485 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
477 486
478 /* clear INTR & ERROR flags */ 487 /* clear INTR & ERROR flags */
479 hwif->OUTB(dma_stat|6, hwif->dma_status); 488 if (mmio)
489 writeb(dma_stat | 6,
490 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
491 else
492 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
493
480 drive->waiting_for_dma = 1; 494 drive->waiting_for_dma = 1;
481 return 0; 495 return 0;
482} 496}
@@ -492,16 +506,24 @@ EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
492 506
493void ide_dma_start(ide_drive_t *drive) 507void ide_dma_start(ide_drive_t *drive)
494{ 508{
495 ide_hwif_t *hwif = HWIF(drive); 509 ide_hwif_t *hwif = drive->hwif;
496 u8 dma_cmd = hwif->INB(hwif->dma_command); 510 u8 dma_cmd;
497 511
498 /* Note that this is done *after* the cmd has 512 /* Note that this is done *after* the cmd has
499 * been issued to the drive, as per the BM-IDE spec. 513 * been issued to the drive, as per the BM-IDE spec.
500 * The Promise Ultra33 doesn't work correctly when 514 * The Promise Ultra33 doesn't work correctly when
501 * we do this part before issuing the drive cmd. 515 * we do this part before issuing the drive cmd.
502 */ 516 */
503 /* start DMA */ 517 if (hwif->host_flags & IDE_HFLAG_MMIO) {
504 hwif->OUTB(dma_cmd|1, hwif->dma_command); 518 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
519 /* start DMA */
520 writeb(dma_cmd | 1,
521 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
522 } else {
523 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
524 outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
525 }
526
505 hwif->dma = 1; 527 hwif->dma = 1;
506 wmb(); 528 wmb();
507} 529}
@@ -511,18 +533,33 @@ EXPORT_SYMBOL_GPL(ide_dma_start);
511/* returns 1 on error, 0 otherwise */ 533/* returns 1 on error, 0 otherwise */
512int __ide_dma_end (ide_drive_t *drive) 534int __ide_dma_end (ide_drive_t *drive)
513{ 535{
514 ide_hwif_t *hwif = HWIF(drive); 536 ide_hwif_t *hwif = drive->hwif;
537 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
515 u8 dma_stat = 0, dma_cmd = 0; 538 u8 dma_stat = 0, dma_cmd = 0;
516 539
517 drive->waiting_for_dma = 0; 540 drive->waiting_for_dma = 0;
518 /* get dma_command mode */ 541
519 dma_cmd = hwif->INB(hwif->dma_command); 542 if (mmio) {
520 /* stop DMA */ 543 /* get DMA command mode */
521 hwif->OUTB(dma_cmd&~1, hwif->dma_command); 544 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
545 /* stop DMA */
546 writeb(dma_cmd & ~1,
547 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
548 } else {
549 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
550 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
551 }
552
522 /* get DMA status */ 553 /* get DMA status */
523 dma_stat = hwif->INB(hwif->dma_status); 554 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
524 /* clear the INTR & ERROR bits */ 555
525 hwif->OUTB(dma_stat|6, hwif->dma_status); 556 if (mmio)
557 /* clear the INTR & ERROR bits */
558 writeb(dma_stat | 6,
559 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
560 else
561 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
562
526 /* purge DMA mappings */ 563 /* purge DMA mappings */
527 ide_destroy_dmatable(drive); 564 ide_destroy_dmatable(drive);
528 /* verify good DMA status */ 565 /* verify good DMA status */
@@ -537,7 +574,7 @@ EXPORT_SYMBOL(__ide_dma_end);
537int ide_dma_test_irq(ide_drive_t *drive) 574int ide_dma_test_irq(ide_drive_t *drive)
538{ 575{
539 ide_hwif_t *hwif = HWIF(drive); 576 ide_hwif_t *hwif = HWIF(drive);
540 u8 dma_stat = hwif->INB(hwif->dma_status); 577 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
541 578
542 /* return 1 if INTR asserted */ 579 /* return 1 if INTR asserted */
543 if ((dma_stat & 4) == 4) 580 if ((dma_stat & 4) == 4)
@@ -719,9 +756,8 @@ static int ide_tune_dma(ide_drive_t *drive)
719static int ide_dma_check(ide_drive_t *drive) 756static int ide_dma_check(ide_drive_t *drive)
720{ 757{
721 ide_hwif_t *hwif = drive->hwif; 758 ide_hwif_t *hwif = drive->hwif;
722 int vdma = (hwif->host_flags & IDE_HFLAG_VDMA)? 1 : 0;
723 759
724 if (!vdma && ide_tune_dma(drive)) 760 if (ide_tune_dma(drive))
725 return 0; 761 return 0;
726 762
727 /* TODO: always do PIO fallback */ 763 /* TODO: always do PIO fallback */
@@ -730,7 +766,7 @@ static int ide_dma_check(ide_drive_t *drive)
730 766
731 ide_set_max_pio(drive); 767 ide_set_max_pio(drive);
732 768
733 return vdma ? 0 : -1; 769 return -1;
734} 770}
735 771
736int ide_id_dma_bug(ide_drive_t *drive) 772int ide_id_dma_bug(ide_drive_t *drive)
@@ -842,7 +878,7 @@ int ide_allocate_dma_engine(ide_hwif_t *hwif)
842} 878}
843EXPORT_SYMBOL_GPL(ide_allocate_dma_engine); 879EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
844 880
845static const struct ide_dma_ops sff_dma_ops = { 881const struct ide_dma_ops sff_dma_ops = {
846 .dma_host_set = ide_dma_host_set, 882 .dma_host_set = ide_dma_host_set,
847 .dma_setup = ide_dma_setup, 883 .dma_setup = ide_dma_setup,
848 .dma_exec_cmd = ide_dma_exec_cmd, 884 .dma_exec_cmd = ide_dma_exec_cmd,
@@ -852,18 +888,5 @@ static const struct ide_dma_ops sff_dma_ops = {
852 .dma_timeout = ide_dma_timeout, 888 .dma_timeout = ide_dma_timeout,
853 .dma_lost_irq = ide_dma_lost_irq, 889 .dma_lost_irq = ide_dma_lost_irq,
854}; 890};
855 891EXPORT_SYMBOL_GPL(sff_dma_ops);
856void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
857{
858 hwif->dma_base = base;
859
860 if (!hwif->dma_command)
861 hwif->dma_command = hwif->dma_base + 0;
862 if (!hwif->dma_status)
863 hwif->dma_status = hwif->dma_base + 2;
864
865 hwif->dma_ops = &sff_dma_ops;
866}
867
868EXPORT_SYMBOL_GPL(ide_setup_dma);
869#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 892#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
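The ide-dma.c hunks above drop the per-hwif OUTB/INB hooks and the dma_command/dma_status fields: the bus-master registers are now reached directly at dma_base + ATA_DMA_CMD and dma_base + ATA_DMA_STATUS, via writeb()/readb() when IDE_HFLAG_MMIO is set and outb()/inb() otherwise. The register protocol itself is unchanged: bit 0 of the command register starts and stops the engine, and the INTR/ERROR bits of the status register are write-one-to-clear. A minimal user-space sketch of that register dance follows; the register file and the rd()/wr() helpers are stand-ins, and the final "good status" test mirrors the conventional SFF-8038i check rather than a line visible in this excerpt.

    #include <stdint.h>
    #include <stdio.h>

    #define ATA_DMA_CMD    0    /* bus-master command register offset */
    #define ATA_DMA_STATUS 2    /* bus-master status register offset */

    static uint8_t bmide[8];                      /* fake register file */
    static uint8_t rd(unsigned off)               { return bmide[off]; }
    static void    wr(unsigned off, uint8_t v)    { bmide[off] = v; }

    static void dma_start(void)
    {
        uint8_t cmd = rd(ATA_DMA_CMD);
        wr(ATA_DMA_CMD, cmd | 1);                 /* set the Start/Stop bit */
    }

    /* returns 1 on error, 0 otherwise, like __ide_dma_end() */
    static int dma_end(void)
    {
        uint8_t cmd = rd(ATA_DMA_CMD);
        uint8_t stat;

        wr(ATA_DMA_CMD, cmd & ~1);                /* stop the engine */
        stat = rd(ATA_DMA_STATUS);
        wr(ATA_DMA_STATUS, stat | 6);             /* clear INTR and ERROR (write 1 to clear) */
        return (stat & 7) != 4;                   /* good completion: only INTR set */
    }

    int main(void)
    {
        dma_start();
        bmide[ATA_DMA_STATUS] = 4;                /* pretend the engine raised INTR */
        printf("dma_end -> %d\n", dma_end());     /* 0 == clean completion */
        return 0;
    }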
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 011d72011cc4..3d8e6dd0f41e 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -125,26 +125,10 @@ typedef struct ide_floppy_obj {
125 int wp; 125 int wp;
126 /* Supports format progress report */ 126 /* Supports format progress report */
127 int srfp; 127 int srfp;
128 /* Status/Action flags */
129 unsigned long flags;
130} idefloppy_floppy_t; 128} idefloppy_floppy_t;
131 129
132#define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */ 130#define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */
133 131
134/* Floppy flag bits values. */
135enum {
136 /* DRQ interrupt device */
137 IDEFLOPPY_FLAG_DRQ_INTERRUPT = (1 << 0),
138 /* Media may have changed */
139 IDEFLOPPY_FLAG_MEDIA_CHANGED = (1 << 1),
140 /* Format in progress */
141 IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS = (1 << 2),
142 /* Avoid commands not supported in Clik drive */
143 IDEFLOPPY_FLAG_CLIK_DRIVE = (1 << 3),
144 /* Requires BH algorithm for packets */
145 IDEFLOPPY_FLAG_ZIP_DRIVE = (1 << 4),
146};
147
148/* Defines for the MODE SENSE command */ 132/* Defines for the MODE SENSE command */
149#define MODE_SENSE_CURRENT 0x00 133#define MODE_SENSE_CURRENT 0x00
150#define MODE_SENSE_CHANGEABLE 0x01 134#define MODE_SENSE_CHANGEABLE 0x01
@@ -247,9 +231,9 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
247 231
248 data = bvec_kmap_irq(bvec, &flags); 232 data = bvec_kmap_irq(bvec, &flags);
249 if (direction) 233 if (direction)
250 hwif->output_data(drive, NULL, data, count); 234 hwif->tp_ops->output_data(drive, NULL, data, count);
251 else 235 else
252 hwif->input_data(drive, NULL, data, count); 236 hwif->tp_ops->input_data(drive, NULL, data, count);
253 bvec_kunmap_irq(data, &flags); 237 bvec_kunmap_irq(data, &flags);
254 238
255 bcount -= count; 239 bcount -= count;
@@ -291,6 +275,7 @@ static void idefloppy_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
291 rq->cmd_type = REQ_TYPE_SPECIAL; 275 rq->cmd_type = REQ_TYPE_SPECIAL;
292 rq->cmd_flags |= REQ_PREEMPT; 276 rq->cmd_flags |= REQ_PREEMPT;
293 rq->rq_disk = floppy->disk; 277 rq->rq_disk = floppy->disk;
278 memcpy(rq->cmd, pc->c, 12);
294 ide_do_drive_cmd(drive, rq); 279 ide_do_drive_cmd(drive, rq);
295} 280}
296 281
@@ -354,7 +339,6 @@ static void idefloppy_init_pc(struct ide_atapi_pc *pc)
354 memset(pc, 0, sizeof(*pc)); 339 memset(pc, 0, sizeof(*pc));
355 pc->buf = pc->pc_buf; 340 pc->buf = pc->pc_buf;
356 pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE; 341 pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE;
357 pc->callback = ide_floppy_callback;
358} 342}
359 343
360static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc) 344static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -402,7 +386,7 @@ static int idefloppy_transfer_pc(ide_drive_t *drive)
402 idefloppy_floppy_t *floppy = drive->driver_data; 386 idefloppy_floppy_t *floppy = drive->driver_data;
403 387
404 /* Send the actual packet */ 388 /* Send the actual packet */
405 drive->hwif->output_data(drive, NULL, floppy->pc->c, 12); 389 drive->hwif->tp_ops->output_data(drive, NULL, floppy->pc->c, 12);
406 390
407 /* Timeout for the packet command */ 391 /* Timeout for the packet command */
408 return IDEFLOPPY_WAIT_CMD; 392 return IDEFLOPPY_WAIT_CMD;
@@ -429,7 +413,7 @@ static ide_startstop_t idefloppy_start_pc_transfer(ide_drive_t *drive)
429 * 40 and 50msec work well. idefloppy_pc_intr will not be actually 413 * 40 and 50msec work well. idefloppy_pc_intr will not be actually
430 * used until after the packet is moved in about 50 msec. 414 * used until after the packet is moved in about 50 msec.
431 */ 415 */
432 if (pc->flags & PC_FLAG_ZIP_DRIVE) { 416 if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
433 timeout = floppy->ticks; 417 timeout = floppy->ticks;
434 expiry = &idefloppy_transfer_pc; 418 expiry = &idefloppy_transfer_pc;
435 } else { 419 } else {
@@ -474,7 +458,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
474 pc->error = IDEFLOPPY_ERROR_GENERAL; 458 pc->error = IDEFLOPPY_ERROR_GENERAL;
475 459
476 floppy->failed_pc = NULL; 460 floppy->failed_pc = NULL;
477 pc->callback(drive); 461 drive->pc_callback(drive);
478 return ide_stopped; 462 return ide_stopped;
479 } 463 }
480 464
@@ -574,6 +558,8 @@ static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy,
574 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); 558 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
575 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); 559 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
576 560
561 memcpy(rq->cmd, pc->c, 12);
562
577 pc->rq = rq; 563 pc->rq = rq;
578 pc->b_count = cmd == READ ? 0 : rq->bio->bi_size; 564 pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
579 if (rq->cmd_flags & REQ_RW) 565 if (rq->cmd_flags & REQ_RW)
@@ -647,12 +633,6 @@ static ide_startstop_t idefloppy_do_request(ide_drive_t *drive,
647 return ide_stopped; 633 return ide_stopped;
648 } 634 }
649 635
650 if (floppy->flags & IDEFLOPPY_FLAG_DRQ_INTERRUPT)
651 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
652
653 if (floppy->flags & IDEFLOPPY_FLAG_ZIP_DRIVE)
654 pc->flags |= PC_FLAG_ZIP_DRIVE;
655
656 pc->rq = rq; 636 pc->rq = rq;
657 637
658 return idefloppy_issue_pc(drive, pc); 638 return idefloppy_issue_pc(drive, pc);
@@ -671,6 +651,7 @@ static int idefloppy_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
671 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 651 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
672 rq->buffer = (char *) pc; 652 rq->buffer = (char *) pc;
673 rq->cmd_type = REQ_TYPE_SPECIAL; 653 rq->cmd_type = REQ_TYPE_SPECIAL;
654 memcpy(rq->cmd, pc->c, 12);
674 error = blk_execute_rq(drive->queue, floppy->disk, rq, 0); 655 error = blk_execute_rq(drive->queue, floppy->disk, rq, 0);
675 blk_put_request(rq); 656 blk_put_request(rq);
676 657
@@ -795,7 +776,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
795 switch (pc.buf[desc_start + 4] & 0x03) { 776 switch (pc.buf[desc_start + 4] & 0x03) {
796 /* Clik! drive returns this instead of CAPACITY_CURRENT */ 777 /* Clik! drive returns this instead of CAPACITY_CURRENT */
797 case CAPACITY_UNFORMATTED: 778 case CAPACITY_UNFORMATTED:
798 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) 779 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
799 /* 780 /*
800 * If it is not a clik drive, break out 781 * If it is not a clik drive, break out
801 * (maintains previous driver behaviour) 782 * (maintains previous driver behaviour)
@@ -841,7 +822,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
841 } 822 }
842 823
843 /* Clik! disk does not support get_flexible_disk_page */ 824 /* Clik! disk does not support get_flexible_disk_page */
844 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) 825 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
845 (void) ide_floppy_get_flexible_disk_page(drive); 826 (void) ide_floppy_get_flexible_disk_page(drive);
846 827
847 set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor); 828 set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor);
@@ -949,11 +930,12 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
949 930
950 /* Else assume format_unit has finished, and we're at 0x10000 */ 931 /* Else assume format_unit has finished, and we're at 0x10000 */
951 } else { 932 } else {
933 ide_hwif_t *hwif = drive->hwif;
952 unsigned long flags; 934 unsigned long flags;
953 u8 stat; 935 u8 stat;
954 936
955 local_irq_save(flags); 937 local_irq_save(flags);
956 stat = ide_read_status(drive); 938 stat = hwif->tp_ops->read_status(hwif);
957 local_irq_restore(flags); 939 local_irq_restore(flags);
958 940
959 progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000; 941 progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
@@ -1039,9 +1021,10 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1039 1021
1040 *((u16 *) &gcw) = drive->id->config; 1022 *((u16 *) &gcw) = drive->id->config;
1041 floppy->pc = floppy->pc_stack; 1023 floppy->pc = floppy->pc_stack;
1024 drive->pc_callback = ide_floppy_callback;
1042 1025
1043 if (((gcw[0] & 0x60) >> 5) == 1) 1026 if (((gcw[0] & 0x60) >> 5) == 1)
1044 floppy->flags |= IDEFLOPPY_FLAG_DRQ_INTERRUPT; 1027 drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
1045 /* 1028 /*
1046 * We used to check revisions here. At this point however I'm giving up. 1029 * We used to check revisions here. At this point however I'm giving up.
1047	 * Just assume they are all broken, it's easier.	1030
@@ -1052,7 +1035,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1052 * we'll leave the limitation below for the 2.2.x tree. 1035 * we'll leave the limitation below for the 2.2.x tree.
1053 */ 1036 */
1054 if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) { 1037 if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) {
1055 floppy->flags |= IDEFLOPPY_FLAG_ZIP_DRIVE; 1038 drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
1056 /* This value will be visible in the /proc/ide/hdx/settings */ 1039 /* This value will be visible in the /proc/ide/hdx/settings */
1057 floppy->ticks = IDEFLOPPY_TICKS_DELAY; 1040 floppy->ticks = IDEFLOPPY_TICKS_DELAY;
1058 blk_queue_max_sectors(drive->queue, 64); 1041 blk_queue_max_sectors(drive->queue, 64);
@@ -1064,7 +1047,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1064 */ 1047 */
1065 if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) { 1048 if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) {
1066 blk_queue_max_sectors(drive->queue, 64); 1049 blk_queue_max_sectors(drive->queue, 64);
1067 floppy->flags |= IDEFLOPPY_FLAG_CLIK_DRIVE; 1050 drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
1068 } 1051 }
1069 1052
1070 (void) ide_floppy_get_capacity(drive); 1053 (void) ide_floppy_get_capacity(drive);
@@ -1153,7 +1136,7 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
1153 floppy->openers++; 1136 floppy->openers++;
1154 1137
1155 if (floppy->openers == 1) { 1138 if (floppy->openers == 1) {
1156 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1139 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1157 /* Just in case */ 1140 /* Just in case */
1158 1141
1159 idefloppy_init_pc(&pc); 1142 idefloppy_init_pc(&pc);
@@ -1180,14 +1163,14 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
1180 ret = -EROFS; 1163 ret = -EROFS;
1181 goto out_put_floppy; 1164 goto out_put_floppy;
1182 } 1165 }
1183 floppy->flags |= IDEFLOPPY_FLAG_MEDIA_CHANGED; 1166 drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
1184 /* IOMEGA Clik! drives do not support lock/unlock commands */ 1167 /* IOMEGA Clik! drives do not support lock/unlock commands */
1185 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1168 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1186 idefloppy_create_prevent_cmd(&pc, 1); 1169 idefloppy_create_prevent_cmd(&pc, 1);
1187 (void) idefloppy_queue_pc_tail(drive, &pc); 1170 (void) idefloppy_queue_pc_tail(drive, &pc);
1188 } 1171 }
1189 check_disk_change(inode->i_bdev); 1172 check_disk_change(inode->i_bdev);
1190 } else if (floppy->flags & IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS) { 1173 } else if (drive->atapi_flags & IDE_AFLAG_FORMAT_IN_PROGRESS) {
1191 ret = -EBUSY; 1174 ret = -EBUSY;
1192 goto out_put_floppy; 1175 goto out_put_floppy;
1193 } 1176 }
@@ -1210,12 +1193,12 @@ static int idefloppy_release(struct inode *inode, struct file *filp)
1210 1193
1211 if (floppy->openers == 1) { 1194 if (floppy->openers == 1) {
1212 /* IOMEGA Clik! drives do not support lock/unlock commands */ 1195 /* IOMEGA Clik! drives do not support lock/unlock commands */
1213 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1196 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1214 idefloppy_create_prevent_cmd(&pc, 0); 1197 idefloppy_create_prevent_cmd(&pc, 0);
1215 (void) idefloppy_queue_pc_tail(drive, &pc); 1198 (void) idefloppy_queue_pc_tail(drive, &pc);
1216 } 1199 }
1217 1200
1218 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1201 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1219 } 1202 }
1220 1203
1221 floppy->openers--; 1204 floppy->openers--;
@@ -1236,15 +1219,17 @@ static int idefloppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1236 return 0; 1219 return 0;
1237} 1220}
1238 1221
1239static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy, 1222static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
1240 struct ide_atapi_pc *pc, unsigned long arg, unsigned int cmd) 1223 unsigned long arg, unsigned int cmd)
1241{ 1224{
1225 idefloppy_floppy_t *floppy = drive->driver_data;
1226
1242 if (floppy->openers > 1) 1227 if (floppy->openers > 1)
1243 return -EBUSY; 1228 return -EBUSY;
1244 1229
1245 /* The IOMEGA Clik! Drive doesn't support this command - 1230 /* The IOMEGA Clik! Drive doesn't support this command -
1246 * no room for an eject mechanism */ 1231 * no room for an eject mechanism */
1247 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1232 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1248 int prevent = arg ? 1 : 0; 1233 int prevent = arg ? 1 : 0;
1249 1234
1250 if (cmd == CDROMEJECT) 1235 if (cmd == CDROMEJECT)
@@ -1265,16 +1250,17 @@ static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy,
1265static int ide_floppy_format_unit(idefloppy_floppy_t *floppy, 1250static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
1266 int __user *arg) 1251 int __user *arg)
1267{ 1252{
1268 int blocks, length, flags, err = 0;
1269 struct ide_atapi_pc pc; 1253 struct ide_atapi_pc pc;
1254 ide_drive_t *drive = floppy->drive;
1255 int blocks, length, flags, err = 0;
1270 1256
1271 if (floppy->openers > 1) { 1257 if (floppy->openers > 1) {
1272 /* Don't format if someone is using the disk */ 1258 /* Don't format if someone is using the disk */
1273 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1259 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1274 return -EBUSY; 1260 return -EBUSY;
1275 } 1261 }
1276 1262
1277 floppy->flags |= IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1263 drive->atapi_flags |= IDE_AFLAG_FORMAT_IN_PROGRESS;
1278 1264
1279 /* 1265 /*
1280 * Send ATAPI_FORMAT_UNIT to the drive. 1266 * Send ATAPI_FORMAT_UNIT to the drive.
@@ -1298,15 +1284,15 @@ static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
1298 goto out; 1284 goto out;
1299 } 1285 }
1300 1286
1301 (void) idefloppy_get_sfrp_bit(floppy->drive); 1287 (void) idefloppy_get_sfrp_bit(drive);
1302 idefloppy_create_format_unit_cmd(&pc, blocks, length, flags); 1288 idefloppy_create_format_unit_cmd(&pc, blocks, length, flags);
1303 1289
1304 if (idefloppy_queue_pc_tail(floppy->drive, &pc)) 1290 if (idefloppy_queue_pc_tail(drive, &pc))
1305 err = -EIO; 1291 err = -EIO;
1306 1292
1307out: 1293out:
1308 if (err) 1294 if (err)
1309 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1295 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1310 return err; 1296 return err;
1311} 1297}
1312 1298
@@ -1325,7 +1311,7 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file,
1325 case CDROMEJECT: 1311 case CDROMEJECT:
1326 /* fall through */ 1312 /* fall through */
1327 case CDROM_LOCKDOOR: 1313 case CDROM_LOCKDOOR:
1328 return ide_floppy_lockdoor(floppy, &pc, arg, cmd); 1314 return ide_floppy_lockdoor(drive, &pc, arg, cmd);
1329 case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED: 1315 case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED:
1330 return 0; 1316 return 0;
1331 case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: 1317 case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
@@ -1366,8 +1352,8 @@ static int idefloppy_media_changed(struct gendisk *disk)
1366 drive->attach = 0; 1352 drive->attach = 0;
1367 return 0; 1353 return 0;
1368 } 1354 }
1369 ret = !!(floppy->flags & IDEFLOPPY_FLAG_MEDIA_CHANGED); 1355 ret = !!(drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED);
1370 floppy->flags &= ~IDEFLOPPY_FLAG_MEDIA_CHANGED; 1356 drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
1371 return ret; 1357 return ret;
1372} 1358}
1373 1359
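In ide-floppy.c the per-driver floppy->flags word goes away: the IDEFLOPPY_FLAG_* bits become IDE_AFLAG_* bits kept in drive->atapi_flags, so the same state can be shared with the other ATAPI drivers. A small stand-alone sketch of that pattern follows; the structure is simplified, and the enum values simply mirror the removed IDEFLOPPY_FLAG_* definitions (the real IDE_AFLAG_* values live in <linux/ide.h> and are not shown in this diff).

    #include <stdio.h>

    enum {                                    /* illustrative bit assignments */
        IDE_AFLAG_DRQ_INTERRUPT      = (1 << 0),
        IDE_AFLAG_MEDIA_CHANGED      = (1 << 1),
        IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 2),
        IDE_AFLAG_CLIK_DRIVE         = (1 << 3),
        IDE_AFLAG_ZIP_DRIVE          = (1 << 4),
    };

    struct ide_drive { unsigned long atapi_flags; };   /* simplified */

    /* same report-once semantics as idefloppy_media_changed() */
    static int media_changed(struct ide_drive *drive)
    {
        int ret = !!(drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED);

        drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
        return ret;
    }

    int main(void)
    {
        struct ide_drive d = { .atapi_flags = IDE_AFLAG_MEDIA_CHANGED };

        printf("%d\n", media_changed(&d));    /* 1: media change reported */
        printf("%d\n", media_changed(&d));    /* 0: flag already cleared */
        return 0;
    }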
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 2d92214096ab..31d98fec775f 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -28,29 +28,21 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
28 28
29static ssize_t store_add(struct class *cls, const char *buf, size_t n) 29static ssize_t store_add(struct class *cls, const char *buf, size_t n)
30{ 30{
31 ide_hwif_t *hwif;
32 unsigned int base, ctl; 31 unsigned int base, ctl;
33 int irq; 32 int irq, rc;
34 hw_regs_t hw; 33 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
36 34
37 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3) 35 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
38 return -EINVAL; 36 return -EINVAL;
39 37
40 hwif = ide_find_port();
41 if (hwif == NULL)
42 return -ENOENT;
43
44 memset(&hw, 0, sizeof(hw)); 38 memset(&hw, 0, sizeof(hw));
45 ide_std_init_ports(&hw, base, ctl); 39 ide_std_init_ports(&hw, base, ctl);
46 hw.irq = irq; 40 hw.irq = irq;
47 hw.chipset = ide_generic; 41 hw.chipset = ide_generic;
48 42
49 ide_init_port_hw(hwif, &hw); 43 rc = ide_host_add(NULL, hws, NULL);
50 44 if (rc)
51 idx[0] = hwif->index; 45 return rc;
52
53 ide_device_add(idx, NULL);
54 46
55 return n; 47 return n;
56}; 48};
@@ -90,18 +82,18 @@ static int __init ide_generic_sysfs_init(void)
90 82
91static int __init ide_generic_init(void) 83static int __init ide_generic_init(void)
92{ 84{
93 u8 idx[MAX_HWIFS]; 85 hw_regs_t hw[MAX_HWIFS], *hws[MAX_HWIFS];
94 int i; 86 struct ide_host *host;
87 unsigned long io_addr;
88 int i, rc;
95 89
96 printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module " 90 printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module "
97 "parameter for probing all legacy ISA IDE ports\n"); 91 "parameter for probing all legacy ISA IDE ports\n");
98 92
99 for (i = 0; i < MAX_HWIFS; i++) { 93 for (i = 0; i < MAX_HWIFS; i++) {
100 ide_hwif_t *hwif; 94 io_addr = ide_default_io_base(i);
101 unsigned long io_addr = ide_default_io_base(i);
102 hw_regs_t hw;
103 95
104 idx[i] = 0xff; 96 hws[i] = NULL;
105 97
106 if ((probe_mask & (1 << i)) && io_addr) { 98 if ((probe_mask & (1 << i)) && io_addr) {
107 if (!request_region(io_addr, 8, DRV_NAME)) { 99 if (!request_region(io_addr, 8, DRV_NAME)) {
@@ -119,33 +111,42 @@ static int __init ide_generic_init(void)
119 continue; 111 continue;
120 } 112 }
121 113
122 /* 114 memset(&hw[i], 0, sizeof(hw[i]));
123 * Skip probing if the corresponding 115 ide_std_init_ports(&hw[i], io_addr, io_addr + 0x206);
124 * slot is already occupied. 116 hw[i].irq = ide_default_irq(io_addr);
125 */ 117 hw[i].chipset = ide_generic;
126 hwif = ide_find_port();
127 if (hwif == NULL || hwif->index != i) {
128 idx[i] = 0xff;
129 continue;
130 }
131
132 memset(&hw, 0, sizeof(hw));
133 ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
134 hw.irq = ide_default_irq(io_addr);
135 hw.chipset = ide_generic;
136 ide_init_port_hw(hwif, &hw);
137 118
138 idx[i] = i; 119 hws[i] = &hw[i];
139 } 120 }
140 } 121 }
141 122
142 ide_device_add_all(idx, NULL); 123 host = ide_host_alloc_all(NULL, hws);
124 if (host == NULL) {
125 rc = -ENOMEM;
126 goto err;
127 }
128
129 rc = ide_host_register(host, NULL, hws);
130 if (rc)
131 goto err_free;
143 132
144 if (ide_generic_sysfs_init()) 133 if (ide_generic_sysfs_init())
145 printk(KERN_ERR DRV_NAME ": failed to create ide_generic " 134 printk(KERN_ERR DRV_NAME ": failed to create ide_generic "
146 "class\n"); 135 "class\n");
147 136
148 return 0; 137 return 0;
138err_free:
139 ide_host_free(host);
140err:
141 for (i = 0; i < MAX_HWIFS; i++) {
142 if (hws[i] == NULL)
143 continue;
144
145 io_addr = hws[i]->io_ports.data_addr;
146 release_region(io_addr + 0x206, 1);
147 release_region(io_addr, 8);
148 }
149 return rc;
149} 150}
150 151
151module_init(ide_generic_init); 152module_init(ide_generic_init);
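ide-generic.c no longer grabs pre-allocated hwifs via ide_find_port(); it fills an array of hw_regs_t pointers (NULL for unused slots) and hands it to ide_host_alloc_all()/ide_host_register(), or to the ide_host_add() shortcut in the sysfs "add" handler. The sketch below is a user-space mock of that calling convention, not the kernel API; it only shows how the NULL holes in the hws[] array are skipped.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { unsigned long io_base; int irq; } hw_regs_t;   /* simplified */

    /* mock of ide_host_add(): register every non-NULL port slot */
    static int host_add(hw_regs_t **hws, int nslots)
    {
        int i, ports = 0;

        for (i = 0; i < nslots; i++) {
            if (hws[i] == NULL)
                continue;                       /* hole: nothing probed here */
            printf("port %d at 0x%lx irq %d\n", i, hws[i]->io_base, hws[i]->irq);
            ports++;
        }
        return ports ? 0 : -1;                  /* fail if no port was set up */
    }

    int main(void)
    {
        hw_regs_t hw0 = { 0x1f0, 14 }, hw1 = { 0x170, 15 };
        hw_regs_t *hws[] = { &hw0, &hw1, NULL, NULL };

        return host_add(hws, 4);
    }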
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 661b75a89d4d..a896a283f27f 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -330,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
330 tf->error = err; 330 tf->error = err;
331 tf->status = stat; 331 tf->status = stat;
332 332
333 drive->hwif->tf_read(drive, task); 333 drive->hwif->tp_ops->tf_read(drive, task);
334 334
335 if (task->tf_flags & IDE_TFLAG_DYN) 335 if (task->tf_flags & IDE_TFLAG_DYN)
336 kfree(task); 336 kfree(task);
@@ -381,8 +381,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
381 if (err == ABRT_ERR) { 381 if (err == ABRT_ERR) {
382 if (drive->select.b.lba && 382 if (drive->select.b.lba &&
383 /* some newer drives don't support WIN_SPECIFY */ 383 /* some newer drives don't support WIN_SPECIFY */
384 hwif->INB(hwif->io_ports.command_addr) == 384 hwif->tp_ops->read_status(hwif) == WIN_SPECIFY)
385 WIN_SPECIFY)
386 return ide_stopped; 385 return ide_stopped;
387 } else if ((err & BAD_CRC) == BAD_CRC) { 386 } else if ((err & BAD_CRC) == BAD_CRC) {
388 /* UDMA crc error, just retry the operation */ 387 /* UDMA crc error, just retry the operation */
@@ -408,7 +407,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
408 return ide_stopped; 407 return ide_stopped;
409 } 408 }
410 409
411 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 410 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
412 rq->errors |= ERROR_RESET; 411 rq->errors |= ERROR_RESET;
413 412
414 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 413 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
@@ -435,10 +434,9 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
435 /* add decoding error stuff */ 434 /* add decoding error stuff */
436 } 435 }
437 436
438 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 437 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
439 /* force an abort */ 438 /* force an abort */
440 hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, 439 hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
441 hwif->io_ports.command_addr);
442 440
443 if (rq->errors >= ERROR_MAX) { 441 if (rq->errors >= ERROR_MAX) {
444 ide_kill_rq(drive, rq); 442 ide_kill_rq(drive, rq);
@@ -712,7 +710,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
712#ifdef DEBUG 710#ifdef DEBUG
713 printk("%s: DRIVE_CMD (null)\n", drive->name); 711 printk("%s: DRIVE_CMD (null)\n", drive->name);
714#endif 712#endif
715 ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive)); 713 ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
714 ide_read_error(drive));
716 715
717 return ide_stopped; 716 return ide_stopped;
718} 717}
@@ -747,16 +746,17 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
747 * the bus may be broken enough to walk on our toes at this 746 * the bus may be broken enough to walk on our toes at this
748 * point. 747 * point.
749 */ 748 */
749 ide_hwif_t *hwif = drive->hwif;
750 int rc; 750 int rc;
751#ifdef DEBUG_PM 751#ifdef DEBUG_PM
752 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); 752 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
753#endif 753#endif
754 rc = ide_wait_not_busy(HWIF(drive), 35000); 754 rc = ide_wait_not_busy(hwif, 35000);
755 if (rc) 755 if (rc)
756 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); 756 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
757 SELECT_DRIVE(drive); 757 SELECT_DRIVE(drive);
758 ide_set_irq(drive, 1); 758 hwif->tp_ops->set_irq(hwif, 1);
759 rc = ide_wait_not_busy(HWIF(drive), 100000); 759 rc = ide_wait_not_busy(hwif, 100000);
760 if (rc) 760 if (rc)
761 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); 761 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
762 } 762 }
@@ -1042,7 +1042,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1042 * quirk_list may not like intr setups/cleanups 1042 * quirk_list may not like intr setups/cleanups
1043 */ 1043 */
1044 if (drive->quirk_list != 1) 1044 if (drive->quirk_list != 1)
1045 ide_set_irq(drive, 0); 1045 hwif->tp_ops->set_irq(hwif, 0);
1046 } 1046 }
1047 hwgroup->hwif = hwif; 1047 hwgroup->hwif = hwif;
1048 hwgroup->drive = drive; 1048 hwgroup->drive = drive;
@@ -1142,7 +1142,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
1142 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 1142 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
1143 (void)hwif->dma_ops->dma_end(drive); 1143 (void)hwif->dma_ops->dma_end(drive);
1144 ret = ide_error(drive, "dma timeout error", 1144 ret = ide_error(drive, "dma timeout error",
1145 ide_read_status(drive)); 1145 hwif->tp_ops->read_status(hwif));
1146 } else { 1146 } else {
1147 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1147 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
1148 hwif->dma_ops->dma_timeout(drive); 1148 hwif->dma_ops->dma_timeout(drive);
@@ -1267,7 +1267,7 @@ void ide_timer_expiry (unsigned long data)
1267 } else 1267 } else
1268 startstop = 1268 startstop =
1269 ide_error(drive, "irq timeout", 1269 ide_error(drive, "irq timeout",
1270 ide_read_status(drive)); 1270 hwif->tp_ops->read_status(hwif));
1271 } 1271 }
1272 drive->service_time = jiffies - drive->service_start; 1272 drive->service_time = jiffies - drive->service_start;
1273 spin_lock_irq(&ide_lock); 1273 spin_lock_irq(&ide_lock);
@@ -1323,7 +1323,8 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
1323 */ 1323 */
1324 do { 1324 do {
1325 if (hwif->irq == irq) { 1325 if (hwif->irq == irq) {
1326 stat = hwif->INB(hwif->io_ports.status_addr); 1326 stat = hwif->tp_ops->read_status(hwif);
1327
1327 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { 1328 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
1328 /* Try to not flood the console with msgs */ 1329 /* Try to not flood the console with msgs */
1329 static unsigned long last_msgtime, count; 1330 static unsigned long last_msgtime, count;
@@ -1413,7 +1414,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1413 * Whack the status register, just in case 1414 * Whack the status register, just in case
1414 * we have a leftover pending IRQ. 1415 * we have a leftover pending IRQ.
1415 */ 1416 */
1416 (void) hwif->INB(hwif->io_ports.status_addr); 1417 (void)hwif->tp_ops->read_status(hwif);
1417#endif /* CONFIG_BLK_DEV_IDEPCI */ 1418#endif /* CONFIG_BLK_DEV_IDEPCI */
1418 } 1419 }
1419 spin_unlock_irqrestore(&ide_lock, flags); 1420 spin_unlock_irqrestore(&ide_lock, flags);
@@ -1519,6 +1520,7 @@ EXPORT_SYMBOL(ide_do_drive_cmd);
1519 1520
1520void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) 1521void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1521{ 1522{
1523 ide_hwif_t *hwif = drive->hwif;
1522 ide_task_t task; 1524 ide_task_t task;
1523 1525
1524 memset(&task, 0, sizeof(task)); 1526 memset(&task, 0, sizeof(task));
@@ -1529,9 +1531,9 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1529 task.tf.lbah = (bcount >> 8) & 0xff; 1531 task.tf.lbah = (bcount >> 8) & 0xff;
1530 1532
1531 ide_tf_dump(drive->name, &task.tf); 1533 ide_tf_dump(drive->name, &task.tf);
1532 ide_set_irq(drive, 1); 1534 hwif->tp_ops->set_irq(hwif, 1);
1533 SELECT_MASK(drive, 0); 1535 SELECT_MASK(drive, 0);
1534 drive->hwif->tf_load(drive, &task); 1536 hwif->tp_ops->tf_load(drive, &task);
1535} 1537}
1536 1538
1537EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); 1539EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
@@ -1543,9 +1545,9 @@ void ide_pad_transfer(ide_drive_t *drive, int write, int len)
1543 1545
1544 while (len > 0) { 1546 while (len > 0) {
1545 if (write) 1547 if (write)
1546 hwif->output_data(drive, NULL, buf, min(4, len)); 1548 hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
1547 else 1549 else
1548 hwif->input_data(drive, NULL, buf, min(4, len)); 1550 hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
1549 len -= 4; 1551 len -= 4;
1550 } 1552 }
1551} 1553}
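Throughout ide-io.c, direct hwif->INB/OUTBSYNC pokes and the ide_read_status()/ide_set_irq() wrappers are replaced by calls through hwif->tp_ops, the per-port transport method table that the ide-iops.c changes below define (default_tp_ops). The mock below shows only the indirection itself: a table of function pointers attached to the port, with callers never touching registers directly. All types here are simplified stand-ins, and 0xec is the standard ATA IDENTIFY DEVICE opcode (WIN_IDENTIFY).

    #include <stdint.h>
    #include <stdio.h>

    struct hwif;

    struct tp_ops {                               /* mock of struct ide_tp_ops */
        void    (*exec_command)(struct hwif *, uint8_t);
        uint8_t (*read_status)(struct hwif *);
    };

    struct hwif {
        const struct tp_ops *tp_ops;
        uint8_t status;                           /* fake status register */
        uint8_t last_cmd;
    };

    static void pio_exec_command(struct hwif *h, uint8_t cmd) { h->last_cmd = cmd; }
    static uint8_t pio_read_status(struct hwif *h)            { return h->status; }

    static const struct tp_ops default_ops = {    /* stands in for default_tp_ops */
        .exec_command = pio_exec_command,
        .read_status  = pio_read_status,
    };

    int main(void)
    {
        struct hwif hw = { .tp_ops = &default_ops, .status = 0x50 };

        hw.tp_ops->exec_command(&hw, 0xec);       /* WIN_IDENTIFY */
        printf("status 0x%02x\n", hw.tp_ops->read_status(&hw));
        return 0;
    }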
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 44aaec256a30..07da5fb9eaff 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -42,18 +42,6 @@ static void ide_outb (u8 val, unsigned long port)
42 outb(val, port); 42 outb(val, port);
43} 43}
44 44
45static void ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
46{
47 outb(addr, port);
48}
49
50void default_hwif_iops (ide_hwif_t *hwif)
51{
52 hwif->OUTB = ide_outb;
53 hwif->OUTBSYNC = ide_outbsync;
54 hwif->INB = ide_inb;
55}
56
57/* 45/*
58 * MMIO operations, typically used for SATA controllers 46 * MMIO operations, typically used for SATA controllers
59 */ 47 */
@@ -68,31 +56,19 @@ static void ide_mm_outb (u8 value, unsigned long port)
68 writeb(value, (void __iomem *) port); 56 writeb(value, (void __iomem *) port);
69} 57}
70 58
71static void ide_mm_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
72{
73 writeb(value, (void __iomem *) port);
74}
75
76void default_hwif_mmiops (ide_hwif_t *hwif)
77{
78 hwif->OUTB = ide_mm_outb;
79 /* Most systems will need to override OUTBSYNC, alas however
80 this one is controller specific! */
81 hwif->OUTBSYNC = ide_mm_outbsync;
82 hwif->INB = ide_mm_inb;
83}
84
85EXPORT_SYMBOL(default_hwif_mmiops);
86
87void SELECT_DRIVE (ide_drive_t *drive) 59void SELECT_DRIVE (ide_drive_t *drive)
88{ 60{
89 ide_hwif_t *hwif = drive->hwif; 61 ide_hwif_t *hwif = drive->hwif;
90 const struct ide_port_ops *port_ops = hwif->port_ops; 62 const struct ide_port_ops *port_ops = hwif->port_ops;
63 ide_task_t task;
91 64
92 if (port_ops && port_ops->selectproc) 65 if (port_ops && port_ops->selectproc)
93 port_ops->selectproc(drive); 66 port_ops->selectproc(drive);
94 67
95 hwif->OUTB(drive->select.all, hwif->io_ports.device_addr); 68 memset(&task, 0, sizeof(task));
69 task.tf_flags = IDE_TFLAG_OUT_DEVICE;
70
71 drive->hwif->tp_ops->tf_load(drive, &task);
96} 72}
97 73
98void SELECT_MASK(ide_drive_t *drive, int mask) 74void SELECT_MASK(ide_drive_t *drive, int mask)
@@ -103,7 +79,61 @@ void SELECT_MASK(ide_drive_t *drive, int mask)
103 port_ops->maskproc(drive, mask); 79 port_ops->maskproc(drive, mask);
104} 80}
105 81
106static void ide_tf_load(ide_drive_t *drive, ide_task_t *task) 82void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
83{
84 if (hwif->host_flags & IDE_HFLAG_MMIO)
85 writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
86 else
87 outb(cmd, hwif->io_ports.command_addr);
88}
89EXPORT_SYMBOL_GPL(ide_exec_command);
90
91u8 ide_read_status(ide_hwif_t *hwif)
92{
93 if (hwif->host_flags & IDE_HFLAG_MMIO)
94 return readb((void __iomem *)hwif->io_ports.status_addr);
95 else
96 return inb(hwif->io_ports.status_addr);
97}
98EXPORT_SYMBOL_GPL(ide_read_status);
99
100u8 ide_read_altstatus(ide_hwif_t *hwif)
101{
102 if (hwif->host_flags & IDE_HFLAG_MMIO)
103 return readb((void __iomem *)hwif->io_ports.ctl_addr);
104 else
105 return inb(hwif->io_ports.ctl_addr);
106}
107EXPORT_SYMBOL_GPL(ide_read_altstatus);
108
109u8 ide_read_sff_dma_status(ide_hwif_t *hwif)
110{
111 if (hwif->host_flags & IDE_HFLAG_MMIO)
112 return readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
113 else
114 return inb(hwif->dma_base + ATA_DMA_STATUS);
115}
116EXPORT_SYMBOL_GPL(ide_read_sff_dma_status);
117
118void ide_set_irq(ide_hwif_t *hwif, int on)
119{
120 u8 ctl = ATA_DEVCTL_OBS;
121
122 if (on == 4) { /* hack for SRST */
123 ctl |= 4;
124 on &= ~4;
125 }
126
127 ctl |= on ? 0 : 2;
128
129 if (hwif->host_flags & IDE_HFLAG_MMIO)
130 writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
131 else
132 outb(ctl, hwif->io_ports.ctl_addr);
133}
134EXPORT_SYMBOL_GPL(ide_set_irq);
135
136void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
107{ 137{
108 ide_hwif_t *hwif = drive->hwif; 138 ide_hwif_t *hwif = drive->hwif;
109 struct ide_io_ports *io_ports = &hwif->io_ports; 139 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -155,8 +185,9 @@ static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
155 tf_outb((tf->device & HIHI) | drive->select.all, 185 tf_outb((tf->device & HIHI) | drive->select.all,
156 io_ports->device_addr); 186 io_ports->device_addr);
157} 187}
188EXPORT_SYMBOL_GPL(ide_tf_load);
158 189
159static void ide_tf_read(ide_drive_t *drive, ide_task_t *task) 190void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
160{ 191{
161 ide_hwif_t *hwif = drive->hwif; 192 ide_hwif_t *hwif = drive->hwif;
162 struct ide_io_ports *io_ports = &hwif->io_ports; 193 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -188,6 +219,8 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
188 /* be sure we're looking at the low order bits */ 219 /* be sure we're looking at the low order bits */
189 tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 220 tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
190 221
222 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
223 tf->feature = tf_inb(io_ports->feature_addr);
191 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 224 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
192 tf->nsect = tf_inb(io_ports->nsect_addr); 225 tf->nsect = tf_inb(io_ports->nsect_addr);
193 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 226 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -214,6 +247,7 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
214 tf->hob_lbah = tf_inb(io_ports->lbah_addr); 247 tf->hob_lbah = tf_inb(io_ports->lbah_addr);
215 } 248 }
216} 249}
250EXPORT_SYMBOL_GPL(ide_tf_read);
217 251
218/* 252/*
219 * Some localbus EIDE interfaces require a special access sequence 253 * Some localbus EIDE interfaces require a special access sequence
@@ -236,8 +270,8 @@ static void ata_vlb_sync(unsigned long port)
236 * so if an odd len is specified, be sure that there's at least one 270 * so if an odd len is specified, be sure that there's at least one
237 * extra byte allocated for the buffer. 271 * extra byte allocated for the buffer.
238 */ 272 */
239static void ata_input_data(ide_drive_t *drive, struct request *rq, 273void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
240 void *buf, unsigned int len) 274 unsigned int len)
241{ 275{
242 ide_hwif_t *hwif = drive->hwif; 276 ide_hwif_t *hwif = drive->hwif;
243 struct ide_io_ports *io_ports = &hwif->io_ports; 277 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -277,12 +311,13 @@ static void ata_input_data(ide_drive_t *drive, struct request *rq,
277 insw(data_addr, buf, len / 2); 311 insw(data_addr, buf, len / 2);
278 } 312 }
279} 313}
314EXPORT_SYMBOL_GPL(ide_input_data);
280 315
281/* 316/*
282 * This is used for most PIO data transfers *to* the IDE interface 317 * This is used for most PIO data transfers *to* the IDE interface
283 */ 318 */
284static void ata_output_data(ide_drive_t *drive, struct request *rq, 319void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
285 void *buf, unsigned int len) 320 unsigned int len)
286{ 321{
287 ide_hwif_t *hwif = drive->hwif; 322 ide_hwif_t *hwif = drive->hwif;
288 struct ide_io_ports *io_ports = &hwif->io_ports; 323 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -320,15 +355,50 @@ static void ata_output_data(ide_drive_t *drive, struct request *rq,
320 outsw(data_addr, buf, len / 2); 355 outsw(data_addr, buf, len / 2);
321 } 356 }
322} 357}
358EXPORT_SYMBOL_GPL(ide_output_data);
359
360u8 ide_read_error(ide_drive_t *drive)
361{
362 ide_task_t task;
363
364 memset(&task, 0, sizeof(task));
365 task.tf_flags = IDE_TFLAG_IN_FEATURE;
366
367 drive->hwif->tp_ops->tf_read(drive, &task);
368
369 return task.tf.error;
370}
371EXPORT_SYMBOL_GPL(ide_read_error);
323 372
324void default_hwif_transport(ide_hwif_t *hwif) 373void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
325{ 374{
326 hwif->tf_load = ide_tf_load; 375 ide_task_t task;
327 hwif->tf_read = ide_tf_read; 376
377 memset(&task, 0, sizeof(task));
378 task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
379 IDE_TFLAG_IN_NSECT;
328 380
329 hwif->input_data = ata_input_data; 381 drive->hwif->tp_ops->tf_read(drive, &task);
330 hwif->output_data = ata_output_data; 382
383 *bcount = (task.tf.lbah << 8) | task.tf.lbam;
384 *ireason = task.tf.nsect & 3;
331} 385}
386EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
387
388const struct ide_tp_ops default_tp_ops = {
389 .exec_command = ide_exec_command,
390 .read_status = ide_read_status,
391 .read_altstatus = ide_read_altstatus,
392 .read_sff_dma_status = ide_read_sff_dma_status,
393
394 .set_irq = ide_set_irq,
395
396 .tf_load = ide_tf_load,
397 .tf_read = ide_tf_read,
398
399 .input_data = ide_input_data,
400 .output_data = ide_output_data,
401};
332 402
333void ide_fix_driveid (struct hd_driveid *id) 403void ide_fix_driveid (struct hd_driveid *id)
334{ 404{
@@ -483,10 +553,10 @@ int drive_is_ready (ide_drive_t *drive)
483 * about possible isa-pnp and pci-pnp issues yet. 553 * about possible isa-pnp and pci-pnp issues yet.
484 */ 554 */
485 if (hwif->io_ports.ctl_addr) 555 if (hwif->io_ports.ctl_addr)
486 stat = ide_read_altstatus(drive); 556 stat = hwif->tp_ops->read_altstatus(hwif);
487 else 557 else
488 /* Note: this may clear a pending IRQ!! */ 558 /* Note: this may clear a pending IRQ!! */
489 stat = ide_read_status(drive); 559 stat = hwif->tp_ops->read_status(hwif);
490 560
491 if (stat & BUSY_STAT) 561 if (stat & BUSY_STAT)
492 /* drive busy: definitely not interrupting */ 562 /* drive busy: definitely not interrupting */
@@ -511,24 +581,26 @@ EXPORT_SYMBOL(drive_is_ready);
511 */ 581 */
512static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat) 582static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
513{ 583{
584 ide_hwif_t *hwif = drive->hwif;
585 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
514 unsigned long flags; 586 unsigned long flags;
515 int i; 587 int i;
516 u8 stat; 588 u8 stat;
517 589
518 udelay(1); /* spec allows drive 400ns to assert "BUSY" */ 590 udelay(1); /* spec allows drive 400ns to assert "BUSY" */
519 stat = ide_read_status(drive); 591 stat = tp_ops->read_status(hwif);
520 592
521 if (stat & BUSY_STAT) { 593 if (stat & BUSY_STAT) {
522 local_irq_set(flags); 594 local_irq_set(flags);
523 timeout += jiffies; 595 timeout += jiffies;
524 while ((stat = ide_read_status(drive)) & BUSY_STAT) { 596 while ((stat = tp_ops->read_status(hwif)) & BUSY_STAT) {
525 if (time_after(jiffies, timeout)) { 597 if (time_after(jiffies, timeout)) {
526 /* 598 /*
527 * One last read after the timeout in case 599 * One last read after the timeout in case
528 * heavy interrupt load made us not make any 600 * heavy interrupt load made us not make any
529 * progress during the timeout.. 601 * progress during the timeout..
530 */ 602 */
531 stat = ide_read_status(drive); 603 stat = tp_ops->read_status(hwif);
532 if (!(stat & BUSY_STAT)) 604 if (!(stat & BUSY_STAT))
533 break; 605 break;
534 606
@@ -548,7 +620,7 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
548 */ 620 */
549 for (i = 0; i < 10; i++) { 621 for (i = 0; i < 10; i++) {
550 udelay(1); 622 udelay(1);
551 stat = ide_read_status(drive); 623 stat = tp_ops->read_status(hwif);
552 624
553 if (OK_STAT(stat, good, bad)) { 625 if (OK_STAT(stat, good, bad)) {
554 *rstat = stat; 626 *rstat = stat;
@@ -674,6 +746,7 @@ no_80w:
674int ide_driveid_update(ide_drive_t *drive) 746int ide_driveid_update(ide_drive_t *drive)
675{ 747{
676 ide_hwif_t *hwif = drive->hwif; 748 ide_hwif_t *hwif = drive->hwif;
749 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
677 struct hd_driveid *id; 750 struct hd_driveid *id;
678 unsigned long timeout, flags; 751 unsigned long timeout, flags;
679 u8 stat; 752 u8 stat;
@@ -684,9 +757,9 @@ int ide_driveid_update(ide_drive_t *drive)
684 */ 757 */
685 758
686 SELECT_MASK(drive, 1); 759 SELECT_MASK(drive, 1);
687 ide_set_irq(drive, 0); 760 tp_ops->set_irq(hwif, 0);
688 msleep(50); 761 msleep(50);
689 hwif->OUTBSYNC(hwif, WIN_IDENTIFY, hwif->io_ports.command_addr); 762 tp_ops->exec_command(hwif, WIN_IDENTIFY);
690 timeout = jiffies + WAIT_WORSTCASE; 763 timeout = jiffies + WAIT_WORSTCASE;
691 do { 764 do {
692 if (time_after(jiffies, timeout)) { 765 if (time_after(jiffies, timeout)) {
@@ -695,11 +768,11 @@ int ide_driveid_update(ide_drive_t *drive)
695 } 768 }
696 769
697 msleep(50); /* give drive a breather */ 770 msleep(50); /* give drive a breather */
698 stat = ide_read_altstatus(drive); 771 stat = tp_ops->read_altstatus(hwif);
699 } while (stat & BUSY_STAT); 772 } while (stat & BUSY_STAT);
700 773
701 msleep(50); /* wait for IRQ and DRQ_STAT */ 774 msleep(50); /* wait for IRQ and DRQ_STAT */
702 stat = ide_read_status(drive); 775 stat = tp_ops->read_status(hwif);
703 776
704 if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) { 777 if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
705 SELECT_MASK(drive, 0); 778 SELECT_MASK(drive, 0);
@@ -713,8 +786,8 @@ int ide_driveid_update(ide_drive_t *drive)
713 local_irq_restore(flags); 786 local_irq_restore(flags);
714 return 0; 787 return 0;
715 } 788 }
716 hwif->input_data(drive, NULL, id, SECTOR_SIZE); 789 tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
717 (void)ide_read_status(drive); /* clear drive IRQ */ 790 (void)tp_ops->read_status(hwif); /* clear drive IRQ */
718 local_irq_enable(); 791 local_irq_enable();
719 local_irq_restore(flags); 792 local_irq_restore(flags);
720 ide_fix_driveid(id); 793 ide_fix_driveid(id);
@@ -735,9 +808,10 @@ int ide_driveid_update(ide_drive_t *drive)
735int ide_config_drive_speed(ide_drive_t *drive, u8 speed) 808int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
736{ 809{
737 ide_hwif_t *hwif = drive->hwif; 810 ide_hwif_t *hwif = drive->hwif;
738 struct ide_io_ports *io_ports = &hwif->io_ports; 811 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
739 int error = 0; 812 int error = 0;
740 u8 stat; 813 u8 stat;
814 ide_task_t task;
741 815
742#ifdef CONFIG_BLK_DEV_IDEDMA 816#ifdef CONFIG_BLK_DEV_IDEDMA
743 if (hwif->dma_ops) /* check if host supports DMA */ 817 if (hwif->dma_ops) /* check if host supports DMA */
@@ -770,12 +844,19 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
770 SELECT_DRIVE(drive); 844 SELECT_DRIVE(drive);
771 SELECT_MASK(drive, 0); 845 SELECT_MASK(drive, 0);
772 udelay(1); 846 udelay(1);
773 ide_set_irq(drive, 0); 847 tp_ops->set_irq(hwif, 0);
774 hwif->OUTB(speed, io_ports->nsect_addr); 848
775 hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr); 849 memset(&task, 0, sizeof(task));
776 hwif->OUTBSYNC(hwif, WIN_SETFEATURES, io_ports->command_addr); 850 task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT;
851 task.tf.feature = SETFEATURES_XFER;
852 task.tf.nsect = speed;
853
854 tp_ops->tf_load(drive, &task);
855
856 tp_ops->exec_command(hwif, WIN_SETFEATURES);
857
777 if (drive->quirk_list == 2) 858 if (drive->quirk_list == 2)
778 ide_set_irq(drive, 1); 859 tp_ops->set_irq(hwif, 1);
779 860
780 error = __ide_wait_stat(drive, drive->ready_stat, 861 error = __ide_wait_stat(drive, drive->ready_stat,
781 BUSY_STAT|DRQ_STAT|ERR_STAT, 862 BUSY_STAT|DRQ_STAT|ERR_STAT,
@@ -796,8 +877,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
796 877
797 skip: 878 skip:
798#ifdef CONFIG_BLK_DEV_IDEDMA 879#ifdef CONFIG_BLK_DEV_IDEDMA
799 if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) && 880 if (speed >= XFER_SW_DMA_0 && drive->using_dma)
800 drive->using_dma)
801 hwif->dma_ops->dma_host_set(drive, 1); 881 hwif->dma_ops->dma_host_set(drive, 1);
802 else if (hwif->dma_ops) /* check if host supports DMA */ 882 else if (hwif->dma_ops) /* check if host supports DMA */
803 ide_dma_off_quietly(drive); 883 ide_dma_off_quietly(drive);
@@ -881,7 +961,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
881 961
882 spin_lock_irqsave(&ide_lock, flags); 962 spin_lock_irqsave(&ide_lock, flags);
883 __ide_set_handler(drive, handler, timeout, expiry); 963 __ide_set_handler(drive, handler, timeout, expiry);
884 hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr); 964 hwif->tp_ops->exec_command(hwif, cmd);
885 /* 965 /*
886 * Drive takes 400nS to respond, we must avoid the IRQ being 966 * Drive takes 400nS to respond, we must avoid the IRQ being
887 * serviced before that. 967 * serviced before that.
@@ -899,7 +979,7 @@ void ide_execute_pkt_cmd(ide_drive_t *drive)
899 unsigned long flags; 979 unsigned long flags;
900 980
901 spin_lock_irqsave(&ide_lock, flags); 981 spin_lock_irqsave(&ide_lock, flags);
902 hwif->OUTBSYNC(hwif, WIN_PACKETCMD, hwif->io_ports.command_addr); 982 hwif->tp_ops->exec_command(hwif, WIN_PACKETCMD);
903 ndelay(400); 983 ndelay(400);
904 spin_unlock_irqrestore(&ide_lock, flags); 984 spin_unlock_irqrestore(&ide_lock, flags);
905} 985}
@@ -924,12 +1004,13 @@ static ide_startstop_t do_reset1 (ide_drive_t *, int);
924 */ 1004 */
925static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) 1005static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
926{ 1006{
927 ide_hwgroup_t *hwgroup = HWGROUP(drive); 1007 ide_hwif_t *hwif = drive->hwif;
1008 ide_hwgroup_t *hwgroup = hwif->hwgroup;
928 u8 stat; 1009 u8 stat;
929 1010
930 SELECT_DRIVE(drive); 1011 SELECT_DRIVE(drive);
931 udelay (10); 1012 udelay (10);
932 stat = ide_read_status(drive); 1013 stat = hwif->tp_ops->read_status(hwif);
933 1014
934 if (OK_STAT(stat, 0, BUSY_STAT)) 1015 if (OK_STAT(stat, 0, BUSY_STAT))
935 printk("%s: ATAPI reset complete\n", drive->name); 1016 printk("%s: ATAPI reset complete\n", drive->name);
@@ -975,7 +1056,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
975 } 1056 }
976 } 1057 }
977 1058
978 tmp = ide_read_status(drive); 1059 tmp = hwif->tp_ops->read_status(hwif);
979 1060
980 if (!OK_STAT(tmp, 0, BUSY_STAT)) { 1061 if (!OK_STAT(tmp, 0, BUSY_STAT)) {
981 if (time_before(jiffies, hwgroup->poll_timeout)) { 1062 if (time_before(jiffies, hwgroup->poll_timeout)) {
@@ -1089,8 +1170,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1089 ide_hwif_t *hwif; 1170 ide_hwif_t *hwif;
1090 ide_hwgroup_t *hwgroup; 1171 ide_hwgroup_t *hwgroup;
1091 struct ide_io_ports *io_ports; 1172 struct ide_io_ports *io_ports;
1173 const struct ide_tp_ops *tp_ops;
1092 const struct ide_port_ops *port_ops; 1174 const struct ide_port_ops *port_ops;
1093 u8 ctl;
1094 1175
1095 spin_lock_irqsave(&ide_lock, flags); 1176 spin_lock_irqsave(&ide_lock, flags);
1096 hwif = HWIF(drive); 1177 hwif = HWIF(drive);
@@ -1098,6 +1179,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1098 1179
1099 io_ports = &hwif->io_ports; 1180 io_ports = &hwif->io_ports;
1100 1181
1182 tp_ops = hwif->tp_ops;
1183
1101 /* We must not reset with running handlers */ 1184 /* We must not reset with running handlers */
1102 BUG_ON(hwgroup->handler != NULL); 1185 BUG_ON(hwgroup->handler != NULL);
1103 1186
@@ -1106,7 +1189,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1106 pre_reset(drive); 1189 pre_reset(drive);
1107 SELECT_DRIVE(drive); 1190 SELECT_DRIVE(drive);
1108 udelay (20); 1191 udelay (20);
1109 hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr); 1192 tp_ops->exec_command(hwif, WIN_SRST);
1110 ndelay(400); 1193 ndelay(400);
1111 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1194 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1112 hwgroup->polling = 1; 1195 hwgroup->polling = 1;
@@ -1135,16 +1218,15 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1135 * immediate interrupt due to the edge transition it produces. 1218 * immediate interrupt due to the edge transition it produces.
1136 * This single interrupt gives us a "fast poll" for drives that 1219 * This single interrupt gives us a "fast poll" for drives that
1137 * recover from reset very quickly, saving us the first 50ms wait time. 1220 * recover from reset very quickly, saving us the first 50ms wait time.
1221 *
1222 * TODO: add ->softreset method and stop abusing ->set_irq
1138 */ 1223 */
1139 /* set SRST and nIEN */ 1224 /* set SRST and nIEN */
1140 hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | 6, io_ports->ctl_addr); 1225 tp_ops->set_irq(hwif, 4);
1141 /* more than enough time */ 1226 /* more than enough time */
1142 udelay(10); 1227 udelay(10);
1143 if (drive->quirk_list == 2) 1228 /* clear SRST, leave nIEN (unless device is on the quirk list) */
1144 ctl = ATA_DEVCTL_OBS; /* clear SRST and nIEN */ 1229 tp_ops->set_irq(hwif, drive->quirk_list == 2);
1145 else
1146 ctl = ATA_DEVCTL_OBS | 2; /* clear SRST, leave nIEN */
1147 hwif->OUTBSYNC(hwif, ctl, io_ports->ctl_addr);
1148 /* more than enough time */ 1230 /* more than enough time */
1149 udelay(10); 1231 udelay(10);
1150 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1232 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1189,7 +1271,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1189 * about locking issues (2.5 work ?). 1271 * about locking issues (2.5 work ?).
1190 */ 1272 */
1191 mdelay(1); 1273 mdelay(1);
1192 stat = hwif->INB(hwif->io_ports.status_addr); 1274 stat = hwif->tp_ops->read_status(hwif);
1193 if ((stat & BUSY_STAT) == 0) 1275 if ((stat & BUSY_STAT) == 0)
1194 return 0; 1276 return 0;
1195 /* 1277 /*
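Among the new exported helpers in ide-iops.c, ide_read_bcount_and_ireason() recovers the ATAPI byte count and interrupt reason from a taskfile read: the byte count is split across the LBA mid/high (cylinder low/high) registers and the interrupt reason sits in the low two bits of the sector count register. A stand-alone sketch of just that arithmetic, with a made-up tf structure; the interpretation comments follow the ATAPI convention rather than anything stated in this diff.

    #include <stdint.h>
    #include <stdio.h>

    struct tf { uint8_t nsect, lbam, lbah; };     /* fake taskfile snapshot */

    static void read_bcount_and_ireason(const struct tf *tf,
                                        uint16_t *bcount, uint8_t *ireason)
    {
        *bcount  = (tf->lbah << 8) | tf->lbam;    /* byte count, high:low */
        *ireason = tf->nsect & 3;                 /* CoD and I/O bits */
    }

    int main(void)
    {
        struct tf tf = { .nsect = 2, .lbam = 0x00, .lbah = 0x02 };
        uint16_t bcount;
        uint8_t ireason;

        read_bcount_and_ireason(&tf, &bcount, &ireason);
        printf("bcount=%u ireason=%u\n", bcount, ireason);   /* 512, 2 (data-in) */
        return 0;
    }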
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 13af72f09ec4..97fefabea8b8 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -266,22 +266,11 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
266 266
267 rate = ide_rate_filter(drive, rate); 267 rate = ide_rate_filter(drive, rate);
268 268
269 BUG_ON(rate < XFER_PIO_0);
270
269 if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5) 271 if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
270 return ide_set_pio_mode(drive, rate); 272 return ide_set_pio_mode(drive, rate);
271 273
272 /*
273 * TODO: transfer modes 0x00-0x07 passed from the user-space are
274 * currently handled here which needs fixing (please note that such
275 * case could happen iff the transfer mode has already been set on
276 * the device by ide-proc.c::set_xfer_rate()).
277 */
278 if (rate < XFER_PIO_0) {
279 if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE)
280 return ide_set_dma_mode(drive, rate);
281 else
282 return ide_config_drive_speed(drive, rate);
283 }
284
285 return ide_set_dma_mode(drive, rate); 274 return ide_set_dma_mode(drive, rate);
286} 275}
287 276
@@ -336,7 +325,7 @@ static void ide_dump_sector(ide_drive_t *drive)
336 else 325 else
337 task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; 326 task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
338 327
339 drive->hwif->tf_read(drive, &task); 328 drive->hwif->tp_ops->tf_read(drive, &task);
340 329
341 if (lba48 || (tf->device & ATA_LBA)) 330 if (lba48 || (tf->device & ATA_LBA))
342 printk(", LBAsect=%llu", 331 printk(", LBAsect=%llu",
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 03f2ef5470a3..bac9b392b689 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -29,9 +29,10 @@ static struct pnp_device_id idepnp_devices[] = {
29 29
30static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) 30static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
31{ 31{
32 hw_regs_t hw; 32 struct ide_host *host;
33 ide_hwif_t *hwif;
34 unsigned long base, ctl; 33 unsigned long base, ctl;
34 int rc;
35 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 36
36 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); 37 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
37 38
@@ -59,31 +60,25 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
59 hw.irq = pnp_irq(dev, 0); 60 hw.irq = pnp_irq(dev, 0);
60 hw.chipset = ide_generic; 61 hw.chipset = ide_generic;
61 62
62 hwif = ide_find_port(); 63 rc = ide_host_add(NULL, hws, &host);
63 if (hwif) { 64 if (rc)
64 u8 index = hwif->index; 65 goto out;
65 u8 idx[4] = { index, 0xff, 0xff, 0xff };
66 66
67 ide_init_port_hw(hwif, &hw); 67 pnp_set_drvdata(dev, host);
68
69 pnp_set_drvdata(dev, hwif);
70
71 ide_device_add(idx, NULL);
72
73 return 0;
74 }
75 68
69 return 0;
70out:
76 release_region(ctl, 1); 71 release_region(ctl, 1);
77 release_region(base, 8); 72 release_region(base, 8);
78 73
79 return -1; 74 return rc;
80} 75}
81 76
82static void idepnp_remove(struct pnp_dev *dev) 77static void idepnp_remove(struct pnp_dev *dev)
83{ 78{
84 ide_hwif_t *hwif = pnp_get_drvdata(dev); 79 struct ide_host *host = pnp_get_drvdata(dev);
85 80
86 ide_unregister(hwif); 81 ide_host_remove(host);
87 82
88 release_region(pnp_port_start(dev, 1), 1); 83 release_region(pnp_port_start(dev, 1), 1);
89 release_region(pnp_port_start(dev, 0), 8); 84 release_region(pnp_port_start(dev, 0), 8);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 235ebdb29b28..4aa76c453755 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -39,8 +39,6 @@
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
43
44/** 42/**
45 * generic_id - add a generic drive id 43 * generic_id - add a generic drive id
46 * @drive: drive to make an ID block for 44 * @drive: drive to make an ID block for
@@ -126,7 +124,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
126 124
127 id = drive->id; 125 id = drive->id;
128 /* read 512 bytes of id info */ 126 /* read 512 bytes of id info */
129 hwif->input_data(drive, NULL, id, SECTOR_SIZE); 127 hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
130 128
131 drive->id_read = 1; 129 drive->id_read = 1;
132 local_irq_enable(); 130 local_irq_enable();
@@ -267,6 +265,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
267{ 265{
268 ide_hwif_t *hwif = HWIF(drive); 266 ide_hwif_t *hwif = HWIF(drive);
269 struct ide_io_ports *io_ports = &hwif->io_ports; 267 struct ide_io_ports *io_ports = &hwif->io_ports;
268 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
270 int use_altstatus = 0, rc; 269 int use_altstatus = 0, rc;
271 unsigned long timeout; 270 unsigned long timeout;
272 u8 s = 0, a = 0; 271 u8 s = 0, a = 0;
@@ -275,8 +274,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
275 msleep(50); 274 msleep(50);
276 275
277 if (io_ports->ctl_addr) { 276 if (io_ports->ctl_addr) {
278 a = ide_read_altstatus(drive); 277 a = tp_ops->read_altstatus(hwif);
279 s = ide_read_status(drive); 278 s = tp_ops->read_status(hwif);
280 if ((a ^ s) & ~INDEX_STAT) 279 if ((a ^ s) & ~INDEX_STAT)
281 /* ancient Seagate drives, broken interfaces */ 280 /* ancient Seagate drives, broken interfaces */
282 printk(KERN_INFO "%s: probing with STATUS(0x%02x) " 281 printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
@@ -290,12 +289,18 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
290 /* set features register for atapi 289 /* set features register for atapi
291 * identify command to be sure of reply 290 * identify command to be sure of reply
292 */ 291 */
293 if ((cmd == WIN_PIDENTIFY)) 292 if (cmd == WIN_PIDENTIFY) {
294 /* disable dma & overlap */ 293 ide_task_t task;
295 hwif->OUTB(0, io_ports->feature_addr); 294
295 memset(&task, 0, sizeof(task));
296 /* disable DMA & overlap */
297 task.tf_flags = IDE_TFLAG_OUT_FEATURE;
298
299 tp_ops->tf_load(drive, &task);
300 }
296 301
297 /* ask drive for ID */ 302 /* ask drive for ID */
298 hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr); 303 tp_ops->exec_command(hwif, cmd);
299 304
300 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; 305 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
301 timeout += jiffies; 306 timeout += jiffies;
@@ -306,13 +311,13 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
306 } 311 }
307 /* give drive a breather */ 312 /* give drive a breather */
308 msleep(50); 313 msleep(50);
309 s = use_altstatus ? ide_read_altstatus(drive) 314 s = use_altstatus ? tp_ops->read_altstatus(hwif)
310 : ide_read_status(drive); 315 : tp_ops->read_status(hwif);
311 } while (s & BUSY_STAT); 316 } while (s & BUSY_STAT);
312 317
313 /* wait for IRQ and DRQ_STAT */ 318 /* wait for IRQ and DRQ_STAT */
314 msleep(50); 319 msleep(50);
315 s = ide_read_status(drive); 320 s = tp_ops->read_status(hwif);
316 321
317 if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) { 322 if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) {
318 unsigned long flags; 323 unsigned long flags;
@@ -324,7 +329,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
324 /* drive responded with ID */ 329 /* drive responded with ID */
325 rc = 0; 330 rc = 0;
326 /* clear drive IRQ */ 331 /* clear drive IRQ */
327 (void)ide_read_status(drive); 332 (void)tp_ops->read_status(hwif);
328 local_irq_restore(flags); 333 local_irq_restore(flags);
329 } else { 334 } else {
330 /* drive refused ID */ 335 /* drive refused ID */
@@ -346,6 +351,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
346static int try_to_identify (ide_drive_t *drive, u8 cmd) 351static int try_to_identify (ide_drive_t *drive, u8 cmd)
347{ 352{
348 ide_hwif_t *hwif = HWIF(drive); 353 ide_hwif_t *hwif = HWIF(drive);
354 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
349 int retval; 355 int retval;
350 int autoprobe = 0; 356 int autoprobe = 0;
351 unsigned long cookie = 0; 357 unsigned long cookie = 0;
@@ -361,7 +367,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
361 autoprobe = 1; 367 autoprobe = 1;
362 cookie = probe_irq_on(); 368 cookie = probe_irq_on();
363 } 369 }
364 ide_set_irq(drive, autoprobe); 370 tp_ops->set_irq(hwif, autoprobe);
365 } 371 }
366 372
367 retval = actual_try_to_identify(drive, cmd); 373 retval = actual_try_to_identify(drive, cmd);
@@ -369,9 +375,9 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
369 if (autoprobe) { 375 if (autoprobe) {
370 int irq; 376 int irq;
371 377
372 ide_set_irq(drive, 0); 378 tp_ops->set_irq(hwif, 0);
373 /* clear drive IRQ */ 379 /* clear drive IRQ */
374 (void)ide_read_status(drive); 380 (void)tp_ops->read_status(hwif);
375 udelay(5); 381 udelay(5);
376 irq = probe_irq_off(cookie); 382 irq = probe_irq_off(cookie);
377 if (!hwif->irq) { 383 if (!hwif->irq) {
@@ -396,7 +402,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
396 402
397 do { 403 do {
398 msleep(50); 404 msleep(50);
399 stat = hwif->INB(hwif->io_ports.status_addr); 405 stat = hwif->tp_ops->read_status(hwif);
400 if ((stat & BUSY_STAT) == 0) 406 if ((stat & BUSY_STAT) == 0)
401 return 0; 407 return 0;
402 } while (time_before(jiffies, timeout)); 408 } while (time_before(jiffies, timeout));
@@ -404,6 +410,18 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
404 return 1; 410 return 1;
405} 411}
406 412
413static u8 ide_read_device(ide_drive_t *drive)
414{
415 ide_task_t task;
416
417 memset(&task, 0, sizeof(task));
418 task.tf_flags = IDE_TFLAG_IN_DEVICE;
419
420 drive->hwif->tp_ops->tf_read(drive, &task);
421
422 return task.tf.device;
423}
424
407/** 425/**
408 * do_probe - probe an IDE device 426 * do_probe - probe an IDE device
409 * @drive: drive to probe 427 * @drive: drive to probe
@@ -428,7 +446,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
428static int do_probe (ide_drive_t *drive, u8 cmd) 446static int do_probe (ide_drive_t *drive, u8 cmd)
429{ 447{
430 ide_hwif_t *hwif = HWIF(drive); 448 ide_hwif_t *hwif = HWIF(drive);
431 struct ide_io_ports *io_ports = &hwif->io_ports; 449 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
432 int rc; 450 int rc;
433 u8 stat; 451 u8 stat;
434 452
@@ -449,8 +467,8 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
449 msleep(50); 467 msleep(50);
450 SELECT_DRIVE(drive); 468 SELECT_DRIVE(drive);
451 msleep(50); 469 msleep(50);
452 if (hwif->INB(io_ports->device_addr) != drive->select.all && 470
453 !drive->present) { 471 if (ide_read_device(drive) != drive->select.all && !drive->present) {
454 if (drive->select.b.unit != 0) { 472 if (drive->select.b.unit != 0) {
455 /* exit with drive0 selected */ 473 /* exit with drive0 selected */
456 SELECT_DRIVE(&hwif->drives[0]); 474 SELECT_DRIVE(&hwif->drives[0]);
@@ -461,7 +479,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
461 return 3; 479 return 3;
462 } 480 }
463 481
464 stat = ide_read_status(drive); 482 stat = tp_ops->read_status(hwif);
465 483
466 if (OK_STAT(stat, READY_STAT, BUSY_STAT) || 484 if (OK_STAT(stat, READY_STAT, BUSY_STAT) ||
467 drive->present || cmd == WIN_PIDENTIFY) { 485 drive->present || cmd == WIN_PIDENTIFY) {
@@ -471,7 +489,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
471 rc = try_to_identify(drive,cmd); 489 rc = try_to_identify(drive,cmd);
472 } 490 }
473 491
474 stat = ide_read_status(drive); 492 stat = tp_ops->read_status(hwif);
475 493
476 if (stat == (BUSY_STAT | READY_STAT)) 494 if (stat == (BUSY_STAT | READY_STAT))
477 return 4; 495 return 4;
@@ -482,13 +500,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
482 msleep(50); 500 msleep(50);
483 SELECT_DRIVE(drive); 501 SELECT_DRIVE(drive);
484 msleep(50); 502 msleep(50);
485 hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr); 503 tp_ops->exec_command(hwif, WIN_SRST);
486 (void)ide_busy_sleep(hwif); 504 (void)ide_busy_sleep(hwif);
487 rc = try_to_identify(drive, cmd); 505 rc = try_to_identify(drive, cmd);
488 } 506 }
489 507
490 /* ensure drive IRQ is clear */ 508 /* ensure drive IRQ is clear */
491 stat = ide_read_status(drive); 509 stat = tp_ops->read_status(hwif);
492 510
493 if (rc == 1) 511 if (rc == 1)
494 printk(KERN_ERR "%s: no response (status = 0x%02x)\n", 512 printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
@@ -502,7 +520,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
502 SELECT_DRIVE(&hwif->drives[0]); 520 SELECT_DRIVE(&hwif->drives[0]);
503 msleep(50); 521 msleep(50);
504 /* ensure drive irq is clear */ 522 /* ensure drive irq is clear */
505 (void)ide_read_status(drive); 523 (void)tp_ops->read_status(hwif);
506 } 524 }
507 return rc; 525 return rc;
508} 526}
@@ -513,12 +531,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
513static void enable_nest (ide_drive_t *drive) 531static void enable_nest (ide_drive_t *drive)
514{ 532{
515 ide_hwif_t *hwif = HWIF(drive); 533 ide_hwif_t *hwif = HWIF(drive);
534 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
516 u8 stat; 535 u8 stat;
517 536
518 printk("%s: enabling %s -- ", hwif->name, drive->id->model); 537 printk("%s: enabling %s -- ", hwif->name, drive->id->model);
519 SELECT_DRIVE(drive); 538 SELECT_DRIVE(drive);
520 msleep(50); 539 msleep(50);
521 hwif->OUTBSYNC(hwif, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr); 540 tp_ops->exec_command(hwif, EXABYTE_ENABLE_NEST);
522 541
523 if (ide_busy_sleep(hwif)) { 542 if (ide_busy_sleep(hwif)) {
524 printk(KERN_CONT "failed (timeout)\n"); 543 printk(KERN_CONT "failed (timeout)\n");
@@ -527,7 +546,7 @@ static void enable_nest (ide_drive_t *drive)
527 546
528 msleep(50); 547 msleep(50);
529 548
530 stat = ide_read_status(drive); 549 stat = tp_ops->read_status(hwif);
531 550
532 if (!OK_STAT(stat, 0, BAD_STAT)) 551 if (!OK_STAT(stat, 0, BAD_STAT))
533 printk(KERN_CONT "failed (status = 0x%02x)\n", stat); 552 printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
@@ -619,7 +638,7 @@ static inline u8 probe_for_drive (ide_drive_t *drive)
619 return drive->present; 638 return drive->present;
620} 639}
621 640
622static void hwif_release_dev (struct device *dev) 641static void hwif_release_dev(struct device *dev)
623{ 642{
624 ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev); 643 ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
625 644
@@ -709,7 +728,7 @@ static int ide_port_wait_ready(ide_hwif_t *hwif)
709 /* Ignore disks that we will not probe for later. */ 728 /* Ignore disks that we will not probe for later. */
710 if (!drive->noprobe || drive->present) { 729 if (!drive->noprobe || drive->present) {
711 SELECT_DRIVE(drive); 730 SELECT_DRIVE(drive);
712 ide_set_irq(drive, 1); 731 hwif->tp_ops->set_irq(hwif, 1);
713 mdelay(2); 732 mdelay(2);
714 rc = ide_wait_not_busy(hwif, 35000); 733 rc = ide_wait_not_busy(hwif, 35000);
715 if (rc) 734 if (rc)
@@ -971,6 +990,45 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
971 mutex_unlock(&ide_cfg_mtx); 990 mutex_unlock(&ide_cfg_mtx);
972} 991}
973 992
993static ide_hwif_t *ide_ports[MAX_HWIFS];
994
995void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
996{
997 ide_hwgroup_t *hwgroup = hwif->hwgroup;
998
999 ide_ports[hwif->index] = NULL;
1000
1001 spin_lock_irq(&ide_lock);
1002 /*
1003 * Remove us from the hwgroup, and free
1004 * the hwgroup if we were the only member
1005 */
1006 if (hwif->next == hwif) {
1007 BUG_ON(hwgroup->hwif != hwif);
1008 kfree(hwgroup);
1009 } else {
1010 /* There is another interface in hwgroup.
1011 * Unlink us, and set hwgroup->drive and ->hwif to
1012 * something sane.
1013 */
1014 ide_hwif_t *g = hwgroup->hwif;
1015
1016 while (g->next != hwif)
1017 g = g->next;
1018 g->next = hwif->next;
1019 if (hwgroup->hwif == hwif) {
1020 /* Chose a random hwif for hwgroup->hwif.
1021 * It's guaranteed that there are no drives
1022 * left in the hwgroup.
1023 */
1024 BUG_ON(hwgroup->drive != NULL);
1025 hwgroup->hwif = g;
1026 }
1027 BUG_ON(hwgroup->hwif == hwif);
1028 }
1029 spin_unlock_irq(&ide_lock);
1030}
1031
974/* 1032/*
975 * This routine sets up the irq for an ide interface, and creates a new 1033 * This routine sets up the irq for an ide interface, and creates a new
976 * hwgroup for the irq/hwif if none was previously assigned. 1034 * hwgroup for the irq/hwif if none was previously assigned.
@@ -998,8 +1056,9 @@ static int init_irq (ide_hwif_t *hwif)
998 * Group up with any other hwifs that share our irq(s). 1056 * Group up with any other hwifs that share our irq(s).
999 */ 1057 */
1000 for (index = 0; index < MAX_HWIFS; index++) { 1058 for (index = 0; index < MAX_HWIFS; index++) {
1001 ide_hwif_t *h = &ide_hwifs[index]; 1059 ide_hwif_t *h = ide_ports[index];
1002 if (h->hwgroup) { /* scan only initialized hwif's */ 1060
1061 if (h && h->hwgroup) { /* scan only initialized ports */
1003 if (hwif->irq == h->irq) { 1062 if (hwif->irq == h->irq) {
1004 hwif->sharing_irq = h->sharing_irq = 1; 1063 hwif->sharing_irq = h->sharing_irq = 1;
1005 if (hwif->chipset != ide_pci || 1064 if (hwif->chipset != ide_pci ||
@@ -1053,6 +1112,8 @@ static int init_irq (ide_hwif_t *hwif)
1053 hwgroup->timer.data = (unsigned long) hwgroup; 1112 hwgroup->timer.data = (unsigned long) hwgroup;
1054 } 1113 }
1055 1114
1115 ide_ports[hwif->index] = hwif;
1116
1056 /* 1117 /*
1057 * Allocate the irq, if not already obtained for another hwif 1118 * Allocate the irq, if not already obtained for another hwif
1058 */ 1119 */
@@ -1066,8 +1127,7 @@ static int init_irq (ide_hwif_t *hwif)
1066 sa = IRQF_SHARED; 1127 sa = IRQF_SHARED;
1067 1128
1068 if (io_ports->ctl_addr) 1129 if (io_ports->ctl_addr)
1069 /* clear nIEN */ 1130 hwif->tp_ops->set_irq(hwif, 1);
1070 hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS, io_ports->ctl_addr);
1071 1131
1072 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) 1132 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
1073 goto out_unlink; 1133 goto out_unlink;
@@ -1345,6 +1405,9 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1345 hwif->host_flags |= d->host_flags; 1405 hwif->host_flags |= d->host_flags;
1346 hwif->pio_mask = d->pio_mask; 1406 hwif->pio_mask = d->pio_mask;
1347 1407
1408 if (d->tp_ops)
1409 hwif->tp_ops = d->tp_ops;
1410
1348 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */ 1411 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
1349 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0) 1412 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
1350 hwif->port_ops = d->port_ops; 1413 hwif->port_ops = d->port_ops;
@@ -1363,6 +1426,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1363 1426
1364 if (rc < 0) { 1427 if (rc < 0) {
1365 printk(KERN_INFO "%s: DMA disabled\n", hwif->name); 1428 printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
1429 hwif->dma_base = 0;
1366 hwif->swdma_mask = 0; 1430 hwif->swdma_mask = 0;
1367 hwif->mwdma_mask = 0; 1431 hwif->mwdma_mask = 0;
1368 hwif->ultra_mask = 0; 1432 hwif->ultra_mask = 0;
@@ -1446,18 +1510,20 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif)
1446 return rc; 1510 return rc;
1447} 1511}
1448 1512
1513static unsigned int ide_indexes;
1514
1449/** 1515/**
1450 * ide_find_port_slot - find free ide_hwifs[] slot 1516 * ide_find_port_slot - find free port slot
1451 * @d: IDE port info 1517 * @d: IDE port info
1452 * 1518 *
1453 * Return the new hwif. If we are out of free slots return NULL. 1519 * Return the new port slot index or -ENOENT if we are out of free slots.
1454 */ 1520 */
1455 1521
1456ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d) 1522static int ide_find_port_slot(const struct ide_port_info *d)
1457{ 1523{
1458 ide_hwif_t *hwif; 1524 int idx = -ENOENT;
1459 int i;
1460 u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1; 1525 u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
 1526 u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
1461 1527
1462 /* 1528 /*
1463 * Claim an unassigned slot. 1529 * Claim an unassigned slot.
@@ -1469,51 +1535,106 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
1469 * Unless there is a bootable card that does not use the standard 1535 * Unless there is a bootable card that does not use the standard
1470 * ports 0x1f0/0x170 (the ide0/ide1 defaults). 1536 * ports 0x1f0/0x170 (the ide0/ide1 defaults).
1471 */ 1537 */
1472 if (bootable) { 1538 mutex_lock(&ide_cfg_mtx);
1473 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0; 1539 if (MAX_HWIFS == 1) {
1474 1540 if (ide_indexes == 0 && i == 0)
1475 for (; i < MAX_HWIFS; i++) { 1541 idx = 1;
1476 hwif = &ide_hwifs[i];
1477 if (hwif->chipset == ide_unknown)
1478 goto out_found;
1479 }
1480 } else { 1542 } else {
1481 for (i = 2; i < MAX_HWIFS; i++) { 1543 if (bootable) {
1482 hwif = &ide_hwifs[i]; 1544 if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
1483 if (hwif->chipset == ide_unknown) 1545 idx = ffz(ide_indexes | i);
1484 goto out_found; 1546 } else {
1547 if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
1548 idx = ffz(ide_indexes | 3);
1549 else if ((ide_indexes & 3) != 3)
1550 idx = ffz(ide_indexes);
1485 } 1551 }
1486 for (i = 0; i < 2 && i < MAX_HWIFS; i++) { 1552 }
1487 hwif = &ide_hwifs[i]; 1553 if (idx >= 0)
1488 if (hwif->chipset == ide_unknown) 1554 ide_indexes |= (1 << idx);
1489 goto out_found; 1555 mutex_unlock(&ide_cfg_mtx);
1556
1557 return idx;
1558}
1559
1560static void ide_free_port_slot(int idx)
1561{
1562 mutex_lock(&ide_cfg_mtx);
1563 ide_indexes &= ~(1 << idx);
1564 mutex_unlock(&ide_cfg_mtx);
1565}
1566
1567struct ide_host *ide_host_alloc_all(const struct ide_port_info *d,
1568 hw_regs_t **hws)
1569{
1570 struct ide_host *host;
1571 int i;
1572
1573 host = kzalloc(sizeof(*host), GFP_KERNEL);
1574 if (host == NULL)
1575 return NULL;
1576
1577 for (i = 0; i < MAX_HWIFS; i++) {
1578 ide_hwif_t *hwif;
1579 int idx;
1580
1581 if (hws[i] == NULL)
1582 continue;
1583
1584 hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
1585 if (hwif == NULL)
1586 continue;
1587
1588 idx = ide_find_port_slot(d);
1589 if (idx < 0) {
1590 printk(KERN_ERR "%s: no free slot for interface\n",
1591 d ? d->name : "ide");
1592 kfree(hwif);
1593 continue;
1490 } 1594 }
1595
1596 ide_init_port_data(hwif, idx);
1597
1598 host->ports[i] = hwif;
1599 host->n_ports++;
1491 } 1600 }
1492 1601
1493 printk(KERN_ERR "%s: no free slot for interface\n", 1602 if (host->n_ports == 0) {
1494 d ? d->name : "ide"); 1603 kfree(host);
1604 return NULL;
1605 }
1495 1606
1496 return NULL; 1607 return host;
1608}
1609EXPORT_SYMBOL_GPL(ide_host_alloc_all);
1610
1611struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
1612{
1613 hw_regs_t *hws_all[MAX_HWIFS];
1614 int i;
1497 1615
1498out_found: 1616 for (i = 0; i < MAX_HWIFS; i++)
1499 ide_init_port_data(hwif, i); 1617 hws_all[i] = (i < 4) ? hws[i] : NULL;
1500 return hwif; 1618
1619 return ide_host_alloc_all(d, hws_all);
1501} 1620}
1502EXPORT_SYMBOL_GPL(ide_find_port_slot); 1621EXPORT_SYMBOL_GPL(ide_host_alloc);
1503 1622
1504int ide_device_add_all(u8 *idx, const struct ide_port_info *d) 1623int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1624 hw_regs_t **hws)
1505{ 1625{
1506 ide_hwif_t *hwif, *mate = NULL; 1626 ide_hwif_t *hwif, *mate = NULL;
1507 int i, rc = 0; 1627 int i, j = 0;
1508 1628
1509 for (i = 0; i < MAX_HWIFS; i++) { 1629 for (i = 0; i < MAX_HWIFS; i++) {
1510 if (idx[i] == 0xff) { 1630 hwif = host->ports[i];
1631
1632 if (hwif == NULL) {
1511 mate = NULL; 1633 mate = NULL;
1512 continue; 1634 continue;
1513 } 1635 }
1514 1636
1515 hwif = &ide_hwifs[idx[i]]; 1637 ide_init_port_hw(hwif, hws[i]);
1516
1517 ide_port_apply_params(hwif); 1638 ide_port_apply_params(hwif);
1518 1639
1519 if (d == NULL) { 1640 if (d == NULL) {
@@ -1534,10 +1655,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1534 } 1655 }
1535 1656
1536 for (i = 0; i < MAX_HWIFS; i++) { 1657 for (i = 0; i < MAX_HWIFS; i++) {
1537 if (idx[i] == 0xff) 1658 hwif = host->ports[i];
1538 continue;
1539 1659
1540 hwif = &ide_hwifs[idx[i]]; 1660 if (hwif == NULL)
1661 continue;
1541 1662
1542 if (ide_probe_port(hwif) == 0) 1663 if (ide_probe_port(hwif) == 0)
1543 hwif->present = 1; 1664 hwif->present = 1;
@@ -1551,19 +1672,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1551 } 1672 }
1552 1673
1553 for (i = 0; i < MAX_HWIFS; i++) { 1674 for (i = 0; i < MAX_HWIFS; i++) {
1554 if (idx[i] == 0xff) 1675 hwif = host->ports[i];
1555 continue;
1556 1676
1557 hwif = &ide_hwifs[idx[i]]; 1677 if (hwif == NULL)
1678 continue;
1558 1679
1559 if (hwif_init(hwif) == 0) { 1680 if (hwif_init(hwif) == 0) {
1560 printk(KERN_INFO "%s: failed to initialize IDE " 1681 printk(KERN_INFO "%s: failed to initialize IDE "
1561 "interface\n", hwif->name); 1682 "interface\n", hwif->name);
1562 hwif->present = 0; 1683 hwif->present = 0;
1563 rc = -1;
1564 continue; 1684 continue;
1565 } 1685 }
1566 1686
1687 j++;
1688
1567 if (hwif->present) 1689 if (hwif->present)
1568 ide_port_setup_devices(hwif); 1690 ide_port_setup_devices(hwif);
1569 1691
@@ -1574,10 +1696,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1574 } 1696 }
1575 1697
1576 for (i = 0; i < MAX_HWIFS; i++) { 1698 for (i = 0; i < MAX_HWIFS; i++) {
1577 if (idx[i] == 0xff) 1699 hwif = host->ports[i];
1578 continue;
1579 1700
1580 hwif = &ide_hwifs[idx[i]]; 1701 if (hwif == NULL)
1702 continue;
1581 1703
1582 if (hwif->chipset == ide_unknown) 1704 if (hwif->chipset == ide_unknown)
1583 hwif->chipset = ide_generic; 1705 hwif->chipset = ide_generic;
@@ -1587,10 +1709,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1587 } 1709 }
1588 1710
1589 for (i = 0; i < MAX_HWIFS; i++) { 1711 for (i = 0; i < MAX_HWIFS; i++) {
1590 if (idx[i] == 0xff) 1712 hwif = host->ports[i];
1591 continue;
1592 1713
1593 hwif = &ide_hwifs[idx[i]]; 1714 if (hwif == NULL)
1715 continue;
1594 1716
1595 ide_sysfs_register_port(hwif); 1717 ide_sysfs_register_port(hwif);
1596 ide_proc_register_port(hwif); 1718 ide_proc_register_port(hwif);
@@ -1599,21 +1721,64 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1599 ide_proc_port_register_devices(hwif); 1721 ide_proc_port_register_devices(hwif);
1600 } 1722 }
1601 1723
1602 return rc; 1724 return j ? 0 : -1;
1603} 1725}
1604EXPORT_SYMBOL_GPL(ide_device_add_all); 1726EXPORT_SYMBOL_GPL(ide_host_register);
1605 1727
1606int ide_device_add(u8 idx[4], const struct ide_port_info *d) 1728int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws,
1729 struct ide_host **hostp)
1607{ 1730{
1608 u8 idx_all[MAX_HWIFS]; 1731 struct ide_host *host;
1732 int rc;
1733
1734 host = ide_host_alloc(d, hws);
1735 if (host == NULL)
1736 return -ENOMEM;
1737
1738 rc = ide_host_register(host, d, hws);
1739 if (rc) {
1740 ide_host_free(host);
1741 return rc;
1742 }
1743
1744 if (hostp)
1745 *hostp = host;
1746
1747 return 0;
1748}
1749EXPORT_SYMBOL_GPL(ide_host_add);
1750
1751void ide_host_free(struct ide_host *host)
1752{
1753 ide_hwif_t *hwif;
1609 int i; 1754 int i;
1610 1755
1611 for (i = 0; i < MAX_HWIFS; i++) 1756 for (i = 0; i < MAX_HWIFS; i++) {
1612 idx_all[i] = (i < 4) ? idx[i] : 0xff; 1757 hwif = host->ports[i];
1613 1758
1614 return ide_device_add_all(idx_all, d); 1759 if (hwif == NULL)
1760 continue;
1761
1762 ide_free_port_slot(hwif->index);
1763 kfree(hwif);
1764 }
1765
1766 kfree(host);
1615} 1767}
1616EXPORT_SYMBOL_GPL(ide_device_add); 1768EXPORT_SYMBOL_GPL(ide_host_free);
1769
1770void ide_host_remove(struct ide_host *host)
1771{
1772 int i;
1773
1774 for (i = 0; i < MAX_HWIFS; i++) {
1775 if (host->ports[i])
1776 ide_unregister(host->ports[i]);
1777 }
1778
1779 ide_host_free(host);
1780}
1781EXPORT_SYMBOL_GPL(ide_host_remove);
1617 1782
1618void ide_port_scan(ide_hwif_t *hwif) 1783void ide_port_scan(ide_hwif_t *hwif)
1619{ 1784{
@@ -1634,11 +1799,10 @@ void ide_port_scan(ide_hwif_t *hwif)
1634} 1799}
1635EXPORT_SYMBOL_GPL(ide_port_scan); 1800EXPORT_SYMBOL_GPL(ide_port_scan);
1636 1801
1637static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no, 1802static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
1638 const struct ide_port_info *d, 1803 u8 port_no, const struct ide_port_info *d,
1639 unsigned long config) 1804 unsigned long config)
1640{ 1805{
1641 ide_hwif_t *hwif;
1642 unsigned long base, ctl; 1806 unsigned long base, ctl;
1643 int irq; 1807 int irq;
1644 1808
@@ -1668,33 +1832,25 @@ static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
1668 ide_std_init_ports(hw, base, ctl); 1832 ide_std_init_ports(hw, base, ctl);
1669 hw->irq = irq; 1833 hw->irq = irq;
1670 hw->chipset = d->chipset; 1834 hw->chipset = d->chipset;
1835 hw->config = config;
1671 1836
1672 hwif = ide_find_port_slot(d); 1837 hws[port_no] = hw;
1673 if (hwif) {
1674 ide_init_port_hw(hwif, hw);
1675 if (config)
1676 hwif->config_data = config;
1677 idx[port_no] = hwif->index;
1678 }
1679} 1838}
1680 1839
1681int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) 1840int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
1682{ 1841{
1683 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 1842 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
1684 hw_regs_t hw[2];
1685 1843
1686 memset(&hw, 0, sizeof(hw)); 1844 memset(&hw, 0, sizeof(hw));
1687 1845
1688 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0) 1846 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
1689 ide_legacy_init_one(idx, &hw[0], 0, d, config); 1847 ide_legacy_init_one(hws, &hw[0], 0, d, config);
1690 ide_legacy_init_one(idx, &hw[1], 1, d, config); 1848 ide_legacy_init_one(hws, &hw[1], 1, d, config);
1691 1849
1692 if (idx[0] == 0xff && idx[1] == 0xff && 1850 if (hws[0] == NULL && hws[1] == NULL &&
1693 (d->host_flags & IDE_HFLAG_SINGLE)) 1851 (d->host_flags & IDE_HFLAG_SINGLE))
1694 return -ENOENT; 1852 return -ENOENT;
1695 1853
1696 ide_device_add(idx, d); 1854 return ide_host_add(d, hws, NULL);
1697
1698 return 0;
1699} 1855}
1700EXPORT_SYMBOL_GPL(ide_legacy_device_add); 1856EXPORT_SYMBOL_GPL(ide_legacy_device_add);
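The port-slot bookkeeping above replaces scanning a static ide_hwifs[] array for an ide_unknown entry with a bitmask: ide_find_port_slot() takes the first zero bit of ide_indexes via ffz(), masking bits 0-1 for non-bootable hosts so the legacy ide0/ide1 slots are used only as a last resort, and ide_free_port_slot() clears the bit again. The stand-alone sketch below mirrors that arithmetic in plain C; it is not kernel code, and ffz_bit(), find_slot(), free_slot() and MAX_SLOTS are made-up stand-ins:

	#include <stdio.h>

	#define MAX_SLOTS 4			/* stands in for MAX_HWIFS */
	static unsigned int indexes;		/* stands in for ide_indexes */

	static int ffz_bit(unsigned int w)	/* first zero bit, like ffz() */
	{
		return __builtin_ctz(~w);
	}

	static int find_slot(void)		/* non-bootable case of ide_find_port_slot() */
	{
		int idx = -1;

		if ((indexes | 3) != (1u << MAX_SLOTS) - 1)	/* a slot >= 2 is free */
			idx = ffz_bit(indexes | 3);
		else if ((indexes & 3) != 3)			/* fall back to slots 0/1 */
			idx = ffz_bit(indexes);
		if (idx >= 0)
			indexes |= 1u << idx;
		return idx;
	}

	static void free_slot(int idx)
	{
		indexes &= ~(1u << idx);
	}

	int main(void)
	{
		for (int i = 0; i < 5; i++)
			printf("allocated slot %d\n", find_slot());	/* 2, 3, 0, 1, -1 */
		free_slot(2);
		printf("reused slot %d\n", find_slot());		/* 2 again */
		return 0;
	}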
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 8af88bf0969b..151c91e933da 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -345,7 +345,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
345 ide_task_t task; 345 ide_task_t task;
346 int err; 346 int err;
347 347
348 if (arg < 0 || arg > 70) 348 if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
349 return -EINVAL; 349 return -EINVAL;
350 350
351 memset(&task, 0, sizeof(task)); 351 memset(&task, 0, sizeof(task));
@@ -357,7 +357,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
357 357
358 err = ide_no_data_taskfile(drive, &task); 358 err = ide_no_data_taskfile(drive, &task);
359 359
360 if (!err && arg) { 360 if (!err) {
361 ide_set_xfer_rate(drive, (u8) arg); 361 ide_set_xfer_rate(drive, (u8) arg);
362 ide_driveid_update(drive); 362 ide_driveid_update(drive);
363 } 363 }
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 353dd11b9283..6962ca4891a1 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -195,23 +195,6 @@ enum {
195#define IDETAPE_BLOCK_DESCRIPTOR 0 195#define IDETAPE_BLOCK_DESCRIPTOR 0
196#define IDETAPE_CAPABILITIES_PAGE 0x2a 196#define IDETAPE_CAPABILITIES_PAGE 0x2a
197 197
198/* Tape flag bits values. */
199enum {
200 IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
201 /* 0 When the tape position is unknown */
202 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
203 /* Device already opened */
204 IDETAPE_FLAG_BUSY = (1 << 2),
205 /* Attempt to auto-detect the current user block size */
206 IDETAPE_FLAG_DETECT_BS = (1 << 3),
207 /* Currently on a filemark */
208 IDETAPE_FLAG_FILEMARK = (1 << 4),
209 /* DRQ interrupt device */
210 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
211 /* 0 = no tape is loaded, so we don't rewind after ejecting */
212 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
213};
214
215/* 198/*
216 * Most of our global data which we need to save even as we leave the driver due 199 * Most of our global data which we need to save even as we leave the driver due
217 * to an interrupt or a timer event is stored in the struct defined below. 200 * to an interrupt or a timer event is stored in the struct defined below.
@@ -312,8 +295,6 @@ typedef struct ide_tape_obj {
312 /* Wasted space in each stage */ 295 /* Wasted space in each stage */
313 int excess_bh_size; 296 int excess_bh_size;
314 297
315 /* Status/Action flags: long for set_bit */
316 unsigned long flags;
317 /* protects the ide-tape queue */ 298 /* protects the ide-tape queue */
318 spinlock_t lock; 299 spinlock_t lock;
319 300
@@ -398,7 +379,7 @@ static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
398 count = min( 379 count = min(
399 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)), 380 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
400 bcount); 381 bcount);
401 drive->hwif->input_data(drive, NULL, bh->b_data + 382 drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
402 atomic_read(&bh->b_count), count); 383 atomic_read(&bh->b_count), count);
403 bcount -= count; 384 bcount -= count;
404 atomic_add(count, &bh->b_count); 385 atomic_add(count, &bh->b_count);
@@ -424,7 +405,7 @@ static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
424 return; 405 return;
425 } 406 }
426 count = min((unsigned int)pc->b_count, (unsigned int)bcount); 407 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
427 drive->hwif->output_data(drive, NULL, pc->b_data, count); 408 drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
428 bcount -= count; 409 bcount -= count;
429 pc->b_data += count; 410 pc->b_data += count;
430 pc->b_count -= count; 411 pc->b_count -= count;
@@ -585,7 +566,6 @@ static void ide_tape_kfree_buffer(idetape_tape_t *tape)
585 bh = bh->b_reqnext; 566 bh = bh->b_reqnext;
586 kfree(prev_bh); 567 kfree(prev_bh);
587 } 568 }
588 kfree(tape->merge_bh);
589} 569}
590 570
591static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) 571static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
@@ -665,7 +645,7 @@ static void ide_tape_callback(ide_drive_t *drive)
665 if (readpos[0] & 0x4) { 645 if (readpos[0] & 0x4) {
666 printk(KERN_INFO "ide-tape: Block location is unknown" 646 printk(KERN_INFO "ide-tape: Block location is unknown"
667 "to the tape\n"); 647 "to the tape\n");
668 clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags); 648 clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
669 uptodate = 0; 649 uptodate = 0;
670 } else { 650 } else {
671 debug_log(DBG_SENSE, "Block Location - %u\n", 651 debug_log(DBG_SENSE, "Block Location - %u\n",
@@ -673,7 +653,7 @@ static void ide_tape_callback(ide_drive_t *drive)
673 653
674 tape->partition = readpos[1]; 654 tape->partition = readpos[1];
675 tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]); 655 tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]);
676 set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags); 656 set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
677 } 657 }
678 } 658 }
679 659
@@ -690,7 +670,6 @@ static void idetape_init_pc(struct ide_atapi_pc *pc)
690 pc->buf_size = IDETAPE_PC_BUFFER_SIZE; 670 pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
691 pc->bh = NULL; 671 pc->bh = NULL;
692 pc->b_data = NULL; 672 pc->b_data = NULL;
693 pc->callback = ide_tape_callback;
694} 673}
695 674
696static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc) 675static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -705,7 +684,7 @@ static void idetape_init_rq(struct request *rq, u8 cmd)
705{ 684{
706 blk_rq_init(NULL, rq); 685 blk_rq_init(NULL, rq);
707 rq->cmd_type = REQ_TYPE_SPECIAL; 686 rq->cmd_type = REQ_TYPE_SPECIAL;
708 rq->cmd[0] = cmd; 687 rq->cmd[13] = cmd;
709} 688}
710 689
711/* 690/*
@@ -732,6 +711,7 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
732 rq->cmd_flags |= REQ_PREEMPT; 711 rq->cmd_flags |= REQ_PREEMPT;
733 rq->buffer = (char *) pc; 712 rq->buffer = (char *) pc;
734 rq->rq_disk = tape->disk; 713 rq->rq_disk = tape->disk;
714 memcpy(rq->cmd, pc->c, 12);
735 ide_do_drive_cmd(drive, rq); 715 ide_do_drive_cmd(drive, rq);
736} 716}
737 717
@@ -742,7 +722,6 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
742 */ 722 */
743static void idetape_retry_pc(ide_drive_t *drive) 723static void idetape_retry_pc(ide_drive_t *drive)
744{ 724{
745 idetape_tape_t *tape = drive->driver_data;
746 struct ide_atapi_pc *pc; 725 struct ide_atapi_pc *pc;
747 struct request *rq; 726 struct request *rq;
748 727
@@ -750,7 +729,7 @@ static void idetape_retry_pc(ide_drive_t *drive)
750 pc = idetape_next_pc_storage(drive); 729 pc = idetape_next_pc_storage(drive);
751 rq = idetape_next_rq_storage(drive); 730 rq = idetape_next_rq_storage(drive);
752 idetape_create_request_sense_cmd(pc); 731 idetape_create_request_sense_cmd(pc);
753 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 732 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
754 idetape_queue_pc_head(drive, pc, rq); 733 idetape_queue_pc_head(drive, pc, rq);
755} 734}
756 735
@@ -887,7 +866,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
887 pc->error = IDETAPE_ERROR_GENERAL; 866 pc->error = IDETAPE_ERROR_GENERAL;
888 } 867 }
889 tape->failed_pc = NULL; 868 tape->failed_pc = NULL;
890 pc->callback(drive); 869 drive->pc_callback(drive);
891 return ide_stopped; 870 return ide_stopped;
892 } 871 }
893 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]); 872 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -927,11 +906,12 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
927 906
928static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) 907static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
929{ 908{
909 ide_hwif_t *hwif = drive->hwif;
930 idetape_tape_t *tape = drive->driver_data; 910 idetape_tape_t *tape = drive->driver_data;
931 struct ide_atapi_pc *pc = tape->pc; 911 struct ide_atapi_pc *pc = tape->pc;
932 u8 stat; 912 u8 stat;
933 913
934 stat = ide_read_status(drive); 914 stat = hwif->tp_ops->read_status(hwif);
935 915
936 if (stat & SEEK_STAT) { 916 if (stat & SEEK_STAT) {
937 if (stat & ERR_STAT) { 917 if (stat & ERR_STAT) {
@@ -948,14 +928,17 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
948 pc->error = IDETAPE_ERROR_GENERAL; 928 pc->error = IDETAPE_ERROR_GENERAL;
949 tape->failed_pc = NULL; 929 tape->failed_pc = NULL;
950 } 930 }
951 pc->callback(drive); 931 drive->pc_callback(drive);
952 return ide_stopped; 932 return ide_stopped;
953} 933}
954 934
955static void ide_tape_create_rw_cmd(idetape_tape_t *tape, 935static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
956 struct ide_atapi_pc *pc, unsigned int length, 936 struct ide_atapi_pc *pc, struct request *rq,
957 struct idetape_bh *bh, u8 opcode) 937 u8 opcode)
958{ 938{
939 struct idetape_bh *bh = (struct idetape_bh *)rq->special;
940 unsigned int length = rq->current_nr_sectors;
941
959 idetape_init_pc(pc); 942 idetape_init_pc(pc);
960 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); 943 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
961 pc->c[1] = 1; 944 pc->c[1] = 1;
@@ -975,11 +958,14 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
975 pc->b_data = bh->b_data; 958 pc->b_data = bh->b_data;
976 pc->b_count = atomic_read(&bh->b_count); 959 pc->b_count = atomic_read(&bh->b_count);
977 } 960 }
961
962 memcpy(rq->cmd, pc->c, 12);
978} 963}
979 964
980static ide_startstop_t idetape_do_request(ide_drive_t *drive, 965static ide_startstop_t idetape_do_request(ide_drive_t *drive,
981 struct request *rq, sector_t block) 966 struct request *rq, sector_t block)
982{ 967{
968 ide_hwif_t *hwif = drive->hwif;
983 idetape_tape_t *tape = drive->driver_data; 969 idetape_tape_t *tape = drive->driver_data;
984 struct ide_atapi_pc *pc = NULL; 970 struct ide_atapi_pc *pc = NULL;
985 struct request *postponed_rq = tape->postponed_rq; 971 struct request *postponed_rq = tape->postponed_rq;
@@ -1017,17 +1003,17 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1017 * If the tape is still busy, postpone our request and service 1003 * If the tape is still busy, postpone our request and service
1018 * the other device meanwhile. 1004 * the other device meanwhile.
1019 */ 1005 */
1020 stat = ide_read_status(drive); 1006 stat = hwif->tp_ops->read_status(hwif);
1021 1007
1022 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2)) 1008 if (!drive->dsc_overlap && !(rq->cmd[13] & REQ_IDETAPE_PC2))
1023 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 1009 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
1024 1010
1025 if (drive->post_reset == 1) { 1011 if (drive->post_reset == 1) {
1026 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 1012 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
1027 drive->post_reset = 0; 1013 drive->post_reset = 0;
1028 } 1014 }
1029 1015
1030 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) && 1016 if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) &&
1031 (stat & SEEK_STAT) == 0) { 1017 (stat & SEEK_STAT) == 0) {
1032 if (postponed_rq == NULL) { 1018 if (postponed_rq == NULL) {
1033 tape->dsc_polling_start = jiffies; 1019 tape->dsc_polling_start = jiffies;
@@ -1036,7 +1022,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1036 } else if (time_after(jiffies, tape->dsc_timeout)) { 1022 } else if (time_after(jiffies, tape->dsc_timeout)) {
1037 printk(KERN_ERR "ide-tape: %s: DSC timeout\n", 1023 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1038 tape->name); 1024 tape->name);
1039 if (rq->cmd[0] & REQ_IDETAPE_PC2) { 1025 if (rq->cmd[13] & REQ_IDETAPE_PC2) {
1040 idetape_media_access_finished(drive); 1026 idetape_media_access_finished(drive);
1041 return ide_stopped; 1027 return ide_stopped;
1042 } else { 1028 } else {
@@ -1049,35 +1035,29 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1049 idetape_postpone_request(drive); 1035 idetape_postpone_request(drive);
1050 return ide_stopped; 1036 return ide_stopped;
1051 } 1037 }
1052 if (rq->cmd[0] & REQ_IDETAPE_READ) { 1038 if (rq->cmd[13] & REQ_IDETAPE_READ) {
1053 pc = idetape_next_pc_storage(drive); 1039 pc = idetape_next_pc_storage(drive);
1054 ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors, 1040 ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
1055 (struct idetape_bh *)rq->special,
1056 READ_6);
1057 goto out; 1041 goto out;
1058 } 1042 }
1059 if (rq->cmd[0] & REQ_IDETAPE_WRITE) { 1043 if (rq->cmd[13] & REQ_IDETAPE_WRITE) {
1060 pc = idetape_next_pc_storage(drive); 1044 pc = idetape_next_pc_storage(drive);
1061 ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors, 1045 ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
1062 (struct idetape_bh *)rq->special,
1063 WRITE_6);
1064 goto out; 1046 goto out;
1065 } 1047 }
1066 if (rq->cmd[0] & REQ_IDETAPE_PC1) { 1048 if (rq->cmd[13] & REQ_IDETAPE_PC1) {
1067 pc = (struct ide_atapi_pc *) rq->buffer; 1049 pc = (struct ide_atapi_pc *) rq->buffer;
1068 rq->cmd[0] &= ~(REQ_IDETAPE_PC1); 1050 rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
1069 rq->cmd[0] |= REQ_IDETAPE_PC2; 1051 rq->cmd[13] |= REQ_IDETAPE_PC2;
1070 goto out; 1052 goto out;
1071 } 1053 }
1072 if (rq->cmd[0] & REQ_IDETAPE_PC2) { 1054 if (rq->cmd[13] & REQ_IDETAPE_PC2) {
1073 idetape_media_access_finished(drive); 1055 idetape_media_access_finished(drive);
1074 return ide_stopped; 1056 return ide_stopped;
1075 } 1057 }
1076 BUG(); 1058 BUG();
1077out:
1078 if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags))
1079 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
1080 1059
1060out:
1081 return idetape_issue_pc(drive, pc); 1061 return idetape_issue_pc(drive, pc);
1082} 1062}
1083 1063
@@ -1281,8 +1261,9 @@ static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1281 1261
1282 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 1262 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1283 rq->cmd_type = REQ_TYPE_SPECIAL; 1263 rq->cmd_type = REQ_TYPE_SPECIAL;
1284 rq->cmd[0] = REQ_IDETAPE_PC1; 1264 rq->cmd[13] = REQ_IDETAPE_PC1;
1285 rq->buffer = (char *)pc; 1265 rq->buffer = (char *)pc;
1266 memcpy(rq->cmd, pc->c, 12);
1286 error = blk_execute_rq(drive->queue, tape->disk, rq, 0); 1267 error = blk_execute_rq(drive->queue, tape->disk, rq, 0);
1287 blk_put_request(rq); 1268 blk_put_request(rq);
1288 return error; 1269 return error;
@@ -1304,7 +1285,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1304 int load_attempted = 0; 1285 int load_attempted = 0;
1305 1286
1306 /* Wait for the tape to become ready */ 1287 /* Wait for the tape to become ready */
1307 set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); 1288 set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
1308 timeout += jiffies; 1289 timeout += jiffies;
1309 while (time_before(jiffies, timeout)) { 1290 while (time_before(jiffies, timeout)) {
1310 idetape_create_test_unit_ready_cmd(&pc); 1291 idetape_create_test_unit_ready_cmd(&pc);
@@ -1397,7 +1378,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
1397 if (tape->chrdev_dir != IDETAPE_DIR_READ) 1378 if (tape->chrdev_dir != IDETAPE_DIR_READ)
1398 return; 1379 return;
1399 1380
1400 clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags); 1381 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
1401 tape->merge_bh_size = 0; 1382 tape->merge_bh_size = 0;
1402 if (tape->merge_bh != NULL) { 1383 if (tape->merge_bh != NULL) {
1403 ide_tape_kfree_buffer(tape); 1384 ide_tape_kfree_buffer(tape);
@@ -1465,7 +1446,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
1465 1446
1466 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 1447 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1467 rq->cmd_type = REQ_TYPE_SPECIAL; 1448 rq->cmd_type = REQ_TYPE_SPECIAL;
1468 rq->cmd[0] = cmd; 1449 rq->cmd[13] = cmd;
1469 rq->rq_disk = tape->disk; 1450 rq->rq_disk = tape->disk;
1470 rq->special = (void *)bh; 1451 rq->special = (void *)bh;
1471 rq->sector = tape->first_frame; 1452 rq->sector = tape->first_frame;
@@ -1636,7 +1617,7 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
1636 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); 1617 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
1637 1618
1638 /* If we are at a filemark, return a read length of 0 */ 1619 /* If we are at a filemark, return a read length of 0 */
1639 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1620 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1640 return 0; 1621 return 0;
1641 1622
1642 idetape_init_read(drive); 1623 idetape_init_read(drive);
@@ -1746,7 +1727,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
1746 1727
1747 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1728 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
1748 tape->merge_bh_size = 0; 1729 tape->merge_bh_size = 0;
1749 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1730 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1750 ++count; 1731 ++count;
1751 ide_tape_discard_merge_buffer(drive, 0); 1732 ide_tape_discard_merge_buffer(drive, 0);
1752 } 1733 }
@@ -1801,7 +1782,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1801 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1782 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1802 1783
1803 if (tape->chrdev_dir != IDETAPE_DIR_READ) { 1784 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1804 if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags)) 1785 if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags))
1805 if (count > tape->blk_size && 1786 if (count > tape->blk_size &&
1806 (count % tape->blk_size) == 0) 1787 (count % tape->blk_size) == 0)
1807 tape->user_bs_factor = count / tape->blk_size; 1788 tape->user_bs_factor = count / tape->blk_size;
@@ -1841,7 +1822,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1841 tape->merge_bh_size = bytes_read-temp; 1822 tape->merge_bh_size = bytes_read-temp;
1842 } 1823 }
1843finish: 1824finish:
1844 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) { 1825 if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
1845 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); 1826 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
1846 1827
1847 idetape_space_over_filemarks(drive, MTFSF, 1); 1828 idetape_space_over_filemarks(drive, MTFSF, 1);
@@ -2027,7 +2008,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2027 !IDETAPE_LU_LOAD_MASK); 2008 !IDETAPE_LU_LOAD_MASK);
2028 retval = idetape_queue_pc_tail(drive, &pc); 2009 retval = idetape_queue_pc_tail(drive, &pc);
2029 if (!retval) 2010 if (!retval)
2030 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); 2011 clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
2031 return retval; 2012 return retval;
2032 case MTNOP: 2013 case MTNOP:
2033 ide_tape_discard_merge_buffer(drive, 0); 2014 ide_tape_discard_merge_buffer(drive, 0);
@@ -2050,9 +2031,9 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2050 mt_count % tape->blk_size) 2031 mt_count % tape->blk_size)
2051 return -EIO; 2032 return -EIO;
2052 tape->user_bs_factor = mt_count / tape->blk_size; 2033 tape->user_bs_factor = mt_count / tape->blk_size;
2053 clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); 2034 clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
2054 } else 2035 } else
2055 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); 2036 set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
2056 return 0; 2037 return 0;
2057 case MTSEEK: 2038 case MTSEEK:
2058 ide_tape_discard_merge_buffer(drive, 0); 2039 ide_tape_discard_merge_buffer(drive, 0);
@@ -2202,20 +2183,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2202 2183
2203 filp->private_data = tape; 2184 filp->private_data = tape;
2204 2185
2205 if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) { 2186 if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) {
2206 retval = -EBUSY; 2187 retval = -EBUSY;
2207 goto out_put_tape; 2188 goto out_put_tape;
2208 } 2189 }
2209 2190
2210 retval = idetape_wait_ready(drive, 60 * HZ); 2191 retval = idetape_wait_ready(drive, 60 * HZ);
2211 if (retval) { 2192 if (retval) {
2212 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2193 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2213 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); 2194 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
2214 goto out_put_tape; 2195 goto out_put_tape;
2215 } 2196 }
2216 2197
2217 idetape_read_position(drive); 2198 idetape_read_position(drive);
2218 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags)) 2199 if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags))
2219 (void)idetape_rewind_tape(drive); 2200 (void)idetape_rewind_tape(drive);
2220 2201
2221 /* Read block size and write protect status from drive. */ 2202 /* Read block size and write protect status from drive. */
@@ -2231,7 +2212,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2231 if (tape->write_prot) { 2212 if (tape->write_prot) {
2232 if ((filp->f_flags & O_ACCMODE) == O_WRONLY || 2213 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
2233 (filp->f_flags & O_ACCMODE) == O_RDWR) { 2214 (filp->f_flags & O_ACCMODE) == O_RDWR) {
2234 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2215 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2235 retval = -EROFS; 2216 retval = -EROFS;
2236 goto out_put_tape; 2217 goto out_put_tape;
2237 } 2218 }
@@ -2291,7 +2272,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2291 ide_tape_discard_merge_buffer(drive, 1); 2272 ide_tape_discard_merge_buffer(drive, 1);
2292 } 2273 }
2293 2274
2294 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags)) 2275 if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags))
2295 (void) idetape_rewind_tape(drive); 2276 (void) idetape_rewind_tape(drive);
2296 if (tape->chrdev_dir == IDETAPE_DIR_NONE) { 2277 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2297 if (tape->door_locked == DOOR_LOCKED) { 2278 if (tape->door_locked == DOOR_LOCKED) {
@@ -2301,7 +2282,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2301 } 2282 }
2302 } 2283 }
2303 } 2284 }
2304 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2285 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2305 ide_tape_put(tape); 2286 ide_tape_put(tape);
2306 unlock_kernel(); 2287 unlock_kernel();
2307 return 0; 2288 return 0;
@@ -2464,6 +2445,8 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2464 u8 gcw[2]; 2445 u8 gcw[2];
2465 u16 *ctl = (u16 *)&tape->caps[12]; 2446 u16 *ctl = (u16 *)&tape->caps[12];
2466 2447
2448 drive->pc_callback = ide_tape_callback;
2449
2467 spin_lock_init(&tape->lock); 2450 spin_lock_init(&tape->lock);
2468 drive->dsc_overlap = 1; 2451 drive->dsc_overlap = 1;
2469 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) { 2452 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
@@ -2484,7 +2467,7 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2484 2467
2485 /* Command packet DRQ type */ 2468 /* Command packet DRQ type */
2486 if (((gcw[0] & 0x60) >> 5) == 1) 2469 if (((gcw[0] & 0x60) >> 5) == 1)
2487 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags); 2470 set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
2488 2471
2489 idetape_get_inquiry_results(drive); 2472 idetape_get_inquiry_results(drive);
2490 idetape_get_mode_sense_results(drive); 2473 idetape_get_mode_sense_results(drive);
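Two conversions run through the ide-tape hunks: the driver-private IDETAPE_FLAG_* bits become shared IDE_AFLAG_* bits kept in drive->atapi_flags, and the per-request driver opcode moves from rq->cmd[0] to rq->cmd[13] so that rq->cmd[0..11] can carry the real packet bytes (hence the added memcpy(rq->cmd, pc->c, 12) calls). A hedged sketch of that request-setup convention, using only fields shown above; example_setup_pc_rq() is a made-up helper name, not a function in the patch:

	/* Sketch only -- mirrors idetape_queue_pc_head()/idetape_queue_pc_tail() above. */
	static void example_setup_pc_rq(struct request *rq, struct ide_atapi_pc *pc,
					u8 driver_opcode)
	{
		rq->cmd_type = REQ_TYPE_SPECIAL;
		rq->cmd[13] = driver_opcode;		/* e.g. REQ_IDETAPE_PC1 */
		rq->buffer = (char *)pc;
		memcpy(rq->cmd, pc->c, 12);		/* the actual packet command bytes */
	}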
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 1fbdb746dc88..aeddbbd69e86 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -64,6 +64,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
64 ide_hwif_t *hwif = HWIF(drive); 64 ide_hwif_t *hwif = HWIF(drive);
65 struct ide_taskfile *tf = &task->tf; 65 struct ide_taskfile *tf = &task->tf;
66 ide_handler_t *handler = NULL; 66 ide_handler_t *handler = NULL;
67 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
67 const struct ide_dma_ops *dma_ops = hwif->dma_ops; 68 const struct ide_dma_ops *dma_ops = hwif->dma_ops;
68 69
69 if (task->data_phase == TASKFILE_MULTI_IN || 70 if (task->data_phase == TASKFILE_MULTI_IN ||
@@ -80,15 +81,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
80 81
81 if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { 82 if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
82 ide_tf_dump(drive->name, tf); 83 ide_tf_dump(drive->name, tf);
83 ide_set_irq(drive, 1); 84 tp_ops->set_irq(hwif, 1);
84 SELECT_MASK(drive, 0); 85 SELECT_MASK(drive, 0);
85 hwif->tf_load(drive, task); 86 tp_ops->tf_load(drive, task);
86 } 87 }
87 88
88 switch (task->data_phase) { 89 switch (task->data_phase) {
89 case TASKFILE_MULTI_OUT: 90 case TASKFILE_MULTI_OUT:
90 case TASKFILE_OUT: 91 case TASKFILE_OUT:
91 hwif->OUTBSYNC(hwif, tf->command, hwif->io_ports.command_addr); 92 tp_ops->exec_command(hwif, tf->command);
92 ndelay(400); /* FIXME */ 93 ndelay(400); /* FIXME */
93 return pre_task_out_intr(drive, task->rq); 94 return pre_task_out_intr(drive, task->rq);
94 case TASKFILE_MULTI_IN: 95 case TASKFILE_MULTI_IN:
@@ -124,7 +125,8 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile);
124 */ 125 */
125static ide_startstop_t set_multmode_intr(ide_drive_t *drive) 126static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
126{ 127{
127 u8 stat = ide_read_status(drive); 128 ide_hwif_t *hwif = drive->hwif;
129 u8 stat = hwif->tp_ops->read_status(hwif);
128 130
129 if (OK_STAT(stat, READY_STAT, BAD_STAT)) 131 if (OK_STAT(stat, READY_STAT, BAD_STAT))
130 drive->mult_count = drive->mult_req; 132 drive->mult_count = drive->mult_req;
@@ -141,11 +143,16 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
141 */ 143 */
142static ide_startstop_t set_geometry_intr(ide_drive_t *drive) 144static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
143{ 145{
146 ide_hwif_t *hwif = drive->hwif;
144 int retries = 5; 147 int retries = 5;
145 u8 stat; 148 u8 stat;
146 149
147 while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--) 150 while (1) {
151 stat = hwif->tp_ops->read_status(hwif);
152 if ((stat & BUSY_STAT) == 0 || retries-- == 0)
153 break;
148 udelay(10); 154 udelay(10);
 155 }
149 156
150 if (OK_STAT(stat, READY_STAT, BAD_STAT)) 157 if (OK_STAT(stat, READY_STAT, BAD_STAT))
151 return ide_stopped; 158 return ide_stopped;
@@ -162,7 +169,8 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
162 */ 169 */
163static ide_startstop_t recal_intr(ide_drive_t *drive) 170static ide_startstop_t recal_intr(ide_drive_t *drive)
164{ 171{
165 u8 stat = ide_read_status(drive); 172 ide_hwif_t *hwif = drive->hwif;
173 u8 stat = hwif->tp_ops->read_status(hwif);
166 174
167 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 175 if (!OK_STAT(stat, READY_STAT, BAD_STAT))
168 return ide_error(drive, "recal_intr", stat); 176 return ide_error(drive, "recal_intr", stat);
@@ -174,11 +182,12 @@ static ide_startstop_t recal_intr(ide_drive_t *drive)
174 */ 182 */
175static ide_startstop_t task_no_data_intr(ide_drive_t *drive) 183static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
176{ 184{
177 ide_task_t *args = HWGROUP(drive)->rq->special; 185 ide_hwif_t *hwif = drive->hwif;
186 ide_task_t *args = hwif->hwgroup->rq->special;
178 u8 stat; 187 u8 stat;
179 188
180 local_irq_enable_in_hardirq(); 189 local_irq_enable_in_hardirq();
181 stat = ide_read_status(drive); 190 stat = hwif->tp_ops->read_status(hwif);
182 191
183 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 192 if (!OK_STAT(stat, READY_STAT, BAD_STAT))
184 return ide_error(drive, "task_no_data_intr", stat); 193 return ide_error(drive, "task_no_data_intr", stat);
@@ -192,6 +201,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
192 201
193static u8 wait_drive_not_busy(ide_drive_t *drive) 202static u8 wait_drive_not_busy(ide_drive_t *drive)
194{ 203{
204 ide_hwif_t *hwif = drive->hwif;
195 int retries; 205 int retries;
196 u8 stat; 206 u8 stat;
197 207
@@ -200,7 +210,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
200 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms. 210 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
201 */ 211 */
202 for (retries = 0; retries < 1000; retries++) { 212 for (retries = 0; retries < 1000; retries++) {
203 stat = ide_read_status(drive); 213 stat = hwif->tp_ops->read_status(hwif);
204 214
205 if (stat & BUSY_STAT) 215 if (stat & BUSY_STAT)
206 udelay(10); 216 udelay(10);
@@ -255,9 +265,9 @@ static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
255 265
256 /* do the actual data transfer */ 266 /* do the actual data transfer */
257 if (write) 267 if (write)
258 hwif->output_data(drive, rq, buf, SECTOR_SIZE); 268 hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
259 else 269 else
260 hwif->input_data(drive, rq, buf, SECTOR_SIZE); 270 hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);
261 271
262 kunmap_atomic(buf, KM_BIO_SRC_IRQ); 272 kunmap_atomic(buf, KM_BIO_SRC_IRQ);
263#ifdef CONFIG_HIGHMEM 273#ifdef CONFIG_HIGHMEM
@@ -383,8 +393,8 @@ static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq
383static ide_startstop_t task_in_intr(ide_drive_t *drive) 393static ide_startstop_t task_in_intr(ide_drive_t *drive)
384{ 394{
385 ide_hwif_t *hwif = drive->hwif; 395 ide_hwif_t *hwif = drive->hwif;
386 struct request *rq = HWGROUP(drive)->rq; 396 struct request *rq = hwif->hwgroup->rq;
387 u8 stat = ide_read_status(drive); 397 u8 stat = hwif->tp_ops->read_status(hwif);
388 398
389 /* Error? */ 399 /* Error? */
390 if (stat & ERR_STAT) 400 if (stat & ERR_STAT)
@@ -418,7 +428,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
418{ 428{
419 ide_hwif_t *hwif = drive->hwif; 429 ide_hwif_t *hwif = drive->hwif;
420 struct request *rq = HWGROUP(drive)->rq; 430 struct request *rq = HWGROUP(drive)->rq;
421 u8 stat = ide_read_status(drive); 431 u8 stat = hwif->tp_ops->read_status(hwif);
422 432
423 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) 433 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
424 return task_error(drive, rq, __func__, stat); 434 return task_error(drive, rq, __func__, stat);
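The taskfile hunks show the refactor that runs through this whole series: direct accessor calls (ide_read_status(), hwif->OUTBSYNC(), hwif->tf_load, hwif->INB) are routed through the hwif->tp_ops method table, so an alternative transport only has to supply a different ops structure. A minimal sketch of the resulting calling convention, assuming only the tp_ops methods that appear in this diff (set_irq, tf_load, exec_command, read_status); example_issue() is a hypothetical caller, not part of the patch:

	static void example_issue(ide_drive_t *drive, ide_task_t *task, u8 cmd)
	{
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		u8 stat;

		tp_ops->set_irq(hwif, 1);		/* clear nIEN, as init_irq() now does */
		tp_ops->tf_load(drive, task);		/* load the taskfile registers */
		tp_ops->exec_command(hwif, cmd);	/* write the command register */

		do {					/* poll instead of taking an interrupt */
			stat = tp_ops->read_status(hwif);
		} while (stat & BUSY_STAT);
	}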
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index d4a6b102a772..60f0ca66aa93 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) 2 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
3 * Copyrifht (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz 3 * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
4 */ 4 */
5 5
6/* 6/*
@@ -101,8 +101,7 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
101 101
102 init_completion(&hwif->gendev_rel_comp); 102 init_completion(&hwif->gendev_rel_comp);
103 103
104 default_hwif_iops(hwif); 104 hwif->tp_ops = &default_tp_ops;
105 default_hwif_transport(hwif);
106 105
107 ide_port_init_devices_data(hwif); 106 ide_port_init_devices_data(hwif);
108} 107}
@@ -134,41 +133,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
134 } 133 }
135} 134}
136 135
137void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
138{
139 ide_hwgroup_t *hwgroup = hwif->hwgroup;
140
141 spin_lock_irq(&ide_lock);
142 /*
143 * Remove us from the hwgroup, and free
144 * the hwgroup if we were the only member
145 */
146 if (hwif->next == hwif) {
147 BUG_ON(hwgroup->hwif != hwif);
148 kfree(hwgroup);
149 } else {
150 /* There is another interface in hwgroup.
151 * Unlink us, and set hwgroup->drive and ->hwif to
152 * something sane.
153 */
154 ide_hwif_t *g = hwgroup->hwif;
155
156 while (g->next != hwif)
157 g = g->next;
158 g->next = hwif->next;
159 if (hwgroup->hwif == hwif) {
160 /* Chose a random hwif for hwgroup->hwif.
161 * It's guaranteed that there are no drives
162 * left in the hwgroup.
163 */
164 BUG_ON(hwgroup->drive != NULL);
165 hwgroup->hwif = g;
166 }
167 BUG_ON(hwgroup->hwif == hwif);
168 }
169 spin_unlock_irq(&ide_lock);
170}
171
172/* Called with ide_lock held. */ 136/* Called with ide_lock held. */
173static void __ide_port_unregister_devices(ide_hwif_t *hwif) 137static void __ide_port_unregister_devices(ide_hwif_t *hwif)
174{ 138{
@@ -269,16 +233,9 @@ void ide_unregister(ide_hwif_t *hwif)
269 if (hwif->dma_base) 233 if (hwif->dma_base)
270 ide_release_dma_engine(hwif); 234 ide_release_dma_engine(hwif);
271 235
272 spin_lock_irq(&ide_lock);
273 /* restore hwif data to pristine status */
274 ide_init_port_data(hwif, hwif->index);
275 spin_unlock_irq(&ide_lock);
276
277 mutex_unlock(&ide_cfg_mtx); 236 mutex_unlock(&ide_cfg_mtx);
278} 237}
279 238
280EXPORT_SYMBOL(ide_unregister);
281
282void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) 239void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
283{ 240{
284 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); 241 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
@@ -287,8 +244,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
287 hwif->dev = hw->dev; 244 hwif->dev = hw->dev;
288 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev; 245 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
289 hwif->ack_intr = hw->ack_intr; 246 hwif->ack_intr = hw->ack_intr;
247 hwif->config_data = hw->config;
290} 248}
291EXPORT_SYMBOL_GPL(ide_init_port_hw);
292 249
293/* 250/*
294 * Locks for IDE setting functionality 251 * Locks for IDE setting functionality
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 0497e7f85b09..7c2afa97f417 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -37,6 +37,8 @@
37#define CATWEASEL_NUM_HWIFS 3 37#define CATWEASEL_NUM_HWIFS 3
38#define XSURF_NUM_HWIFS 2 38#define XSURF_NUM_HWIFS 2
39 39
40#define MAX_NUM_HWIFS 3
41
40 /* 42 /*
41 * Bases of the IDE interfaces (relative to the board address) 43 * Bases of the IDE interfaces (relative to the board address)
42 */ 44 */
@@ -148,18 +150,14 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
148 150
149static int __init buddha_init(void) 151static int __init buddha_init(void)
150{ 152{
151 hw_regs_t hw;
152 ide_hwif_t *hwif;
153 int i;
154
155 struct zorro_dev *z = NULL; 153 struct zorro_dev *z = NULL;
156 u_long buddha_board = 0; 154 u_long buddha_board = 0;
157 BuddhaType type; 155 BuddhaType type;
158 int buddha_num_hwifs; 156 int buddha_num_hwifs, i;
159 157
160 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { 158 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
161 unsigned long board; 159 unsigned long board;
162 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 160 hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
163 161
164 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { 162 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
165 buddha_num_hwifs = BUDDHA_NUM_HWIFS; 163 buddha_num_hwifs = BUDDHA_NUM_HWIFS;
@@ -221,19 +219,13 @@ fail_base2:
221 ack_intr = xsurf_ack_intr; 219 ack_intr = xsurf_ack_intr;
222 } 220 }
223 221
224 buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr); 222 buddha_setup_ports(&hw[i], base, ctl, irq_port,
223 ack_intr);
225 224
226 hwif = ide_find_port(); 225 hws[i] = &hw[i];
227 if (hwif) {
228 u8 index = hwif->index;
229
230 ide_init_port_hw(hwif, &hw);
231
232 idx[i] = index;
233 }
234 } 226 }
235 227
236 ide_device_add(idx, NULL); 228 ide_host_add(NULL, hws, NULL);
237 } 229 }
238 230
239 return 0; 231 return 0;
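
The buddha conversion above is the template used by most legacy drivers in this series: collect one hw_regs_t per port, point the hws[] slots at them, and let a single ide_host_add() call replace the old ide_find_port()/ide_init_port_hw()/ide_device_add() sequence. A condensed sketch under assumptions: the my_* names describe a hypothetical two-port board, and ide_std_init_ports() is assumed for the standard legacy register layout.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ide.h>

/* Hypothetical two-port legacy board at the classic ISA addresses. */
static const unsigned long my_bases[] = { 0x1f0, 0x170 };
static const int my_irqs[]            = { 14, 15 };

static int __init my_legacy_ide_init(void)
{
        hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
        int i;

        for (i = 0; i < 2; i++) {
                memset(&hw[i], 0, sizeof(hw[i]));
                ide_std_init_ports(&hw[i], my_bases[i], my_bases[i] + 0x206);
                hw[i].irq = my_irqs[i];

                hws[i] = &hw[i];
        }

        /* one call replaces the old find_port/init_port_hw/device_add dance */
        return ide_host_add(NULL, hws, NULL);
}

module_init(my_legacy_ide_init);
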
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 129a812bb57f..724f95073d80 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -66,6 +66,27 @@ static void falconide_output_data(ide_drive_t *drive, struct request *rq,
66 outsw_swapw(data_addr, buf, (len + 1) / 2); 66 outsw_swapw(data_addr, buf, (len + 1) / 2);
67} 67}
68 68
69/* Atari has a byte-swapped IDE interface */
70static const struct ide_tp_ops falconide_tp_ops = {
71 .exec_command = ide_exec_command,
72 .read_status = ide_read_status,
73 .read_altstatus = ide_read_altstatus,
74 .read_sff_dma_status = ide_read_sff_dma_status,
75
76 .set_irq = ide_set_irq,
77
78 .tf_load = ide_tf_load,
79 .tf_read = ide_tf_read,
80
81 .input_data = falconide_input_data,
82 .output_data = falconide_output_data,
83};
84
85static const struct ide_port_info falconide_port_info = {
86 .tp_ops = &falconide_tp_ops,
87 .host_flags = IDE_HFLAG_NO_DMA,
88};
89
69static void __init falconide_setup_ports(hw_regs_t *hw) 90static void __init falconide_setup_ports(hw_regs_t *hw)
70{ 91{
71 int i; 92 int i;
@@ -91,11 +112,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
91 112
92static int __init falconide_init(void) 113static int __init falconide_init(void)
93{ 114{
94 hw_regs_t hw; 115 struct ide_host *host;
95 ide_hwif_t *hwif; 116 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
117 int rc;
96 118
97 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) 119 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
98 return 0; 120 return -ENODEV;
99 121
100 printk(KERN_INFO "ide: Falcon IDE controller\n"); 122 printk(KERN_INFO "ide: Falcon IDE controller\n");
101 123
@@ -106,23 +128,25 @@ static int __init falconide_init(void)
106 128
107 falconide_setup_ports(&hw); 129 falconide_setup_ports(&hw);
108 130
109 hwif = ide_find_port(); 131 host = ide_host_alloc(&falconide_port_info, hws);
110 if (hwif) { 132 if (host == NULL) {
111 u8 index = hwif->index; 133 rc = -ENOMEM;
112 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 134 goto err;
113 135 }
114 ide_init_port_hw(hwif, &hw);
115 136
116 /* Atari has a byte-swapped IDE interface */ 137 ide_get_lock(NULL, NULL);
117 hwif->input_data = falconide_input_data; 138 rc = ide_host_register(host, &falconide_port_info, hws);
118 hwif->output_data = falconide_output_data; 139 ide_release_lock();
119 140
120 ide_get_lock(NULL, NULL); 141 if (rc)
121 ide_device_add(idx, NULL); 142 goto err_free;
122 ide_release_lock();
123 }
124 143
125 return 0; 144 return 0;
145err_free:
146 ide_host_free(host);
147err:
148 release_mem_region(ATA_HD_BASE, 0x40);
149 return rc;
126} 150}
127 151
128module_init(falconide_init); 152module_init(falconide_init);
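
falconide cannot use the one-shot helper because registration must sit inside ide_get_lock()/ide_release_lock(), so the hunk above splits the work into ide_host_alloc(), ide_host_register() and, on failure, ide_host_free(). The same skeleton with the Atari specifics stripped out; my_port_info, my_locked_ide_init and the port addresses are placeholders, and ide_std_init_ports() is assumed for the register layout.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ide.h>

static const struct ide_port_info my_port_info = {
        .host_flags = IDE_HFLAG_NO_DMA,
};

static int __init my_locked_ide_init(void)
{
        struct ide_host *host;
        hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
        int rc;

        memset(&hw, 0, sizeof(hw));
        ide_std_init_ports(&hw, 0x1f0, 0x3f6);  /* placeholder addresses */
        hw.irq = 14;

        host = ide_host_alloc(&my_port_info, hws);
        if (host == NULL)
                return -ENOMEM;

        /*
         * Whatever has to bracket the probe (the Atari IDE lock in the
         * hunk above) goes around ide_host_register().
         */
        rc = ide_host_register(host, &my_port_info, hws);
        if (rc) {
                ide_host_free(host);
                return rc;
        }

        return 0;
}

module_init(my_locked_ide_init);
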
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index 7e74b20202df..dd5c467d8dd0 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -31,6 +31,8 @@
31#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */ 31#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
32#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */ 32#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
33 33
34#define GAYLE_IDEREG_SIZE 0x2000
35
34 /* 36 /*
35 * Offsets from one of the above bases 37 * Offsets from one of the above bases
36 */ 38 */
@@ -56,13 +58,11 @@
56#define GAYLE_NUM_HWIFS 1 58#define GAYLE_NUM_HWIFS 1
57#define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS 59#define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS
58#define GAYLE_HAS_CONTROL_REG 1 60#define GAYLE_HAS_CONTROL_REG 1
59#define GAYLE_IDEREG_SIZE 0x2000
60#else /* CONFIG_BLK_DEV_IDEDOUBLER */ 61#else /* CONFIG_BLK_DEV_IDEDOUBLER */
61#define GAYLE_NUM_HWIFS 2 62#define GAYLE_NUM_HWIFS 2
62#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \ 63#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \
63 GAYLE_NUM_HWIFS-1) 64 GAYLE_NUM_HWIFS-1)
64#define GAYLE_HAS_CONTROL_REG (!ide_doubler) 65#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
65#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
66 66
67static int ide_doubler; 67static int ide_doubler;
68module_param_named(doubler, ide_doubler, bool, 0); 68module_param_named(doubler, ide_doubler, bool, 0);
@@ -124,8 +124,11 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
124 124
125static int __init gayle_init(void) 125static int __init gayle_init(void)
126{ 126{
127 unsigned long phys_base, res_start, res_n;
128 unsigned long base, ctrlport, irqport;
129 ide_ack_intr_t *ack_intr;
127 int a4000, i; 130 int a4000, i;
128 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 131 hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
129 132
130 if (!MACH_IS_AMIGA) 133 if (!MACH_IS_AMIGA)
131 return -ENODEV; 134 return -ENODEV;
@@ -148,13 +151,6 @@ found:
148#endif 151#endif
149 ""); 152 "");
150 153
151 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
152 unsigned long base, ctrlport, irqport;
153 ide_ack_intr_t *ack_intr;
154 hw_regs_t hw;
155 ide_hwif_t *hwif;
156 unsigned long phys_base, res_start, res_n;
157
158 if (a4000) { 154 if (a4000) {
159 phys_base = GAYLE_BASE_4000; 155 phys_base = GAYLE_BASE_4000;
160 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); 156 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
@@ -168,33 +164,22 @@ found:
168 * FIXME: we now have selectable modes between mmio v/s iomio 164 * FIXME: we now have selectable modes between mmio v/s iomio
169 */ 165 */
170 166
171 phys_base += i*GAYLE_NEXT_PORT;
172
173 res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); 167 res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
174 res_n = GAYLE_IDEREG_SIZE; 168 res_n = GAYLE_IDEREG_SIZE;
175 169
176 if (!request_mem_region(res_start, res_n, "IDE")) 170 if (!request_mem_region(res_start, res_n, "IDE"))
177 continue; 171 return -EBUSY;
178 172
179 base = (unsigned long)ZTWO_VADDR(phys_base); 173 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
174 base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
180 ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; 175 ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
181 176
182 gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr); 177 gayle_setup_ports(&hw[i], base, ctrlport, irqport, ack_intr);
183
184 hwif = ide_find_port();
185 if (hwif) {
186 u8 index = hwif->index;
187 178
188 ide_init_port_hw(hwif, &hw); 179 hws[i] = &hw[i];
189
190 idx[i] = index;
191 } else
192 release_mem_region(res_start, res_n);
193 } 180 }
194 181
195 ide_device_add(idx, NULL); 182 return ide_host_add(NULL, hws, NULL);
196
197 return 0;
198} 183}
199 184
200module_init(gayle_init); 185module_init(gayle_init);
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index 89c8ff0a4d08..c76d55de6996 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -28,10 +28,8 @@ static const struct ide_port_info ide_4drives_port_info = {
28 28
29static int __init ide_4drives_init(void) 29static int __init ide_4drives_init(void)
30{ 30{
31 ide_hwif_t *hwif, *mate;
32 unsigned long base = 0x1f0, ctl = 0x3f6; 31 unsigned long base = 0x1f0, ctl = 0x3f6;
33 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 32 hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };
34 hw_regs_t hw;
35 33
36 if (probe_4drives == 0) 34 if (probe_4drives == 0)
37 return -ENODEV; 35 return -ENODEV;
@@ -55,21 +53,7 @@ static int __init ide_4drives_init(void)
55 hw.irq = 14; 53 hw.irq = 14;
56 hw.chipset = ide_4drives; 54 hw.chipset = ide_4drives;
57 55
58 hwif = ide_find_port(); 56 return ide_host_add(&ide_4drives_port_info, hws, NULL);
59 if (hwif) {
60 ide_init_port_hw(hwif, &hw);
61 idx[0] = hwif->index;
62 }
63
64 mate = ide_find_port();
65 if (mate) {
66 ide_init_port_hw(mate, &hw);
67 idx[1] = mate->index;
68 }
69
70 ide_device_add(idx, &ide_4drives_port_info);
71
72 return 0;
73} 57}
74 58
75module_init(ide_4drives_init); 59module_init(ide_4drives_init);
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 27b1e0b7ecb4..21bfac137844 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -74,7 +74,7 @@ INT_MODULE_PARM(pc_debug, 0);
74 74
75typedef struct ide_info_t { 75typedef struct ide_info_t {
76 struct pcmcia_device *p_dev; 76 struct pcmcia_device *p_dev;
77 ide_hwif_t *hwif; 77 struct ide_host *host;
78 int ndev; 78 int ndev;
79 dev_node_t node; 79 dev_node_t node;
80} ide_info_t; 80} ide_info_t;
@@ -132,7 +132,7 @@ static int ide_probe(struct pcmcia_device *link)
132static void ide_detach(struct pcmcia_device *link) 132static void ide_detach(struct pcmcia_device *link)
133{ 133{
134 ide_info_t *info = link->priv; 134 ide_info_t *info = link->priv;
135 ide_hwif_t *hwif = info->hwif; 135 ide_hwif_t *hwif = info->host->ports[0];
136 unsigned long data_addr, ctl_addr; 136 unsigned long data_addr, ctl_addr;
137 137
138 DEBUG(0, "ide_detach(0x%p)\n", link); 138 DEBUG(0, "ide_detach(0x%p)\n", link);
@@ -157,13 +157,13 @@ static const struct ide_port_info idecs_port_info = {
157 .host_flags = IDE_HFLAG_NO_DMA, 157 .host_flags = IDE_HFLAG_NO_DMA,
158}; 158};
159 159
160static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, 160static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
161 unsigned long irq, struct pcmcia_device *handle) 161 unsigned long irq, struct pcmcia_device *handle)
162{ 162{
163 struct ide_host *host;
163 ide_hwif_t *hwif; 164 ide_hwif_t *hwif;
164 hw_regs_t hw; 165 int i, rc;
165 int i; 166 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
166 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
167 167
168 if (!request_region(io, 8, DRV_NAME)) { 168 if (!request_region(io, 8, DRV_NAME)) {
169 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", 169 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -184,30 +184,24 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
184 hw.chipset = ide_pci; 184 hw.chipset = ide_pci;
185 hw.dev = &handle->dev; 185 hw.dev = &handle->dev;
186 186
187 hwif = ide_find_port(); 187 rc = ide_host_add(&idecs_port_info, hws, &host);
188 if (hwif == NULL) 188 if (rc)
189 goto out_release; 189 goto out_release;
190 190
191 i = hwif->index; 191 hwif = host->ports[0];
192
193 ide_init_port_hw(hwif, &hw);
194
195 idx[0] = i;
196
197 ide_device_add(idx, &idecs_port_info);
198 192
199 if (hwif->present) 193 if (hwif->present)
200 return hwif; 194 return host;
201 195
202 /* retry registration in case device is still spinning up */ 196 /* retry registration in case device is still spinning up */
203 for (i = 0; i < 10; i++) { 197 for (i = 0; i < 10; i++) {
204 msleep(100); 198 msleep(100);
205 ide_port_scan(hwif); 199 ide_port_scan(hwif);
206 if (hwif->present) 200 if (hwif->present)
207 return hwif; 201 return host;
208 } 202 }
209 203
210 return hwif; 204 return host;
211 205
212out_release: 206out_release:
213 release_region(ctl, 1); 207 release_region(ctl, 1);
@@ -239,7 +233,7 @@ static int ide_config(struct pcmcia_device *link)
239 cistpl_cftable_entry_t *cfg; 233 cistpl_cftable_entry_t *cfg;
240 int pass, last_ret = 0, last_fn = 0, is_kme = 0; 234 int pass, last_ret = 0, last_fn = 0, is_kme = 0;
241 unsigned long io_base, ctl_base; 235 unsigned long io_base, ctl_base;
242 ide_hwif_t *hwif; 236 struct ide_host *host;
243 237
244 DEBUG(0, "ide_config(0x%p)\n", link); 238 DEBUG(0, "ide_config(0x%p)\n", link);
245 239
@@ -334,21 +328,21 @@ static int ide_config(struct pcmcia_device *link)
334 if (is_kme) 328 if (is_kme)
335 outb(0x81, ctl_base+1); 329 outb(0x81, ctl_base+1);
336 330
337 hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link); 331 host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
338 if (hwif == NULL && link->io.NumPorts1 == 0x20) { 332 if (host == NULL && link->io.NumPorts1 == 0x20) {
339 outb(0x02, ctl_base + 0x10); 333 outb(0x02, ctl_base + 0x10);
340 hwif = idecs_register(io_base + 0x10, ctl_base + 0x10, 334 host = idecs_register(io_base + 0x10, ctl_base + 0x10,
341 link->irq.AssignedIRQ, link); 335 link->irq.AssignedIRQ, link);
342 } 336 }
343 337
344 if (hwif == NULL) 338 if (host == NULL)
345 goto failed; 339 goto failed;
346 340
347 info->ndev = 1; 341 info->ndev = 1;
348 sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2); 342 sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
349 info->node.major = hwif->major; 343 info->node.major = host->ports[0]->major;
350 info->node.minor = 0; 344 info->node.minor = 0;
351 info->hwif = hwif; 345 info->host = host;
352 link->dev_node = &info->node; 346 link->dev_node = &info->node;
353 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n", 347 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
354 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10); 348 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -379,15 +373,15 @@ failed:
379static void ide_release(struct pcmcia_device *link) 373static void ide_release(struct pcmcia_device *link)
380{ 374{
381 ide_info_t *info = link->priv; 375 ide_info_t *info = link->priv;
382 ide_hwif_t *hwif = info->hwif; 376 struct ide_host *host = info->host;
383 377
384 DEBUG(0, "ide_release(0x%p)\n", link); 378 DEBUG(0, "ide_release(0x%p)\n", link);
385 379
386 if (info->ndev) { 380 if (info->ndev)
387 /* FIXME: if this fails we need to queue the cleanup somehow 381 /* FIXME: if this fails we need to queue the cleanup somehow
388 -- need to investigate the required PCMCIA magic */ 382 -- need to investigate the required PCMCIA magic */
389 ide_unregister(hwif); 383 ide_host_remove(host);
390 } 384
391 info->ndev = 0; 385 info->ndev = 0;
392 386
393 pcmcia_disable_device(link); 387 pcmcia_disable_device(link);
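
ide-cs now stores a struct ide_host in its per-socket data rather than a bare ide_hwif_t, reaches the port via host->ports[0], and tears down with ide_host_remove(). A sketch of that driver-data pattern for a hot-pluggable bus; struct my_socket, my_attach/my_detach and my_port_info are placeholders, and the pcmcia plumbing is omitted.

#include <linux/kernel.h>
#include <linux/ide.h>

static const struct ide_port_info my_port_info = {
        .host_flags = IDE_HFLAG_NO_DMA,
};

struct my_socket {
        struct ide_host *host;
};

static int my_attach(struct my_socket *sock, hw_regs_t *hw)
{
        hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
        struct ide_host *host;
        int rc;

        rc = ide_host_add(&my_port_info, hws, &host);
        if (rc)
                return rc;

        /* the hwif itself is reached through the host from now on */
        printk(KERN_INFO "attached as hd%c\n", 'a' + host->ports[0]->index * 2);

        sock->host = host;
        return 0;
}

static void my_detach(struct my_socket *sock)
{
        ide_host_remove(sock->host);    /* unregisters the ports, frees the host */
        sock->host = NULL;
}
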
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index a249562b34b5..051b4ab0f359 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -52,12 +52,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
52{ 52{
53 struct resource *res_base, *res_alt, *res_irq; 53 struct resource *res_base, *res_alt, *res_irq;
54 void __iomem *base, *alt_base; 54 void __iomem *base, *alt_base;
55 ide_hwif_t *hwif;
56 struct pata_platform_info *pdata; 55 struct pata_platform_info *pdata;
57 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 56 struct ide_host *host;
58 int ret = 0; 57 int ret = 0, mmio = 0;
59 int mmio = 0; 58 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
60 hw_regs_t hw;
61 struct ide_port_info d = platform_ide_port_info; 59 struct ide_port_info d = platform_ide_port_info;
62 60
63 pdata = pdev->dev.platform_data; 61 pdata = pdev->dev.platform_data;
@@ -94,28 +92,18 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
94 res_alt->start, res_alt->end - res_alt->start + 1); 92 res_alt->start, res_alt->end - res_alt->start + 1);
95 } 93 }
96 94
97 hwif = ide_find_port();
98 if (!hwif) {
99 ret = -ENODEV;
100 goto out;
101 }
102
103 memset(&hw, 0, sizeof(hw)); 95 memset(&hw, 0, sizeof(hw));
104 plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); 96 plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
105 hw.dev = &pdev->dev; 97 hw.dev = &pdev->dev;
106 98
107 ide_init_port_hw(hwif, &hw); 99 if (mmio)
108
109 if (mmio) {
110 d.host_flags |= IDE_HFLAG_MMIO; 100 d.host_flags |= IDE_HFLAG_MMIO;
111 default_hwif_mmiops(hwif);
112 }
113 101
114 idx[0] = hwif->index; 102 ret = ide_host_add(&d, hws, &host);
115 103 if (ret)
116 ide_device_add(idx, &d); 104 goto out;
117 105
118 platform_set_drvdata(pdev, hwif); 106 platform_set_drvdata(pdev, host);
119 107
120 return 0; 108 return 0;
121 109
@@ -125,9 +113,9 @@ out:
125 113
126static int __devexit plat_ide_remove(struct platform_device *pdev) 114static int __devexit plat_ide_remove(struct platform_device *pdev)
127{ 115{
128 ide_hwif_t *hwif = pdev->dev.driver_data; 116 struct ide_host *host = pdev->dev.driver_data;
129 117
130 ide_unregister(hwif); 118 ide_host_remove(host);
131 119
132 return 0; 120 return 0;
133} 121}
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 0a6195bcfeda..a0bb167980e7 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -91,11 +91,10 @@ static const char *mac_ide_name[] =
91 91
92static int __init macide_init(void) 92static int __init macide_init(void)
93{ 93{
94 ide_hwif_t *hwif;
95 ide_ack_intr_t *ack_intr; 94 ide_ack_intr_t *ack_intr;
96 unsigned long base; 95 unsigned long base;
97 int irq; 96 int irq;
98 hw_regs_t hw; 97 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
99 98
100 if (!MACH_IS_MAC) 99 if (!MACH_IS_MAC)
101 return -ENODEV; 100 return -ENODEV;
@@ -125,17 +124,7 @@ static int __init macide_init(void)
125 124
126 macide_setup_ports(&hw, base, irq, ack_intr); 125 macide_setup_ports(&hw, base, irq, ack_intr);
127 126
128 hwif = ide_find_port(); 127 return ide_host_add(NULL, hws, NULL);
129 if (hwif) {
130 u8 index = hwif->index;
131 u8 idx[4] = { index, 0xff, 0xff, 0xff };
132
133 ide_init_port_hw(hwif, &hw);
134
135 ide_device_add(idx, NULL);
136 }
137
138 return 0;
139} 128}
140 129
141module_init(macide_init); 130module_init(macide_init);
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 9c2b9d078f69..4abd8fc78197 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -96,6 +96,27 @@ static void q40ide_output_data(ide_drive_t *drive, struct request *rq,
96 outsw_swapw(data_addr, buf, (len + 1) / 2); 96 outsw_swapw(data_addr, buf, (len + 1) / 2);
97} 97}
98 98
99/* Q40 has a byte-swapped IDE interface */
100static const struct ide_tp_ops q40ide_tp_ops = {
101 .exec_command = ide_exec_command,
102 .read_status = ide_read_status,
103 .read_altstatus = ide_read_altstatus,
104 .read_sff_dma_status = ide_read_sff_dma_status,
105
106 .set_irq = ide_set_irq,
107
108 .tf_load = ide_tf_load,
109 .tf_read = ide_tf_read,
110
111 .input_data = q40ide_input_data,
112 .output_data = q40ide_output_data,
113};
114
115static const struct ide_port_info q40ide_port_info = {
116 .tp_ops = &q40ide_tp_ops,
117 .host_flags = IDE_HFLAG_NO_DMA,
118};
119
99/* 120/*
100 * the static array is needed to have the name reported in /proc/ioports, 121 * the static array is needed to have the name reported in /proc/ioports,
101 * hwif->name unfortunately isn't available yet 122 * hwif->name unfortunately isn't available yet
@@ -111,9 +132,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
111static int __init q40ide_init(void) 132static int __init q40ide_init(void)
112{ 133{
113 int i; 134 int i;
114 ide_hwif_t *hwif; 135 hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
115 const char *name;
116 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
117 136
118 if (!MACH_IS_Q40) 137 if (!MACH_IS_Q40)
119 return -ENODEV; 138 return -ENODEV;
@@ -121,9 +140,8 @@ static int __init q40ide_init(void)
121 printk(KERN_INFO "ide: Q40 IDE controller\n"); 140 printk(KERN_INFO "ide: Q40 IDE controller\n");
122 141
123 for (i = 0; i < Q40IDE_NUM_HWIFS; i++) { 142 for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
124 hw_regs_t hw; 143 const char *name = q40_ide_names[i];
125 144
126 name = q40_ide_names[i];
127 if (!request_region(pcide_bases[i], 8, name)) { 145 if (!request_region(pcide_bases[i], 8, name)) {
128 printk("could not reserve ports %lx-%lx for %s\n", 146 printk("could not reserve ports %lx-%lx for %s\n",
129 pcide_bases[i],pcide_bases[i]+8,name); 147 pcide_bases[i],pcide_bases[i]+8,name);
@@ -135,26 +153,13 @@ static int __init q40ide_init(void)
135 release_region(pcide_bases[i], 8); 153 release_region(pcide_bases[i], 8);
136 continue; 154 continue;
137 } 155 }
138 q40_ide_setup_ports(&hw, pcide_bases[i], 156 q40_ide_setup_ports(&hw[i], pcide_bases[i], NULL,
139 NULL,
140// m68kide_iops,
141 q40ide_default_irq(pcide_bases[i])); 157 q40ide_default_irq(pcide_bases[i]));
142 158
143 hwif = ide_find_port(); 159 hws[i] = &hw[i];
144 if (hwif) {
145 ide_init_port_hw(hwif, &hw);
146
147 /* Q40 has a byte-swapped IDE interface */
148 hwif->input_data = q40ide_input_data;
149 hwif->output_data = q40ide_output_data;
150
151 idx[i] = hwif->index;
152 }
153 } 160 }
154 161
155 ide_device_add(idx, NULL); 162 return ide_host_add(&q40ide_port_info, hws, NULL);
156
157 return 0;
158} 163}
159 164
160module_init(q40ide_init); 165module_init(q40ide_init);
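
The Falcon and Q40 hunks share one recipe for byte-swapped interfaces: keep every default ide_* accessor and override only input_data/output_data in a const struct ide_tp_ops hung off the ide_port_info, instead of patching hwif fields at probe time. The skeleton below assumes insw_swapw() as the m68k counterpart of the outsw_swapw() helper visible above; the my_swapped_* names are placeholders.

#include <linux/ide.h>

static void my_swapped_input_data(ide_drive_t *drive, struct request *rq,
                                  void *buf, unsigned int len)
{
        insw_swapw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}

static void my_swapped_output_data(ide_drive_t *drive, struct request *rq,
                                   void *buf, unsigned int len)
{
        outsw_swapw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
}

static const struct ide_tp_ops my_swapped_tp_ops = {
        .exec_command           = ide_exec_command,
        .read_status            = ide_read_status,
        .read_altstatus         = ide_read_altstatus,
        .read_sff_dma_status    = ide_read_sff_dma_status,

        .set_irq                = ide_set_irq,

        .tf_load                = ide_tf_load,
        .tf_read                = ide_tf_read,

        .input_data             = my_swapped_input_data,
        .output_data            = my_swapped_output_data,
};

static const struct ide_port_info my_swapped_port_info = {
        .tp_ops         = &my_swapped_tp_ops,
        .host_flags     = IDE_HFLAG_NO_DMA,
};
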
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 48d57cae63c6..11b7f61aae40 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -519,6 +519,23 @@ static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
519 *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT); 519 *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
520} 520}
521 521
522#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
523static const struct ide_tp_ops au1xxx_tp_ops = {
524 .exec_command = ide_exec_command,
525 .read_status = ide_read_status,
526 .read_altstatus = ide_read_altstatus,
527 .read_sff_dma_status = ide_read_sff_dma_status,
528
529 .set_irq = ide_set_irq,
530
531 .tf_load = ide_tf_load,
532 .tf_read = ide_tf_read,
533
534 .input_data = au1xxx_input_data,
535 .output_data = au1xxx_output_data,
536};
537#endif
538
522static const struct ide_port_ops au1xxx_port_ops = { 539static const struct ide_port_ops au1xxx_port_ops = {
523 .set_pio_mode = au1xxx_set_pio_mode, 540 .set_pio_mode = au1xxx_set_pio_mode,
524 .set_dma_mode = auide_set_dma_mode, 541 .set_dma_mode = auide_set_dma_mode,
@@ -526,6 +543,9 @@ static const struct ide_port_ops au1xxx_port_ops = {
526 543
527static const struct ide_port_info au1xxx_port_info = { 544static const struct ide_port_info au1xxx_port_info = {
528 .init_dma = auide_ddma_init, 545 .init_dma = auide_ddma_init,
546#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
547 .tp_ops = &au1xxx_tp_ops,
548#endif
529 .port_ops = &au1xxx_port_ops, 549 .port_ops = &au1xxx_port_ops,
530#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 550#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
531 .dma_ops = &au1xxx_dma_ops, 551 .dma_ops = &au1xxx_dma_ops,
@@ -543,11 +563,10 @@ static int au_ide_probe(struct device *dev)
543{ 563{
544 struct platform_device *pdev = to_platform_device(dev); 564 struct platform_device *pdev = to_platform_device(dev);
545 _auide_hwif *ahwif = &auide_hwif; 565 _auide_hwif *ahwif = &auide_hwif;
546 ide_hwif_t *hwif;
547 struct resource *res; 566 struct resource *res;
567 struct ide_host *host;
548 int ret = 0; 568 int ret = 0;
549 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 569 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
550 hw_regs_t hw;
551 570
552#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) 571#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
553 char *mode = "MWDMA2"; 572 char *mode = "MWDMA2";
@@ -584,36 +603,19 @@ static int au_ide_probe(struct device *dev)
584 goto out; 603 goto out;
585 } 604 }
586 605
587 hwif = ide_find_port();
588 if (hwif == NULL) {
589 ret = -ENOENT;
590 goto out;
591 }
592
593 memset(&hw, 0, sizeof(hw)); 606 memset(&hw, 0, sizeof(hw));
594 auide_setup_ports(&hw, ahwif); 607 auide_setup_ports(&hw, ahwif);
595 hw.irq = ahwif->irq; 608 hw.irq = ahwif->irq;
596 hw.dev = dev; 609 hw.dev = dev;
597 hw.chipset = ide_au1xxx; 610 hw.chipset = ide_au1xxx;
598 611
599 ide_init_port_hw(hwif, &hw); 612 ret = ide_host_add(&au1xxx_port_info, hws, &host);
600 613 if (ret)
601 /* If the user has selected DDMA assisted copies, 614 goto out;
602 then set up a few local I/O function entry points
603 */
604
605#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
606 hwif->input_data = au1xxx_input_data;
607 hwif->output_data = au1xxx_output_data;
608#endif
609
610 auide_hwif.hwif = hwif;
611
612 idx[0] = hwif->index;
613 615
614 ide_device_add(idx, &au1xxx_port_info); 616 auide_hwif.hwif = host->ports[0];
615 617
616 dev_set_drvdata(dev, hwif); 618 dev_set_drvdata(dev, host);
617 619
618 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); 620 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
619 621
@@ -625,10 +627,10 @@ static int au_ide_remove(struct device *dev)
625{ 627{
626 struct platform_device *pdev = to_platform_device(dev); 628 struct platform_device *pdev = to_platform_device(dev);
627 struct resource *res; 629 struct resource *res;
628 ide_hwif_t *hwif = dev_get_drvdata(dev); 630 struct ide_host *host = dev_get_drvdata(dev);
629 _auide_hwif *ahwif = &auide_hwif; 631 _auide_hwif *ahwif = &auide_hwif;
630 632
631 ide_unregister(hwif); 633 ide_host_remove(host);
632 634
633 iounmap((void *)ahwif->regbase); 635 iounmap((void *)ahwif->regbase);
634 636
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 9f1212cc4aed..badf79fc9e3a 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -72,12 +72,11 @@ static const struct ide_port_info swarm_port_info = {
72 */ 72 */
73static int __devinit swarm_ide_probe(struct device *dev) 73static int __devinit swarm_ide_probe(struct device *dev)
74{ 74{
75 ide_hwif_t *hwif;
76 u8 __iomem *base; 75 u8 __iomem *base;
76 struct ide_host *host;
77 phys_t offset, size; 77 phys_t offset, size;
78 hw_regs_t hw; 78 int i, rc;
79 int i; 79 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
80 u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
81 80
82 if (!SIBYTE_HAVE_IDE) 81 if (!SIBYTE_HAVE_IDE)
83 return -ENODEV; 82 return -ENODEV;
@@ -116,26 +115,17 @@ static int __devinit swarm_ide_probe(struct device *dev)
116 hw.irq = K_INT_GB_IDE; 115 hw.irq = K_INT_GB_IDE;
117 hw.chipset = ide_generic; 116 hw.chipset = ide_generic;
118 117
119 hwif = ide_find_port_slot(&swarm_port_info); 118 rc = ide_host_add(&swarm_port_info, hws, &host);
120 if (hwif == NULL) 119 if (rc)
121 goto err; 120 goto err;
122 121
123 ide_init_port_hw(hwif, &hw); 122 dev_set_drvdata(dev, host);
124
125 /* Setup MMIO ops. */
126 default_hwif_mmiops(hwif);
127
128 idx[0] = hwif->index;
129
130 ide_device_add(idx, &swarm_port_info);
131
132 dev_set_drvdata(dev, hwif);
133 123
134 return 0; 124 return 0;
135err: 125err:
136 release_resource(&swarm_ide_resource); 126 release_resource(&swarm_ide_resource);
137 iounmap(base); 127 iounmap(base);
138 return -ENOMEM; 128 return rc;
139} 129}
140 130
141static struct device_driver swarm_ide_driver = { 131static struct device_driver swarm_ide_driver = {
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index ae7a4329a581..fbc43e121e6b 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -195,7 +195,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
195 .host_flags = IDE_HFLAG_SERIALIZE | 195 .host_flags = IDE_HFLAG_SERIALIZE |
196 IDE_HFLAG_NO_ATAPI_DMA | 196 IDE_HFLAG_NO_ATAPI_DMA |
197 IDE_HFLAG_NO_DSC | 197 IDE_HFLAG_NO_DSC |
198 IDE_HFLAG_ABUSE_SET_DMA_MODE |
199 IDE_HFLAG_OFF_BOARD, 198 IDE_HFLAG_OFF_BOARD,
200 .pio_mask = ATA_PIO4, 199 .pio_mask = ATA_PIO4,
201 .mwdma_mask = ATA_MWDMA2, 200 .mwdma_mask = ATA_MWDMA2,
@@ -205,7 +204,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
205 .init_chipset = init_chipset_aec62xx, 204 .init_chipset = init_chipset_aec62xx,
206 .port_ops = &atp86x_port_ops, 205 .port_ops = &atp86x_port_ops,
207 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA | 206 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
208 IDE_HFLAG_ABUSE_SET_DMA_MODE |
209 IDE_HFLAG_OFF_BOARD, 207 IDE_HFLAG_OFF_BOARD,
210 .pio_mask = ATA_PIO4, 208 .pio_mask = ATA_PIO4,
211 .mwdma_mask = ATA_MWDMA2, 209 .mwdma_mask = ATA_MWDMA2,
@@ -216,7 +214,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
216 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 214 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
217 .port_ops = &atp86x_port_ops, 215 .port_ops = &atp86x_port_ops,
218 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 216 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
219 IDE_HFLAG_ABUSE_SET_DMA_MODE |
220 IDE_HFLAG_NON_BOOTABLE, 217 IDE_HFLAG_NON_BOOTABLE,
221 .pio_mask = ATA_PIO4, 218 .pio_mask = ATA_PIO4,
222 .mwdma_mask = ATA_MWDMA2, 219 .mwdma_mask = ATA_MWDMA2,
@@ -226,7 +223,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
226 .init_chipset = init_chipset_aec62xx, 223 .init_chipset = init_chipset_aec62xx,
227 .port_ops = &atp86x_port_ops, 224 .port_ops = &atp86x_port_ops,
228 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 225 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
229 IDE_HFLAG_ABUSE_SET_DMA_MODE |
230 IDE_HFLAG_OFF_BOARD, 226 IDE_HFLAG_OFF_BOARD,
231 .pio_mask = ATA_PIO4, 227 .pio_mask = ATA_PIO4,
232 .mwdma_mask = ATA_MWDMA2, 228 .mwdma_mask = ATA_MWDMA2,
@@ -237,7 +233,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
237 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 233 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
238 .port_ops = &atp86x_port_ops, 234 .port_ops = &atp86x_port_ops,
239 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 235 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
240 IDE_HFLAG_ABUSE_SET_DMA_MODE |
241 IDE_HFLAG_OFF_BOARD, 236 IDE_HFLAG_OFF_BOARD,
242 .pio_mask = ATA_PIO4, 237 .pio_mask = ATA_PIO4,
243 .mwdma_mask = ATA_MWDMA2, 238 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 80d19c0eb780..5ef7817ac64f 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -471,7 +471,15 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
471 struct pci_dev *dev = to_pci_dev(hwif->dev); 471 struct pci_dev *dev = to_pci_dev(hwif->dev);
472 unsigned long base = ide_pci_dma_base(hwif, d); 472 unsigned long base = ide_pci_dma_base(hwif, d);
473 473
474 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 474 if (base == 0)
475 return -1;
476
477 hwif->dma_base = base;
478
479 if (ide_pci_check_simplex(hwif, d) < 0)
480 return -1;
481
482 if (ide_pci_set_master(dev, d->name) < 0)
475 return -1; 483 return -1;
476 484
477 if (!hwif->channel) 485 if (!hwif->channel)
@@ -483,7 +491,7 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
483 if (ide_allocate_dma_engine(hwif)) 491 if (ide_allocate_dma_engine(hwif))
484 return -1; 492 return -1;
485 493
486 ide_setup_dma(hwif, base); 494 hwif->dma_ops = &sff_dma_ops;
487 495
488 return 0; 496 return 0;
489} 497}
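
With ide_setup_dma() gone, chipsets that keep a custom init_dma (alim15x3 here, hpt366 further down) now do the bookkeeping themselves: record dma_base, run the simplex check, enable bus mastering, allocate the DMA engine and fall back to the generic sff_dma_ops. The sequence in isolation, with my_init_dma as a placeholder name and the chipset-specific register pokes elided:

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ide.h>

static int __devinit my_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        unsigned long base = ide_pci_dma_base(hwif, d);

        if (base == 0)
                return -1;

        hwif->dma_base = base;

        if (ide_pci_check_simplex(hwif, d) < 0)
                return -1;

        if (ide_pci_set_master(dev, d->name) < 0)
                return -1;

        /* chipset-specific setup (timing registers, etc.) would go here */

        if (ide_allocate_dma_engine(hwif))
                return -1;

        hwif->dma_ops = &sff_dma_ops;

        return 0;
}
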
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 0bfcdd0e77b3..ef7d971031ee 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -218,7 +218,6 @@ static const struct ide_port_ops amd_port_ops = {
218 218
219#define IDE_HFLAGS_AMD \ 219#define IDE_HFLAGS_AMD \
220 (IDE_HFLAG_PIO_NO_BLACKLIST | \ 220 (IDE_HFLAG_PIO_NO_BLACKLIST | \
221 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
222 IDE_HFLAG_POST_SET_MODE | \ 221 IDE_HFLAG_POST_SET_MODE | \
223 IDE_HFLAG_IO_32BIT | \ 222 IDE_HFLAG_IO_32BIT | \
224 IDE_HFLAG_UNMASK_IRQS) 223 IDE_HFLAG_UNMASK_IRQS)
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index 1ad1e23e3105..e6c62006ca1a 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -181,11 +181,6 @@ static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
181static DEFINE_SPINLOCK(cmd640_lock); 181static DEFINE_SPINLOCK(cmd640_lock);
182 182
183/* 183/*
184 * These are initialized to point at the devices we control
185 */
186static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
187
188/*
189 * Interface to access cmd640x registers 184 * Interface to access cmd640x registers
190 */ 185 */
191static unsigned int cmd640_key; 186static unsigned int cmd640_key;
@@ -717,8 +712,7 @@ static int __init cmd640x_init(void)
717 int second_port_cmd640 = 0, rc; 712 int second_port_cmd640 = 0, rc;
718 const char *bus_type, *port2; 713 const char *bus_type, *port2;
719 u8 b, cfr; 714 u8 b, cfr;
720 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 715 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
721 hw_regs_t hw[2];
722 716
723 if (cmd640_vlb && probe_for_cmd640_vlb()) { 717 if (cmd640_vlb && probe_for_cmd640_vlb()) {
724 bus_type = "VLB"; 718 bus_type = "VLB";
@@ -781,15 +775,10 @@ static int __init cmd640x_init(void)
781 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" 775 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
782 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); 776 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
783 777
784 cmd_hwif0 = ide_find_port();
785
786 /* 778 /*
787 * Initialize data for primary port 779 * Initialize data for primary port
788 */ 780 */
789 if (cmd_hwif0) { 781 hws[0] = &hw[0];
790 ide_init_port_hw(cmd_hwif0, &hw[0]);
791 idx[0] = cmd_hwif0->index;
792 }
793 782
794 /* 783 /*
795 * Ensure compatibility by always using the slowest timings 784 * Ensure compatibility by always using the slowest timings
@@ -829,13 +818,9 @@ static int __init cmd640x_init(void)
829 /* 818 /*
830 * Initialize data for secondary cmd640 port, if enabled 819 * Initialize data for secondary cmd640 port, if enabled
831 */ 820 */
832 if (second_port_cmd640) { 821 if (second_port_cmd640)
833 cmd_hwif1 = ide_find_port(); 822 hws[1] = &hw[1];
834 if (cmd_hwif1) { 823
835 ide_init_port_hw(cmd_hwif1, &hw[1]);
836 idx[1] = cmd_hwif1->index;
837 }
838 }
839 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", 824 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
840 second_port_cmd640 ? "" : "not ", port2); 825 second_port_cmd640 ? "" : "not ", port2);
841 826
@@ -843,9 +828,7 @@ static int __init cmd640x_init(void)
843 cmd640_dump_regs(); 828 cmd640_dump_regs();
844#endif 829#endif
845 830
846 ide_device_add(idx, &cmd640_port_info); 831 return ide_host_add(&cmd640_port_info, hws, NULL);
847
848 return 1;
849} 832}
850 833
851module_param_named(probe_vlb, cmd640_vlb, bool, 0); 834module_param_named(probe_vlb, cmd640_vlb, bool, 0);
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index cfa784bacf48..ce58bfcdb3c6 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -262,7 +262,7 @@ static int cmd648_dma_test_irq(ide_drive_t *drive)
262 unsigned long base = hwif->dma_base - (hwif->channel * 8); 262 unsigned long base = hwif->dma_base - (hwif->channel * 8);
263 u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 : 263 u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
264 MRDMODE_INTR_CH0; 264 MRDMODE_INTR_CH0;
265 u8 dma_stat = inb(hwif->dma_status); 265 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
266 u8 mrdmode = inb(base + 1); 266 u8 mrdmode = inb(base + 1);
267 267
268#ifdef DEBUG 268#ifdef DEBUG
@@ -286,7 +286,7 @@ static int cmd64x_dma_test_irq(ide_drive_t *drive)
286 int irq_reg = hwif->channel ? ARTTIM23 : CFR; 286 int irq_reg = hwif->channel ? ARTTIM23 : CFR;
287 u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 : 287 u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
288 CFR_INTR_CH0; 288 CFR_INTR_CH0;
289 u8 dma_stat = inb(hwif->dma_status); 289 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
290 u8 irq_stat = 0; 290 u8 irq_stat = 0;
291 291
292 (void) pci_read_config_byte(dev, irq_reg, &irq_stat); 292 (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
@@ -317,13 +317,13 @@ static int cmd646_1_dma_end(ide_drive_t *drive)
317 317
318 drive->waiting_for_dma = 0; 318 drive->waiting_for_dma = 0;
319 /* get DMA status */ 319 /* get DMA status */
320 dma_stat = inb(hwif->dma_status); 320 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
321 /* read DMA command state */ 321 /* read DMA command state */
322 dma_cmd = inb(hwif->dma_command); 322 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
323 /* stop DMA */ 323 /* stop DMA */
324 outb(dma_cmd & ~1, hwif->dma_command); 324 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
325 /* clear the INTR & ERROR bits */ 325 /* clear the INTR & ERROR bits */
326 outb(dma_stat | 6, hwif->dma_status); 326 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
327 /* and free any DMA resources */ 327 /* and free any DMA resources */
328 ide_destroy_dmatable(drive); 328 ide_destroy_dmatable(drive);
329 /* verify good DMA status */ 329 /* verify good DMA status */
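
Throughout this diff the cached hwif->dma_status / hwif->dma_command fields give way to addresses computed from hwif->dma_base plus the ATA_DMA_CMD / ATA_DMA_STATUS offsets. A compact sketch of a dma_end() hook written that way (my_dma_end is a placeholder; the return convention mirrors __ide_dma_end()):

#include <linux/io.h>
#include <linux/ide.h>

static int my_dma_end(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 dma_stat, dma_cmd;

        drive->waiting_for_dma = 0;

        dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);  /* was hwif->dma_status */
        dma_cmd  = inb(hwif->dma_base + ATA_DMA_CMD);     /* was hwif->dma_command */

        /* stop the engine, then clear the INTR and ERROR bits */
        outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
        outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);

        ide_destroy_dmatable(drive);

        /* good status shows only the INTR bit (4); anything else is an error */
        return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}
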
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 992b1cf8db69..b03d8ae947e6 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -62,8 +62,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
62 struct pci_dev *pdev = to_pci_dev(hwif->dev); 62 struct pci_dev *pdev = to_pci_dev(hwif->dev);
63 int controller = drive->dn > 1 ? 1 : 0; 63 int controller = drive->dn > 1 ? 1 : 0;
64 64
65 /* FIXME: if DMA = 1 do we need to set the DMA bit here ? */
66
67 /* 8bit CAT/CRT - 8bit command timing for channel */ 65 /* 8bit CAT/CRT - 8bit command timing for channel */
68 pci_write_config_byte(pdev, 0x62 + controller, 66 pci_write_config_byte(pdev, 0x62 + controller,
69 (cs5520_pio_clocks[pio].recovery << 4) | 67 (cs5520_pio_clocks[pio].recovery << 4) |
@@ -89,46 +87,17 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
89 cs5520_set_pio_mode(drive, 0); 87 cs5520_set_pio_mode(drive, 0);
90} 88}
91 89
92/*
93 * We wrap the DMA activate to set the vdma flag. This is needed
94 * so that the IDE DMA layer issues PIO not DMA commands over the
95 * DMA channel
96 *
97 * ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA
98 */
99
100static void cs5520_dma_host_set(ide_drive_t *drive, int on)
101{
102 drive->vdma = on;
103 ide_dma_host_set(drive, on);
104}
105
106static const struct ide_port_ops cs5520_port_ops = { 90static const struct ide_port_ops cs5520_port_ops = {
107 .set_pio_mode = cs5520_set_pio_mode, 91 .set_pio_mode = cs5520_set_pio_mode,
108 .set_dma_mode = cs5520_set_dma_mode, 92 .set_dma_mode = cs5520_set_dma_mode,
109}; 93};
110 94
111static const struct ide_dma_ops cs5520_dma_ops = {
112 .dma_host_set = cs5520_dma_host_set,
113 .dma_setup = ide_dma_setup,
114 .dma_exec_cmd = ide_dma_exec_cmd,
115 .dma_start = ide_dma_start,
116 .dma_end = __ide_dma_end,
117 .dma_test_irq = ide_dma_test_irq,
118 .dma_lost_irq = ide_dma_lost_irq,
119 .dma_timeout = ide_dma_timeout,
120};
121
122/* FIXME: VDMA is disabled because it caused system hangs */
123#define DECLARE_CS_DEV(name_str) \ 95#define DECLARE_CS_DEV(name_str) \
124 { \ 96 { \
125 .name = name_str, \ 97 .name = name_str, \
126 .port_ops = &cs5520_port_ops, \ 98 .port_ops = &cs5520_port_ops, \
127 .dma_ops = &cs5520_dma_ops, \
128 .host_flags = IDE_HFLAG_ISA_PORTS | \ 99 .host_flags = IDE_HFLAG_ISA_PORTS | \
129 IDE_HFLAG_CS5520 | \ 100 IDE_HFLAG_CS5520, \
130 IDE_HFLAG_NO_ATAPI_DMA | \
131 IDE_HFLAG_ABUSE_SET_DMA_MODE, \
132 .pio_mask = ATA_PIO4, \ 101 .pio_mask = ATA_PIO4, \
133 } 102 }
134 103
@@ -146,7 +115,7 @@ static const struct ide_port_info cyrix_chipsets[] __devinitdata = {
146static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) 115static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
147{ 116{
148 const struct ide_port_info *d = &cyrix_chipsets[id->driver_data]; 117 const struct ide_port_info *d = &cyrix_chipsets[id->driver_data];
149 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 118 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
150 119
151 ide_setup_pci_noise(dev, d); 120 ide_setup_pci_noise(dev, d);
152 121
@@ -168,11 +137,9 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
168 * do all the device setup for us 137 * do all the device setup for us
169 */ 138 */
170 139
171 ide_pci_setup_ports(dev, d, 14, &idx[0]); 140 ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);
172
173 ide_device_add(idx, d);
174 141
175 return 0; 142 return ide_host_add(d, hws, NULL);
176} 143}
177 144
178static const struct pci_device_id cs5520_pci_tbl[] = { 145static const struct pci_device_id cs5520_pci_tbl[] = {
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index dc97c48623f3..5404fe4f701d 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -171,8 +171,7 @@ static const struct ide_port_ops cs5535_port_ops = {
171static const struct ide_port_info cs5535_chipset __devinitdata = { 171static const struct ide_port_info cs5535_chipset __devinitdata = {
172 .name = "CS5535", 172 .name = "CS5535",
173 .port_ops = &cs5535_port_ops, 173 .port_ops = &cs5535_port_ops,
174 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE | 174 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
175 IDE_HFLAG_ABUSE_SET_DMA_MODE,
176 .pio_mask = ATA_PIO4, 175 .pio_mask = ATA_PIO4,
177 .mwdma_mask = ATA_MWDMA2, 176 .mwdma_mask = ATA_MWDMA2,
178 .udma_mask = ATA_UDMA4, 177 .udma_mask = ATA_UDMA4,
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index 0106e2a2df77..f84bfb4f600f 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -56,11 +56,10 @@ static const struct ide_port_info delkin_cb_port_info = {
56static int __devinit 56static int __devinit
57delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) 57delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
58{ 58{
59 struct ide_host *host;
59 unsigned long base; 60 unsigned long base;
60 hw_regs_t hw;
61 ide_hwif_t *hwif = NULL;
62 int i, rc; 61 int i, rc;
63 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 62 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
64 63
65 rc = pci_enable_device(dev); 64 rc = pci_enable_device(dev);
66 if (rc) { 65 if (rc) {
@@ -87,34 +86,26 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
87 hw.dev = &dev->dev; 86 hw.dev = &dev->dev;
88 hw.chipset = ide_pci; /* this enables IRQ sharing */ 87 hw.chipset = ide_pci; /* this enables IRQ sharing */
89 88
90 hwif = ide_find_port(); 89 rc = ide_host_add(&delkin_cb_port_info, hws, &host);
91 if (hwif == NULL) 90 if (rc)
92 goto out_disable; 91 goto out_disable;
93 92
94 i = hwif->index; 93 pci_set_drvdata(dev, host);
95
96 ide_init_port_hw(hwif, &hw);
97
98 idx[0] = i;
99
100 ide_device_add(idx, &delkin_cb_port_info);
101
102 pci_set_drvdata(dev, hwif);
103 94
104 return 0; 95 return 0;
105 96
106out_disable: 97out_disable:
107 pci_release_regions(dev); 98 pci_release_regions(dev);
108 pci_disable_device(dev); 99 pci_disable_device(dev);
109 return -ENODEV; 100 return rc;
110} 101}
111 102
112static void 103static void
113delkin_cb_remove (struct pci_dev *dev) 104delkin_cb_remove (struct pci_dev *dev)
114{ 105{
115 ide_hwif_t *hwif = pci_get_drvdata(dev); 106 struct ide_host *host = pci_get_drvdata(dev);
116 107
117 ide_unregister(hwif); 108 ide_host_remove(host);
118 109
119 pci_release_regions(dev); 110 pci_release_regions(dev);
120 pci_disable_device(dev); 111 pci_disable_device(dev);
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 84c36c117194..9e1d1c4741da 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -123,7 +123,6 @@ static const struct ide_port_ops hpt34x_port_ops = {
123#define IDE_HFLAGS_HPT34X \ 123#define IDE_HFLAGS_HPT34X \
124 (IDE_HFLAG_NO_ATAPI_DMA | \ 124 (IDE_HFLAG_NO_ATAPI_DMA | \
125 IDE_HFLAG_NO_DSC | \ 125 IDE_HFLAG_NO_DSC | \
126 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
127 IDE_HFLAG_NO_AUTODMA) 126 IDE_HFLAG_NO_AUTODMA)
128 127
129static const struct ide_port_info hpt34x_chipsets[] __devinitdata = { 128static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 397c6cbe953c..1f1135ce7cd6 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -801,9 +801,9 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
801 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff); 801 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
802 802
803 /* get DMA command mode */ 803 /* get DMA command mode */
804 dma_cmd = inb(hwif->dma_command); 804 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
805 /* stop DMA */ 805 /* stop DMA */
806 outb(dma_cmd & ~0x1, hwif->dma_command); 806 outb(dma_cmd & ~0x1, hwif->dma_base + ATA_DMA_CMD);
807 hpt370_clear_engine(drive); 807 hpt370_clear_engine(drive);
808} 808}
809 809
@@ -818,12 +818,12 @@ static void hpt370_dma_start(ide_drive_t *drive)
818static int hpt370_dma_end(ide_drive_t *drive) 818static int hpt370_dma_end(ide_drive_t *drive)
819{ 819{
820 ide_hwif_t *hwif = HWIF(drive); 820 ide_hwif_t *hwif = HWIF(drive);
821 u8 dma_stat = inb(hwif->dma_status); 821 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
822 822
823 if (dma_stat & 0x01) { 823 if (dma_stat & 0x01) {
824 /* wait a little */ 824 /* wait a little */
825 udelay(20); 825 udelay(20);
826 dma_stat = inb(hwif->dma_status); 826 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
827 if (dma_stat & 0x01) 827 if (dma_stat & 0x01)
828 hpt370_irq_timeout(drive); 828 hpt370_irq_timeout(drive);
829 } 829 }
@@ -850,7 +850,7 @@ static int hpt374_dma_test_irq(ide_drive_t *drive)
850 return 0; 850 return 0;
851 } 851 }
852 852
853 dma_stat = inb(hwif->dma_status); 853 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
854 /* return 1 if INTR asserted */ 854 /* return 1 if INTR asserted */
855 if (dma_stat & 4) 855 if (dma_stat & 4)
856 return 1; 856 return 1;
@@ -1320,7 +1320,15 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
1320 unsigned long flags, base = ide_pci_dma_base(hwif, d); 1320 unsigned long flags, base = ide_pci_dma_base(hwif, d);
1321 u8 dma_old, dma_new, masterdma = 0, slavedma = 0; 1321 u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
1322 1322
1323 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 1323 if (base == 0)
1324 return -1;
1325
1326 hwif->dma_base = base;
1327
1328 if (ide_pci_check_simplex(hwif, d) < 0)
1329 return -1;
1330
1331 if (ide_pci_set_master(dev, d->name) < 0)
1324 return -1; 1332 return -1;
1325 1333
1326 dma_old = inb(base + 2); 1334 dma_old = inb(base + 2);
@@ -1346,7 +1354,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
1346 if (ide_allocate_dma_engine(hwif)) 1354 if (ide_allocate_dma_engine(hwif))
1347 return -1; 1355 return -1;
1348 1356
1349 ide_setup_dma(hwif, base); 1357 hwif->dma_ops = &sff_dma_ops;
1350 1358
1351 return 0; 1359 return 0;
1352} 1360}
@@ -1401,7 +1409,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
1401 1409
1402#define IDE_HFLAGS_HPT3XX \ 1410#define IDE_HFLAGS_HPT3XX \
1403 (IDE_HFLAG_NO_ATAPI_DMA | \ 1411 (IDE_HFLAG_NO_ATAPI_DMA | \
1404 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
1405 IDE_HFLAG_OFF_BOARD) 1412 IDE_HFLAG_OFF_BOARD)
1406 1413
1407static const struct ide_port_ops hpt3xx_port_ops = { 1414static const struct ide_port_ops hpt3xx_port_ops = {
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index 45ba71a7182f..5cd2b32ff0ef 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -28,10 +28,6 @@
28 */ 28 */
29#include <asm/superio.h> 29#include <asm/superio.h>
30 30
31static unsigned long superio_ide_status[2];
32static unsigned long superio_ide_select[2];
33static unsigned long superio_ide_dma_status[2];
34
35#define SUPERIO_IDE_MAX_RETRIES 25 31#define SUPERIO_IDE_MAX_RETRIES 25
36 32
37/* Because of a defect in Super I/O, all reads of the PCI DMA status 33/* Because of a defect in Super I/O, all reads of the PCI DMA status
@@ -40,27 +36,28 @@ static unsigned long superio_ide_dma_status[2];
40 */ 36 */
41static u8 superio_ide_inb (unsigned long port) 37static u8 superio_ide_inb (unsigned long port)
42{ 38{
43 if (port == superio_ide_status[0] || 39 u8 tmp;
44 port == superio_ide_status[1] || 40 int retries = SUPERIO_IDE_MAX_RETRIES;
45 port == superio_ide_select[0] ||
46 port == superio_ide_select[1] ||
47 port == superio_ide_dma_status[0] ||
48 port == superio_ide_dma_status[1]) {
49 u8 tmp;
50 int retries = SUPERIO_IDE_MAX_RETRIES;
51 41
52 /* printk(" [ reading port 0x%x with retry ] ", port); */ 42 /* printk(" [ reading port 0x%x with retry ] ", port); */
53 43
54 do { 44 do {
55 tmp = inb(port); 45 tmp = inb(port);
56 if (tmp == 0) 46 if (tmp == 0)
57 udelay(50); 47 udelay(50);
58 } while (tmp == 0 && retries-- > 0); 48 } while (tmp == 0 && retries-- > 0);
59 49
60 return tmp; 50 return tmp;
61 } 51}
62 52
63 return inb(port); 53static u8 superio_read_status(ide_hwif_t *hwif)
54{
55 return superio_ide_inb(hwif->io_ports.status_addr);
56}
57
58static u8 superio_read_sff_dma_status(ide_hwif_t *hwif)
59{
60 return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS);
64} 61}
65 62
66static void superio_tf_read(ide_drive_t *drive, ide_task_t *task) 63static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
@@ -78,6 +75,8 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
78 /* be sure we're looking at the low order bits */ 75 /* be sure we're looking at the low order bits */
79 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 76 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
80 77
78 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
79 tf->feature = inb(io_ports->feature_addr);
81 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 80 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
82 tf->nsect = inb(io_ports->nsect_addr); 81 tf->nsect = inb(io_ports->nsect_addr);
83 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 82 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -105,36 +104,32 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
105 } 104 }
106} 105}
107 106
108static void __devinit superio_ide_init_iops (struct hwif_s *hwif) 107static const struct ide_tp_ops superio_tp_ops = {
109{ 108 .exec_command = ide_exec_command,
110 struct pci_dev *pdev = to_pci_dev(hwif->dev); 109 .read_status = superio_read_status,
111 u32 base, dmabase; 110 .read_altstatus = ide_read_altstatus,
112 u8 port = hwif->channel, tmp; 111 .read_sff_dma_status = superio_read_sff_dma_status,
113 112
114 base = pci_resource_start(pdev, port * 2) & ~3; 113 .set_irq = ide_set_irq,
115 dmabase = pci_resource_start(pdev, 4) & ~3;
116
117 superio_ide_status[port] = base + 7;
118 superio_ide_select[port] = base + 6;
119 superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
120
121 /* Clear error/interrupt, enable dma */
122 tmp = superio_ide_inb(superio_ide_dma_status[port]);
123 outb(tmp | 0x66, superio_ide_dma_status[port]);
124 114
125 hwif->tf_read = superio_tf_read; 115 .tf_load = ide_tf_load,
116 .tf_read = superio_tf_read,
126 117
127 /* We need to override inb to workaround a SuperIO errata */ 118 .input_data = ide_input_data,
128 hwif->INB = superio_ide_inb; 119 .output_data = ide_output_data,
129} 120};
130 121
131static void __devinit init_iops_ns87415(ide_hwif_t *hwif) 122static void __devinit superio_init_iops(struct hwif_s *hwif)
132{ 123{
133 struct pci_dev *dev = to_pci_dev(hwif->dev); 124 struct pci_dev *pdev = to_pci_dev(hwif->dev);
125 u32 dma_stat;
126 u8 port = hwif->channel, tmp;
134 127
135 if (PCI_SLOT(dev->devfn) == 0xE) 128 dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa);
136 /* Built-in - assume it's under superio. */ 129
137 superio_ide_init_iops(hwif); 130 /* Clear error/interrupt, enable dma */
131 tmp = superio_ide_inb(dma_stat);
132 outb(tmp | 0x66, dma_stat);
138} 133}
139#endif 134#endif
140 135
@@ -200,14 +195,14 @@ static int ns87415_dma_end(ide_drive_t *drive)
200 u8 dma_stat = 0, dma_cmd = 0; 195 u8 dma_stat = 0, dma_cmd = 0;
201 196
202 drive->waiting_for_dma = 0; 197 drive->waiting_for_dma = 0;
203 dma_stat = hwif->INB(hwif->dma_status); 198 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
204 /* get dma command mode */ 199 /* get DMA command mode */
205 dma_cmd = hwif->INB(hwif->dma_command); 200 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
206 /* stop DMA */ 201 /* stop DMA */
207 outb(dma_cmd & ~1, hwif->dma_command); 202 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
208 /* from ERRATA: clear the INTR & ERROR bits */ 203 /* from ERRATA: clear the INTR & ERROR bits */
209 dma_cmd = hwif->INB(hwif->dma_command); 204 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
210 outb(dma_cmd | 6, hwif->dma_command); 205 outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD);
211 /* and free any DMA resources */ 206 /* and free any DMA resources */
212 ide_destroy_dmatable(drive); 207 ide_destroy_dmatable(drive);
213 /* verify good DMA status */ 208 /* verify good DMA status */
@@ -276,7 +271,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
276 outb(8, hwif->io_ports.ctl_addr); 271 outb(8, hwif->io_ports.ctl_addr);
277 do { 272 do {
278 udelay(50); 273 udelay(50);
279 stat = hwif->INB(hwif->io_ports.status_addr); 274 stat = hwif->tp_ops->read_status(hwif);
280 if (stat == 0xff) 275 if (stat == 0xff)
281 break; 276 break;
282 } while ((stat & BUSY_STAT) && --timeout); 277 } while ((stat & BUSY_STAT) && --timeout);
@@ -291,7 +286,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
291 if (!hwif->dma_base) 286 if (!hwif->dma_base)
292 return; 287 return;
293 288
294 outb(0x60, hwif->dma_status); 289 outb(0x60, hwif->dma_base + ATA_DMA_STATUS);
295} 290}
296 291
297static const struct ide_port_ops ns87415_port_ops = { 292static const struct ide_port_ops ns87415_port_ops = {
@@ -311,9 +306,6 @@ static const struct ide_dma_ops ns87415_dma_ops = {
311 306
312static const struct ide_port_info ns87415_chipset __devinitdata = { 307static const struct ide_port_info ns87415_chipset __devinitdata = {
313 .name = "NS87415", 308 .name = "NS87415",
314#ifdef CONFIG_SUPERIO
315 .init_iops = init_iops_ns87415,
316#endif
317 .init_hwif = init_hwif_ns87415, 309 .init_hwif = init_hwif_ns87415,
318 .port_ops = &ns87415_port_ops, 310 .port_ops = &ns87415_port_ops,
319 .dma_ops = &ns87415_dma_ops, 311 .dma_ops = &ns87415_dma_ops,
@@ -323,7 +315,16 @@ static const struct ide_port_info ns87415_chipset __devinitdata = {
323 315
324static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id) 316static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
325{ 317{
326 return ide_setup_pci_device(dev, &ns87415_chipset); 318 struct ide_port_info d = ns87415_chipset;
319
320#ifdef CONFIG_SUPERIO
321 if (PCI_SLOT(dev->devfn) == 0xE) {
322 /* Built-in - assume it's under superio. */
323 d.init_iops = superio_init_iops;
324 d.tp_ops = &superio_tp_ops;
325 }
326#endif
327 return ide_setup_pci_device(dev, &d);
327} 328}
328 329
329static const struct pci_device_id ns87415_pci_tbl[] = { 330static const struct pci_device_id ns87415_pci_tbl[] = {
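
The NS87415 hunks above show the shape of the tp_ops conversion applied throughout this series: the per-hwif hooks (hwif->INB, hwif->tf_read, ...) that used to be assigned in init_iops are replaced by a const struct ide_tp_ops table, and the probe routine selects the table by copying the port info. Below is a minimal sketch of that pattern; the ide_* helpers and the struct fields are the ones visible in the hunks above, while the mydrv_* names and the quirk condition are placeholders, not part of the patch.

#include <linux/ide.h>
#include <linux/pci.h>

/* Driver-specific status read; a real driver would put its workaround here. */
static u8 mydrv_read_status(ide_hwif_t *hwif)
{
	return inb(hwif->io_ports.status_addr);
}

static const struct ide_tp_ops mydrv_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= mydrv_read_status,	/* overridden hook */
	.read_altstatus		= ide_read_altstatus,
	.read_sff_dma_status	= ide_read_sff_dma_status,

	.set_irq		= ide_set_irq,

	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};

static const struct ide_port_info mydrv_chipset __devinitdata = {
	.name		= "mydrv",
	/* .init_hwif, .port_ops, ... as for any other host driver */
};

static int __devinit mydrv_init_one(struct pci_dev *dev,
				    const struct pci_device_id *id)
{
	struct ide_port_info d = mydrv_chipset;

	if (PCI_SLOT(dev->devfn) == 0xE)	/* placeholder condition */
		d.tp_ops = &mydrv_tp_ops;

	return ide_setup_pci_device(dev, &d);
}

Keeping the table const and choosing it per device is what lets init_iops_ns87415() disappear above: the decision moves from interface setup time to PCI probe time.
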
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index fca89eda5c02..e54dc653b8c4 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -206,7 +206,7 @@ static int pdc202xx_dma_test_irq(ide_drive_t *drive)
206{ 206{
207 ide_hwif_t *hwif = HWIF(drive); 207 ide_hwif_t *hwif = HWIF(drive);
208 unsigned long high_16 = hwif->extra_base - 16; 208 unsigned long high_16 = hwif->extra_base - 16;
209 u8 dma_stat = inb(hwif->dma_status); 209 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
210 u8 sc1d = inb(high_16 + 0x001d); 210 u8 sc1d = inb(high_16 + 0x001d);
211 211
212 if (hwif->channel) { 212 if (hwif->channel) {
@@ -312,7 +312,6 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
312 312
313#define IDE_HFLAGS_PDC202XX \ 313#define IDE_HFLAGS_PDC202XX \
314 (IDE_HFLAG_ERROR_STOPS_FIFO | \ 314 (IDE_HFLAG_ERROR_STOPS_FIFO | \
315 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
316 IDE_HFLAG_OFF_BOARD) 315 IDE_HFLAG_OFF_BOARD)
317 316
318static const struct ide_port_ops pdc20246_port_ops = { 317static const struct ide_port_ops pdc20246_port_ops = {
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index f04738d14a6f..0ce41b4dddaf 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -227,9 +227,9 @@ static void piix_dma_clear_irq(ide_drive_t *drive)
227 u8 dma_stat; 227 u8 dma_stat;
228 228
229 /* clear the INTR & ERROR bits */ 229 /* clear the INTR & ERROR bits */
230 dma_stat = inb(hwif->dma_status); 230 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
231 /* Should we force the bit as well ? */ 231 /* Should we force the bit as well ? */
232 outb(dma_stat, hwif->dma_status); 232 outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
233} 233}
234 234
235struct ich_laptop { 235struct ich_laptop {
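
The piix and ns87415 hunks also show the other substitution repeated across this series: the cached hwif->dma_command / hwif->dma_status ports are dropped and the SFF-8038i bus-master registers are addressed from hwif->dma_base with the ATA_DMA_CMD / ATA_DMA_STATUS offsets. A minimal sketch of the clear-interrupt idiom, assuming the standard SFF register layout used by the drivers above (mydrv_* is a placeholder name):

#include <linux/ide.h>
#include <linux/ata.h>		/* ATA_DMA_CMD, ATA_DMA_STATUS */

static void mydrv_dma_clear_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat;

	/* Read the bus-master status; bit 2 is INTR, bit 1 is ERROR. */
	dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
	/* Both bits are cleared by writing them back as 1. */
	outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}

MMIO controllers such as scc_pata do the same base-plus-offset arithmetic with their own accessors and register widths, as the scc hunks below show.
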
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 789c66dfbde5..94a7ab864236 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -65,7 +65,7 @@
65 65
66static struct scc_ports { 66static struct scc_ports {
67 unsigned long ctl, dma; 67 unsigned long ctl, dma;
68 ide_hwif_t *hwif; /* for removing port from system */ 68 struct ide_host *host; /* for removing port from system */
69} scc_ports[MAX_HWIFS]; 69} scc_ports[MAX_HWIFS];
70 70
71/* PIO transfer mode table */ 71/* PIO transfer mode table */
@@ -126,6 +126,46 @@ static u8 scc_ide_inb(unsigned long port)
126 return (u8)data; 126 return (u8)data;
127} 127}
128 128
129static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
130{
131 out_be32((void *)hwif->io_ports.command_addr, cmd);
132 eieio();
133 in_be32((void *)(hwif->dma_base + 0x01c));
134 eieio();
135}
136
137static u8 scc_read_status(ide_hwif_t *hwif)
138{
139 return (u8)in_be32((void *)hwif->io_ports.status_addr);
140}
141
142static u8 scc_read_altstatus(ide_hwif_t *hwif)
143{
144 return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
145}
146
147static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
148{
149 return (u8)in_be32((void *)(hwif->dma_base + 4));
150}
151
152static void scc_set_irq(ide_hwif_t *hwif, int on)
153{
154 u8 ctl = ATA_DEVCTL_OBS;
155
156 if (on == 4) { /* hack for SRST */
157 ctl |= 4;
158 on &= ~4;
159 }
160
161 ctl |= on ? 0 : 2;
162
163 out_be32((void *)hwif->io_ports.ctl_addr, ctl);
164 eieio();
165 in_be32((void *)(hwif->dma_base + 0x01c));
166 eieio();
167}
168
129static void scc_ide_insw(unsigned long port, void *addr, u32 count) 169static void scc_ide_insw(unsigned long port, void *addr, u32 count)
130{ 170{
131 u16 *ptr = (u16 *)addr; 171 u16 *ptr = (u16 *)addr;
@@ -148,14 +188,6 @@ static void scc_ide_outb(u8 addr, unsigned long port)
148 out_be32((void*)port, addr); 188 out_be32((void*)port, addr);
149} 189}
150 190
151static void scc_ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
152{
153 out_be32((void*)port, addr);
154 eieio();
155 in_be32((void*)(hwif->dma_base + 0x01c));
156 eieio();
157}
158
159static void 191static void
160scc_ide_outsw(unsigned long port, void *addr, u32 count) 192scc_ide_outsw(unsigned long port, void *addr, u32 count)
161{ 193{
@@ -261,14 +293,14 @@ static void scc_dma_host_set(ide_drive_t *drive, int on)
261{ 293{
262 ide_hwif_t *hwif = drive->hwif; 294 ide_hwif_t *hwif = drive->hwif;
263 u8 unit = (drive->select.b.unit & 0x01); 295 u8 unit = (drive->select.b.unit & 0x01);
264 u8 dma_stat = scc_ide_inb(hwif->dma_status); 296 u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);
265 297
266 if (on) 298 if (on)
267 dma_stat |= (1 << (5 + unit)); 299 dma_stat |= (1 << (5 + unit));
268 else 300 else
269 dma_stat &= ~(1 << (5 + unit)); 301 dma_stat &= ~(1 << (5 + unit));
270 302
271 scc_ide_outb(dma_stat, hwif->dma_status); 303 scc_ide_outb(dma_stat, hwif->dma_base + 4);
272} 304}
273 305
274/** 306/**
@@ -304,13 +336,13 @@ static int scc_dma_setup(ide_drive_t *drive)
304 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma); 336 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
305 337
306 /* specify r/w */ 338 /* specify r/w */
307 out_be32((void __iomem *)hwif->dma_command, reading); 339 out_be32((void __iomem *)hwif->dma_base, reading);
308 340
309 /* read dma_status for INTR & ERROR flags */ 341 /* read DMA status for INTR & ERROR flags */
310 dma_stat = in_be32((void __iomem *)hwif->dma_status); 342 dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));
311 343
312 /* clear INTR & ERROR flags */ 344 /* clear INTR & ERROR flags */
313 out_be32((void __iomem *)hwif->dma_status, dma_stat|6); 345 out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
314 drive->waiting_for_dma = 1; 346 drive->waiting_for_dma = 1;
315 return 0; 347 return 0;
316} 348}
@@ -318,10 +350,10 @@ static int scc_dma_setup(ide_drive_t *drive)
318static void scc_dma_start(ide_drive_t *drive) 350static void scc_dma_start(ide_drive_t *drive)
319{ 351{
320 ide_hwif_t *hwif = drive->hwif; 352 ide_hwif_t *hwif = drive->hwif;
321 u8 dma_cmd = scc_ide_inb(hwif->dma_command); 353 u8 dma_cmd = scc_ide_inb(hwif->dma_base);
322 354
323 /* start DMA */ 355 /* start DMA */
324 scc_ide_outb(dma_cmd | 1, hwif->dma_command); 356 scc_ide_outb(dma_cmd | 1, hwif->dma_base);
325 hwif->dma = 1; 357 hwif->dma = 1;
326 wmb(); 358 wmb();
327} 359}
@@ -333,13 +365,13 @@ static int __scc_dma_end(ide_drive_t *drive)
333 365
334 drive->waiting_for_dma = 0; 366 drive->waiting_for_dma = 0;
335 /* get DMA command mode */ 367 /* get DMA command mode */
336 dma_cmd = scc_ide_inb(hwif->dma_command); 368 dma_cmd = scc_ide_inb(hwif->dma_base);
337 /* stop DMA */ 369 /* stop DMA */
338 scc_ide_outb(dma_cmd & ~1, hwif->dma_command); 370 scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
339 /* get DMA status */ 371 /* get DMA status */
340 dma_stat = scc_ide_inb(hwif->dma_status); 372 dma_stat = scc_ide_inb(hwif->dma_base + 4);
341 /* clear the INTR & ERROR bits */ 373 /* clear the INTR & ERROR bits */
342 scc_ide_outb(dma_stat | 6, hwif->dma_status); 374 scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
343 /* purge DMA mappings */ 375 /* purge DMA mappings */
344 ide_destroy_dmatable(drive); 376 ide_destroy_dmatable(drive);
345 /* verify good DMA status */ 377 /* verify good DMA status */
@@ -359,6 +391,7 @@ static int __scc_dma_end(ide_drive_t *drive)
359static int scc_dma_end(ide_drive_t *drive) 391static int scc_dma_end(ide_drive_t *drive)
360{ 392{
361 ide_hwif_t *hwif = HWIF(drive); 393 ide_hwif_t *hwif = HWIF(drive);
394 void __iomem *dma_base = (void __iomem *)hwif->dma_base;
362 unsigned long intsts_port = hwif->dma_base + 0x014; 395 unsigned long intsts_port = hwif->dma_base + 0x014;
363 u32 reg; 396 u32 reg;
364 int dma_stat, data_loss = 0; 397 int dma_stat, data_loss = 0;
@@ -397,7 +430,7 @@ static int scc_dma_end(ide_drive_t *drive)
397 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME); 430 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
398 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT); 431 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
399 432
400 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 433 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
401 continue; 434 continue;
402 } 435 }
403 436
@@ -412,7 +445,7 @@ static int scc_dma_end(ide_drive_t *drive)
412 445
413 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT); 446 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
414 447
415 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 448 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
416 continue; 449 continue;
417 } 450 }
418 451
@@ -420,12 +453,12 @@ static int scc_dma_end(ide_drive_t *drive)
420 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME); 453 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
421 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT); 454 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
422 455
423 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 456 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
424 continue; 457 continue;
425 } 458 }
426 459
427 if (reg & INTSTS_ICERR) { 460 if (reg & INTSTS_ICERR) {
428 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 461 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
429 462
430 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME); 463 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
431 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT); 464 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
@@ -553,14 +586,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
553 const struct ide_port_info *d) 586 const struct ide_port_info *d)
554{ 587{
555 struct scc_ports *ports = pci_get_drvdata(dev); 588 struct scc_ports *ports = pci_get_drvdata(dev);
556 ide_hwif_t *hwif = NULL; 589 struct ide_host *host;
557 hw_regs_t hw; 590 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
558 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 591 int i, rc;
559 int i;
560
561 hwif = ide_find_port_slot(d);
562 if (hwif == NULL)
563 return -ENOMEM;
564 592
565 memset(&hw, 0, sizeof(hw)); 593 memset(&hw, 0, sizeof(hw));
566 for (i = 0; i <= 8; i++) 594 for (i = 0; i <= 8; i++)
@@ -568,11 +596,12 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
568 hw.irq = dev->irq; 596 hw.irq = dev->irq;
569 hw.dev = &dev->dev; 597 hw.dev = &dev->dev;
570 hw.chipset = ide_pci; 598 hw.chipset = ide_pci;
571 ide_init_port_hw(hwif, &hw);
572 599
573 idx[0] = hwif->index; 600 rc = ide_host_add(d, hws, &host);
601 if (rc)
602 return rc;
574 603
575 ide_device_add(idx, d); 604 ports->host = host;
576 605
577 return 0; 606 return 0;
578} 607}
@@ -701,6 +730,8 @@ static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
701 /* be sure we're looking at the low order bits */ 730 /* be sure we're looking at the low order bits */
702 scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 731 scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
703 732
733 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
734 tf->feature = scc_ide_inb(io_ports->feature_addr);
704 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 735 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
705 tf->nsect = scc_ide_inb(io_ports->nsect_addr); 736 tf->nsect = scc_ide_inb(io_ports->nsect_addr);
706 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 737 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -774,16 +805,6 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
774 805
775 ide_set_hwifdata(hwif, ports); 806 ide_set_hwifdata(hwif, ports);
776 807
777 hwif->tf_load = scc_tf_load;
778 hwif->tf_read = scc_tf_read;
779
780 hwif->input_data = scc_input_data;
781 hwif->output_data = scc_output_data;
782
783 hwif->INB = scc_ide_inb;
784 hwif->OUTB = scc_ide_outb;
785 hwif->OUTBSYNC = scc_ide_outbsync;
786
787 hwif->dma_base = dma_base; 808 hwif->dma_base = dma_base;
788 hwif->config_data = ports->ctl; 809 hwif->config_data = ports->ctl;
789} 810}
@@ -824,11 +845,6 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
824{ 845{
825 struct scc_ports *ports = ide_get_hwifdata(hwif); 846 struct scc_ports *ports = ide_get_hwifdata(hwif);
826 847
827 ports->hwif = hwif;
828
829 hwif->dma_command = hwif->dma_base;
830 hwif->dma_status = hwif->dma_base + 0x04;
831
832 /* PTERADD */ 848 /* PTERADD */
833 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); 849 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
834 850
@@ -838,6 +854,21 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
838 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ 854 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
839} 855}
840 856
857static const struct ide_tp_ops scc_tp_ops = {
858 .exec_command = scc_exec_command,
859 .read_status = scc_read_status,
860 .read_altstatus = scc_read_altstatus,
861 .read_sff_dma_status = scc_read_sff_dma_status,
862
863 .set_irq = scc_set_irq,
864
865 .tf_load = scc_tf_load,
866 .tf_read = scc_tf_read,
867
868 .input_data = scc_input_data,
869 .output_data = scc_output_data,
870};
871
841static const struct ide_port_ops scc_port_ops = { 872static const struct ide_port_ops scc_port_ops = {
842 .set_pio_mode = scc_set_pio_mode, 873 .set_pio_mode = scc_set_pio_mode,
843 .set_dma_mode = scc_set_dma_mode, 874 .set_dma_mode = scc_set_dma_mode,
@@ -861,6 +892,7 @@ static const struct ide_dma_ops scc_dma_ops = {
861 .name = name_str, \ 892 .name = name_str, \
862 .init_iops = init_iops_scc, \ 893 .init_iops = init_iops_scc, \
863 .init_hwif = init_hwif_scc, \ 894 .init_hwif = init_hwif_scc, \
895 .tp_ops = &scc_tp_ops, \
864 .port_ops = &scc_port_ops, \ 896 .port_ops = &scc_port_ops, \
865 .dma_ops = &scc_dma_ops, \ 897 .dma_ops = &scc_dma_ops, \
866 .host_flags = IDE_HFLAG_SINGLE, \ 898 .host_flags = IDE_HFLAG_SINGLE, \
@@ -895,7 +927,8 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
895static void __devexit scc_remove(struct pci_dev *dev) 927static void __devexit scc_remove(struct pci_dev *dev)
896{ 928{
897 struct scc_ports *ports = pci_get_drvdata(dev); 929 struct scc_ports *ports = pci_get_drvdata(dev);
898 ide_hwif_t *hwif = ports->hwif; 930 struct ide_host *host = ports->host;
931 ide_hwif_t *hwif = host->ports[0];
899 932
900 if (hwif->dmatable_cpu) { 933 if (hwif->dmatable_cpu) {
901 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES, 934 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -903,7 +936,7 @@ static void __devexit scc_remove(struct pci_dev *dev)
903 hwif->dmatable_cpu = NULL; 936 hwif->dmatable_cpu = NULL;
904 } 937 }
905 938
906 ide_unregister(hwif); 939 ide_host_remove(host);
907 940
908 iounmap((void*)ports->dma); 941 iounmap((void*)ports->dma);
909 iounmap((void*)ports->ctl); 942 iounmap((void*)ports->ctl);
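
The scc_pata changes above also show the registration rework: instead of grabbing a hwif with ide_find_port_slot(), seeding it with ide_init_port_hw() and passing an index array to ide_device_add(), the driver fills a hw_regs_t, hands an array of pointers to ide_host_add(), and keeps the returned struct ide_host for teardown with ide_host_remove(). A rough probe/remove skeleton under those assumptions (mydrv_* names are placeholders):

#include <linux/ide.h>
#include <linux/string.h>

static int mydrv_setup_port(struct device *dev, unsigned long base,
			    unsigned long ctl, int irq,
			    const struct ide_port_info *d,
			    struct ide_host **ret_host)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	int rc;

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, base, ctl);
	hw.irq = irq;
	hw.dev = dev;

	/* Allocates the ide_host, registers the port and probes drives. */
	rc = ide_host_add(d, hws, ret_host);
	if (rc)
		return rc;

	return 0;
}

static void mydrv_teardown(struct ide_host *host)
{
	ide_host_remove(host);		/* reverses ide_host_add() */
}

Storing the struct ide_host pointer (as scc_ports->host does above) is what replaces the old per-hwif bookkeeping in the remove path.
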
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index a1fb20826a5b..127ccb45e261 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -349,9 +349,7 @@ static const struct ide_port_ops svwks_port_ops = {
349 .cable_detect = svwks_cable_detect, 349 .cable_detect = svwks_cable_detect,
350}; 350};
351 351
352#define IDE_HFLAGS_SVWKS \ 352#define IDE_HFLAGS_SVWKS IDE_HFLAG_LEGACY_IRQS
353 (IDE_HFLAG_LEGACY_IRQS | \
354 IDE_HFLAG_ABUSE_SET_DMA_MODE)
355 353
356static const struct ide_port_info serverworks_chipsets[] __devinitdata = { 354static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
357 { /* 0 */ 355 { /* 0 */
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index c79ff5b41088..42eef19a18f1 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -127,7 +127,7 @@ sgiioc4_checkirq(ide_hwif_t * hwif)
127 return 0; 127 return 0;
128} 128}
129 129
130static u8 sgiioc4_INB(unsigned long); 130static u8 sgiioc4_read_status(ide_hwif_t *);
131 131
132static int 132static int
133sgiioc4_clearirq(ide_drive_t * drive) 133sgiioc4_clearirq(ide_drive_t * drive)
@@ -141,18 +141,19 @@ sgiioc4_clearirq(ide_drive_t * drive)
141 intr_reg = readl((void __iomem *)other_ir); 141 intr_reg = readl((void __iomem *)other_ir);
142 if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */ 142 if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
143 /* 143 /*
144 * Using sgiioc4_INB to read the Status register has a side 144 * Using sgiioc4_read_status to read the Status register has a
145 * effect of clearing the interrupt. The first read should 145 * side effect of clearing the interrupt. The first read should
146 * clear it if it is set. The second read should return 146 * clear it if it is set. The second read should return
147 * a "clear" status if it got cleared. If not, then spin 147 * a "clear" status if it got cleared. If not, then spin
148 * for a bit trying to clear it. 148 * for a bit trying to clear it.
149 */ 149 */
150 u8 stat = sgiioc4_INB(io_ports->status_addr); 150 u8 stat = sgiioc4_read_status(hwif);
151 int count = 0; 151 int count = 0;
152 stat = sgiioc4_INB(io_ports->status_addr); 152
153 stat = sgiioc4_read_status(hwif);
153 while ((stat & 0x80) && (count++ < 100)) { 154 while ((stat & 0x80) && (count++ < 100)) {
154 udelay(1); 155 udelay(1);
155 stat = sgiioc4_INB(io_ports->status_addr); 156 stat = sgiioc4_read_status(hwif);
156 } 157 }
157 158
158 if (intr_reg & 0x02) { 159 if (intr_reg & 0x02) {
@@ -304,9 +305,9 @@ sgiioc4_dma_lost_irq(ide_drive_t * drive)
304 ide_dma_lost_irq(drive); 305 ide_dma_lost_irq(drive);
305} 306}
306 307
307static u8 308static u8 sgiioc4_read_status(ide_hwif_t *hwif)
308sgiioc4_INB(unsigned long port)
309{ 309{
310 unsigned long port = hwif->io_ports.status_addr;
310 u8 reg = (u8) readb((void __iomem *) port); 311 u8 reg = (u8) readb((void __iomem *) port);
311 312
312 if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */ 313 if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */
@@ -549,6 +550,21 @@ static int sgiioc4_dma_setup(ide_drive_t *drive)
549 return 0; 550 return 0;
550} 551}
551 552
553static const struct ide_tp_ops sgiioc4_tp_ops = {
554 .exec_command = ide_exec_command,
555 .read_status = sgiioc4_read_status,
556 .read_altstatus = ide_read_altstatus,
557 .read_sff_dma_status = ide_read_sff_dma_status,
558
559 .set_irq = ide_set_irq,
560
561 .tf_load = ide_tf_load,
562 .tf_read = ide_tf_read,
563
564 .input_data = ide_input_data,
565 .output_data = ide_output_data,
566};
567
552static const struct ide_port_ops sgiioc4_port_ops = { 568static const struct ide_port_ops sgiioc4_port_ops = {
553 .set_dma_mode = sgiioc4_set_dma_mode, 569 .set_dma_mode = sgiioc4_set_dma_mode,
554 /* reset DMA engine, clear IRQs */ 570 /* reset DMA engine, clear IRQs */
@@ -571,6 +587,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = {
571 .name = DRV_NAME, 587 .name = DRV_NAME,
572 .chipset = ide_pci, 588 .chipset = ide_pci,
573 .init_dma = ide_dma_sgiioc4, 589 .init_dma = ide_dma_sgiioc4,
590 .tp_ops = &sgiioc4_tp_ops,
574 .port_ops = &sgiioc4_port_ops, 591 .port_ops = &sgiioc4_port_ops,
575 .dma_ops = &sgiioc4_dma_ops, 592 .dma_ops = &sgiioc4_dma_ops,
576 .host_flags = IDE_HFLAG_MMIO, 593 .host_flags = IDE_HFLAG_MMIO,
@@ -583,10 +600,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
583 unsigned long cmd_base, irqport; 600 unsigned long cmd_base, irqport;
584 unsigned long bar0, cmd_phys_base, ctl; 601 unsigned long bar0, cmd_phys_base, ctl;
585 void __iomem *virt_base; 602 void __iomem *virt_base;
586 ide_hwif_t *hwif; 603 struct ide_host *host;
587 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 604 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
588 hw_regs_t hw;
589 struct ide_port_info d = sgiioc4_port_info; 605 struct ide_port_info d = sgiioc4_port_info;
606 int rc;
590 607
591 /* Get the CmdBlk and CtrlBlk Base Registers */ 608 /* Get the CmdBlk and CtrlBlk Base Registers */
592 bar0 = pci_resource_start(dev, 0); 609 bar0 = pci_resource_start(dev, 0);
@@ -618,30 +635,26 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
618 hw.chipset = ide_pci; 635 hw.chipset = ide_pci;
619 hw.dev = &dev->dev; 636 hw.dev = &dev->dev;
620 637
621 hwif = ide_find_port_slot(&d);
622 if (hwif == NULL)
623 goto err;
624
625 ide_init_port_hw(hwif, &hw);
626
627 /* The IOC4 uses MMIO rather than Port IO. */
628 default_hwif_mmiops(hwif);
629
630 /* Initializing chipset IRQ Registers */ 638 /* Initializing chipset IRQ Registers */
631 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 639 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
632 640
633 hwif->INB = &sgiioc4_INB; 641 host = ide_host_alloc(&d, hws);
634 642 if (host == NULL) {
635 idx[0] = hwif->index; 643 rc = -ENOMEM;
644 goto err;
645 }
636 646
637 if (ide_device_add(idx, &d)) 647 rc = ide_host_register(host, &d, hws);
638 return -EIO; 648 if (rc)
649 goto err_free;
639 650
640 return 0; 651 return 0;
652err_free:
653 ide_host_free(host);
641err: 654err:
642 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); 655 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
643 iounmap(virt_base); 656 iounmap(virt_base);
644 return -ENOMEM; 657 return rc;
645} 658}
646 659
647static unsigned int __devinit 660static unsigned int __devinit
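
When some hardware setup has to happen between allocating the host and probing it, as in the sgiioc4 hunk above where the IOC4 IRQ registers are programmed first, the one-step ide_host_add() splits into ide_host_alloc() plus ide_host_register(), with ide_host_free() covering the failure path. Roughly, and with placeholder names:

#include <linux/ide.h>

static int mydrv_register(const struct ide_port_info *d, hw_regs_t *hw)
{
	hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
	struct ide_host *host;
	int rc;

	host = ide_host_alloc(d, hws);
	if (host == NULL)
		return -ENOMEM;

	/* hardware-specific setup that must precede probing goes here */

	rc = ide_host_register(host, d, hws);
	if (rc) {
		ide_host_free(host);	/* undo only the allocation */
		return rc;
	}

	return 0;
}
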
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 6e9d7655d89c..5965a35d94ae 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -334,7 +334,7 @@ static int siimage_io_dma_test_irq(ide_drive_t *drive)
334 unsigned long addr = siimage_selreg(hwif, 1); 334 unsigned long addr = siimage_selreg(hwif, 1);
335 335
336 /* return 1 if INTR asserted */ 336 /* return 1 if INTR asserted */
337 if (hwif->INB(hwif->dma_status) & 4) 337 if (inb(hwif->dma_base + ATA_DMA_STATUS) & 4)
338 return 1; 338 return 1;
339 339
340 /* return 1 if Device INTR asserted */ 340 /* return 1 if Device INTR asserted */
@@ -382,7 +382,7 @@ static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
382 } 382 }
383 383
384 /* return 1 if INTR asserted */ 384 /* return 1 if INTR asserted */
385 if (readb((void __iomem *)hwif->dma_status) & 0x04) 385 if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4)
386 return 1; 386 return 1;
387 387
388 /* return 1 if Device INTR asserted */ 388 /* return 1 if Device INTR asserted */
@@ -601,7 +601,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
601 * Fill in the basic hwif bits 601 * Fill in the basic hwif bits
602 */ 602 */
603 hwif->host_flags |= IDE_HFLAG_MMIO; 603 hwif->host_flags |= IDE_HFLAG_MMIO;
604 default_hwif_mmiops(hwif); 604
605 hwif->hwif_data = addr; 605 hwif->hwif_data = addr;
606 606
607 /* 607 /*
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 6efbde297174..f82a6502c1b7 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -157,9 +157,9 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
157 * Was DMA enabled? If so, disable it - we're resetting the 157 * Was DMA enabled? If so, disable it - we're resetting the
158 * host. The IDE layer will be handling the drive for us. 158 * host. The IDE layer will be handling the drive for us.
159 */ 159 */
160 dma_cmd = inb(hwif->dma_command); 160 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
161 if (dma_cmd & 1) { 161 if (dma_cmd & 1) {
162 outb(dma_cmd & ~1, hwif->dma_command); 162 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
163 printk("sl82c105: DMA was enabled\n"); 163 printk("sl82c105: DMA was enabled\n");
164 } 164 }
165 165
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 9b4b27a4c711..477e19790102 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -63,7 +63,7 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
63 ide_hwif_t *hwif = HWIF(drive); 63 ide_hwif_t *hwif = HWIF(drive);
64 ide_expiry_t *expiry = ide_get_hwifdata(hwif); 64 ide_expiry_t *expiry = ide_get_hwifdata(hwif);
65 ide_hwgroup_t *hwgroup = HWGROUP(drive); 65 ide_hwgroup_t *hwgroup = HWGROUP(drive);
66 u8 dma_stat = inb(hwif->dma_status); 66 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
67 67
68 /* Restore a higher level driver's expiry handler first. */ 68 /* Restore a higher level driver's expiry handler first. */
69 hwgroup->expiry = expiry; 69 hwgroup->expiry = expiry;
@@ -71,21 +71,24 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
71 if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */ 71 if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
72 unsigned long sc_base = hwif->config_data; 72 unsigned long sc_base = hwif->config_data;
73 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); 73 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
74 u8 dma_cmd = inb(hwif->dma_command); 74 u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
75 75
76 printk(KERN_WARNING "%s: DMA interrupt possibly stuck, " 76 printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
77 "attempting recovery...\n", drive->name); 77 "attempting recovery...\n", drive->name);
78 78
79 /* Stop DMA */ 79 /* Stop DMA */
80 outb(dma_cmd & ~0x01, hwif->dma_command); 80 outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD);
81 81
82 /* Setup the dummy DMA transfer */ 82 /* Setup the dummy DMA transfer */
83 outw(0, sc_base + 0x0a); /* Sector Count */ 83 outw(0, sc_base + 0x0a); /* Sector Count */
84 outw(0, twcr_port); /* Transfer Word Count 1 or 2 */ 84 outw(0, twcr_port); /* Transfer Word Count 1 or 2 */
85 85
86 /* Start the dummy DMA transfer */ 86 /* Start the dummy DMA transfer */
87 outb(0x00, hwif->dma_command); /* clear R_OR_WCTR for write */ 87
88 outb(0x01, hwif->dma_command); /* set START_STOPBM */ 88 /* clear R_OR_WCTR for write */
89 outb(0x00, hwif->dma_base + ATA_DMA_CMD);
90 /* set START_STOPBM */
91 outb(0x01, hwif->dma_base + ATA_DMA_CMD);
89 92
90 /* 93 /*
91 * If an interrupt was pending, it should come thru shortly. 94 * If an interrupt was pending, it should come thru shortly.
@@ -203,8 +206,7 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = {
203 .init_hwif = init_hwif_tc86c001, 206 .init_hwif = init_hwif_tc86c001,
204 .port_ops = &tc86c001_port_ops, 207 .port_ops = &tc86c001_port_ops,
205 .dma_ops = &tc86c001_dma_ops, 208 .dma_ops = &tc86c001_dma_ops,
206 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD | 209 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
207 IDE_HFLAG_ABUSE_SET_DMA_MODE,
208 .pio_mask = ATA_PIO4, 210 .pio_mask = ATA_PIO4,
209 .mwdma_mask = ATA_MWDMA2, 211 .mwdma_mask = ATA_MWDMA2,
210 .udma_mask = ATA_UDMA4, 212 .udma_mask = ATA_UDMA4,
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index e47384c70c40..09dc4803ef9d 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -425,7 +425,6 @@ static const struct ide_port_info via82cxxx_chipset __devinitdata = {
425 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, 425 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
426 .port_ops = &via_port_ops, 426 .port_ops = &via_port_ops,
427 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | 427 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
428 IDE_HFLAG_ABUSE_SET_DMA_MODE |
429 IDE_HFLAG_POST_SET_MODE | 428 IDE_HFLAG_POST_SET_MODE |
430 IDE_HFLAG_IO_32BIT, 429 IDE_HFLAG_IO_32BIT,
431 .pio_mask = ATA_PIO5, 430 .pio_mask = ATA_PIO5,
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 93fb9067c043..c521bf6e1bf2 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -48,6 +48,8 @@
48#include <asm/mediabay.h> 48#include <asm/mediabay.h>
49#endif 49#endif
50 50
51#define DRV_NAME "ide-pmac"
52
51#undef IDE_PMAC_DEBUG 53#undef IDE_PMAC_DEBUG
52 54
53#define DMA_WAIT_TIMEOUT 50 55#define DMA_WAIT_TIMEOUT 50
@@ -424,7 +426,9 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
424static void 426static void
425pmac_ide_selectproc(ide_drive_t *drive) 427pmac_ide_selectproc(ide_drive_t *drive)
426{ 428{
427 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 429 ide_hwif_t *hwif = drive->hwif;
430 pmac_ide_hwif_t *pmif =
431 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
428 432
429 if (pmif == NULL) 433 if (pmif == NULL)
430 return; 434 return;
@@ -444,7 +448,9 @@ pmac_ide_selectproc(ide_drive_t *drive)
444static void 448static void
445pmac_ide_kauai_selectproc(ide_drive_t *drive) 449pmac_ide_kauai_selectproc(ide_drive_t *drive)
446{ 450{
447 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 451 ide_hwif_t *hwif = drive->hwif;
452 pmac_ide_hwif_t *pmif =
453 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
448 454
449 if (pmif == NULL) 455 if (pmif == NULL)
450 return; 456 return;
@@ -465,7 +471,9 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive)
465static void 471static void
466pmac_ide_do_update_timings(ide_drive_t *drive) 472pmac_ide_do_update_timings(ide_drive_t *drive)
467{ 473{
468 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 474 ide_hwif_t *hwif = drive->hwif;
475 pmac_ide_hwif_t *pmif =
476 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
469 477
470 if (pmif == NULL) 478 if (pmif == NULL)
471 return; 479 return;
@@ -478,12 +486,26 @@ pmac_ide_do_update_timings(ide_drive_t *drive)
478 pmac_ide_selectproc(drive); 486 pmac_ide_selectproc(drive);
479} 487}
480 488
481static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port) 489static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
482{ 490{
483 u32 tmp; 491 writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
484 492 (void)readl((void __iomem *)(hwif->io_ports.data_addr
485 writeb(value, (void __iomem *) port); 493 + IDE_TIMING_CONFIG));
486 tmp = readl((void __iomem *)(hwif->io_ports.data_addr 494}
495
496static void pmac_set_irq(ide_hwif_t *hwif, int on)
497{
498 u8 ctl = ATA_DEVCTL_OBS;
499
500 if (on == 4) { /* hack for SRST */
501 ctl |= 4;
502 on &= ~4;
503 }
504
505 ctl |= on ? 0 : 2;
506
507 writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
508 (void)readl((void __iomem *)(hwif->io_ports.data_addr
487 + IDE_TIMING_CONFIG)); 509 + IDE_TIMING_CONFIG));
488} 510}
489 511
@@ -493,11 +515,13 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
493static void 515static void
494pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) 516pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
495{ 517{
518 ide_hwif_t *hwif = drive->hwif;
519 pmac_ide_hwif_t *pmif =
520 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
496 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); 521 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
497 u32 *timings, t; 522 u32 *timings, t;
498 unsigned accessTicks, recTicks; 523 unsigned accessTicks, recTicks;
499 unsigned accessTime, recTime; 524 unsigned accessTime, recTime;
500 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
501 unsigned int cycle_time; 525 unsigned int cycle_time;
502 526
503 if (pmif == NULL) 527 if (pmif == NULL)
@@ -778,9 +802,11 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
778 802
779static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) 803static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
780{ 804{
805 ide_hwif_t *hwif = drive->hwif;
806 pmac_ide_hwif_t *pmif =
807 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
781 int unit = (drive->select.b.unit & 0x01); 808 int unit = (drive->select.b.unit & 0x01);
782 int ret = 0; 809 int ret = 0;
783 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
784 u32 *timings, *timings2, tl[2]; 810 u32 *timings, *timings2, tl[2];
785 811
786 timings = &pmif->timings[unit]; 812 timings = &pmif->timings[unit];
@@ -852,11 +878,8 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
852/* Suspend call back, should be called after the child devices 878/* Suspend call back, should be called after the child devices
853 * have actually been suspended 879 * have actually been suspended
854 */ 880 */
855static int 881static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
856pmac_ide_do_suspend(ide_hwif_t *hwif)
857{ 882{
858 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
859
860 /* We clear the timings */ 883 /* We clear the timings */
861 pmif->timings[0] = 0; 884 pmif->timings[0] = 0;
862 pmif->timings[1] = 0; 885 pmif->timings[1] = 0;
@@ -884,11 +907,8 @@ pmac_ide_do_suspend(ide_hwif_t *hwif)
884/* Resume call back, should be called before the child devices 907/* Resume call back, should be called before the child devices
885 * are resumed 908 * are resumed
886 */ 909 */
887static int 910static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
888pmac_ide_do_resume(ide_hwif_t *hwif)
889{ 911{
890 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
891
892 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ 912 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
893 if (!pmif->mediabay) { 913 if (!pmif->mediabay) {
894 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); 914 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
@@ -916,7 +936,8 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
916 936
917static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) 937static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
918{ 938{
919 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)ide_get_hwifdata(hwif); 939 pmac_ide_hwif_t *pmif =
940 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
920 struct device_node *np = pmif->node; 941 struct device_node *np = pmif->node;
921 const char *cable = of_get_property(np, "cable-type", NULL); 942 const char *cable = of_get_property(np, "cable-type", NULL);
922 943
@@ -936,7 +957,40 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
936 return ATA_CBL_PATA40; 957 return ATA_CBL_PATA40;
937} 958}
938 959
960static void pmac_ide_init_dev(ide_drive_t *drive)
961{
962 ide_hwif_t *hwif = drive->hwif;
963 pmac_ide_hwif_t *pmif =
964 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
965
966 if (pmif->mediabay) {
967#ifdef CONFIG_PMAC_MEDIABAY
968 if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) {
969 drive->noprobe = 0;
970 return;
971 }
972#endif
973 drive->noprobe = 1;
974 }
975}
976
977static const struct ide_tp_ops pmac_tp_ops = {
978 .exec_command = pmac_exec_command,
979 .read_status = ide_read_status,
980 .read_altstatus = ide_read_altstatus,
981 .read_sff_dma_status = ide_read_sff_dma_status,
982
983 .set_irq = pmac_set_irq,
984
985 .tf_load = ide_tf_load,
986 .tf_read = ide_tf_read,
987
988 .input_data = ide_input_data,
989 .output_data = ide_output_data,
990};
991
939static const struct ide_port_ops pmac_ide_ata6_port_ops = { 992static const struct ide_port_ops pmac_ide_ata6_port_ops = {
993 .init_dev = pmac_ide_init_dev,
940 .set_pio_mode = pmac_ide_set_pio_mode, 994 .set_pio_mode = pmac_ide_set_pio_mode,
941 .set_dma_mode = pmac_ide_set_dma_mode, 995 .set_dma_mode = pmac_ide_set_dma_mode,
942 .selectproc = pmac_ide_kauai_selectproc, 996 .selectproc = pmac_ide_kauai_selectproc,
@@ -944,6 +998,7 @@ static const struct ide_port_ops pmac_ide_ata6_port_ops = {
944}; 998};
945 999
946static const struct ide_port_ops pmac_ide_ata4_port_ops = { 1000static const struct ide_port_ops pmac_ide_ata4_port_ops = {
1001 .init_dev = pmac_ide_init_dev,
947 .set_pio_mode = pmac_ide_set_pio_mode, 1002 .set_pio_mode = pmac_ide_set_pio_mode,
948 .set_dma_mode = pmac_ide_set_dma_mode, 1003 .set_dma_mode = pmac_ide_set_dma_mode,
949 .selectproc = pmac_ide_selectproc, 1004 .selectproc = pmac_ide_selectproc,
@@ -951,6 +1006,7 @@ static const struct ide_port_ops pmac_ide_ata4_port_ops = {
951}; 1006};
952 1007
953static const struct ide_port_ops pmac_ide_port_ops = { 1008static const struct ide_port_ops pmac_ide_port_ops = {
1009 .init_dev = pmac_ide_init_dev,
954 .set_pio_mode = pmac_ide_set_pio_mode, 1010 .set_pio_mode = pmac_ide_set_pio_mode,
955 .set_dma_mode = pmac_ide_set_dma_mode, 1011 .set_dma_mode = pmac_ide_set_dma_mode,
956 .selectproc = pmac_ide_selectproc, 1012 .selectproc = pmac_ide_selectproc,
@@ -959,12 +1015,14 @@ static const struct ide_port_ops pmac_ide_port_ops = {
959static const struct ide_dma_ops pmac_dma_ops; 1015static const struct ide_dma_ops pmac_dma_ops;
960 1016
961static const struct ide_port_info pmac_port_info = { 1017static const struct ide_port_info pmac_port_info = {
1018 .name = DRV_NAME,
962 .init_dma = pmac_ide_init_dma, 1019 .init_dma = pmac_ide_init_dma,
963 .chipset = ide_pmac, 1020 .chipset = ide_pmac,
1021 .tp_ops = &pmac_tp_ops,
1022 .port_ops = &pmac_ide_port_ops,
964#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1023#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
965 .dma_ops = &pmac_dma_ops, 1024 .dma_ops = &pmac_dma_ops,
966#endif 1025#endif
967 .port_ops = &pmac_ide_port_ops,
968 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 1026 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
969 IDE_HFLAG_POST_SET_MODE | 1027 IDE_HFLAG_POST_SET_MODE |
970 IDE_HFLAG_MMIO | 1028 IDE_HFLAG_MMIO |
@@ -977,13 +1035,15 @@ static const struct ide_port_info pmac_port_info = {
977 * Setup, register & probe an IDE channel driven by this driver, this is 1035 * Setup, register & probe an IDE channel driven by this driver, this is
978 * called by one of the 2 probe functions (macio or PCI). 1036 * called by one of the 2 probe functions (macio or PCI).
979 */ 1037 */
980static int __devinit 1038static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
981pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
982{ 1039{
983 struct device_node *np = pmif->node; 1040 struct device_node *np = pmif->node;
984 const int *bidp; 1041 const int *bidp;
985 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 1042 struct ide_host *host;
1043 ide_hwif_t *hwif;
1044 hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
986 struct ide_port_info d = pmac_port_info; 1045 struct ide_port_info d = pmac_port_info;
1046 int rc;
987 1047
988 pmif->broken_dma = pmif->broken_dma_warn = 0; 1048 pmif->broken_dma = pmif->broken_dma_warn = 0;
989 if (of_device_is_compatible(np, "shasta-ata")) { 1049 if (of_device_is_compatible(np, "shasta-ata")) {
@@ -1054,31 +1114,16 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
1054 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY)); 1114 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
1055 } 1115 }
1056 1116
1057 /* Setup MMIO ops */ 1117 printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
1058 default_hwif_mmiops(hwif); 1118 "bus ID %d%s, irq %d\n", model_name[pmif->kind],
1059 hwif->OUTBSYNC = pmac_outbsync; 1119 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
1120 pmif->mediabay ? " (mediabay)" : "", hw->irq);
1060 1121
1061 hwif->hwif_data = pmif; 1122 rc = ide_host_add(&d, hws, &host);
1062 ide_init_port_hw(hwif, hw); 1123 if (rc)
1124 return rc;
1063 1125
1064 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n", 1126 hwif = host->ports[0];
1065 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
1066 pmif->mediabay ? " (mediabay)" : "", hwif->irq);
1067
1068 if (pmif->mediabay) {
1069#ifdef CONFIG_PMAC_MEDIABAY
1070 if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
1071#else
1072 if (1) {
1073#endif
1074 hwif->drives[0].noprobe = 1;
1075 hwif->drives[1].noprobe = 1;
1076 }
1077 }
1078
1079 idx[0] = hwif->index;
1080
1081 ide_device_add(idx, &d);
1082 1127
1083 return 0; 1128 return 0;
1084} 1129}
@@ -1101,7 +1146,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1101{ 1146{
1102 void __iomem *base; 1147 void __iomem *base;
1103 unsigned long regbase; 1148 unsigned long regbase;
1104 ide_hwif_t *hwif;
1105 pmac_ide_hwif_t *pmif; 1149 pmac_ide_hwif_t *pmif;
1106 int irq, rc; 1150 int irq, rc;
1107 hw_regs_t hw; 1151 hw_regs_t hw;
@@ -1110,14 +1154,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1110 if (pmif == NULL) 1154 if (pmif == NULL)
1111 return -ENOMEM; 1155 return -ENOMEM;
1112 1156
1113 hwif = ide_find_port();
1114 if (hwif == NULL) {
1115 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
1116 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
1117 rc = -ENODEV;
1118 goto out_free_pmif;
1119 }
1120
1121 if (macio_resource_count(mdev) == 0) { 1157 if (macio_resource_count(mdev) == 0) {
1122 printk(KERN_WARNING "ide-pmac: no address for %s\n", 1158 printk(KERN_WARNING "ide-pmac: no address for %s\n",
1123 mdev->ofdev.node->full_name); 1159 mdev->ofdev.node->full_name);
@@ -1164,7 +1200,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1164 } else 1200 } else
1165 pmif->dma_regs = NULL; 1201 pmif->dma_regs = NULL;
1166#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 1202#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1167 dev_set_drvdata(&mdev->ofdev.dev, hwif); 1203 dev_set_drvdata(&mdev->ofdev.dev, pmif);
1168 1204
1169 memset(&hw, 0, sizeof(hw)); 1205 memset(&hw, 0, sizeof(hw));
1170 pmac_ide_init_ports(&hw, pmif->regbase); 1206 pmac_ide_init_ports(&hw, pmif->regbase);
@@ -1172,7 +1208,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1172 hw.dev = &mdev->bus->pdev->dev; 1208 hw.dev = &mdev->bus->pdev->dev;
1173 hw.parent = &mdev->ofdev.dev; 1209 hw.parent = &mdev->ofdev.dev;
1174 1210
1175 rc = pmac_ide_setup_device(pmif, hwif, &hw); 1211 rc = pmac_ide_setup_device(pmif, &hw);
1176 if (rc != 0) { 1212 if (rc != 0) {
1177 /* The inteface is released to the common IDE layer */ 1213 /* The inteface is released to the common IDE layer */
1178 dev_set_drvdata(&mdev->ofdev.dev, NULL); 1214 dev_set_drvdata(&mdev->ofdev.dev, NULL);
@@ -1195,12 +1231,13 @@ out_free_pmif:
1195static int 1231static int
1196pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) 1232pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1197{ 1233{
1198 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); 1234 pmac_ide_hwif_t *pmif =
1199 int rc = 0; 1235 (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1236 int rc = 0;
1200 1237
1201 if (mesg.event != mdev->ofdev.dev.power.power_state.event 1238 if (mesg.event != mdev->ofdev.dev.power.power_state.event
1202 && (mesg.event & PM_EVENT_SLEEP)) { 1239 && (mesg.event & PM_EVENT_SLEEP)) {
1203 rc = pmac_ide_do_suspend(hwif); 1240 rc = pmac_ide_do_suspend(pmif);
1204 if (rc == 0) 1241 if (rc == 0)
1205 mdev->ofdev.dev.power.power_state = mesg; 1242 mdev->ofdev.dev.power.power_state = mesg;
1206 } 1243 }
@@ -1211,11 +1248,12 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1211static int 1248static int
1212pmac_ide_macio_resume(struct macio_dev *mdev) 1249pmac_ide_macio_resume(struct macio_dev *mdev)
1213{ 1250{
1214 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); 1251 pmac_ide_hwif_t *pmif =
1215 int rc = 0; 1252 (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1216 1253 int rc = 0;
1254
1217 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) { 1255 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
1218 rc = pmac_ide_do_resume(hwif); 1256 rc = pmac_ide_do_resume(pmif);
1219 if (rc == 0) 1257 if (rc == 0)
1220 mdev->ofdev.dev.power.power_state = PMSG_ON; 1258 mdev->ofdev.dev.power.power_state = PMSG_ON;
1221 } 1259 }
@@ -1229,7 +1267,6 @@ pmac_ide_macio_resume(struct macio_dev *mdev)
1229static int __devinit 1267static int __devinit
1230pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) 1268pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1231{ 1269{
1232 ide_hwif_t *hwif;
1233 struct device_node *np; 1270 struct device_node *np;
1234 pmac_ide_hwif_t *pmif; 1271 pmac_ide_hwif_t *pmif;
1235 void __iomem *base; 1272 void __iomem *base;
@@ -1247,14 +1284,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1247 if (pmif == NULL) 1284 if (pmif == NULL)
1248 return -ENOMEM; 1285 return -ENOMEM;
1249 1286
1250 hwif = ide_find_port();
1251 if (hwif == NULL) {
1252 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
1253 printk(KERN_ERR " %s\n", np->full_name);
1254 rc = -ENODEV;
1255 goto out_free_pmif;
1256 }
1257
1258 if (pci_enable_device(pdev)) { 1287 if (pci_enable_device(pdev)) {
1259 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for " 1288 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
1260 "%s\n", np->full_name); 1289 "%s\n", np->full_name);
@@ -1284,14 +1313,14 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1284 pmif->kauai_fcr = base; 1313 pmif->kauai_fcr = base;
1285 pmif->irq = pdev->irq; 1314 pmif->irq = pdev->irq;
1286 1315
1287 pci_set_drvdata(pdev, hwif); 1316 pci_set_drvdata(pdev, pmif);
1288 1317
1289 memset(&hw, 0, sizeof(hw)); 1318 memset(&hw, 0, sizeof(hw));
1290 pmac_ide_init_ports(&hw, pmif->regbase); 1319 pmac_ide_init_ports(&hw, pmif->regbase);
1291 hw.irq = pdev->irq; 1320 hw.irq = pdev->irq;
1292 hw.dev = &pdev->dev; 1321 hw.dev = &pdev->dev;
1293 1322
1294 rc = pmac_ide_setup_device(pmif, hwif, &hw); 1323 rc = pmac_ide_setup_device(pmif, &hw);
1295 if (rc != 0) { 1324 if (rc != 0) {
1296 /* The inteface is released to the common IDE layer */ 1325 /* The inteface is released to the common IDE layer */
1297 pci_set_drvdata(pdev, NULL); 1326 pci_set_drvdata(pdev, NULL);
@@ -1310,12 +1339,12 @@ out_free_pmif:
1310static int 1339static int
1311pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) 1340pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1312{ 1341{
1313 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); 1342 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
1314 int rc = 0; 1343 int rc = 0;
1315 1344
1316 if (mesg.event != pdev->dev.power.power_state.event 1345 if (mesg.event != pdev->dev.power.power_state.event
1317 && (mesg.event & PM_EVENT_SLEEP)) { 1346 && (mesg.event & PM_EVENT_SLEEP)) {
1318 rc = pmac_ide_do_suspend(hwif); 1347 rc = pmac_ide_do_suspend(pmif);
1319 if (rc == 0) 1348 if (rc == 0)
1320 pdev->dev.power.power_state = mesg; 1349 pdev->dev.power.power_state = mesg;
1321 } 1350 }
@@ -1326,11 +1355,11 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1326static int 1355static int
1327pmac_ide_pci_resume(struct pci_dev *pdev) 1356pmac_ide_pci_resume(struct pci_dev *pdev)
1328{ 1357{
1329 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); 1358 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
1330 int rc = 0; 1359 int rc = 0;
1331 1360
1332 if (pdev->dev.power.power_state.event != PM_EVENT_ON) { 1361 if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
1333 rc = pmac_ide_do_resume(hwif); 1362 rc = pmac_ide_do_resume(pmif);
1334 if (rc == 0) 1363 if (rc == 0)
1335 pdev->dev.power.power_state = PMSG_ON; 1364 pdev->dev.power.power_state = PMSG_ON;
1336 } 1365 }
@@ -1421,10 +1450,11 @@ out:
1421static int 1450static int
1422pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq) 1451pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1423{ 1452{
1453 ide_hwif_t *hwif = drive->hwif;
1454 pmac_ide_hwif_t *pmif =
1455 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1424 struct dbdma_cmd *table; 1456 struct dbdma_cmd *table;
1425 int i, count = 0; 1457 int i, count = 0;
1426 ide_hwif_t *hwif = HWIF(drive);
1427 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1428 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; 1458 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
1429 struct scatterlist *sg; 1459 struct scatterlist *sg;
1430 int wr = (rq_data_dir(rq) == WRITE); 1460 int wr = (rq_data_dir(rq) == WRITE);
@@ -1520,7 +1550,8 @@ static int
1520pmac_ide_dma_setup(ide_drive_t *drive) 1550pmac_ide_dma_setup(ide_drive_t *drive)
1521{ 1551{
1522 ide_hwif_t *hwif = HWIF(drive); 1552 ide_hwif_t *hwif = HWIF(drive);
1523 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; 1553 pmac_ide_hwif_t *pmif =
1554 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1524 struct request *rq = HWGROUP(drive)->rq; 1555 struct request *rq = HWGROUP(drive)->rq;
1525 u8 unit = (drive->select.b.unit & 0x01); 1556 u8 unit = (drive->select.b.unit & 0x01);
1526 u8 ata4; 1557 u8 ata4;
@@ -1560,7 +1591,9 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
1560static void 1591static void
1561pmac_ide_dma_start(ide_drive_t *drive) 1592pmac_ide_dma_start(ide_drive_t *drive)
1562{ 1593{
1563 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1594 ide_hwif_t *hwif = drive->hwif;
1595 pmac_ide_hwif_t *pmif =
1596 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1564 volatile struct dbdma_regs __iomem *dma; 1597 volatile struct dbdma_regs __iomem *dma;
1565 1598
1566 dma = pmif->dma_regs; 1599 dma = pmif->dma_regs;
@@ -1576,7 +1609,9 @@ pmac_ide_dma_start(ide_drive_t *drive)
1576static int 1609static int
1577pmac_ide_dma_end (ide_drive_t *drive) 1610pmac_ide_dma_end (ide_drive_t *drive)
1578{ 1611{
1579 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1612 ide_hwif_t *hwif = drive->hwif;
1613 pmac_ide_hwif_t *pmif =
1614 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1580 volatile struct dbdma_regs __iomem *dma; 1615 volatile struct dbdma_regs __iomem *dma;
1581 u32 dstat; 1616 u32 dstat;
1582 1617
@@ -1604,7 +1639,9 @@ pmac_ide_dma_end (ide_drive_t *drive)
1604static int 1639static int
1605pmac_ide_dma_test_irq (ide_drive_t *drive) 1640pmac_ide_dma_test_irq (ide_drive_t *drive)
1606{ 1641{
1607 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1642 ide_hwif_t *hwif = drive->hwif;
1643 pmac_ide_hwif_t *pmif =
1644 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1608 volatile struct dbdma_regs __iomem *dma; 1645 volatile struct dbdma_regs __iomem *dma;
1609 unsigned long status, timeout; 1646 unsigned long status, timeout;
1610 1647
@@ -1664,7 +1701,9 @@ static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
1664static void 1701static void
1665pmac_ide_dma_lost_irq (ide_drive_t *drive) 1702pmac_ide_dma_lost_irq (ide_drive_t *drive)
1666{ 1703{
1667 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1704 ide_hwif_t *hwif = drive->hwif;
1705 pmac_ide_hwif_t *pmif =
1706 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1668 volatile struct dbdma_regs __iomem *dma; 1707 volatile struct dbdma_regs __iomem *dma;
1669 unsigned long status; 1708 unsigned long status;
1670 1709
@@ -1694,7 +1733,8 @@ static const struct ide_dma_ops pmac_dma_ops = {
1694static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif, 1733static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1695 const struct ide_port_info *d) 1734 const struct ide_port_info *d)
1696{ 1735{
1697 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data; 1736 pmac_ide_hwif_t *pmif =
1737 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1698 struct pci_dev *dev = to_pci_dev(hwif->dev); 1738 struct pci_dev *dev = to_pci_dev(hwif->dev);
1699 1739
1700 /* We won't need pci_dev if we switch to generic consistent 1740 /* We won't need pci_dev if we switch to generic consistent
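
The pmac conversion above also moves the driver's private data out of hwif->hwif_data: the pmac_ide_hwif_t is attached to the parent device with dev_set_drvdata() / pci_set_drvdata() at attach time, and every hook recovers it through dev_get_drvdata(hwif->gendev.parent). A trimmed sketch of that lookup, using the pmif type from the hunks above and a made-up helper name:

#include <linux/device.h>
#include <linux/ide.h>

static inline pmac_ide_hwif_t *mydrv_priv(ide_hwif_t *hwif)
{
	/* the parent device is the macio/PCI device the pmif was hung off */
	return (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
}

static void mydrv_selectproc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif = mydrv_priv(hwif);

	if (pmif == NULL)
		return;
	/* ... reprogram the timing registers for the selected unit ... */
}

This is also what lets the suspend/resume callbacks above take the pmac_ide_hwif_t directly instead of an ide_hwif_t.
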
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 65fc08b6b6d0..b15cad58dc81 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -73,15 +73,12 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
73 * @d: IDE port info 73 * @d: IDE port info
74 * 74 *
75 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space. 75 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
76 * Where a device has a partner that is already in DMA mode we check
77 * and enforce IDE simplex rules.
78 */ 76 */
79 77
80unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d) 78unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
81{ 79{
82 struct pci_dev *dev = to_pci_dev(hwif->dev); 80 struct pci_dev *dev = to_pci_dev(hwif->dev);
83 unsigned long dma_base = 0; 81 unsigned long dma_base = 0;
84 u8 dma_stat = 0;
85 82
86 if (hwif->host_flags & IDE_HFLAG_MMIO) 83 if (hwif->host_flags & IDE_HFLAG_MMIO)
87 return hwif->dma_base; 84 return hwif->dma_base;
@@ -102,11 +99,19 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
102 if (hwif->channel) 99 if (hwif->channel)
103 dma_base += 8; 100 dma_base += 8;
104 101
105 if (d->host_flags & IDE_HFLAG_CS5520) 102 return dma_base;
103}
104EXPORT_SYMBOL_GPL(ide_pci_dma_base);
105
106int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
107{
108 u8 dma_stat;
109
110 if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
106 goto out; 111 goto out;
107 112
108 if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) { 113 if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
109 ide_pci_clear_simplex(dma_base, d->name); 114 ide_pci_clear_simplex(hwif->dma_base, d->name);
110 goto out; 115 goto out;
111 } 116 }
112 117
@@ -120,15 +125,15 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
120 * we tune the drive then try to grab DMA ownership if we want to be 125 * we tune the drive then try to grab DMA ownership if we want to be
121 * the DMA end. This has to be become dynamic to handle hot-plug. 126 * the DMA end. This has to be become dynamic to handle hot-plug.
122 */ 127 */
123 dma_stat = hwif->INB(dma_base + 2); 128 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
124 if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) { 129 if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
125 printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name); 130 printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name);
126 dma_base = 0; 131 return -1;
127 } 132 }
128out: 133out:
129 return dma_base; 134 return 0;
130} 135}
131EXPORT_SYMBOL_GPL(ide_pci_dma_base); 136EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
132 137
133/* 138/*
134 * Set up BM-DMA capability (PnP BIOS should have done this) 139 * Set up BM-DMA capability (PnP BIOS should have done this)
@@ -284,33 +289,31 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
284} 289}
285 290
286/** 291/**
287 * ide_hwif_configure - configure an IDE interface 292 * ide_hw_configure - configure a hw_regs_t instance
288 * @dev: PCI device holding interface 293 * @dev: PCI device holding interface
289 * @d: IDE port info 294 * @d: IDE port info
290 * @port: port number 295 * @port: port number
291 * @irq: PCI IRQ 296 * @irq: PCI IRQ
297 * @hw: hw_regs_t instance corresponding to this port
292 * 298 *
293 * Perform the initial set up for the hardware interface structure. This 299 * Perform the initial set up for the hardware interface structure. This
294 * is done per interface port rather than per PCI device. There may be 300 * is done per interface port rather than per PCI device. There may be
295 * more than one port per device. 301 * more than one port per device.
296 * 302 *
297 * Returns the new hardware interface structure, or NULL on a failure 303 * Returns zero on success or an error code.
298 */ 304 */
299 305
300static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, 306static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
301 const struct ide_port_info *d, 307 unsigned int port, int irq, hw_regs_t *hw)
302 unsigned int port, int irq)
303{ 308{
304 unsigned long ctl = 0, base = 0; 309 unsigned long ctl = 0, base = 0;
305 ide_hwif_t *hwif;
306 struct hw_regs_s hw;
307 310
308 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { 311 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
309 if (ide_pci_check_iomem(dev, d, 2 * port) || 312 if (ide_pci_check_iomem(dev, d, 2 * port) ||
310 ide_pci_check_iomem(dev, d, 2 * port + 1)) { 313 ide_pci_check_iomem(dev, d, 2 * port + 1)) {
311 printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported " 314 printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported "
312 "as MEM for port %d!\n", d->name, port); 315 "as MEM for port %d!\n", d->name, port);
313 return NULL; 316 return -EINVAL;
314 } 317 }
315 318
316 ctl = pci_resource_start(dev, 2*port+1); 319 ctl = pci_resource_start(dev, 2*port+1);
@@ -324,22 +327,16 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
324 if (!base || !ctl) { 327 if (!base || !ctl) {
325 printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n", 328 printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n",
326 d->name, port); 329 d->name, port);
327 return NULL; 330 return -EINVAL;
328 } 331 }
329 332
330 hwif = ide_find_port_slot(d); 333 memset(hw, 0, sizeof(*hw));
331 if (hwif == NULL) 334 hw->irq = irq;
332 return NULL; 335 hw->dev = &dev->dev;
333 336 hw->chipset = d->chipset ? d->chipset : ide_pci;
334 memset(&hw, 0, sizeof(hw)); 337 ide_std_init_ports(hw, base, ctl | 2);
335 hw.irq = irq;
336 hw.dev = &dev->dev;
337 hw.chipset = d->chipset ? d->chipset : ide_pci;
338 ide_std_init_ports(&hw, base, ctl | 2);
339
340 ide_init_port_hw(hwif, &hw);
341 338
342 return hwif; 339 return 0;
343} 340}
344 341
345#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 342#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
@@ -362,7 +359,15 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
362 (dev->class & 0x80))) { 359 (dev->class & 0x80))) {
363 unsigned long base = ide_pci_dma_base(hwif, d); 360 unsigned long base = ide_pci_dma_base(hwif, d);
364 361
365 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 362 if (base == 0)
363 return -1;
364
365 hwif->dma_base = base;
366
367 if (ide_pci_check_simplex(hwif, d) < 0)
368 return -1;
369
370 if (ide_pci_set_master(dev, d->name) < 0)
366 return -1; 371 return -1;
367 372
368 if (hwif->host_flags & IDE_HFLAG_MMIO) 373 if (hwif->host_flags & IDE_HFLAG_MMIO)
@@ -376,7 +381,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
376 if (ide_allocate_dma_engine(hwif)) 381 if (ide_allocate_dma_engine(hwif))
377 return -1; 382 return -1;
378 383
379 ide_setup_dma(hwif, base); 384 hwif->dma_ops = &sff_dma_ops;
380 } 385 }
381 386
382 return 0; 387 return 0;
@@ -429,7 +434,8 @@ out:
429 * @dev: PCI device 434 * @dev: PCI device
430 * @d: IDE port info 435 * @d: IDE port info
431 * @pciirq: IRQ line 436 * @pciirq: IRQ line
432 * @idx: ATA index table to update 437 * @hw: hw_regs_t instances corresponding to this PCI IDE device
438 * @hws: hw_regs_t pointers table to update
433 * 439 *
434 * Scan the interfaces attached to this device and do any 440 * Scan the interfaces attached to this device and do any
435 * necessary per port setup. Attach the devices and ask the 441 * necessary per port setup. Attach the devices and ask the
@@ -440,10 +446,10 @@ out:
440 * where the chipset setup is not the default PCI IDE one. 446 * where the chipset setup is not the default PCI IDE one.
441 */ 447 */
442 448
443void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int pciirq, u8 *idx) 449void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
450 int pciirq, hw_regs_t *hw, hw_regs_t **hws)
444{ 451{
445 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; 452 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
446 ide_hwif_t *hwif;
447 u8 tmp; 453 u8 tmp;
448 454
449 /* 455 /*
@@ -459,11 +465,10 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
459 continue; /* port not enabled */ 465 continue; /* port not enabled */
460 } 466 }
461 467
462 hwif = ide_hwif_configure(dev, d, port, pciirq); 468 if (ide_hw_configure(dev, d, port, pciirq, hw + port))
463 if (hwif == NULL)
464 continue; 469 continue;
465 470
466 *(idx + port) = hwif->index; 471 *(hws + port) = hw + port;
467 } 472 }
468} 473}
469EXPORT_SYMBOL_GPL(ide_pci_setup_ports); 474EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
@@ -480,7 +485,7 @@ EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
480 */ 485 */
481static int do_ide_setup_pci_device(struct pci_dev *dev, 486static int do_ide_setup_pci_device(struct pci_dev *dev,
482 const struct ide_port_info *d, 487 const struct ide_port_info *d,
483 u8 *idx, u8 noisy) 488 u8 noisy)
484{ 489{
485 int tried_config = 0; 490 int tried_config = 0;
486 int pciirq, ret; 491 int pciirq, ret;
@@ -529,22 +534,24 @@ static int do_ide_setup_pci_device(struct pci_dev *dev,
529 d->name, pciirq); 534 d->name, pciirq);
530 } 535 }
531 536
532 /* FIXME: silent failure can happen */ 537 ret = pciirq;
533
534 ide_pci_setup_ports(dev, d, pciirq, idx);
535out: 538out:
536 return ret; 539 return ret;
537} 540}
538 541
539int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d) 542int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d)
540{ 543{
541 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 544 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
542 int ret; 545 int ret;
543 546
544 ret = do_ide_setup_pci_device(dev, d, &idx[0], 1); 547 ret = do_ide_setup_pci_device(dev, d, 1);
548
549 if (ret >= 0) {
550 /* FIXME: silent failure can happen */
551 ide_pci_setup_ports(dev, d, ret, &hw[0], &hws[0]);
545 552
546 if (ret >= 0) 553 ret = ide_host_add(d, hws, NULL);
547 ide_device_add(idx, d); 554 }
548 555
549 return ret; 556 return ret;
550} 557}
@@ -555,19 +562,23 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
555{ 562{
556 struct pci_dev *pdev[] = { dev1, dev2 }; 563 struct pci_dev *pdev[] = { dev1, dev2 };
557 int ret, i; 564 int ret, i;
558 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 565 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
559 566
560 for (i = 0; i < 2; i++) { 567 for (i = 0; i < 2; i++) {
561 ret = do_ide_setup_pci_device(pdev[i], d, &idx[i*2], !i); 568 ret = do_ide_setup_pci_device(pdev[i], d, !i);
569
562 /* 570 /*
563 * FIXME: Mom, mom, they stole me the helper function to undo 571 * FIXME: Mom, mom, they stole me the helper function to undo
564 * do_ide_setup_pci_device() on the first device! 572 * do_ide_setup_pci_device() on the first device!
565 */ 573 */
566 if (ret < 0) 574 if (ret < 0)
567 goto out; 575 goto out;
576
577 /* FIXME: silent failure can happen */
578 ide_pci_setup_ports(pdev[i], d, ret, &hw[i*2], &hws[i*2]);
568 } 579 }
569 580
570 ide_device_add(idx, d); 581 ret = ide_host_add(d, hws, NULL);
571out: 582out:
572 return ret; 583 return ret;
573} 584}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d930c481..7a64aa9b51b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
646 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 646 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
647 647
648 spin_lock_irqsave(&pool->last_cpu_lock, flags); 648 spin_lock_irqsave(&pool->last_cpu_lock, flags);
649 cpu = next_cpu(pool->last_cpu, cpu_online_map); 649 cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
650 if (cpu == NR_CPUS) 650 if (cpu >= nr_cpu_ids)
651 cpu = first_cpu(cpu_online_map); 651 cpu = first_cpu(cpu_online_map);
652 pool->last_cpu = cpu; 652 pool->last_cpu = cpu;
653 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); 653 spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
index 94e444b4ee15..b12b7ee4b6aa 100644
--- a/drivers/input/keyboard/tosakbd.c
+++ b/drivers/input/keyboard/tosakbd.c
@@ -215,8 +215,6 @@ static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
215 unsigned long flags; 215 unsigned long flags;
216 216
217 spin_lock_irqsave(&tosakbd->lock, flags); 217 spin_lock_irqsave(&tosakbd->lock, flags);
218 PGSR1 = (PGSR1 & ~TOSA_GPIO_LOW_STROBE_BIT);
219 PGSR2 = (PGSR2 & ~TOSA_GPIO_HIGH_STROBE_BIT);
220 tosakbd->suspended = 1; 218 tosakbd->suspended = 1;
221 spin_unlock_irqrestore(&tosakbd->lock, flags); 219 spin_unlock_irqrestore(&tosakbd->lock, flags);
222 220
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 260bade0a5ec..9f93c29fed35 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -5,6 +5,10 @@
5menu "Multifunction device drivers" 5menu "Multifunction device drivers"
6 depends on HAS_IOMEM 6 depends on HAS_IOMEM
7 7
8config MFD_CORE
9 tristate
10 default n
11
8config MFD_SM501 12config MFD_SM501
9 tristate "Support for Silicon Motion SM501" 13 tristate "Support for Silicon Motion SM501"
10 ---help--- 14 ---help---
@@ -38,6 +42,13 @@ config HTC_PASIC3
38 HTC Magician devices, respectively. Actual functionality is 42 HTC Magician devices, respectively. Actual functionality is
39 handled by the leds-pasic3 and ds1wm drivers. 43 handled by the leds-pasic3 and ds1wm drivers.
40 44
45config MFD_TC6393XB
46 bool "Support Toshiba TC6393XB"
47 depends on HAVE_GPIO_LIB
48 select MFD_CORE
49 help
50 Support for Toshiba Mobile IO Controller TC6393XB
51
41endmenu 52endmenu
42 53
43menu "Multimedia Capabilities Port drivers" 54menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index eef4e26807df..33daa2f45dd8 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_MFD_ASIC3) += asic3.o
8obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o 8obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
9obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o 9obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
10 10
11obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o
12
13obj-$(CONFIG_MFD_CORE) += mfd-core.o
14
11obj-$(CONFIG_MCP) += mcp-core.o 15obj-$(CONFIG_MCP) += mcp-core.o
12obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o 16obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
13obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o 17obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
new file mode 100644
index 000000000000..d7d88ce053a6
--- /dev/null
+++ b/drivers/mfd/mfd-core.c
@@ -0,0 +1,114 @@
1/*
2 * drivers/mfd/mfd-core.c
3 *
4 * core MFD support
5 * Copyright (c) 2006 Ian Molton
6 * Copyright (c) 2007,2008 Dmitry Baryshkov
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/platform_device.h>
16#include <linux/mfd/core.h>
17
18static int mfd_add_device(struct platform_device *parent,
19 const struct mfd_cell *cell,
20 struct resource *mem_base,
21 int irq_base)
22{
23 struct resource res[cell->num_resources];
24 struct platform_device *pdev;
25 int ret = -ENOMEM;
26 int r;
27
28 pdev = platform_device_alloc(cell->name, parent->id);
29 if (!pdev)
30 goto fail_alloc;
31
32 pdev->dev.parent = &parent->dev;
33
34 ret = platform_device_add_data(pdev,
35 cell, sizeof(struct mfd_cell));
36 if (ret)
37 goto fail_device;
38
 39	memset(res, 0, sizeof(res));
40 for (r = 0; r < cell->num_resources; r++) {
41 res[r].name = cell->resources[r].name;
42 res[r].flags = cell->resources[r].flags;
43
44 /* Find out base to use */
45 if (cell->resources[r].flags & IORESOURCE_MEM) {
46 res[r].parent = mem_base;
47 res[r].start = mem_base->start +
48 cell->resources[r].start;
49 res[r].end = mem_base->start +
50 cell->resources[r].end;
51 } else if (cell->resources[r].flags & IORESOURCE_IRQ) {
52 res[r].start = irq_base +
53 cell->resources[r].start;
54 res[r].end = irq_base +
55 cell->resources[r].end;
56 } else {
57 res[r].parent = cell->resources[r].parent;
58 res[r].start = cell->resources[r].start;
59 res[r].end = cell->resources[r].end;
60 }
61 }
62
63 platform_device_add_resources(pdev, res, cell->num_resources);
64
65 ret = platform_device_add(pdev);
66 if (ret)
67 goto fail_device;
68
69 return 0;
70
71/* platform_device_del(pdev); */
72fail_device:
73 platform_device_put(pdev);
74fail_alloc:
75 return ret;
76}
77
78int mfd_add_devices(
79 struct platform_device *parent,
80 const struct mfd_cell *cells, int n_devs,
81 struct resource *mem_base,
82 int irq_base)
83{
84 int i;
85 int ret = 0;
86
87 for (i = 0; i < n_devs; i++) {
88 ret = mfd_add_device(parent, cells + i, mem_base, irq_base);
89 if (ret)
90 break;
91 }
92
93 if (ret)
94 mfd_remove_devices(parent);
95
96 return ret;
97}
98EXPORT_SYMBOL(mfd_add_devices);
99
100static int mfd_remove_devices_fn(struct device *dev, void *unused)
101{
102 platform_device_unregister(
103 container_of(dev, struct platform_device, dev));
104 return 0;
105}
106
107void mfd_remove_devices(struct platform_device *parent)
108{
109 device_for_each_child(&parent->dev, NULL, mfd_remove_devices_fn);
110}
111EXPORT_SYMBOL(mfd_remove_devices);
112
113MODULE_LICENSE("GPL");
114MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
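
The tc6393xb driver introduced below is the first user of these helpers. As a stand-alone sketch of the intended calling pattern (the cell name, resource layout, parent driver and IRQ base here are invented for illustration), a parent MFD driver lists its sub-functions as mfd_cell entries, with MEM and IRQ resources given as offsets that mfd_add_device() rebases onto the parent's iomem window and irq_base, and registers them from probe():

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/mfd/core.h>

/* Hypothetical sub-function: one MMIO window and one interrupt, both
 * expressed as offsets relative to the parent's resources. */
static struct resource example_uart_resources[] = {
	{
		.start	= 0x0200,
		.end	= 0x02ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 3,
		.end	= 3,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct mfd_cell example_cells[] = {
	{
		.name		= "example-uart",
		.num_resources	= ARRAY_SIZE(example_uart_resources),
		.resources	= example_uart_resources,
	},
};

static int example_parent_probe(struct platform_device *pdev)
{
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!iomem)
		return -EINVAL;

	/* Each cell becomes a child platform device parented to pdev;
	 * 100 is an arbitrary base for the demultiplexed IRQs. */
	return mfd_add_devices(pdev, example_cells,
			       ARRAY_SIZE(example_cells), iomem, 100);
}

static int example_parent_remove(struct platform_device *pdev)
{
	mfd_remove_devices(pdev);
	return 0;
}
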
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
new file mode 100644
index 000000000000..2d87501b6fd4
--- /dev/null
+++ b/drivers/mfd/tc6393xb.c
@@ -0,0 +1,600 @@
1/*
2 * Toshiba TC6393XB SoC support
3 *
4 * Copyright(c) 2005-2006 Chris Humbert
5 * Copyright(c) 2005 Dirk Opfer
6 * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
7 * Copyright(c) 2007 Dmitry Baryshkov
8 *
9 * Based on code written by Sharp/Lineo for 2.4 kernels
10 * Based on locomo.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/platform_device.h>
22#include <linux/fb.h>
23#include <linux/clk.h>
24#include <linux/mfd/core.h>
25#include <linux/mfd/tmio.h>
26#include <linux/mfd/tc6393xb.h>
27#include <linux/gpio.h>
28
29#define SCR_REVID 0x08 /* b Revision ID */
30#define SCR_ISR 0x50 /* b Interrupt Status */
31#define SCR_IMR 0x52 /* b Interrupt Mask */
32#define SCR_IRR 0x54 /* b Interrupt Routing */
33#define SCR_GPER 0x60 /* w GP Enable */
34#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
35#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
36#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
37#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
38#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
39#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
40#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
41#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */
42#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
43#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
44#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
45#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
46#define SCR_CCR 0x98 /* w Clock Control */
47#define SCR_PLL2CR 0x9a /* w PLL2 Control */
48#define SCR_PLL1CR 0x9c /* l PLL1 Control */
49#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
50#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
51#define SCR_FER 0xe0 /* b Function Enable */
52#define SCR_MCR 0xe4 /* w Mode Control */
53#define SCR_CONFIG 0xfc /* b Configuration Control */
54#define SCR_DEBUG 0xff /* b Debug */
55
56#define SCR_CCR_CK32K BIT(0)
57#define SCR_CCR_USBCK BIT(1)
58#define SCR_CCR_UNK1 BIT(4)
59#define SCR_CCR_MCLK_MASK (7 << 8)
60#define SCR_CCR_MCLK_OFF (0 << 8)
61#define SCR_CCR_MCLK_12 (1 << 8)
62#define SCR_CCR_MCLK_24 (2 << 8)
63#define SCR_CCR_MCLK_48 (3 << 8)
64#define SCR_CCR_HCLK_MASK (3 << 12)
65#define SCR_CCR_HCLK_24 (0 << 12)
66#define SCR_CCR_HCLK_48 (1 << 12)
67
68#define SCR_FER_USBEN BIT(0) /* USB host enable */
69#define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */
70#define SCR_FER_SLCDEN BIT(2) /* SLCD enable */
71
72#define SCR_MCR_RDY_MASK (3 << 0)
73#define SCR_MCR_RDY_OPENDRAIN (0 << 0)
74#define SCR_MCR_RDY_TRISTATE (1 << 0)
75#define SCR_MCR_RDY_PUSHPULL (2 << 0)
76#define SCR_MCR_RDY_UNK BIT(2)
77#define SCR_MCR_RDY_EN BIT(3)
78#define SCR_MCR_INT_MASK (3 << 4)
79#define SCR_MCR_INT_OPENDRAIN (0 << 4)
80#define SCR_MCR_INT_TRISTATE (1 << 4)
81#define SCR_MCR_INT_PUSHPULL (2 << 4)
82#define SCR_MCR_INT_UNK BIT(6)
83#define SCR_MCR_INT_EN BIT(7)
84/* bits 8 - 16 are unknown */
85
86#define TC_GPIO_BIT(i) (1 << (i & 0x7))
87
88/*--------------------------------------------------------------------------*/
89
90struct tc6393xb {
91 void __iomem *scr;
92
93 struct gpio_chip gpio;
94
 95	struct clk		*clk; /* 3.6 MHz */
96
97 spinlock_t lock; /* protects RMW cycles */
98
99 struct {
100 u8 fer;
101 u16 ccr;
102 u8 gpi_bcr[3];
103 u8 gpo_dsr[3];
104 u8 gpo_doecr[3];
105 } suspend_state;
106
107 struct resource rscr;
108 struct resource *iomem;
109 int irq;
110 int irq_base;
111};
112
113enum {
114 TC6393XB_CELL_NAND,
115};
116
117/*--------------------------------------------------------------------------*/
118
119static int tc6393xb_nand_enable(struct platform_device *nand)
120{
121 struct platform_device *dev = to_platform_device(nand->dev.parent);
122 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
123 unsigned long flags;
124
125 spin_lock_irqsave(&tc6393xb->lock, flags);
126
127 /* SMD buffer on */
128 dev_dbg(&dev->dev, "SMD buffer on\n");
129 iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
130
131 spin_unlock_irqrestore(&tc6393xb->lock, flags);
132
133 return 0;
134}
135
136static struct resource __devinitdata tc6393xb_nand_resources[] = {
137 {
138 .name = TMIO_NAND_CONFIG,
139 .start = 0x0100,
140 .end = 0x01ff,
141 .flags = IORESOURCE_MEM,
142 },
143 {
144 .name = TMIO_NAND_CONTROL,
145 .start = 0x1000,
146 .end = 0x1007,
147 .flags = IORESOURCE_MEM,
148 },
149 {
150 .name = TMIO_NAND_IRQ,
151 .start = IRQ_TC6393_NAND,
152 .end = IRQ_TC6393_NAND,
153 .flags = IORESOURCE_IRQ,
154 },
155};
156
157static struct mfd_cell __devinitdata tc6393xb_cells[] = {
158 [TC6393XB_CELL_NAND] = {
159 .name = "tmio-nand",
160 .enable = tc6393xb_nand_enable,
161 .num_resources = ARRAY_SIZE(tc6393xb_nand_resources),
162 .resources = tc6393xb_nand_resources,
163 },
164};
165
166/*--------------------------------------------------------------------------*/
167
168static int tc6393xb_gpio_get(struct gpio_chip *chip,
169 unsigned offset)
170{
171 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
172
173 /* XXX: does dsr also represent inputs? */
174 return ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
175 & TC_GPIO_BIT(offset);
176}
177
178static void __tc6393xb_gpio_set(struct gpio_chip *chip,
179 unsigned offset, int value)
180{
181 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
182 u8 dsr;
183
184 dsr = ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));
185 if (value)
186 dsr |= TC_GPIO_BIT(offset);
187 else
188 dsr &= ~TC_GPIO_BIT(offset);
189
190 iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8));
191}
192
193static void tc6393xb_gpio_set(struct gpio_chip *chip,
194 unsigned offset, int value)
195{
196 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
197 unsigned long flags;
198
199 spin_lock_irqsave(&tc6393xb->lock, flags);
200
201 __tc6393xb_gpio_set(chip, offset, value);
202
203 spin_unlock_irqrestore(&tc6393xb->lock, flags);
204}
205
206static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
207 unsigned offset)
208{
209 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
210 unsigned long flags;
211 u8 doecr;
212
213 spin_lock_irqsave(&tc6393xb->lock, flags);
214
215 doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
216 doecr &= ~TC_GPIO_BIT(offset);
217 iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
218
219 spin_unlock_irqrestore(&tc6393xb->lock, flags);
220
221 return 0;
222}
223
224static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
225 unsigned offset, int value)
226{
227 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
228 unsigned long flags;
229 u8 doecr;
230
231 spin_lock_irqsave(&tc6393xb->lock, flags);
232
233 __tc6393xb_gpio_set(chip, offset, value);
234
235 doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
236 doecr |= TC_GPIO_BIT(offset);
237 iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
238
239 spin_unlock_irqrestore(&tc6393xb->lock, flags);
240
241 return 0;
242}
243
244static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
245{
246 tc6393xb->gpio.label = "tc6393xb";
247 tc6393xb->gpio.base = gpio_base;
248 tc6393xb->gpio.ngpio = 16;
249 tc6393xb->gpio.set = tc6393xb_gpio_set;
250 tc6393xb->gpio.get = tc6393xb_gpio_get;
251 tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
252 tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
253
254 return gpiochip_add(&tc6393xb->gpio);
255}
256
257/*--------------------------------------------------------------------------*/
258
259static void
260tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
261{
262 struct tc6393xb *tc6393xb = get_irq_data(irq);
263 unsigned int isr;
264 unsigned int i, irq_base;
265
266 irq_base = tc6393xb->irq_base;
267
268 while ((isr = ioread8(tc6393xb->scr + SCR_ISR) &
269 ~ioread8(tc6393xb->scr + SCR_IMR)))
270 for (i = 0; i < TC6393XB_NR_IRQS; i++) {
271 if (isr & (1 << i))
272 generic_handle_irq(irq_base + i);
273 }
274}
275
276static void tc6393xb_irq_ack(unsigned int irq)
277{
278}
279
280static void tc6393xb_irq_mask(unsigned int irq)
281{
282 struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
283 unsigned long flags;
284 u8 imr;
285
286 spin_lock_irqsave(&tc6393xb->lock, flags);
287 imr = ioread8(tc6393xb->scr + SCR_IMR);
288 imr |= 1 << (irq - tc6393xb->irq_base);
289 iowrite8(imr, tc6393xb->scr + SCR_IMR);
290 spin_unlock_irqrestore(&tc6393xb->lock, flags);
291}
292
293static void tc6393xb_irq_unmask(unsigned int irq)
294{
295 struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
296 unsigned long flags;
297 u8 imr;
298
299 spin_lock_irqsave(&tc6393xb->lock, flags);
300 imr = ioread8(tc6393xb->scr + SCR_IMR);
301 imr &= ~(1 << (irq - tc6393xb->irq_base));
302 iowrite8(imr, tc6393xb->scr + SCR_IMR);
303 spin_unlock_irqrestore(&tc6393xb->lock, flags);
304}
305
306static struct irq_chip tc6393xb_chip = {
307 .name = "tc6393xb",
308 .ack = tc6393xb_irq_ack,
309 .mask = tc6393xb_irq_mask,
310 .unmask = tc6393xb_irq_unmask,
311};
312
313static void tc6393xb_attach_irq(struct platform_device *dev)
314{
315 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
316 unsigned int irq, irq_base;
317
318 irq_base = tc6393xb->irq_base;
319
320 for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
321 set_irq_chip(irq, &tc6393xb_chip);
322 set_irq_chip_data(irq, tc6393xb);
323 set_irq_handler(irq, handle_edge_irq);
324 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
325 }
326
327 set_irq_type(tc6393xb->irq, IRQT_FALLING);
328 set_irq_data(tc6393xb->irq, tc6393xb);
329 set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq);
330}
331
332static void tc6393xb_detach_irq(struct platform_device *dev)
333{
334 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
335 unsigned int irq, irq_base;
336
337 set_irq_chained_handler(tc6393xb->irq, NULL);
338 set_irq_data(tc6393xb->irq, NULL);
339
340 irq_base = tc6393xb->irq_base;
341
342 for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
343 set_irq_flags(irq, 0);
344 set_irq_chip(irq, NULL);
345 set_irq_chip_data(irq, NULL);
346 }
347}
348
349/*--------------------------------------------------------------------------*/
350
351static int tc6393xb_hw_init(struct platform_device *dev)
352{
353 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
354 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
355 int i;
356
357 iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER);
358 iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
359 iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR);
360 iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
361 SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
362 BIT(15), tc6393xb->scr + SCR_MCR);
363 iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
364 iowrite8(0, tc6393xb->scr + SCR_IRR);
365 iowrite8(0xbf, tc6393xb->scr + SCR_IMR);
366
367 for (i = 0; i < 3; i++) {
368 iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
369 tc6393xb->scr + SCR_GPO_DSR(i));
370 iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
371 tc6393xb->scr + SCR_GPO_DOECR(i));
372 iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
373 tc6393xb->scr + SCR_GPI_BCR(i));
374 }
375
376 return 0;
377}
378
379static int __devinit tc6393xb_probe(struct platform_device *dev)
380{
381 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
382 struct tc6393xb *tc6393xb;
383 struct resource *iomem;
384 struct resource *rscr;
385 int retval, temp;
386 int i;
387
388 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
389 if (!iomem)
390 return -EINVAL;
391
392 tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL);
393 if (!tc6393xb) {
394 retval = -ENOMEM;
395 goto err_kzalloc;
396 }
397
398 spin_lock_init(&tc6393xb->lock);
399
400 platform_set_drvdata(dev, tc6393xb);
401 tc6393xb->iomem = iomem;
402 tc6393xb->irq = platform_get_irq(dev, 0);
403 tc6393xb->irq_base = tcpd->irq_base;
404
405 tc6393xb->clk = clk_get(&dev->dev, "GPIO27_CLK" /* "CK3P6MI" */);
406 if (IS_ERR(tc6393xb->clk)) {
407 retval = PTR_ERR(tc6393xb->clk);
408 goto err_clk_get;
409 }
410
411 rscr = &tc6393xb->rscr;
412 rscr->name = "tc6393xb-core";
413 rscr->start = iomem->start;
414 rscr->end = iomem->start + 0xff;
415 rscr->flags = IORESOURCE_MEM;
416
417 retval = request_resource(iomem, rscr);
418 if (retval)
419 goto err_request_scr;
420
421 tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
422 if (!tc6393xb->scr) {
423 retval = -ENOMEM;
424 goto err_ioremap;
425 }
426
427 retval = clk_enable(tc6393xb->clk);
428 if (retval)
429 goto err_clk_enable;
430
431 retval = tcpd->enable(dev);
432 if (retval)
433 goto err_enable;
434
435 tc6393xb->suspend_state.fer = 0;
436 for (i = 0; i < 3; i++) {
437 tc6393xb->suspend_state.gpo_dsr[i] =
438 (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff;
439 tc6393xb->suspend_state.gpo_doecr[i] =
440 (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff;
441 }
442 /*
443 * It may be necessary to change this back to
 444	 * platform-dependent code
445 */
446 tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 |
447 SCR_CCR_HCLK_48;
448
449 retval = tc6393xb_hw_init(dev);
450 if (retval)
451 goto err_hw_init;
452
453 printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
454 ioread8(tc6393xb->scr + SCR_REVID),
455 (unsigned long) iomem->start, tc6393xb->irq);
456
457 tc6393xb->gpio.base = -1;
458
459 if (tcpd->gpio_base >= 0) {
460 retval = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
461 if (retval)
462 goto err_gpio_add;
463 }
464
465 if (tc6393xb->irq)
466 tc6393xb_attach_irq(dev);
467
468 tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
469
470 retval = mfd_add_devices(dev,
471 tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
472 iomem, tcpd->irq_base);
473
474 return 0;
475
476 if (tc6393xb->irq)
477 tc6393xb_detach_irq(dev);
478
479err_gpio_add:
480 if (tc6393xb->gpio.base != -1)
481 temp = gpiochip_remove(&tc6393xb->gpio);
482err_hw_init:
483 tcpd->disable(dev);
484err_clk_enable:
485 clk_disable(tc6393xb->clk);
486err_enable:
487 iounmap(tc6393xb->scr);
488err_ioremap:
489 release_resource(&tc6393xb->rscr);
490err_request_scr:
491 clk_put(tc6393xb->clk);
492err_clk_get:
493 kfree(tc6393xb);
494err_kzalloc:
495 return retval;
496}
497
498static int __devexit tc6393xb_remove(struct platform_device *dev)
499{
500 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
501 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
502 int ret;
503
504 mfd_remove_devices(dev);
505
506 if (tc6393xb->irq)
507 tc6393xb_detach_irq(dev);
508
509 if (tc6393xb->gpio.base != -1) {
510 ret = gpiochip_remove(&tc6393xb->gpio);
511 if (ret) {
512 dev_err(&dev->dev, "Can't remove gpio chip: %d\n", ret);
513 return ret;
514 }
515 }
516
517 ret = tcpd->disable(dev);
518
519 clk_disable(tc6393xb->clk);
520
521 iounmap(tc6393xb->scr);
522
523 release_resource(&tc6393xb->rscr);
524
525 platform_set_drvdata(dev, NULL);
526
527 clk_put(tc6393xb->clk);
528
529 kfree(tc6393xb);
530
531 return ret;
532}
533
534#ifdef CONFIG_PM
535static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
536{
537 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
538 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
539 int i;
540
541
542 tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR);
543 tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER);
544
545 for (i = 0; i < 3; i++) {
546 tc6393xb->suspend_state.gpo_dsr[i] =
547 ioread8(tc6393xb->scr + SCR_GPO_DSR(i));
548 tc6393xb->suspend_state.gpo_doecr[i] =
549 ioread8(tc6393xb->scr + SCR_GPO_DOECR(i));
550 tc6393xb->suspend_state.gpi_bcr[i] =
551 ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
552 }
553
554 return tcpd->suspend(dev);
555}
556
557static int tc6393xb_resume(struct platform_device *dev)
558{
559 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
560 int ret = tcpd->resume(dev);
561
562 if (ret)
563 return ret;
564
565 return tc6393xb_hw_init(dev);
566}
567#else
568#define tc6393xb_suspend NULL
569#define tc6393xb_resume NULL
570#endif
571
572static struct platform_driver tc6393xb_driver = {
573 .probe = tc6393xb_probe,
574 .remove = __devexit_p(tc6393xb_remove),
575 .suspend = tc6393xb_suspend,
576 .resume = tc6393xb_resume,
577
578 .driver = {
579 .name = "tc6393xb",
580 .owner = THIS_MODULE,
581 },
582};
583
584static int __init tc6393xb_init(void)
585{
586 return platform_driver_register(&tc6393xb_driver);
587}
588
589static void __exit tc6393xb_exit(void)
590{
591 platform_driver_unregister(&tc6393xb_driver);
592}
593
594subsys_initcall(tc6393xb_init);
595module_exit(tc6393xb_exit);
596
597MODULE_LICENSE("GPL");
598MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
599MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
600MODULE_ALIAS("platform:tc6393xb");
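
Hooking the chip up is left to board code. A hypothetical board-file sketch follows (base address, IRQ number, register values and callback bodies are made up; the field names mirror the tcpd-> accesses in the probe, remove and PM paths above, and the driver calls every one of these callbacks unconditionally):

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/mfd/tc6393xb.h>

/* Board-specific power handling, bodies omitted. */
static int example_tc6393xb_power_on(struct platform_device *dev)
{
	return 0;
}

static int example_tc6393xb_power_off(struct platform_device *dev)
{
	return 0;
}

static struct tc6393xb_platform_data example_tc6393xb_info = {
	.irq_base	= 128,		/* first demultiplexed IRQ (made up) */
	.gpio_base	= 96,		/* first gpiolib number for the 16 GPIOs (made up) */
	.scr_pll2cr	= 0x0cc1,	/* PLL2 setting, chip/board specific */
	.scr_gper	= 0,
	.enable		= example_tc6393xb_power_on,
	.disable	= example_tc6393xb_power_off,
	.suspend	= example_tc6393xb_power_off,
	.resume		= example_tc6393xb_power_on,
};

static struct resource example_tc6393xb_resources[] = {
	{
		.start	= 0x10000000,	/* chip-select window, board specific */
		.end	= 0x10ffffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,		/* parent interrupt line, board specific */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_tc6393xb_device = {
	.name		= "tc6393xb",
	.id		= -1,
	.dev		= {
		.platform_data	= &example_tc6393xb_info,
	},
	.num_resources	= ARRAY_SIZE(example_tc6393xb_resources),
	.resource	= example_tc6393xb_resources,
};

/* Board init code would then call platform_device_register(&example_tc6393xb_device). */
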
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 08256ed0d9a6..579b01ff82d4 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore)
229 int last_IRQ_count = 0; 229 int last_IRQ_count = 0;
230 int new_IRQ_count; 230 int new_IRQ_count;
231 int force_IRQ = 0; 231 int force_IRQ = 0;
232 cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
232 233
233 /* this thread was marked active by xpc_hb_init() */ 234 /* this thread was marked active by xpc_hb_init() */
234 235
235 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); 236 set_cpus_allowed_ptr(current, cpumask);
236 237
237 /* set our heartbeating to other partitions into motion */ 238 /* set our heartbeating to other partitions into motion */
238 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 239 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index d6b9b486417c..a067fe436301 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -21,13 +21,17 @@
21#define RESULT_UNSUP_HOST 2 21#define RESULT_UNSUP_HOST 2
22#define RESULT_UNSUP_CARD 3 22#define RESULT_UNSUP_CARD 3
23 23
24#define BUFFER_SIZE (PAGE_SIZE * 4) 24#define BUFFER_ORDER 2
25#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
25 26
26struct mmc_test_card { 27struct mmc_test_card {
27 struct mmc_card *card; 28 struct mmc_card *card;
28 29
29 u8 scratch[BUFFER_SIZE]; 30 u8 scratch[BUFFER_SIZE];
30 u8 *buffer; 31 u8 *buffer;
32#ifdef CONFIG_HIGHMEM
33 struct page *highmem;
34#endif
31}; 35};
32 36
33/*******************************************************************/ 37/*******************************************************************/
@@ -384,14 +388,16 @@ static int mmc_test_transfer(struct mmc_test_card *test,
384 int ret, i; 388 int ret, i;
385 unsigned long flags; 389 unsigned long flags;
386 390
391 BUG_ON(blocks * blksz > BUFFER_SIZE);
392
387 if (write) { 393 if (write) {
388 for (i = 0;i < blocks * blksz;i++) 394 for (i = 0;i < blocks * blksz;i++)
389 test->scratch[i] = i; 395 test->scratch[i] = i;
390 } else { 396 } else {
391 memset(test->scratch, 0, BUFFER_SIZE); 397 memset(test->scratch, 0, blocks * blksz);
392 } 398 }
393 local_irq_save(flags); 399 local_irq_save(flags);
394 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 400 sg_copy_from_buffer(sg, sg_len, test->scratch, blocks * blksz);
395 local_irq_restore(flags); 401 local_irq_restore(flags);
396 402
397 ret = mmc_test_set_blksize(test, blksz); 403 ret = mmc_test_set_blksize(test, blksz);
@@ -438,7 +444,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
438 } 444 }
439 } else { 445 } else {
440 local_irq_save(flags); 446 local_irq_save(flags);
441 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 447 sg_copy_to_buffer(sg, sg_len, test->scratch, blocks * blksz);
442 local_irq_restore(flags); 448 local_irq_restore(flags);
443 for (i = 0;i < blocks * blksz;i++) { 449 for (i = 0;i < blocks * blksz;i++) {
444 if (test->scratch[i] != (u8)i) 450 if (test->scratch[i] != (u8)i)
@@ -799,6 +805,157 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
799 return 0; 805 return 0;
800} 806}
801 807
808static int mmc_test_bigsg_write(struct mmc_test_card *test)
809{
810 int ret;
811 unsigned int size;
812 struct scatterlist sg;
813
814 if (test->card->host->max_blk_count == 1)
815 return RESULT_UNSUP_HOST;
816
817 size = PAGE_SIZE * 2;
818 size = min(size, test->card->host->max_req_size);
819 size = min(size, test->card->host->max_seg_size);
820 size = min(size, test->card->host->max_blk_count * 512);
821
822 memset(test->buffer, 0, BUFFER_SIZE);
823
824 if (size < 1024)
825 return RESULT_UNSUP_HOST;
826
827 sg_init_table(&sg, 1);
828 sg_init_one(&sg, test->buffer, BUFFER_SIZE);
829
830 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
831 if (ret)
832 return ret;
833
834 return 0;
835}
836
837static int mmc_test_bigsg_read(struct mmc_test_card *test)
838{
839 int ret, i;
840 unsigned int size;
841 struct scatterlist sg;
842
843 if (test->card->host->max_blk_count == 1)
844 return RESULT_UNSUP_HOST;
845
846 size = PAGE_SIZE * 2;
847 size = min(size, test->card->host->max_req_size);
848 size = min(size, test->card->host->max_seg_size);
849 size = min(size, test->card->host->max_blk_count * 512);
850
851 if (size < 1024)
852 return RESULT_UNSUP_HOST;
853
854 memset(test->buffer, 0xCD, BUFFER_SIZE);
855
856 sg_init_table(&sg, 1);
857 sg_init_one(&sg, test->buffer, BUFFER_SIZE);
858 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
859 if (ret)
860 return ret;
861
862 /* mmc_test_transfer() doesn't check for read overflows */
863 for (i = size;i < BUFFER_SIZE;i++) {
864 if (test->buffer[i] != 0xCD)
865 return RESULT_FAIL;
866 }
867
868 return 0;
869}
870
871#ifdef CONFIG_HIGHMEM
872
873static int mmc_test_write_high(struct mmc_test_card *test)
874{
875 int ret;
876 struct scatterlist sg;
877
878 sg_init_table(&sg, 1);
879 sg_set_page(&sg, test->highmem, 512, 0);
880
881 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
882 if (ret)
883 return ret;
884
885 return 0;
886}
887
888static int mmc_test_read_high(struct mmc_test_card *test)
889{
890 int ret;
891 struct scatterlist sg;
892
893 sg_init_table(&sg, 1);
894 sg_set_page(&sg, test->highmem, 512, 0);
895
896 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
897 if (ret)
898 return ret;
899
900 return 0;
901}
902
903static int mmc_test_multi_write_high(struct mmc_test_card *test)
904{
905 int ret;
906 unsigned int size;
907 struct scatterlist sg;
908
909 if (test->card->host->max_blk_count == 1)
910 return RESULT_UNSUP_HOST;
911
912 size = PAGE_SIZE * 2;
913 size = min(size, test->card->host->max_req_size);
914 size = min(size, test->card->host->max_seg_size);
915 size = min(size, test->card->host->max_blk_count * 512);
916
917 if (size < 1024)
918 return RESULT_UNSUP_HOST;
919
920 sg_init_table(&sg, 1);
921 sg_set_page(&sg, test->highmem, size, 0);
922
923 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
924 if (ret)
925 return ret;
926
927 return 0;
928}
929
930static int mmc_test_multi_read_high(struct mmc_test_card *test)
931{
932 int ret;
933 unsigned int size;
934 struct scatterlist sg;
935
936 if (test->card->host->max_blk_count == 1)
937 return RESULT_UNSUP_HOST;
938
939 size = PAGE_SIZE * 2;
940 size = min(size, test->card->host->max_req_size);
941 size = min(size, test->card->host->max_seg_size);
942 size = min(size, test->card->host->max_blk_count * 512);
943
944 if (size < 1024)
945 return RESULT_UNSUP_HOST;
946
947 sg_init_table(&sg, 1);
948 sg_set_page(&sg, test->highmem, size, 0);
949
950 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
951 if (ret)
952 return ret;
953
954 return 0;
955}
956
957#endif /* CONFIG_HIGHMEM */
958
802static const struct mmc_test_case mmc_test_cases[] = { 959static const struct mmc_test_case mmc_test_cases[] = {
803 { 960 {
804 .name = "Basic write (no data verification)", 961 .name = "Basic write (no data verification)",
@@ -913,6 +1070,53 @@ static const struct mmc_test_case mmc_test_cases[] = {
913 .name = "Correct xfer_size at read (midway failure)", 1070 .name = "Correct xfer_size at read (midway failure)",
914 .run = mmc_test_multi_xfersize_read, 1071 .run = mmc_test_multi_xfersize_read,
915 }, 1072 },
1073
1074 {
1075 .name = "Over-sized SG list write",
1076 .prepare = mmc_test_prepare_write,
1077 .run = mmc_test_bigsg_write,
1078 .cleanup = mmc_test_cleanup,
1079 },
1080
1081 {
1082 .name = "Over-sized SG list read",
1083 .prepare = mmc_test_prepare_read,
1084 .run = mmc_test_bigsg_read,
1085 .cleanup = mmc_test_cleanup,
1086 },
1087
1088#ifdef CONFIG_HIGHMEM
1089
1090 {
1091 .name = "Highmem write",
1092 .prepare = mmc_test_prepare_write,
1093 .run = mmc_test_write_high,
1094 .cleanup = mmc_test_cleanup,
1095 },
1096
1097 {
1098 .name = "Highmem read",
1099 .prepare = mmc_test_prepare_read,
1100 .run = mmc_test_read_high,
1101 .cleanup = mmc_test_cleanup,
1102 },
1103
1104 {
1105 .name = "Multi-block highmem write",
1106 .prepare = mmc_test_prepare_write,
1107 .run = mmc_test_multi_write_high,
1108 .cleanup = mmc_test_cleanup,
1109 },
1110
1111 {
1112 .name = "Multi-block highmem read",
1113 .prepare = mmc_test_prepare_read,
1114 .run = mmc_test_multi_read_high,
1115 .cleanup = mmc_test_cleanup,
1116 },
1117
1118#endif /* CONFIG_HIGHMEM */
1119
916}; 1120};
917 1121
918static struct mutex mmc_test_lock; 1122static struct mutex mmc_test_lock;
@@ -1014,12 +1218,23 @@ static ssize_t mmc_test_store(struct device *dev,
1014 test->card = card; 1218 test->card = card;
1015 1219
1016 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 1220 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
1221#ifdef CONFIG_HIGHMEM
1222 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
1223#endif
1224
1225#ifdef CONFIG_HIGHMEM
1226 if (test->buffer && test->highmem) {
1227#else
1017 if (test->buffer) { 1228 if (test->buffer) {
1229#endif
1018 mutex_lock(&mmc_test_lock); 1230 mutex_lock(&mmc_test_lock);
1019 mmc_test_run(test, testcase); 1231 mmc_test_run(test, testcase);
1020 mutex_unlock(&mmc_test_lock); 1232 mutex_unlock(&mmc_test_lock);
1021 } 1233 }
1022 1234
1235#ifdef CONFIG_HIGHMEM
1236 __free_pages(test->highmem, BUFFER_ORDER);
1237#endif
1023 kfree(test->buffer); 1238 kfree(test->buffer);
1024 kfree(test); 1239 kfree(test);
1025 1240
@@ -1041,6 +1256,8 @@ static int mmc_test_probe(struct mmc_card *card)
1041 if (ret) 1256 if (ret)
1042 return ret; 1257 return ret;
1043 1258
1259 dev_info(&card->dev, "Card claimed for testing.\n");
1260
1044 return 0; 1261 return 0;
1045} 1262}
1046 1263
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731ddefdc1b..3dee97e7d165 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
148 printk(KERN_WARNING "%s: unable to allocate " 148 printk(KERN_WARNING "%s: unable to allocate "
149 "bounce buffer\n", mmc_card_name(card)); 149 "bounce buffer\n", mmc_card_name(card));
150 } else { 150 } else {
151 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); 151 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
152 blk_queue_max_sectors(mq->queue, bouncesz / 512); 152 blk_queue_max_sectors(mq->queue, bouncesz / 512);
153 blk_queue_max_phys_segments(mq->queue, bouncesz / 512); 153 blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
154 blk_queue_max_hw_segments(mq->queue, bouncesz / 512); 154 blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
290 } 290 }
291} 291}
292 292
293static void copy_sg(struct scatterlist *dst, unsigned int dst_len, 293/*
 294 struct scatterlist *src, unsigned int src_len) 294 * Prepare the sg list(s) to be handed off to the host driver
295{ 295 */
296 unsigned int chunk;
297 char *dst_buf, *src_buf;
298 unsigned int dst_size, src_size;
299
300 dst_buf = NULL;
301 src_buf = NULL;
302 dst_size = 0;
303 src_size = 0;
304
305 while (src_len) {
306 BUG_ON(dst_len == 0);
307
308 if (dst_size == 0) {
309 dst_buf = sg_virt(dst);
310 dst_size = dst->length;
311 }
312
313 if (src_size == 0) {
314 src_buf = sg_virt(src);
315 src_size = src->length;
316 }
317
318 chunk = min(dst_size, src_size);
319
320 memcpy(dst_buf, src_buf, chunk);
321
322 dst_buf += chunk;
323 src_buf += chunk;
324 dst_size -= chunk;
325 src_size -= chunk;
326
327 if (dst_size == 0) {
328 dst++;
329 dst_len--;
330 }
331
332 if (src_size == 0) {
333 src++;
334 src_len--;
335 }
336 }
337}
338
339unsigned int mmc_queue_map_sg(struct mmc_queue *mq) 296unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
340{ 297{
341 unsigned int sg_len; 298 unsigned int sg_len;
299 size_t buflen;
300 struct scatterlist *sg;
301 int i;
342 302
343 if (!mq->bounce_buf) 303 if (!mq->bounce_buf)
344 return blk_rq_map_sg(mq->queue, mq->req, mq->sg); 304 return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
349 309
350 mq->bounce_sg_len = sg_len; 310 mq->bounce_sg_len = sg_len;
351 311
352 /* 312 buflen = 0;
353 * Shortcut in the event we only get a single entry. 313 for_each_sg(mq->bounce_sg, sg, sg_len, i)
354 */ 314 buflen += sg->length;
355 if (sg_len == 1) {
356 memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
357 return 1;
358 }
359 315
360 sg_init_one(mq->sg, mq->bounce_buf, 0); 316 sg_init_one(mq->sg, mq->bounce_buf, buflen);
361
362 while (sg_len) {
363 mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
364 sg_len--;
365 }
366 317
367 return 1; 318 return 1;
368} 319}
369 320
321/*
322 * If writing, bounce the data to the buffer before the request
323 * is sent to the host driver
324 */
370void mmc_queue_bounce_pre(struct mmc_queue *mq) 325void mmc_queue_bounce_pre(struct mmc_queue *mq)
371{ 326{
327 unsigned long flags;
328
372 if (!mq->bounce_buf) 329 if (!mq->bounce_buf)
373 return; 330 return;
374 331
375 if (mq->bounce_sg_len == 1)
376 return;
377 if (rq_data_dir(mq->req) != WRITE) 332 if (rq_data_dir(mq->req) != WRITE)
378 return; 333 return;
379 334
380 copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len); 335 local_irq_save(flags);
336 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
337 mq->bounce_buf, mq->sg[0].length);
338 local_irq_restore(flags);
381} 339}
382 340
341/*
342 * If reading, bounce the data from the buffer after the request
343 * has been handled by the host driver
344 */
383void mmc_queue_bounce_post(struct mmc_queue *mq) 345void mmc_queue_bounce_post(struct mmc_queue *mq)
384{ 346{
347 unsigned long flags;
348
385 if (!mq->bounce_buf) 349 if (!mq->bounce_buf)
386 return; 350 return;
387 351
388 if (mq->bounce_sg_len == 1)
389 return;
390 if (rq_data_dir(mq->req) != READ) 352 if (rq_data_dir(mq->req) != READ)
391 return; 353 return;
392 354
393 copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1); 355 local_irq_save(flags);
356 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
357 mq->bounce_buf, mq->sg[0].length);
358 local_irq_restore(flags);
394} 359}
395 360
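
The rewritten bounce path leans on the generic sg_copy_*() helpers, whose direction is named from the scatterlist's point of view: sg_copy_to_buffer() drains the sg pages into a linear buffer, sg_copy_from_buffer() fills the sg pages from one. A small self-contained sketch (the buffer size and demo function are arbitrary):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Demo only: one sg entry backed by a kmalloc'd page, bounced both ways. */
static int example_bounce(void)
{
	struct scatterlist sg;
	void *data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	void *bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	int ret = -ENOMEM;

	if (!data || !bounce)
		goto out;

	sg_init_one(&sg, data, PAGE_SIZE);

	/* Write path (mmc_queue_bounce_pre): gather the sg pages into the
	 * linear bounce buffer before handing it to the host driver. */
	sg_copy_to_buffer(&sg, 1, bounce, PAGE_SIZE);

	/* Read path (mmc_queue_bounce_post): scatter the bounce buffer
	 * back out to the sg pages after the host has filled it. */
	sg_copy_from_buffer(&sg, 1, bounce, PAGE_SIZE);

	ret = 0;
out:
	kfree(bounce);
	kfree(data);
	return ret;
}
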
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 3f15eb204895..99b20917cc0f 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1043,7 +1043,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
1043 goto out6; 1043 goto out6;
1044 } 1044 }
1045 1045
1046 platform_set_drvdata(pdev, mmc); 1046 platform_set_drvdata(pdev, host);
1047 1047
1048 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" 1048 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
1049 " (mode=%s)\n", pdev->id, host->iobase, 1049 " (mode=%s)\n", pdev->id, host->iobase,
@@ -1087,13 +1087,10 @@ out0:
1087 1087
1088static int __devexit au1xmmc_remove(struct platform_device *pdev) 1088static int __devexit au1xmmc_remove(struct platform_device *pdev)
1089{ 1089{
1090 struct mmc_host *mmc = platform_get_drvdata(pdev); 1090 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1091 struct au1xmmc_host *host;
1092
1093 if (mmc) {
1094 host = mmc_priv(mmc);
1095 1091
1096 mmc_remove_host(mmc); 1092 if (host) {
1093 mmc_remove_host(host->mmc);
1097 1094
1098#ifdef CONFIG_LEDS_CLASS 1095#ifdef CONFIG_LEDS_CLASS
1099 if (host->platdata && host->platdata->led) 1096 if (host->platdata && host->platdata->led)
@@ -1101,8 +1098,8 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
1101#endif 1098#endif
1102 1099
1103 if (host->platdata && host->platdata->cd_setup && 1100 if (host->platdata && host->platdata->cd_setup &&
1104 !(mmc->caps & MMC_CAP_NEEDS_POLL)) 1101 !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
1105 host->platdata->cd_setup(mmc, 0); 1102 host->platdata->cd_setup(host->mmc, 0);
1106 1103
1107 au_writel(0, HOST_ENABLE(host)); 1104 au_writel(0, HOST_ENABLE(host));
1108 au_writel(0, HOST_CONFIG(host)); 1105 au_writel(0, HOST_CONFIG(host));
@@ -1122,16 +1119,49 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
1122 release_resource(host->ioarea); 1119 release_resource(host->ioarea);
1123 kfree(host->ioarea); 1120 kfree(host->ioarea);
1124 1121
1125 mmc_free_host(mmc); 1122 mmc_free_host(host->mmc);
1123 platform_set_drvdata(pdev, NULL);
1126 } 1124 }
1127 return 0; 1125 return 0;
1128} 1126}
1129 1127
1128#ifdef CONFIG_PM
1129static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1130{
1131 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1132 int ret;
1133
1134 ret = mmc_suspend_host(host->mmc, state);
1135 if (ret)
1136 return ret;
1137
1138 au_writel(0, HOST_CONFIG2(host));
1139 au_writel(0, HOST_CONFIG(host));
1140 au_writel(0xffffffff, HOST_STATUS(host));
1141 au_writel(0, HOST_ENABLE(host));
1142 au_sync();
1143
1144 return 0;
1145}
1146
1147static int au1xmmc_resume(struct platform_device *pdev)
1148{
1149 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1150
1151 au1xmmc_reset_controller(host);
1152
1153 return mmc_resume_host(host->mmc);
1154}
1155#else
1156#define au1xmmc_suspend NULL
1157#define au1xmmc_resume NULL
1158#endif
1159
1130static struct platform_driver au1xmmc_driver = { 1160static struct platform_driver au1xmmc_driver = {
1131 .probe = au1xmmc_probe, 1161 .probe = au1xmmc_probe,
1132 .remove = au1xmmc_remove, 1162 .remove = au1xmmc_remove,
1133 .suspend = NULL, 1163 .suspend = au1xmmc_suspend,
1134 .resume = NULL, 1164 .resume = au1xmmc_resume,
1135 .driver = { 1165 .driver = {
1136 .name = DRIVER_NAME, 1166 .name = DRIVER_NAME,
1137 .owner = THIS_MODULE, 1167 .owner = THIS_MODULE,
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index d39f59738866..a8e18fe53077 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -177,7 +177,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
177 if (dalgn) 177 if (dalgn)
178 DALGN |= (1 << host->dma); 178 DALGN |= (1 << host->dma);
179 else 179 else
180 DALGN &= (1 << host->dma); 180 DALGN &= ~(1 << host->dma);
181 DDADR(host->dma) = host->sg_dma; 181 DDADR(host->dma) = host->sg_dma;
182 DCSR(host->dma) = DCSR_RUN; 182 DCSR(host->dma) = DCSR_RUN;
183} 183}
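
The one-character pxamci fix above is easy to misread: "&=" with the plain mask keeps only that channel's DALGN bit and wipes every other channel's alignment setting, while "&= ~mask" clears just the intended bit. A tiny illustration with made-up register contents:

#include <linux/types.h>

/* Suppose channels 3, 5 and 9 currently have their DALGN bits set and we
 * want to drop only channel 5's bit. */
static u32 example_dalgn_clear(void)
{
	u32 dalgn = (1 << 3) | (1 << 5) | (1 << 9);	/* 0x228 */

	dalgn &= ~(1 << 5);	/* new code: 0x208, bits 3 and 9 preserved */

	/* The old "dalgn &= (1 << 5)" would instead have produced 0x020,
	 * silently discarding every other channel's alignment setting. */

	return dalgn;
}
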
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 6a1e4994b724..be550c26da68 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1331,21 +1331,30 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1331 return ret; 1331 return ret;
1332} 1332}
1333 1333
1334static void s3cmci_shutdown(struct platform_device *pdev)
1335{
1336 struct mmc_host *mmc = platform_get_drvdata(pdev);
1337 struct s3cmci_host *host = mmc_priv(mmc);
1338
1339 if (host->irq_cd >= 0)
1340 free_irq(host->irq_cd, host);
1341
1342 mmc_remove_host(mmc);
1343 clk_disable(host->clk);
1344}
1345
1334static int __devexit s3cmci_remove(struct platform_device *pdev) 1346static int __devexit s3cmci_remove(struct platform_device *pdev)
1335{ 1347{
1336 struct mmc_host *mmc = platform_get_drvdata(pdev); 1348 struct mmc_host *mmc = platform_get_drvdata(pdev);
1337 struct s3cmci_host *host = mmc_priv(mmc); 1349 struct s3cmci_host *host = mmc_priv(mmc);
1338 1350
1339 mmc_remove_host(mmc); 1351 s3cmci_shutdown(pdev);
1340 1352
1341 clk_disable(host->clk);
1342 clk_put(host->clk); 1353 clk_put(host->clk);
1343 1354
1344 tasklet_disable(&host->pio_tasklet); 1355 tasklet_disable(&host->pio_tasklet);
1345 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); 1356 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);
1346 1357
1347 if (host->irq_cd >= 0)
1348 free_irq(host->irq_cd, host);
1349 free_irq(host->irq, host); 1358 free_irq(host->irq, host);
1350 1359
1351 iounmap(host->base); 1360 iounmap(host->base);
@@ -1355,17 +1364,17 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
1355 return 0; 1364 return 0;
1356} 1365}
1357 1366
1358static int __devinit s3cmci_probe_2410(struct platform_device *dev) 1367static int __devinit s3cmci_2410_probe(struct platform_device *dev)
1359{ 1368{
1360 return s3cmci_probe(dev, 0); 1369 return s3cmci_probe(dev, 0);
1361} 1370}
1362 1371
1363static int __devinit s3cmci_probe_2412(struct platform_device *dev) 1372static int __devinit s3cmci_2412_probe(struct platform_device *dev)
1364{ 1373{
1365 return s3cmci_probe(dev, 1); 1374 return s3cmci_probe(dev, 1);
1366} 1375}
1367 1376
1368static int __devinit s3cmci_probe_2440(struct platform_device *dev) 1377static int __devinit s3cmci_2440_probe(struct platform_device *dev)
1369{ 1378{
1370 return s3cmci_probe(dev, 1); 1379 return s3cmci_probe(dev, 1);
1371} 1380}
@@ -1392,29 +1401,32 @@ static int s3cmci_resume(struct platform_device *dev)
1392#endif /* CONFIG_PM */ 1401#endif /* CONFIG_PM */
1393 1402
1394 1403
1395static struct platform_driver s3cmci_driver_2410 = { 1404static struct platform_driver s3cmci_2410_driver = {
1396 .driver.name = "s3c2410-sdi", 1405 .driver.name = "s3c2410-sdi",
1397 .driver.owner = THIS_MODULE, 1406 .driver.owner = THIS_MODULE,
1398 .probe = s3cmci_probe_2410, 1407 .probe = s3cmci_2410_probe,
1399 .remove = __devexit_p(s3cmci_remove), 1408 .remove = __devexit_p(s3cmci_remove),
1409 .shutdown = s3cmci_shutdown,
1400 .suspend = s3cmci_suspend, 1410 .suspend = s3cmci_suspend,
1401 .resume = s3cmci_resume, 1411 .resume = s3cmci_resume,
1402}; 1412};
1403 1413
1404static struct platform_driver s3cmci_driver_2412 = { 1414static struct platform_driver s3cmci_2412_driver = {
1405 .driver.name = "s3c2412-sdi", 1415 .driver.name = "s3c2412-sdi",
1406 .driver.owner = THIS_MODULE, 1416 .driver.owner = THIS_MODULE,
1407 .probe = s3cmci_probe_2412, 1417 .probe = s3cmci_2412_probe,
1408 .remove = __devexit_p(s3cmci_remove), 1418 .remove = __devexit_p(s3cmci_remove),
1419 .shutdown = s3cmci_shutdown,
1409 .suspend = s3cmci_suspend, 1420 .suspend = s3cmci_suspend,
1410 .resume = s3cmci_resume, 1421 .resume = s3cmci_resume,
1411}; 1422};
1412 1423
1413static struct platform_driver s3cmci_driver_2440 = { 1424static struct platform_driver s3cmci_2440_driver = {
1414 .driver.name = "s3c2440-sdi", 1425 .driver.name = "s3c2440-sdi",
1415 .driver.owner = THIS_MODULE, 1426 .driver.owner = THIS_MODULE,
1416 .probe = s3cmci_probe_2440, 1427 .probe = s3cmci_2440_probe,
1417 .remove = __devexit_p(s3cmci_remove), 1428 .remove = __devexit_p(s3cmci_remove),
1429 .shutdown = s3cmci_shutdown,
1418 .suspend = s3cmci_suspend, 1430 .suspend = s3cmci_suspend,
1419 .resume = s3cmci_resume, 1431 .resume = s3cmci_resume,
1420}; 1432};
@@ -1422,17 +1434,17 @@ static struct platform_driver s3cmci_driver_2440 = {
1422 1434
1423static int __init s3cmci_init(void) 1435static int __init s3cmci_init(void)
1424{ 1436{
1425 platform_driver_register(&s3cmci_driver_2410); 1437 platform_driver_register(&s3cmci_2410_driver);
1426 platform_driver_register(&s3cmci_driver_2412); 1438 platform_driver_register(&s3cmci_2412_driver);
1427 platform_driver_register(&s3cmci_driver_2440); 1439 platform_driver_register(&s3cmci_2440_driver);
1428 return 0; 1440 return 0;
1429} 1441}
1430 1442
1431static void __exit s3cmci_exit(void) 1443static void __exit s3cmci_exit(void)
1432{ 1444{
1433 platform_driver_unregister(&s3cmci_driver_2410); 1445 platform_driver_unregister(&s3cmci_2410_driver);
1434 platform_driver_unregister(&s3cmci_driver_2412); 1446 platform_driver_unregister(&s3cmci_2412_driver);
1435 platform_driver_unregister(&s3cmci_driver_2440); 1447 platform_driver_unregister(&s3cmci_2440_driver);
1436} 1448}
1437 1449
1438module_init(s3cmci_init); 1450module_init(s3cmci_init);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 17701c3da733..c3a5db72ddd7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -173,119 +173,95 @@ static void sdhci_led_control(struct led_classdev *led,
173 * * 173 * *
174\*****************************************************************************/ 174\*****************************************************************************/
175 175
176static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
177{
178 return sg_virt(host->cur_sg);
179}
180
181static inline int sdhci_next_sg(struct sdhci_host* host)
182{
183 /*
184 * Skip to next SG entry.
185 */
186 host->cur_sg++;
187 host->num_sg--;
188
189 /*
190 * Any entries left?
191 */
192 if (host->num_sg > 0) {
193 host->offset = 0;
194 host->remain = host->cur_sg->length;
195 }
196
197 return host->num_sg;
198}
199
200static void sdhci_read_block_pio(struct sdhci_host *host) 176static void sdhci_read_block_pio(struct sdhci_host *host)
201{ 177{
202 int blksize, chunk_remain; 178 unsigned long flags;
203 u32 data; 179 size_t blksize, len, chunk;
204 char *buffer; 180 u32 scratch;
205 int size; 181 u8 *buf;
206 182
207 DBG("PIO reading\n"); 183 DBG("PIO reading\n");
208 184
209 blksize = host->data->blksz; 185 blksize = host->data->blksz;
210 chunk_remain = 0; 186 chunk = 0;
211 data = 0;
212 187
213 buffer = sdhci_sg_to_buffer(host) + host->offset; 188 local_irq_save(flags);
214 189
215 while (blksize) { 190 while (blksize) {
216 if (chunk_remain == 0) { 191 if (!sg_miter_next(&host->sg_miter))
217 data = readl(host->ioaddr + SDHCI_BUFFER); 192 BUG();
218 chunk_remain = min(blksize, 4);
219 }
220 193
221 size = min(host->remain, chunk_remain); 194 len = min(host->sg_miter.length, blksize);
222 195
223 chunk_remain -= size; 196 blksize -= len;
224 blksize -= size; 197 host->sg_miter.consumed = len;
225 host->offset += size;
226 host->remain -= size;
227 198
228 while (size) { 199 buf = host->sg_miter.addr;
229 *buffer = data & 0xFF;
230 buffer++;
231 data >>= 8;
232 size--;
233 }
234 200
235 if (host->remain == 0) { 201 while (len) {
236 if (sdhci_next_sg(host) == 0) { 202 if (chunk == 0) {
237 BUG_ON(blksize != 0); 203 scratch = readl(host->ioaddr + SDHCI_BUFFER);
238 return; 204 chunk = 4;
239 } 205 }
240 buffer = sdhci_sg_to_buffer(host); 206
207 *buf = scratch & 0xFF;
208
209 buf++;
210 scratch >>= 8;
211 chunk--;
212 len--;
241 } 213 }
242 } 214 }
215
216 sg_miter_stop(&host->sg_miter);
217
218 local_irq_restore(flags);
243} 219}
244 220
245static void sdhci_write_block_pio(struct sdhci_host *host) 221static void sdhci_write_block_pio(struct sdhci_host *host)
246{ 222{
247 int blksize, chunk_remain; 223 unsigned long flags;
248 u32 data; 224 size_t blksize, len, chunk;
249 char *buffer; 225 u32 scratch;
250 int bytes, size; 226 u8 *buf;
251 227
252 DBG("PIO writing\n"); 228 DBG("PIO writing\n");
253 229
254 blksize = host->data->blksz; 230 blksize = host->data->blksz;
255 chunk_remain = 4; 231 chunk = 0;
256 data = 0; 232 scratch = 0;
257 233
258 bytes = 0; 234 local_irq_save(flags);
259 buffer = sdhci_sg_to_buffer(host) + host->offset;
260 235
261 while (blksize) { 236 while (blksize) {
262 size = min(host->remain, chunk_remain); 237 if (!sg_miter_next(&host->sg_miter))
263 238 BUG();
264 chunk_remain -= size;
265 blksize -= size;
266 host->offset += size;
267 host->remain -= size;
268
269 while (size) {
270 data >>= 8;
271 data |= (u32)*buffer << 24;
272 buffer++;
273 size--;
274 }
275 239
276 if (chunk_remain == 0) { 240 len = min(host->sg_miter.length, blksize);
277 writel(data, host->ioaddr + SDHCI_BUFFER); 241
278 chunk_remain = min(blksize, 4); 242 blksize -= len;
279 } 243 host->sg_miter.consumed = len;
244
245 buf = host->sg_miter.addr;
280 246
281 if (host->remain == 0) { 247 while (len) {
282 if (sdhci_next_sg(host) == 0) { 248 scratch |= (u32)*buf << (chunk * 8);
283 BUG_ON(blksize != 0); 249
284 return; 250 buf++;
251 chunk++;
252 len--;
253
254 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
255 writel(scratch, host->ioaddr + SDHCI_BUFFER);
256 chunk = 0;
257 scratch = 0;
285 } 258 }
286 buffer = sdhci_sg_to_buffer(host);
287 } 259 }
288 } 260 }
261
262 sg_miter_stop(&host->sg_miter);
263
264 local_irq_restore(flags);
289} 265}
290 266
291static void sdhci_transfer_pio(struct sdhci_host *host) 267static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -294,7 +270,7 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
294 270
295 BUG_ON(!host->data); 271 BUG_ON(!host->data);
296 272
297 if (host->num_sg == 0) 273 if (host->blocks == 0)
298 return; 274 return;
299 275
300 if (host->data->flags & MMC_DATA_READ) 276 if (host->data->flags & MMC_DATA_READ)
@@ -308,7 +284,8 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
308 else 284 else
309 sdhci_write_block_pio(host); 285 sdhci_write_block_pio(host);
310 286
311 if (host->num_sg == 0) 287 host->blocks--;
288 if (host->blocks == 0)
312 break; 289 break;
313 } 290 }
314 291
@@ -389,6 +366,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
389 if (offset) { 366 if (offset) {
390 if (data->flags & MMC_DATA_WRITE) { 367 if (data->flags & MMC_DATA_WRITE) {
391 buffer = sdhci_kmap_atomic(sg, &flags); 368 buffer = sdhci_kmap_atomic(sg, &flags);
369 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
392 memcpy(align, buffer, offset); 370 memcpy(align, buffer, offset);
393 sdhci_kunmap_atomic(buffer, &flags); 371 sdhci_kunmap_atomic(buffer, &flags);
394 } 372 }
@@ -510,6 +488,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
510 size = 4 - (sg_dma_address(sg) & 0x3); 488 size = 4 - (sg_dma_address(sg) & 0x3);
511 489
512 buffer = sdhci_kmap_atomic(sg, &flags); 490 buffer = sdhci_kmap_atomic(sg, &flags);
491 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
513 memcpy(buffer, align, size); 492 memcpy(buffer, align, size);
514 sdhci_kunmap_atomic(buffer, &flags); 493 sdhci_kunmap_atomic(buffer, &flags);
515 494
@@ -687,7 +666,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
687 WARN_ON(1); 666 WARN_ON(1);
688 host->flags &= ~SDHCI_USE_DMA; 667 host->flags &= ~SDHCI_USE_DMA;
689 } else { 668 } else {
690 WARN_ON(count != 1); 669 WARN_ON(sg_cnt != 1);
691 writel(sg_dma_address(data->sg), 670 writel(sg_dma_address(data->sg),
692 host->ioaddr + SDHCI_DMA_ADDRESS); 671 host->ioaddr + SDHCI_DMA_ADDRESS);
693 } 672 }
@@ -711,11 +690,9 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
711 } 690 }
712 691
713 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 692 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
714 host->cur_sg = data->sg; 693 sg_miter_start(&host->sg_miter,
715 host->num_sg = data->sg_len; 694 data->sg, data->sg_len, SG_MITER_ATOMIC);
716 695 host->blocks = data->blocks;
717 host->offset = 0;
718 host->remain = host->cur_sg->length;
719 } 696 }
720 697
721 /* We do not handle DMA boundaries, so set it to max (512 KiB) */ 698 /* We do not handle DMA boundaries, so set it to max (512 KiB) */
@@ -1581,9 +1558,15 @@ int sdhci_add_host(struct sdhci_host *host)
1581 } 1558 }
1582 } 1559 }
1583 1560
1584 /* XXX: Hack to get MMC layer to avoid highmem */ 1561 /*
1585 if (!(host->flags & SDHCI_USE_DMA)) 1562 * If we use DMA, then it's up to the caller to set the DMA
1586 mmc_dev(host->mmc)->dma_mask = NULL; 1563 * mask, but PIO does not need the hw shim so we set a new
1564 * mask here in that case.
1565 */
1566 if (!(host->flags & SDHCI_USE_DMA)) {
1567 host->dma_mask = DMA_BIT_MASK(64);
1568 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1569 }
1587 1570
1588 host->max_clk = 1571 host->max_clk =
1589 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1572 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
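The PIO rework above replaces the driver's hand-rolled cur_sg/offset/remain bookkeeping with the generic scatterlist mapping iterator. For readers new to that API, here is a minimal sketch of the start/next/stop contract it relies on; it assumes an already populated scatterlist and simply copies the data into a flat buffer, which is deliberately simpler than what sdhci_read_block_pio() does with the register FIFO.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

/* Walk a scatterlist with sg_miter and copy it into a flat buffer.
 * SG_MITER_ATOMIC makes the iterator kmap pages atomically, so no
 * sleeping is allowed between sg_miter_next() and sg_miter_stop(). */
static size_t copy_sg_to_buf(struct scatterlist *sgl, unsigned int nents,
			     u8 *buf, size_t buflen)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

	while (copied < buflen && sg_miter_next(&miter)) {
		size_t len = min(miter.length, buflen - copied);

		memcpy(buf + copied, miter.addr, len);
		miter.consumed = len;	/* report how much of this chunk was used */
		copied += len;
	}

	sg_miter_stop(&miter);
	return copied;
}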
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 5bb355281765..a06bf8b89343 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -212,6 +212,7 @@ struct sdhci_host {
212 212
213 /* Internal data */ 213 /* Internal data */
214 struct mmc_host *mmc; /* MMC structure */ 214 struct mmc_host *mmc; /* MMC structure */
215 u64 dma_mask; /* custom DMA mask */
215 216
216#ifdef CONFIG_LEDS_CLASS 217#ifdef CONFIG_LEDS_CLASS
217 struct led_classdev led; /* LED control */ 218 struct led_classdev led; /* LED control */
@@ -238,10 +239,8 @@ struct sdhci_host {
238 struct mmc_data *data; /* Current data request */ 239 struct mmc_data *data; /* Current data request */
239 unsigned int data_early:1; /* Data finished before cmd */ 240 unsigned int data_early:1; /* Data finished before cmd */
240 241
241 struct scatterlist *cur_sg; /* We're working on this */ 242 struct sg_mapping_iter sg_miter; /* SG state for PIO */
242 int num_sg; /* Entries left */ 243 unsigned int blocks; /* remaining PIO blocks */
243 int offset; /* Offset into current sg */
244 int remain; /* Bytes left in current */
245 244
246 int sg_count; /* Mapped sg entries */ 245 int sg_count; /* Mapped sg entries */
247 246
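The new dma_mask field added to struct sdhci_host above backs the sdhci_add_host() change earlier in this patch: rather than nulling the device's dma_mask pointer to keep the MMC core away from highmem (the old "XXX hack"), a PIO-only host now points the device at a real 64-bit mask it owns. In isolation the pattern looks roughly like this; struct my_host and its field names are hypothetical, only DMA_BIT_MASK() and dev->dma_mask are real kernel interfaces.

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct my_host {
	struct device *dev;	/* the device whose mask we provide */
	u64 dma_mask;		/* storage that outlives the pointer below */
};

/* Give a PIO-only device a wide DMA mask so upper layers stop
 * treating it as unable to address high pages. */
static void my_host_set_pio_mask(struct my_host *host)
{
	host->dma_mask = DMA_BIT_MASK(64);
	host->dev->dma_mask = &host->dma_mask;
}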
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index cb663ef245d5..fc8529bedfdf 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -20,9 +20,11 @@
20 20
21#include <linux/mtd/nand.h> 21#include <linux/mtd/nand.h>
22#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/gpio.h>
23 24
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/mach-types.h>
26 28
27#include <asm/arch/hardware.h> 29#include <asm/arch/hardware.h>
28#include <asm/arch/pxa-regs.h> 30#include <asm/arch/pxa-regs.h>
@@ -30,20 +32,6 @@
30#define GPIO_NAND_CS (11) 32#define GPIO_NAND_CS (11)
31#define GPIO_NAND_RB (89) 33#define GPIO_NAND_RB (89)
32 34
33/* This macro needed to ensure in-order operation of GPIO and local
34 * bus. Without both asm command and dummy uncached read there're
35 * states when NAND access is broken. I've looked for such macro(s) in
 36 * include/asm-arm but found nothing appropriate.
 37 * dmac_clean_range is close, but it makes cache invalidation
38 * unnecessary here and it cannot be used in module
39 */
40#define DRAIN_WB() \
41 do { \
42 unsigned char dummy; \
43 asm volatile ("mcr p15, 0, r0, c7, c10, 4":::"r0"); \
44 dummy=*((unsigned char*)UNCACHED_ADDR); \
45 } while(0)
46
47/* MTD structure for CM-X270 board */ 35/* MTD structure for CM-X270 board */
48static struct mtd_info *cmx270_nand_mtd; 36static struct mtd_info *cmx270_nand_mtd;
49 37
@@ -103,14 +91,14 @@ static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
103 91
104static inline void nand_cs_on(void) 92static inline void nand_cs_on(void)
105{ 93{
106 GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); 94 gpio_set_value(GPIO_NAND_CS, 0);
107} 95}
108 96
109static void nand_cs_off(void) 97static void nand_cs_off(void)
110{ 98{
111 DRAIN_WB(); 99 dsb();
112 100
113 GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); 101 gpio_set_value(GPIO_NAND_CS, 1);
114} 102}
115 103
116/* 104/*
@@ -122,7 +110,7 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
122 struct nand_chip* this = mtd->priv; 110 struct nand_chip* this = mtd->priv;
123 unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; 111 unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
124 112
125 DRAIN_WB(); 113 dsb();
126 114
127 if (ctrl & NAND_CTRL_CHANGE) { 115 if (ctrl & NAND_CTRL_CHANGE) {
128 if ( ctrl & NAND_ALE ) 116 if ( ctrl & NAND_ALE )
@@ -139,12 +127,12 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
139 nand_cs_off(); 127 nand_cs_off();
140 } 128 }
141 129
142 DRAIN_WB(); 130 dsb();
143 this->IO_ADDR_W = (void __iomem*)nandaddr; 131 this->IO_ADDR_W = (void __iomem*)nandaddr;
144 if (dat != NAND_CMD_NONE) 132 if (dat != NAND_CMD_NONE)
145 writel((dat << 16), this->IO_ADDR_W); 133 writel((dat << 16), this->IO_ADDR_W);
146 134
147 DRAIN_WB(); 135 dsb();
148} 136}
149 137
150/* 138/*
@@ -152,9 +140,9 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
152 */ 140 */
153static int cmx270_device_ready(struct mtd_info *mtd) 141static int cmx270_device_ready(struct mtd_info *mtd)
154{ 142{
155 DRAIN_WB(); 143 dsb();
156 144
157 return (GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB)); 145 return (gpio_get_value(GPIO_NAND_RB));
158} 146}
159 147
160/* 148/*
@@ -168,20 +156,40 @@ static int cmx270_init(void)
168 int mtd_parts_nb = 0; 156 int mtd_parts_nb = 0;
169 int ret; 157 int ret;
170 158
159 if (!machine_is_armcore())
160 return -ENODEV;
161
162 ret = gpio_request(GPIO_NAND_CS, "NAND CS");
163 if (ret) {
164 pr_warning("CM-X270: failed to request NAND CS gpio\n");
165 return ret;
166 }
167
168 gpio_direction_output(GPIO_NAND_CS, 1);
169
170 ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
171 if (ret) {
172 pr_warning("CM-X270: failed to request NAND R/B gpio\n");
173 goto err_gpio_request;
174 }
175
176 gpio_direction_input(GPIO_NAND_RB);
177
171 /* Allocate memory for MTD device structure and private data */ 178 /* Allocate memory for MTD device structure and private data */
172 cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + 179 cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) +
173 sizeof(struct nand_chip), 180 sizeof(struct nand_chip),
174 GFP_KERNEL); 181 GFP_KERNEL);
175 if (!cmx270_nand_mtd) { 182 if (!cmx270_nand_mtd) {
176 printk("Unable to allocate CM-X270 NAND MTD device structure.\n"); 183 pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
177 return -ENOMEM; 184 ret = -ENOMEM;
185 goto err_kzalloc;
178 } 186 }
179 187
180 cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); 188 cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
181 if (!cmx270_nand_io) { 189 if (!cmx270_nand_io) {
182 printk("Unable to ioremap NAND device\n"); 190 pr_debug("Unable to ioremap NAND device\n");
183 ret = -EINVAL; 191 ret = -EINVAL;
184 goto err1; 192 goto err_ioremap;
185 } 193 }
186 194
187 /* Get pointer to private data */ 195 /* Get pointer to private data */
@@ -209,9 +217,9 @@ static int cmx270_init(void)
209 217
210 /* Scan to find existence of the device */ 218 /* Scan to find existence of the device */
211 if (nand_scan (cmx270_nand_mtd, 1)) { 219 if (nand_scan (cmx270_nand_mtd, 1)) {
212 printk(KERN_NOTICE "No NAND device\n"); 220 pr_notice("No NAND device\n");
213 ret = -ENXIO; 221 ret = -ENXIO;
214 goto err2; 222 goto err_scan;
215 } 223 }
216 224
217#ifdef CONFIG_MTD_CMDLINE_PARTS 225#ifdef CONFIG_MTD_CMDLINE_PARTS
@@ -229,18 +237,22 @@ static int cmx270_init(void)
229 } 237 }
230 238
231 /* Register the partitions */ 239 /* Register the partitions */
232 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 240 pr_notice("Using %s partition definition\n", part_type);
233 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); 241 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
234 if (ret) 242 if (ret)
235 goto err2; 243 goto err_scan;
236 244
237 /* Return happy */ 245 /* Return happy */
238 return 0; 246 return 0;
239 247
240err2: 248err_scan:
241 iounmap(cmx270_nand_io); 249 iounmap(cmx270_nand_io);
242err1: 250err_ioremap:
243 kfree(cmx270_nand_mtd); 251 kfree(cmx270_nand_mtd);
252err_kzalloc:
253 gpio_free(GPIO_NAND_RB);
254err_gpio_request:
255 gpio_free(GPIO_NAND_CS);
244 256
245 return ret; 257 return ret;
246 258
@@ -255,6 +267,9 @@ static void cmx270_cleanup(void)
255 /* Release resources, unregister device */ 267 /* Release resources, unregister device */
256 nand_release(cmx270_nand_mtd); 268 nand_release(cmx270_nand_mtd);
257 269
270 gpio_free(GPIO_NAND_RB);
271 gpio_free(GPIO_NAND_CS);
272
258 iounmap(cmx270_nand_io); 273 iounmap(cmx270_nand_io);
259 274
260 /* Free the MTD device structure */ 275 /* Free the MTD device structure */
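The conversion above swaps the CM-X270 NAND glue from raw GPSR/GPCR register writes and the home-grown DRAIN_WB() barrier to gpiolib calls plus dsb(), and gives cmx270_init() a proper unwind path. The gpiolib request/direction/free pattern it now depends on is sketched below with hypothetical GPIO numbers and labels; the calls themselves (gpio_request(), gpio_direction_output(), gpio_direction_input(), gpio_free()) are the standard API used in the hunk.

#include <linux/gpio.h>
#include <linux/errno.h>

#define MY_GPIO_CS	11	/* hypothetical chip-select line */
#define MY_GPIO_RB	89	/* hypothetical ready/busy line */

static int my_nand_gpio_init(void)
{
	int ret;

	ret = gpio_request(MY_GPIO_CS, "NAND CS");
	if (ret)
		return ret;
	gpio_direction_output(MY_GPIO_CS, 1);	/* CS deasserted (active low) */

	ret = gpio_request(MY_GPIO_RB, "NAND R/B");
	if (ret) {
		gpio_free(MY_GPIO_CS);
		return ret;
	}
	gpio_direction_input(MY_GPIO_RB);

	return 0;
}

static void my_nand_gpio_exit(void)
{
	gpio_free(MY_GPIO_RB);
	gpio_free(MY_GPIO_CS);
}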
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index f2051b209da2..2040965d7724 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -308,7 +308,7 @@ static void smc_reset(struct net_device *dev)
308 * can't handle it then there will be no recovery except for 308 * can't handle it then there will be no recovery except for
309 * a hard reset or power cycle 309 * a hard reset or power cycle
310 */ 310 */
311 if (nowait) 311 if (lp->cfg.flags & SMC91X_NOWAIT)
312 cfg |= CONFIG_NO_WAIT; 312 cfg |= CONFIG_NO_WAIT;
313 313
314 /* 314 /*
@@ -1939,8 +1939,11 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1939 if (retval) 1939 if (retval)
1940 goto err_out; 1940 goto err_out;
1941 1941
1942#ifdef SMC_USE_PXA_DMA 1942#ifdef CONFIG_ARCH_PXA
1943 { 1943# ifdef SMC_USE_PXA_DMA
1944 lp->cfg.flags |= SMC91X_USE_DMA;
1945# endif
1946 if (lp->cfg.flags & SMC91X_USE_DMA) {
1944 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, 1947 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
1945 smc_pxa_dma_irq, NULL); 1948 smc_pxa_dma_irq, NULL);
1946 if (dma >= 0) 1949 if (dma >= 0)
@@ -1980,7 +1983,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1980 } 1983 }
1981 1984
1982err_out: 1985err_out:
1983#ifdef SMC_USE_PXA_DMA 1986#ifdef CONFIG_ARCH_PXA
1984 if (retval && dev->dma != (unsigned char)-1) 1987 if (retval && dev->dma != (unsigned char)-1)
1985 pxa_free_dma(dev->dma); 1988 pxa_free_dma(dev->dma);
1986#endif 1989#endif
@@ -2050,9 +2053,11 @@ static int smc_enable_device(struct platform_device *pdev)
2050 return 0; 2053 return 0;
2051} 2054}
2052 2055
2053static int smc_request_attrib(struct platform_device *pdev) 2056static int smc_request_attrib(struct platform_device *pdev,
2057 struct net_device *ndev)
2054{ 2058{
2055 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2059 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2060 struct smc_local *lp = netdev_priv(ndev);
2056 2061
2057 if (!res) 2062 if (!res)
2058 return 0; 2063 return 0;
@@ -2063,9 +2068,11 @@ static int smc_request_attrib(struct platform_device *pdev)
2063 return 0; 2068 return 0;
2064} 2069}
2065 2070
2066static void smc_release_attrib(struct platform_device *pdev) 2071static void smc_release_attrib(struct platform_device *pdev,
2072 struct net_device *ndev)
2067{ 2073{
2068 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2074 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2075 struct smc_local *lp = netdev_priv(ndev);
2069 2076
2070 if (res) 2077 if (res)
2071 release_mem_region(res->start, ATTRIB_SIZE); 2078 release_mem_region(res->start, ATTRIB_SIZE);
@@ -2123,27 +2130,14 @@ static int smc_drv_probe(struct platform_device *pdev)
2123 struct net_device *ndev; 2130 struct net_device *ndev;
2124 struct resource *res, *ires; 2131 struct resource *res, *ires;
2125 unsigned int __iomem *addr; 2132 unsigned int __iomem *addr;
2133 unsigned long irq_flags = SMC_IRQ_FLAGS;
2126 int ret; 2134 int ret;
2127 2135
2128 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2129 if (!res)
2130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2131 if (!res) {
2132 ret = -ENODEV;
2133 goto out;
2134 }
2135
2136
2137 if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
2138 ret = -EBUSY;
2139 goto out;
2140 }
2141
2142 ndev = alloc_etherdev(sizeof(struct smc_local)); 2136 ndev = alloc_etherdev(sizeof(struct smc_local));
2143 if (!ndev) { 2137 if (!ndev) {
2144 printk("%s: could not allocate device.\n", CARDNAME); 2138 printk("%s: could not allocate device.\n", CARDNAME);
2145 ret = -ENOMEM; 2139 ret = -ENOMEM;
2146 goto out_release_io; 2140 goto out;
2147 } 2141 }
2148 SET_NETDEV_DEV(ndev, &pdev->dev); 2142 SET_NETDEV_DEV(ndev, &pdev->dev);
2149 2143
@@ -2152,37 +2146,47 @@ static int smc_drv_probe(struct platform_device *pdev)
2152 */ 2146 */
2153 2147
2154 lp = netdev_priv(ndev); 2148 lp = netdev_priv(ndev);
2155 lp->cfg.irq_flags = SMC_IRQ_FLAGS;
2156 2149
2157#ifdef SMC_DYNAMIC_BUS_CONFIG 2150 if (pd) {
2158 if (pd)
2159 memcpy(&lp->cfg, pd, sizeof(lp->cfg)); 2151 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2160 else { 2152 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
2161 lp->cfg.flags = SMC91X_USE_8BIT; 2153 } else {
2162 lp->cfg.flags |= SMC91X_USE_16BIT; 2154 lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
2163 lp->cfg.flags |= SMC91X_USE_32BIT; 2155 lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
2156 lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
2157 lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
2164 } 2158 }
2165 2159
2166 lp->cfg.flags &= ~(SMC_CAN_USE_8BIT ? 0 : SMC91X_USE_8BIT);
2167 lp->cfg.flags &= ~(SMC_CAN_USE_16BIT ? 0 : SMC91X_USE_16BIT);
2168 lp->cfg.flags &= ~(SMC_CAN_USE_32BIT ? 0 : SMC91X_USE_32BIT);
2169#endif
2170
2171 ndev->dma = (unsigned char)-1; 2160 ndev->dma = (unsigned char)-1;
2172 2161
2162 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2163 if (!res)
2164 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2165 if (!res) {
2166 ret = -ENODEV;
2167 goto out_free_netdev;
2168 }
2169
2170
2171 if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
2172 ret = -EBUSY;
2173 goto out_free_netdev;
2174 }
2175
2173 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2176 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2174 if (!ires) { 2177 if (!ires) {
2175 ret = -ENODEV; 2178 ret = -ENODEV;
2176 goto out_free_netdev; 2179 goto out_release_io;
2177 } 2180 }
2178 2181
2179 ndev->irq = ires->start; 2182 ndev->irq = ires->start;
2180 if (SMC_IRQ_FLAGS == -1)
2181 lp->cfg.irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2182 2183
2183 ret = smc_request_attrib(pdev); 2184 if (ires->flags & IRQF_TRIGGER_MASK)
2185 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2186
2187 ret = smc_request_attrib(pdev, ndev);
2184 if (ret) 2188 if (ret)
2185 goto out_free_netdev; 2189 goto out_release_io;
2186#if defined(CONFIG_SA1100_ASSABET) 2190#if defined(CONFIG_SA1100_ASSABET)
2187 NCR_0 |= NCR_ENET_OSC_EN; 2191 NCR_0 |= NCR_ENET_OSC_EN;
2188#endif 2192#endif
@@ -2197,7 +2201,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2197 goto out_release_attrib; 2201 goto out_release_attrib;
2198 } 2202 }
2199 2203
2200#ifdef SMC_USE_PXA_DMA 2204#ifdef CONFIG_ARCH_PXA
2201 { 2205 {
2202 struct smc_local *lp = netdev_priv(ndev); 2206 struct smc_local *lp = netdev_priv(ndev);
2203 lp->device = &pdev->dev; 2207 lp->device = &pdev->dev;
@@ -2205,7 +2209,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2205 } 2209 }
2206#endif 2210#endif
2207 2211
2208 ret = smc_probe(ndev, addr, lp->cfg.irq_flags); 2212 ret = smc_probe(ndev, addr, irq_flags);
2209 if (ret != 0) 2213 if (ret != 0)
2210 goto out_iounmap; 2214 goto out_iounmap;
2211 2215
@@ -2217,11 +2221,11 @@ static int smc_drv_probe(struct platform_device *pdev)
2217 platform_set_drvdata(pdev, NULL); 2221 platform_set_drvdata(pdev, NULL);
2218 iounmap(addr); 2222 iounmap(addr);
2219 out_release_attrib: 2223 out_release_attrib:
2220 smc_release_attrib(pdev); 2224 smc_release_attrib(pdev, ndev);
2221 out_free_netdev:
2222 free_netdev(ndev);
2223 out_release_io: 2225 out_release_io:
2224 release_mem_region(res->start, SMC_IO_EXTENT); 2226 release_mem_region(res->start, SMC_IO_EXTENT);
2227 out_free_netdev:
2228 free_netdev(ndev);
2225 out: 2229 out:
2226 printk("%s: not found (%d).\n", CARDNAME, ret); 2230 printk("%s: not found (%d).\n", CARDNAME, ret);
2227 2231
@@ -2240,14 +2244,14 @@ static int smc_drv_remove(struct platform_device *pdev)
2240 2244
2241 free_irq(ndev->irq, ndev); 2245 free_irq(ndev->irq, ndev);
2242 2246
2243#ifdef SMC_USE_PXA_DMA 2247#ifdef CONFIG_ARCH_PXA
2244 if (ndev->dma != (unsigned char)-1) 2248 if (ndev->dma != (unsigned char)-1)
2245 pxa_free_dma(ndev->dma); 2249 pxa_free_dma(ndev->dma);
2246#endif 2250#endif
2247 iounmap(lp->base); 2251 iounmap(lp->base);
2248 2252
2249 smc_release_datacs(pdev,ndev); 2253 smc_release_datacs(pdev,ndev);
2250 smc_release_attrib(pdev); 2254 smc_release_attrib(pdev,ndev);
2251 2255
2252 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); 2256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2253 if (!res) 2257 if (!res)
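The smc91x probe rework above stops deriving the bus width from SMC_DYNAMIC_BUS_CONFIG compile-time knobs and instead takes it from struct smc91x_platdata when a board supplies one, falling back to the SMC_CAN_USE_* capabilities (and the nowait module parameter) otherwise; the IRQ trigger now also comes from the IRQ resource flags. Purely as an illustration, a board file could hand over that configuration roughly as follows; the base address, IRQ number and trigger are made-up values, while the SMC91X_* flags and the "smc91x-regs" resource name are the ones used in the hunk.

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>

static struct smc91x_platdata my_board_smc91x_info = {
	/* 16-bit accesses only, no extra bus wait states */
	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
};

static struct resource my_board_smc91x_resources[] = {
	{
		.name	= "smc91x-regs",
		.start	= 0x0c000000,		/* hypothetical chip-select base */
		.end	= 0x0c0000ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,			/* hypothetical IRQ */
		.end	= 42,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
};

static struct platform_device my_board_smc91x_device = {
	.name		= "smc91x",
	.id		= 0,
	.dev		= { .platform_data = &my_board_smc91x_info },
	.num_resources	= ARRAY_SIZE(my_board_smc91x_resources),
	.resource	= my_board_smc91x_resources,
};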
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8606818653f8..22209b6f1405 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -40,23 +40,46 @@
40 * Define your architecture specific bus configuration parameters here. 40 * Define your architecture specific bus configuration parameters here.
41 */ 41 */
42 42
43#if defined(CONFIG_ARCH_LUBBOCK) 43#if defined(CONFIG_ARCH_LUBBOCK) ||\
44 defined(CONFIG_MACH_MAINSTONE) ||\
45 defined(CONFIG_MACH_ZYLONITE) ||\
46 defined(CONFIG_MACH_LITTLETON)
44 47
45/* We can only do 16-bit reads and writes in the static memory space. */ 48#include <asm/mach-types.h>
46#define SMC_CAN_USE_8BIT 0 49
50/* Now the bus width is specified in the platform data
51 * pretend here to support all I/O access types
52 */
53#define SMC_CAN_USE_8BIT 1
47#define SMC_CAN_USE_16BIT 1 54#define SMC_CAN_USE_16BIT 1
48#define SMC_CAN_USE_32BIT 0 55#define SMC_CAN_USE_32BIT 1
49#define SMC_NOWAIT 1 56#define SMC_NOWAIT 1
50 57
51/* The first two address lines aren't connected... */ 58#define SMC_IO_SHIFT (lp->io_shift)
52#define SMC_IO_SHIFT 2
53 59
60#define SMC_inb(a, r) readb((a) + (r))
54#define SMC_inw(a, r) readw((a) + (r)) 61#define SMC_inw(a, r) readw((a) + (r))
55#define SMC_outw(v, a, r) writew(v, (a) + (r)) 62#define SMC_inl(a, r) readl((a) + (r))
63#define SMC_outb(v, a, r) writeb(v, (a) + (r))
64#define SMC_outl(v, a, r) writel(v, (a) + (r))
56#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 65#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
57#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 66#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
67#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
68#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
58#define SMC_IRQ_FLAGS (-1) /* from resource */ 69#define SMC_IRQ_FLAGS (-1) /* from resource */
59 70
71/* We actually can't write halfwords properly if not word aligned */
72static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
73{
74 if (machine_is_mainstone() && reg & 2) {
75 unsigned int v = val << 16;
76 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
77 writel(v, ioaddr + (reg & ~2));
78 } else {
79 writew(val, ioaddr + reg);
80 }
81}
82
60#elif defined(CONFIG_BLACKFIN) 83#elif defined(CONFIG_BLACKFIN)
61 84
62#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH 85#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
@@ -195,7 +218,6 @@
195#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 218#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
196 219
197#elif defined(CONFIG_ARCH_INNOKOM) || \ 220#elif defined(CONFIG_ARCH_INNOKOM) || \
198 defined(CONFIG_MACH_MAINSTONE) || \
199 defined(CONFIG_ARCH_PXA_IDP) || \ 221 defined(CONFIG_ARCH_PXA_IDP) || \
200 defined(CONFIG_ARCH_RAMSES) || \ 222 defined(CONFIG_ARCH_RAMSES) || \
201 defined(CONFIG_ARCH_PCM027) 223 defined(CONFIG_ARCH_PCM027)
@@ -229,22 +251,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
229 } 251 }
230} 252}
231 253
232#elif defined(CONFIG_MACH_ZYLONITE)
233
234#define SMC_CAN_USE_8BIT 1
235#define SMC_CAN_USE_16BIT 1
236#define SMC_CAN_USE_32BIT 0
237#define SMC_IO_SHIFT 0
238#define SMC_NOWAIT 1
239#define SMC_USE_PXA_DMA 1
240#define SMC_inb(a, r) readb((a) + (r))
241#define SMC_inw(a, r) readw((a) + (r))
242#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
243#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
244#define SMC_outb(v, a, r) writeb(v, (a) + (r))
245#define SMC_outw(v, a, r) writew(v, (a) + (r))
246#define SMC_IRQ_FLAGS (-1) /* from resource */
247
248#elif defined(CONFIG_ARCH_OMAP) 254#elif defined(CONFIG_ARCH_OMAP)
249 255
250/* We can only do 16-bit reads and writes in the static memory space. */ 256/* We can only do 16-bit reads and writes in the static memory space. */
@@ -454,7 +460,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
454#define RPC_LSA_DEFAULT RPC_LED_100_10 460#define RPC_LSA_DEFAULT RPC_LED_100_10
455#define RPC_LSB_DEFAULT RPC_LED_TX_RX 461#define RPC_LSB_DEFAULT RPC_LED_TX_RX
456 462
457#define SMC_DYNAMIC_BUS_CONFIG
458#endif 463#endif
459 464
460 465
@@ -493,7 +498,7 @@ struct smc_local {
493 498
494 spinlock_t lock; 499 spinlock_t lock;
495 500
496#ifdef SMC_USE_PXA_DMA 501#ifdef CONFIG_ARCH_PXA
497 /* DMA needs the physical address of the chip */ 502 /* DMA needs the physical address of the chip */
498 u_long physaddr; 503 u_long physaddr;
499 struct device *device; 504 struct device *device;
@@ -501,20 +506,17 @@ struct smc_local {
501 void __iomem *base; 506 void __iomem *base;
502 void __iomem *datacs; 507 void __iomem *datacs;
503 508
509 /* the low address lines on some platforms aren't connected... */
510 int io_shift;
511
504 struct smc91x_platdata cfg; 512 struct smc91x_platdata cfg;
505}; 513};
506 514
507#ifdef SMC_DYNAMIC_BUS_CONFIG 515#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
508#define SMC_8BIT(p) (((p)->cfg.flags & SMC91X_USE_8BIT) && SMC_CAN_USE_8BIT) 516#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
509#define SMC_16BIT(p) (((p)->cfg.flags & SMC91X_USE_16BIT) && SMC_CAN_USE_16BIT) 517#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)
510#define SMC_32BIT(p) (((p)->cfg.flags & SMC91X_USE_32BIT) && SMC_CAN_USE_32BIT)
511#else
512#define SMC_8BIT(p) SMC_CAN_USE_8BIT
513#define SMC_16BIT(p) SMC_CAN_USE_16BIT
514#define SMC_32BIT(p) SMC_CAN_USE_32BIT
515#endif
516 518
517#ifdef SMC_USE_PXA_DMA 519#ifdef CONFIG_ARCH_PXA
518/* 520/*
519 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is 521 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
520 * always happening in irq context so no need to worry about races. TX is 522 * always happening in irq context so no need to worry about races. TX is
@@ -608,7 +610,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
608{ 610{
609 DCSR(dma) = 0; 611 DCSR(dma) = 0;
610} 612}
611#endif /* SMC_USE_PXA_DMA */ 613#endif /* CONFIG_ARCH_PXA */
612 614
613 615
614/* 616/*
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index e45402adac3f..e0f884034c9f 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -219,7 +219,8 @@ config PCMCIA_SA1111
219config PCMCIA_PXA2XX 219config PCMCIA_PXA2XX
220 tristate "PXA2xx support" 220 tristate "PXA2xx support"
221 depends on ARM && ARCH_PXA && PCMCIA 221 depends on ARM && ARCH_PXA && PCMCIA
222 depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE 222 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
223 || MACH_ARMCORE || ARCH_PXA_PALM)
223 help 224 help
224 Say Y here to include support for the PXA2xx PCMCIA controller 225 Say Y here to include support for the PXA2xx PCMCIA controller
225 226
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 85c6cc931f97..269a9e913ba2 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -72,4 +72,5 @@ pxa2xx_cs-$(CONFIG_ARCH_LUBBOCK) += pxa2xx_lubbock.o sa1111_generic.o
72pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o 72pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
73pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o 73pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
74pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o 74pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o
75pxa2xx_cs-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
75 76
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index f123fce65f2e..bb95db7d2b76 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -5,83 +5,60 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Compulab Ltd., 2003, 2007 8 * Compulab Ltd., 2003, 2007, 2008
9 * Mike Rapoport <mike@compulab.co.il> 9 * Mike Rapoport <mike@compulab.co.il>
10 * 10 *
11 */ 11 */
12 12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/platform_device.h> 13#include <linux/platform_device.h>
16#include <linux/irq.h> 14#include <linux/irq.h>
17#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gpio.h>
18 17
19#include <pcmcia/ss.h>
20#include <asm/hardware.h>
21#include <asm/mach-types.h> 18#include <asm/mach-types.h>
22
23#include <asm/arch/pxa-regs.h> 19#include <asm/arch/pxa-regs.h>
24#include <asm/arch/pxa2xx-gpio.h>
25#include <asm/arch/cm-x270.h>
26 20
27#include "soc_common.h" 21#include "soc_common.h"
28 22
23#define GPIO_PCMCIA_S0_CD_VALID (84)
24#define GPIO_PCMCIA_S0_RDYINT (82)
25#define GPIO_PCMCIA_RESET (53)
26
27#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
28#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
29
30
29static struct pcmcia_irqs irqs[] = { 31static struct pcmcia_irqs irqs[] = {
30 { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" }, 32 { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
31 { 1, PCMCIA_S1_CD_VALID, "PCMCIA1 CD" },
32}; 33};
33 34
34static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 35static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
35{ 36{
36 GPSR(GPIO48_nPOE) = GPIO_bit(GPIO48_nPOE) | 37 int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset");
37 GPIO_bit(GPIO49_nPWE) | 38 if (ret)
38 GPIO_bit(GPIO50_nPIOR) | 39 return ret;
39 GPIO_bit(GPIO51_nPIOW) | 40 gpio_direction_output(GPIO_PCMCIA_RESET, 0);
40 GPIO_bit(GPIO85_nPCE_1) | 41
41 GPIO_bit(GPIO54_nPCE_2); 42 skt->irq = PCMCIA_S0_RDYINT;
42 43 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
43 pxa_gpio_mode(GPIO48_nPOE_MD); 44 if (!ret)
44 pxa_gpio_mode(GPIO49_nPWE_MD); 45 gpio_free(GPIO_PCMCIA_RESET);
45 pxa_gpio_mode(GPIO50_nPIOR_MD); 46
46 pxa_gpio_mode(GPIO51_nPIOW_MD); 47 return ret;
47 pxa_gpio_mode(GPIO85_nPCE_1_MD);
48 pxa_gpio_mode(GPIO54_nPCE_2_MD);
49 pxa_gpio_mode(GPIO55_nPREG_MD);
50 pxa_gpio_mode(GPIO56_nPWAIT_MD);
51 pxa_gpio_mode(GPIO57_nIOIS16_MD);
52
53 /* Reset signal */
54 pxa_gpio_mode(GPIO53_nPCE_2 | GPIO_OUT);
55 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
56
57 set_irq_type(PCMCIA_S0_CD_VALID, IRQ_TYPE_EDGE_BOTH);
58 set_irq_type(PCMCIA_S1_CD_VALID, IRQ_TYPE_EDGE_BOTH);
59
60 /* irq's for slots: */
61 set_irq_type(PCMCIA_S0_RDYINT, IRQ_TYPE_EDGE_FALLING);
62 set_irq_type(PCMCIA_S1_RDYINT, IRQ_TYPE_EDGE_FALLING);
63
64 skt->irq = (skt->nr == 0) ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
65 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
66} 48}
67 49
68static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt) 50static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt)
69{ 51{
70 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); 52 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
71 53 gpio_free(GPIO_PCMCIA_RESET);
72 set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_CD_VALID), IRQ_TYPE_NONE);
73 set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_CD_VALID), IRQ_TYPE_NONE);
74
75 set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_RDYINT), IRQ_TYPE_NONE);
76 set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_RDYINT), IRQ_TYPE_NONE);
77} 54}
78 55
79 56
80static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt, 57static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
81 struct pcmcia_state *state) 58 struct pcmcia_state *state)
82{ 59{
83 state->detect = (PCC_DETECT(skt->nr) == 0) ? 1 : 0; 60 state->detect = (gpio_get_value(GPIO_PCMCIA_S0_CD_VALID) == 0) ? 1 : 0;
84 state->ready = (PCC_READY(skt->nr) == 0) ? 0 : 1; 61 state->ready = (gpio_get_value(GPIO_PCMCIA_S0_RDYINT) == 0) ? 0 : 1;
85 state->bvd1 = 1; 62 state->bvd1 = 1;
86 state->bvd2 = 1; 63 state->bvd2 = 1;
87 state->vs_3v = 0; 64 state->vs_3v = 0;
@@ -93,32 +70,16 @@ static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
93static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, 70static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
94 const socket_state_t *state) 71 const socket_state_t *state)
95{ 72{
96 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
97 pxa_gpio_mode(GPIO49_nPWE | GPIO_OUT);
98
99 switch (skt->nr) { 73 switch (skt->nr) {
100 case 0: 74 case 0:
101 if (state->flags & SS_RESET) { 75 if (state->flags & SS_RESET) {
102 GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE); 76 gpio_set_value(GPIO_PCMCIA_RESET, 1);
103 GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
104 udelay(10);
105 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
106 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
107 }
108 break;
109 case 1:
110 if (state->flags & SS_RESET) {
111 GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
112 GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
113 udelay(10); 77 udelay(10);
114 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2); 78 gpio_set_value(GPIO_PCMCIA_RESET, 0);
115 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
116 } 79 }
117 break; 80 break;
118 } 81 }
119 82
120 pxa_gpio_mode(GPIO49_nPWE_MD);
121
122 return 0; 83 return 0;
123} 84}
124 85
@@ -139,7 +100,7 @@ static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
139 .configure_socket = cmx270_pcmcia_configure_socket, 100 .configure_socket = cmx270_pcmcia_configure_socket,
140 .socket_init = cmx270_pcmcia_socket_init, 101 .socket_init = cmx270_pcmcia_socket_init,
141 .socket_suspend = cmx270_pcmcia_socket_suspend, 102 .socket_suspend = cmx270_pcmcia_socket_suspend,
142 .nr = 2, 103 .nr = 1,
143}; 104};
144 105
145static struct platform_device *cmx270_pcmcia_device; 106static struct platform_device *cmx270_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
new file mode 100644
index 000000000000..4abde190c1f5
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -0,0 +1,118 @@
1/*
2 * linux/drivers/pcmcia/pxa2xx_palmtx.c
3 *
4 * Driver for Palm T|X PCMCIA
5 *
6 * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16
17#include <asm/mach-types.h>
18
19#include <asm/arch/gpio.h>
20#include <asm/arch/palmtx.h>
21
22#include "soc_common.h"
23
24static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
25{
26 skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY);
27 return 0;
28}
29
30static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
31{
32}
33
34static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
35 struct pcmcia_state *state)
36{
37 state->detect = 1; /* always inserted */
38 state->ready = !!gpio_get_value(GPIO_NR_PALMTX_PCMCIA_READY);
39 state->bvd1 = 1;
40 state->bvd2 = 1;
41 state->wrprot = 0;
42 state->vs_3v = 1;
43 state->vs_Xv = 0;
44}
45
46static int
47palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
48 const socket_state_t *state)
49{
50 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER1, 1);
51 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER2, 1);
52 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_RESET,
53 !!(state->flags & SS_RESET));
54
55 return 0;
56}
57
58static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
59{
60}
61
62static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
63{
64}
65
66static struct pcmcia_low_level palmtx_pcmcia_ops = {
67 .owner = THIS_MODULE,
68
69 .first = 0,
70 .nr = 1,
71
72 .hw_init = palmtx_pcmcia_hw_init,
73 .hw_shutdown = palmtx_pcmcia_hw_shutdown,
74
75 .socket_state = palmtx_pcmcia_socket_state,
76 .configure_socket = palmtx_pcmcia_configure_socket,
77
78 .socket_init = palmtx_pcmcia_socket_init,
79 .socket_suspend = palmtx_pcmcia_socket_suspend,
80};
81
82static struct platform_device *palmtx_pcmcia_device;
83
84static int __init palmtx_pcmcia_init(void)
85{
86 int ret;
87
88 if (!machine_is_palmtx())
89 return -ENODEV;
90
91 palmtx_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
92 if (!palmtx_pcmcia_device)
93 return -ENOMEM;
94
95 ret = platform_device_add_data(palmtx_pcmcia_device, &palmtx_pcmcia_ops,
96 sizeof(palmtx_pcmcia_ops));
97
98 if (!ret)
99 ret = platform_device_add(palmtx_pcmcia_device);
100
101 if (ret)
102 platform_device_put(palmtx_pcmcia_device);
103
104 return ret;
105}
106
107static void __exit palmtx_pcmcia_exit(void)
108{
109 platform_device_unregister(palmtx_pcmcia_device);
110}
111
112fs_initcall(palmtx_pcmcia_init);
113module_exit(palmtx_pcmcia_exit);
114
115MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
116MODULE_DESCRIPTION("PCMCIA support for Palm T|X");
117MODULE_ALIAS("platform:pxa2xx-pcmcia");
118MODULE_LICENSE("GPL");
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 58c806e9c58a..4d17d384578d 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -49,4 +49,10 @@ config BATTERY_OLPC
49 help 49 help
50 Say Y to enable support for the battery on the OLPC laptop. 50 Say Y to enable support for the battery on the OLPC laptop.
51 51
52config BATTERY_PALMTX
53 tristate "Palm T|X battery"
54 depends on MACH_PALMTX
55 help
56 Say Y to enable support for the battery in Palm T|X.
57
52endif # POWER_SUPPLY 58endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 6413ded5fe5f..6f43a54ee420 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_APM_POWER) += apm_power.o
20obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o 20obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
21obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o 21obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
22obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o 22obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
23obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o
diff --git a/drivers/power/palmtx_battery.c b/drivers/power/palmtx_battery.c
new file mode 100644
index 000000000000..244bb273a637
--- /dev/null
+++ b/drivers/power/palmtx_battery.c
@@ -0,0 +1,198 @@
1/*
2 * linux/drivers/power/palmtx_battery.c
3 *
4 * Battery measurement code for Palm T|X Handheld computer
5 *
6 * based on tosa_battery.c
7 *
8 * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/power_supply.h>
18#include <linux/wm97xx.h>
19#include <linux/delay.h>
20#include <linux/spinlock.h>
21#include <linux/interrupt.h>
22#include <linux/gpio.h>
23
24#include <asm/mach-types.h>
25#include <asm/arch/palmtx.h>
26
27static DEFINE_MUTEX(bat_lock);
28static struct work_struct bat_work;
29struct mutex work_lock;
30int bat_status = POWER_SUPPLY_STATUS_DISCHARGING;
31
32static unsigned long palmtx_read_bat(struct power_supply *bat_ps)
33{
34 return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data,
35 WM97XX_AUX_ID3) * 1000 / 414;
36}
37
38static unsigned long palmtx_read_temp(struct power_supply *bat_ps)
39{
40 return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data,
41 WM97XX_AUX_ID2);
42}
43
44static int palmtx_bat_get_property(struct power_supply *bat_ps,
45 enum power_supply_property psp,
46 union power_supply_propval *val)
47{
48 switch (psp) {
49 case POWER_SUPPLY_PROP_STATUS:
50 val->intval = bat_status;
51 break;
52 case POWER_SUPPLY_PROP_TECHNOLOGY:
53 val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
54 break;
55 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
56 val->intval = palmtx_read_bat(bat_ps);
57 break;
58 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
59 case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
60 val->intval = PALMTX_BAT_MAX_VOLTAGE;
61 break;
62 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
63 val->intval = PALMTX_BAT_MIN_VOLTAGE;
64 break;
65 case POWER_SUPPLY_PROP_TEMP:
66 val->intval = palmtx_read_temp(bat_ps);
67 break;
68 case POWER_SUPPLY_PROP_PRESENT:
69 val->intval = 1;
70 break;
71 default:
72 return -EINVAL;
73 }
74 return 0;
75}
76
77static void palmtx_bat_external_power_changed(struct power_supply *bat_ps)
78{
79 schedule_work(&bat_work);
80}
81
82static char *status_text[] = {
83 [POWER_SUPPLY_STATUS_UNKNOWN] = "Unknown",
84 [POWER_SUPPLY_STATUS_CHARGING] = "Charging",
85 [POWER_SUPPLY_STATUS_DISCHARGING] = "Discharging",
86};
87
88static void palmtx_bat_update(struct power_supply *bat_ps)
89{
90 int old_status = bat_status;
91
92 mutex_lock(&work_lock);
93
94 bat_status = gpio_get_value(GPIO_NR_PALMTX_POWER_DETECT) ?
95 POWER_SUPPLY_STATUS_CHARGING :
96 POWER_SUPPLY_STATUS_DISCHARGING;
97
98 if (old_status != bat_status) {
99 pr_debug("%s %s -> %s\n", bat_ps->name,
100 status_text[old_status],
101 status_text[bat_status]);
102 power_supply_changed(bat_ps);
103 }
104
105 mutex_unlock(&work_lock);
106}
107
108static enum power_supply_property palmtx_bat_main_props[] = {
109 POWER_SUPPLY_PROP_STATUS,
110 POWER_SUPPLY_PROP_TECHNOLOGY,
111 POWER_SUPPLY_PROP_VOLTAGE_NOW,
112 POWER_SUPPLY_PROP_VOLTAGE_MAX,
113 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
114 POWER_SUPPLY_PROP_TEMP,
115 POWER_SUPPLY_PROP_PRESENT,
116};
117
118struct power_supply bat_ps = {
119 .name = "main-battery",
120 .type = POWER_SUPPLY_TYPE_BATTERY,
121 .properties = palmtx_bat_main_props,
122 .num_properties = ARRAY_SIZE(palmtx_bat_main_props),
123 .get_property = palmtx_bat_get_property,
124 .external_power_changed = palmtx_bat_external_power_changed,
125 .use_for_apm = 1,
126};
127
128static void palmtx_bat_work(struct work_struct *work)
129{
130 palmtx_bat_update(&bat_ps);
131}
132
133#ifdef CONFIG_PM
134static int palmtx_bat_suspend(struct platform_device *dev, pm_message_t state)
135{
136 flush_scheduled_work();
137 return 0;
138}
139
140static int palmtx_bat_resume(struct platform_device *dev)
141{
142 schedule_work(&bat_work);
143 return 0;
144}
145#else
146#define palmtx_bat_suspend NULL
147#define palmtx_bat_resume NULL
148#endif
149
150static int __devinit palmtx_bat_probe(struct platform_device *dev)
151{
152 int ret = 0;
153
154 if (!machine_is_palmtx())
155 return -ENODEV;
156
157 mutex_init(&work_lock);
158
159 INIT_WORK(&bat_work, palmtx_bat_work);
160
161 ret = power_supply_register(&dev->dev, &bat_ps);
162 if (!ret)
163 schedule_work(&bat_work);
164
165 return ret;
166}
167
168static int __devexit palmtx_bat_remove(struct platform_device *dev)
169{
170 power_supply_unregister(&bat_ps);
171 return 0;
172}
173
174static struct platform_driver palmtx_bat_driver = {
175 .driver.name = "wm97xx-battery",
176 .driver.owner = THIS_MODULE,
177 .probe = palmtx_bat_probe,
178 .remove = __devexit_p(palmtx_bat_remove),
179 .suspend = palmtx_bat_suspend,
180 .resume = palmtx_bat_resume,
181};
182
183static int __init palmtx_bat_init(void)
184{
185 return platform_driver_register(&palmtx_bat_driver);
186}
187
188static void __exit palmtx_bat_exit(void)
189{
190 platform_driver_unregister(&palmtx_bat_driver);
191}
192
193module_init(palmtx_bat_init);
194module_exit(palmtx_bat_exit);
195
196MODULE_LICENSE("GPL");
197MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
198MODULE_DESCRIPTION("Palm T|X battery driver");
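One detail of the new battery driver worth spelling out is the scaling in palmtx_read_bat(): the raw WM97xx auxiliary ADC count is multiplied by 1000 and divided by 414. The patch does not say so explicitly, but the factor appears chosen so the result comes out in millivolts; as a worked example, a raw reading of 1656 counts gives 1656 * 1000 / 414 = 4000, i.e. about 4.0 V, which is consistent with the PALMTX_BAT_MAX_VOLTAGE / PALMTX_BAT_MIN_VOLTAGE limits the driver reports for its Li-Po cell.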
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f843c1383a4b..538552495d48 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -84,7 +84,6 @@ typedef struct ide_scsi_obj {
84 struct Scsi_Host *host; 84 struct Scsi_Host *host;
85 85
86 struct ide_atapi_pc *pc; /* Current packet command */ 86 struct ide_atapi_pc *pc; /* Current packet command */
87 unsigned long flags; /* Status/Action flags */
88 unsigned long transform; /* SCSI cmd translation layer */ 87 unsigned long transform; /* SCSI cmd translation layer */
89 unsigned long log; /* log flags */ 88 unsigned long log; /* log flags */
90} idescsi_scsi_t; 89} idescsi_scsi_t;
@@ -126,23 +125,14 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
126} 125}
127 126
128/* 127/*
129 * Per ATAPI device status bits.
130 */
131#define IDESCSI_DRQ_INTERRUPT 0 /* DRQ interrupt device */
132
133/*
134 * ide-scsi requests.
135 */
136#define IDESCSI_PC_RQ 90
137
138/*
139 * PIO data transfer routine using the scatter gather table. 128 * PIO data transfer routine using the scatter gather table.
140 */ 129 */
141static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, 130static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
142 unsigned int bcount, int write) 131 unsigned int bcount, int write)
143{ 132{
144 ide_hwif_t *hwif = drive->hwif; 133 ide_hwif_t *hwif = drive->hwif;
145 xfer_func_t *xf = write ? hwif->output_data : hwif->input_data; 134 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
135 xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
146 char *buf; 136 char *buf;
147 int count; 137 int count;
148 138
@@ -228,7 +218,6 @@ static int idescsi_check_condition(ide_drive_t *drive,
228 rq->cmd_type = REQ_TYPE_SENSE; 218 rq->cmd_type = REQ_TYPE_SENSE;
229 rq->cmd_flags |= REQ_PREEMPT; 219 rq->cmd_flags |= REQ_PREEMPT;
230 pc->timeout = jiffies + WAIT_READY; 220 pc->timeout = jiffies + WAIT_READY;
231 pc->callback = ide_scsi_callback;
232 /* NOTE! Save the failed packet command in "rq->buffer" */ 221 /* NOTE! Save the failed packet command in "rq->buffer" */
233 rq->buffer = (void *) failed_cmd->special; 222 rq->buffer = (void *) failed_cmd->special;
234 pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd; 223 pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
@@ -237,6 +226,7 @@ static int idescsi_check_condition(ide_drive_t *drive,
237 ide_scsi_hex_dump(pc->c, 6); 226 ide_scsi_hex_dump(pc->c, 6);
238 } 227 }
239 rq->rq_disk = scsi->disk; 228 rq->rq_disk = scsi->disk;
229 memcpy(rq->cmd, pc->c, 12);
240 ide_do_drive_cmd(drive, rq); 230 ide_do_drive_cmd(drive, rq);
241 return 0; 231 return 0;
242} 232}
@@ -246,10 +236,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
246{ 236{
247 ide_hwif_t *hwif = drive->hwif; 237 ide_hwif_t *hwif = drive->hwif;
248 238
249 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 239 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
250 /* force an abort */ 240 /* force an abort */
251 hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, 241 hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
252 hwif->io_ports.command_addr);
253 242
254 rq->errors++; 243 rq->errors++;
255 244
@@ -421,10 +410,6 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
421 410
422 if (blk_sense_request(rq) || blk_special_request(rq)) { 411 if (blk_sense_request(rq) || blk_special_request(rq)) {
423 struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special; 412 struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;
424 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
425
426 if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags))
427 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
428 413
429 if (drive->using_dma && !idescsi_map_sg(drive, pc)) 414 if (drive->using_dma && !idescsi_map_sg(drive, pc))
430 pc->flags |= PC_FLAG_DMA_OK; 415 pc->flags |= PC_FLAG_DMA_OK;
@@ -460,11 +445,14 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
460static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi) 445static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
461{ 446{
462 if (drive->id && (drive->id->config & 0x0060) == 0x20) 447 if (drive->id && (drive->id->config & 0x0060) == 0x20)
463 set_bit (IDESCSI_DRQ_INTERRUPT, &scsi->flags); 448 set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
464 clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); 449 clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
465#if IDESCSI_DEBUG_LOG 450#if IDESCSI_DEBUG_LOG
466 set_bit(IDESCSI_LOG_CMD, &scsi->log); 451 set_bit(IDESCSI_LOG_CMD, &scsi->log);
467#endif /* IDESCSI_DEBUG_LOG */ 452#endif /* IDESCSI_DEBUG_LOG */
453
454 drive->pc_callback = ide_scsi_callback;
455
468 idescsi_add_settings(drive); 456 idescsi_add_settings(drive);
469} 457}
470 458
@@ -616,7 +604,6 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
616 pc->scsi_cmd = cmd; 604 pc->scsi_cmd = cmd;
617 pc->done = done; 605 pc->done = done;
618 pc->timeout = jiffies + cmd->timeout_per_command; 606 pc->timeout = jiffies + cmd->timeout_per_command;
619 pc->callback = ide_scsi_callback;
620 607
621 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { 608 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
622 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); 609 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -631,6 +618,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
631 rq->special = (char *) pc; 618 rq->special = (char *) pc;
632 rq->cmd_type = REQ_TYPE_SPECIAL; 619 rq->cmd_type = REQ_TYPE_SPECIAL;
633 spin_unlock_irq(host->host_lock); 620 spin_unlock_irq(host->host_lock);
621 memcpy(rq->cmd, pc->c, 12);
634 blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL); 622 blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
635 spin_lock_irq(host->host_lock); 623 spin_lock_irq(host->host_lock);
636 return 0; 624 return 0;
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 93e407ee08b9..1ff80de177db 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -201,6 +201,10 @@ static void cpm_uart_int_tx(struct uart_port *port)
201 cpm_uart_tx_pump(port); 201 cpm_uart_tx_pump(port);
202} 202}
203 203
204#ifdef CONFIG_CONSOLE_POLL
205static int serial_polled;
206#endif
207
204/* 208/*
205 * Receive characters 209 * Receive characters
206 */ 210 */
@@ -222,6 +226,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
222 */ 226 */
223 bdp = pinfo->rx_cur; 227 bdp = pinfo->rx_cur;
224 for (;;) { 228 for (;;) {
229#ifdef CONFIG_CONSOLE_POLL
230 if (unlikely(serial_polled)) {
231 serial_polled = 0;
232 return;
233 }
234#endif
225 /* get status */ 235 /* get status */
226 status = in_be16(&bdp->cbd_sc); 236 status = in_be16(&bdp->cbd_sc);
227 /* If this one is empty, return happy */ 237 /* If this one is empty, return happy */
@@ -253,7 +263,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
253 goto handle_error; 263 goto handle_error;
254 if (uart_handle_sysrq_char(port, ch)) 264 if (uart_handle_sysrq_char(port, ch))
255 continue; 265 continue;
256 266#ifdef CONFIG_CONSOLE_POLL
267 if (unlikely(serial_polled)) {
268 serial_polled = 0;
269 return;
270 }
271#endif
257 error_return: 272 error_return:
258 tty_insert_flip_char(tty, ch, flg); 273 tty_insert_flip_char(tty, ch, flg);
259 274
@@ -865,6 +880,80 @@ static void cpm_uart_config_port(struct uart_port *port, int flags)
865 cpm_uart_request_port(port); 880 cpm_uart_request_port(port);
866 } 881 }
867} 882}
883
884#ifdef CONFIG_CONSOLE_POLL
885/* Serial polling routines for writing and reading from the uart while
886 * in an interrupt or debug context.
887 */
888
889#define GDB_BUF_SIZE 512 /* power of 2, please */
890
891static char poll_buf[GDB_BUF_SIZE];
892static char *pollp;
893static int poll_chars;
894
895static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
896{
897 u_char c, *cp;
898 volatile cbd_t *bdp;
899 int i;
900
901 /* Get the address of the host memory buffer.
902 */
903 bdp = pinfo->rx_cur;
904 while (bdp->cbd_sc & BD_SC_EMPTY)
905 ;
906
907 /* If the buffer address is in the CPM DPRAM, don't
908 * convert it.
909 */
910 cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
911
912 if (obuf) {
913 i = c = bdp->cbd_datlen;
914 while (i-- > 0)
915 *obuf++ = *cp++;
916 } else
917 c = *cp;
918 bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID);
919 bdp->cbd_sc |= BD_SC_EMPTY;
920
921 if (bdp->cbd_sc & BD_SC_WRAP)
922 bdp = pinfo->rx_bd_base;
923 else
924 bdp++;
925 pinfo->rx_cur = (cbd_t *)bdp;
926
927 return (int)c;
928}
929
930static int cpm_get_poll_char(struct uart_port *port)
931{
932 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
933
934 if (!serial_polled) {
935 serial_polled = 1;
936 poll_chars = 0;
937 }
938 if (poll_chars <= 0) {
939 poll_chars = poll_wait_key(poll_buf, pinfo);
940 pollp = poll_buf;
941 }
942 poll_chars--;
943 return *pollp++;
944}
945
946static void cpm_put_poll_char(struct uart_port *port,
947 unsigned char c)
948{
949 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
950 static char ch[2];
951
952 ch[0] = (char)c;
953 cpm_uart_early_write(pinfo->port.line, ch, 1);
954}
955#endif /* CONFIG_CONSOLE_POLL */
956
868static struct uart_ops cpm_uart_pops = { 957static struct uart_ops cpm_uart_pops = {
869 .tx_empty = cpm_uart_tx_empty, 958 .tx_empty = cpm_uart_tx_empty,
870 .set_mctrl = cpm_uart_set_mctrl, 959 .set_mctrl = cpm_uart_set_mctrl,
@@ -882,6 +971,10 @@ static struct uart_ops cpm_uart_pops = {
882 .request_port = cpm_uart_request_port, 971 .request_port = cpm_uart_request_port,
883 .config_port = cpm_uart_config_port, 972 .config_port = cpm_uart_config_port,
884 .verify_port = cpm_uart_verify_port, 973 .verify_port = cpm_uart_verify_port,
974#ifdef CONFIG_CONSOLE_POLL
975 .poll_get_char = cpm_get_poll_char,
976 .poll_put_char = cpm_put_poll_char,
977#endif
885}; 978};
886 979
887struct uart_cpm_port cpm_uart_ports[UART_NR]; 980struct uart_cpm_port cpm_uart_ports[UART_NR];
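The hunks above wire cpm_get_poll_char() and cpm_put_poll_char() into the cpm_uart uart_ops under CONFIG_CONSOLE_POLL, and the early returns added to cpm_uart_int_rx() keep the normal interrupt handler from re-consuming descriptors a poller has already drained. These hooks are consumed by the kernel's polled-console machinery (kgdb over serial); purely to illustrate the contract, a caller holding a struct uart_port could drive them like this, assuming CONFIG_CONSOLE_POLL is enabled so the fields exist in struct uart_ops.

#include <linux/serial_core.h>

/* Echo polled characters back until an 'x' is seen.  Illustrative
 * only: the real users are the kgdb console glue, the hooks may
 * busy-wait, and this must only run in a debug/panic context. */
static void poll_echo_until_x(struct uart_port *port)
{
	const struct uart_ops *ops = port->ops;
	int c;

	if (!ops->poll_get_char || !ops->poll_put_char)
		return;

	do {
		c = ops->poll_get_char(port);
		ops->poll_put_char(port, c);
	} while (c != 'x');
}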
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index c9f53e71f252..61d3ade5286c 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -921,6 +921,10 @@ static int mpsc_make_ready(struct mpsc_port_info *pi)
921 return 0; 921 return 0;
922} 922}
923 923
924#ifdef CONFIG_CONSOLE_POLL
925static int serial_polled;
926#endif
927
924/* 928/*
925 ****************************************************************************** 929 ******************************************************************************
926 * 930 *
@@ -956,7 +960,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
956 while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) 960 while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
957 & SDMA_DESC_CMDSTAT_O)) { 961 & SDMA_DESC_CMDSTAT_O)) {
958 bytes_in = be16_to_cpu(rxre->bytecnt); 962 bytes_in = be16_to_cpu(rxre->bytecnt);
959 963#ifdef CONFIG_CONSOLE_POLL
964 if (unlikely(serial_polled)) {
965 serial_polled = 0;
966 return 0;
967 }
968#endif
960 /* Following use of tty struct directly is deprecated */ 969 /* Following use of tty struct directly is deprecated */
961 if (unlikely(tty_buffer_request_room(tty, bytes_in) 970 if (unlikely(tty_buffer_request_room(tty, bytes_in)
962 < bytes_in)) { 971 < bytes_in)) {
@@ -1017,6 +1026,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
1017 if (uart_handle_sysrq_char(&pi->port, *bp)) { 1026 if (uart_handle_sysrq_char(&pi->port, *bp)) {
1018 bp++; 1027 bp++;
1019 bytes_in--; 1028 bytes_in--;
1029#ifdef CONFIG_CONSOLE_POLL
1030 if (unlikely(serial_polled)) {
1031 serial_polled = 0;
1032 return 0;
1033 }
1034#endif
1020 goto next_frame; 1035 goto next_frame;
1021 } 1036 }
1022 1037
@@ -1519,6 +1534,133 @@ static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
 
 	return rc;
 }
+#ifdef CONFIG_CONSOLE_POLL
+/* Serial polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static char poll_buf[2048];
+static int poll_ptr;
+static int poll_cnt;
+static void mpsc_put_poll_char(struct uart_port *port,
+			unsigned char c);
+
+static int mpsc_get_poll_char(struct uart_port *port)
+{
+	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+	struct mpsc_rx_desc *rxre;
+	u32 cmdstat, bytes_in, i;
+	u8 *bp;
+
+	if (!serial_polled)
+		serial_polled = 1;
+
+	pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
+
+	if (poll_cnt) {
+		poll_cnt--;
+		return poll_buf[poll_ptr++];
+	}
+	poll_ptr = 0;
+	poll_cnt = 0;
+
+	while (poll_cnt == 0) {
+		rxre = (struct mpsc_rx_desc *)(pi->rxr +
+			(pi->rxr_posn*MPSC_RXRE_SIZE));
+		dma_cache_sync(pi->port.dev, (void *)rxre,
+			MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+			invalidate_dcache_range((ulong)rxre,
+				(ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+		/*
+		 * Loop through Rx descriptors handling ones that have
+		 * been completed.
+		 */
+		while (poll_cnt == 0 &&
+			!((cmdstat = be32_to_cpu(rxre->cmdstat)) &
+				SDMA_DESC_CMDSTAT_O)){
+			bytes_in = be16_to_cpu(rxre->bytecnt);
+			bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
+			dma_cache_sync(pi->port.dev, (void *) bp,
+				MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+				invalidate_dcache_range((ulong)bp,
+					(ulong)bp + MPSC_RXBE_SIZE);
+#endif
+			if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
+				SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
+				!(cmdstat & pi->port.ignore_status_mask)) {
+				poll_buf[poll_cnt] = *bp;
+				poll_cnt++;
+			} else {
+				for (i = 0; i < bytes_in; i++) {
+					poll_buf[poll_cnt] = *bp++;
+					poll_cnt++;
+				}
+				pi->port.icount.rx += bytes_in;
+			}
+			rxre->bytecnt = cpu_to_be16(0);
+			wmb();
+			rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
+						SDMA_DESC_CMDSTAT_EI |
+						SDMA_DESC_CMDSTAT_F |
+						SDMA_DESC_CMDSTAT_L);
+			wmb();
+			dma_cache_sync(pi->port.dev, (void *)rxre,
+				MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+				flush_dcache_range((ulong)rxre,
+					(ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+
+			/* Advance to next descriptor */
+			pi->rxr_posn = (pi->rxr_posn + 1) &
+				(MPSC_RXR_ENTRIES - 1);
+			rxre = (struct mpsc_rx_desc *)(pi->rxr +
+				(pi->rxr_posn * MPSC_RXRE_SIZE));
+			dma_cache_sync(pi->port.dev, (void *)rxre,
+				MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
+				invalidate_dcache_range((ulong)rxre,
+					(ulong)rxre + MPSC_RXRE_SIZE);
+#endif
+		}
+
+		/* Restart rx engine, if its stopped */
+		if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
+			mpsc_start_rx(pi);
+	}
+	if (poll_cnt) {
+		poll_cnt--;
+		return poll_buf[poll_ptr++];
+	}
+
+	return 0;
+}
+
+
+static void mpsc_put_poll_char(struct uart_port *port,
+			unsigned char c)
+{
+	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
+	u32 data;
+
+	data = readl(pi->mpsc_base + MPSC_MPCR);
+	writeb(c, pi->mpsc_base + MPSC_CHR_1);
+	mb();
+	data = readl(pi->mpsc_base + MPSC_CHR_2);
+	data |= MPSC_CHR_2_TTCS;
+	writel(data, pi->mpsc_base + MPSC_CHR_2);
+	mb();
+
+	while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
+}
+#endif
 
 static struct uart_ops mpsc_pops = {
 	.tx_empty	= mpsc_tx_empty,
@@ -1537,6 +1679,10 @@ static struct uart_ops mpsc_pops = {
 	.request_port	= mpsc_request_port,
 	.config_port	= mpsc_config_port,
 	.verify_port	= mpsc_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = mpsc_get_poll_char,
+	.poll_put_char = mpsc_put_poll_char,
+#endif
 };
 
 /*
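mpsc_get_poll_char() above drains every completed Rx descriptor into a static poll_buf and then replays the bytes one per call, setting serial_polled so that mpsc_rx_intr() (patched earlier in this file) abandons its own pass and the same descriptors are not completed twice; the ring index wraps with a mask because MPSC_RXR_ENTRIES is a power of two. A self-contained sketch of that drain-and-replay pattern, with purely illustrative names and a hypothetical refill callback standing in for the descriptor handling:

/* Sketch only: buffer once, hand bytes back one at a time. */
#define EXAMPLE_ENTRIES	32	/* must be a power of two for the wrap mask */

static char example_buf[2048];
static int example_cnt, example_ptr, example_posn;

static int example_get_char(int (*refill)(char *buf))
{
	if (example_cnt == 0) {
		example_cnt = refill(example_buf);	/* drain one descriptor */
		example_ptr = 0;
		/* advance the ring index, wrapping without a division */
		example_posn = (example_posn + 1) & (EXAMPLE_ENTRIES - 1);
	}
	if (example_cnt == 0)
		return 0;	/* nothing pending; mirrors the driver's fallback */
	example_cnt--;
	return example_buf[example_ptr++];
}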
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index fbd6289977c8..8fb0066609bb 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -152,9 +152,10 @@ static int is_vbus_present(void)
 static void pullup_off(void)
 {
 	struct pxa2xx_udc_mach_info *mach = the_controller->mach;
+	int off_level = mach->gpio_pullup_inverted;
 
 	if (mach->gpio_pullup)
-		gpio_set_value(mach->gpio_pullup, 0);
+		gpio_set_value(mach->gpio_pullup, off_level);
 	else if (mach->udc_command)
 		mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
 }
@@ -162,9 +163,10 @@ static void pullup_off(void)
 static void pullup_on(void)
 {
 	struct pxa2xx_udc_mach_info *mach = the_controller->mach;
+	int on_level = !mach->gpio_pullup_inverted;
 
 	if (mach->gpio_pullup)
-		gpio_set_value(mach->gpio_pullup, 1);
+		gpio_set_value(mach->gpio_pullup, on_level);
 	else if (mach->udc_command)
 		mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
 }
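This pxa25x_udc change makes the D+ pull-up GPIO respect gpio_pullup_inverted instead of hard-coding an active-high line. A hedged, illustrative board-file fragment (the GPIO number and variable name are made up, and the header path varies by kernel version):

#include <linux/init.h>
#include <mach/udc.h>	/* pxa2xx_udc_mach_info; older trees use asm/arch/udc.h */

/* Illustrative board data: an active-low D+ pull-up on a made-up GPIO. */
static struct pxa2xx_udc_mach_info example_udc_info __initdata = {
	.gpio_pullup		= 36,	/* hypothetical GPIO number */
	.gpio_pullup_inverted	= 1,	/* pullup_on() now drives it low */
};

With .gpio_pullup_inverted set, pullup_on() computes on_level = !1 = 0 and pullup_off() drives the line high, the opposite of the previously hard-coded 1/0 levels.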
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index d0746261c957..bb2514369507 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -227,6 +227,22 @@ static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
 	case 4:  ret = LCCR3_4BPP; break;
 	case 8:  ret = LCCR3_8BPP; break;
 	case 16: ret = LCCR3_16BPP; break;
+	case 24:
+		switch (var->red.length + var->green.length +
+				var->blue.length + var->transp.length) {
+		case 18: ret = LCCR3_18BPP_P | LCCR3_PDFOR_3; break;
+		case 19: ret = LCCR3_19BPP_P; break;
+		}
+		break;
+	case 32:
+		switch (var->red.length + var->green.length +
+				var->blue.length + var->transp.length) {
+		case 18: ret = LCCR3_18BPP | LCCR3_PDFOR_3; break;
+		case 19: ret = LCCR3_19BPP; break;
+		case 24: ret = LCCR3_24BPP | LCCR3_PDFOR_3; break;
+		case 25: ret = LCCR3_25BPP; break;
+		}
+		break;
 	}
 	return ret;
 }
@@ -345,6 +361,41 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 		var->green.offset = 5; var->green.length = 6;
 		var->blue.offset = 0; var->blue.length = 5;
 		var->transp.offset = var->transp.length = 0;
+	} else if (var->bits_per_pixel > 16) {
+		struct pxafb_mode_info *mode;
+
+		mode = pxafb_getmode(inf, var);
+		if (!mode)
+			return -EINVAL;
+
+		switch (mode->depth) {
+		case 18: /* RGB666 */
+			var->transp.offset = var->transp.length = 0;
+			var->red.offset = 12; var->red.length = 6;
+			var->green.offset = 6; var->green.length = 6;
+			var->blue.offset = 0; var->blue.length = 6;
+			break;
+		case 19: /* RGBT666 */
+			var->transp.offset = 18; var->transp.length = 1;
+			var->red.offset = 12; var->red.length = 6;
+			var->green.offset = 6; var->green.length = 6;
+			var->blue.offset = 0; var->blue.length = 6;
+			break;
+		case 24: /* RGB888 */
+			var->transp.offset = var->transp.length = 0;
+			var->red.offset = 16; var->red.length = 8;
+			var->green.offset = 8; var->green.length = 8;
+			var->blue.offset = 0; var->blue.length = 8;
+			break;
+		case 25: /* RGBT888 */
+			var->transp.offset = 24; var->transp.length = 1;
+			var->red.offset = 16; var->red.length = 8;
+			var->green.offset = 8; var->green.length = 8;
+			var->blue.offset = 0; var->blue.length = 8;
+			break;
+		default:
+			return -EINVAL;
+		}
 	} else {
 		var->red.offset = var->green.offset = 0;
 		var->blue.offset = var->transp.offset = 0;
@@ -376,7 +427,7 @@ static int pxafb_set_par(struct fb_info *info)
 	struct pxafb_info *fbi = (struct pxafb_info *)info;
 	struct fb_var_screeninfo *var = &info->var;
 
-	if (var->bits_per_pixel == 16)
+	if (var->bits_per_pixel >= 16)
 		fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
 	else if (!fbi->cmap_static)
 		fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
@@ -391,7 +442,7 @@ static int pxafb_set_par(struct fb_info *info)
 
 	fbi->fb.fix.line_length = var->xres_virtual *
 				  var->bits_per_pixel / 8;
-	if (var->bits_per_pixel == 16)
+	if (var->bits_per_pixel >= 16)
 		fbi->palette_size = 0;
 	else
 		fbi->palette_size = var->bits_per_pixel == 1 ?
@@ -404,7 +455,7 @@ static int pxafb_set_par(struct fb_info *info)
 	 */
 	pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR);
 
-	if (fbi->fb.var.bits_per_pixel == 16)
+	if (fbi->fb.var.bits_per_pixel >= 16)
 		fb_dealloc_cmap(&fbi->fb.cmap);
 	else
 		fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0);
@@ -831,6 +882,8 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var,
 	case 4:
 	case 8:
 	case 16:
+	case 24:
+	case 32:
 		break;
 	default:
 		printk(KERN_ERR "%s: invalid bit depth %d\n",
@@ -968,6 +1021,11 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
 
 	for (gpio = 58; ldd_bits; gpio++, ldd_bits--)
 		pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT);
+	/* 18 bit interface */
+	if (fbi->fb.var.bits_per_pixel > 16) {
+		pxa_gpio_mode(86 | GPIO_ALT_FN_2_OUT);
+		pxa_gpio_mode(87 | GPIO_ALT_FN_2_OUT);
+	}
 	pxa_gpio_mode(GPIO74_LCD_FCLK_MD);
 	pxa_gpio_mode(GPIO75_LCD_LCLK_MD);
 	pxa_gpio_mode(GPIO76_LCD_PCLK_MD);
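The pxafb hunks above derive the LCCR3 pixel format for depths beyond 16bpp by summing the colour-field lengths: 6 + 6 + 6 = 18 selects the RGB666 encodings and 6 + 6 + 6 + 1 = 19 the RGBT666 ones, with bits_per_pixel 24 mapping to the packed variants and 32 to the unpacked ones. A hedged sketch of a screeninfo layout that exercises the new 24bpp path (the helper name is hypothetical, not part of the driver):

#include <linux/fb.h>

/*
 * Hypothetical helper: request the packed RGBT666 layout handled above.
 * The field lengths sum to 6 + 6 + 6 + 1 = 19, so pxafb_bpp_to_lccr3()
 * picks LCCR3_19BPP_P for this 24-bit-per-pixel mode.
 */
static void example_fill_rgbt666(struct fb_var_screeninfo *var)
{
	var->bits_per_pixel = 24;
	var->transp.offset = 18; var->transp.length = 1;
	var->red.offset = 12; var->red.length = 6;
	var->green.offset = 6; var->green.length = 6;
	var->blue.offset = 0; var->blue.length = 6;
}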