Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_throttling.c  17
-rw-r--r--  drivers/acpi/sleep/main.c  42
-rw-r--r--  drivers/acpi/system.c  1
-rw-r--r--  drivers/atm/fore200e.c  15
-rw-r--r--  drivers/auxdisplay/cfag12864b.c  13
-rw-r--r--  drivers/base/cpu.c  4
-rw-r--r--  drivers/base/memory.c  19
-rw-r--r--  drivers/char/Kconfig  7
-rw-r--r--  drivers/char/Makefile  1
-rw-r--r--  drivers/char/lcd.c  516
-rw-r--r--  drivers/char/lcd.h  154
-rw-r--r--  drivers/char/mem.c  5
-rw-r--r--  drivers/char/mspec.c  23
-rw-r--r--  drivers/char/nvram.c  1
-rw-r--r--  drivers/char/random.c  1
-rw-r--r--  drivers/char/rtc.c  19
-rw-r--r--  drivers/char/stallion.c  1
-rw-r--r--  drivers/char/tty_io.c  15
-rw-r--r--  drivers/char/vt.c  6
-rw-r--r--  drivers/cpufreq/cpufreq.c  14
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c  79
-rw-r--r--  drivers/dca/dca-core.c  131
-rw-r--r--  drivers/dca/dca-sysfs.c  3
-rw-r--r--  drivers/dma/Kconfig  37
-rw-r--r--  drivers/dma/Makefile  3
-rw-r--r--  drivers/dma/dmaengine.c  35
-rw-r--r--  drivers/dma/dmatest.c  444
-rw-r--r--  drivers/dma/dw_dmac.c  1122
-rw-r--r--  drivers/dma/dw_dmac_regs.h  225
-rw-r--r--  drivers/dma/fsldma.c  38
-rw-r--r--  drivers/dma/ioat.c  15
-rw-r--r--  drivers/dma/ioat_dca.c  244
-rw-r--r--  drivers/dma/ioat_dma.c  402
-rw-r--r--  drivers/dma/ioatdma.h  28
-rw-r--r--  drivers/dma/ioatdma_hw.h  1
-rw-r--r--  drivers/dma/ioatdma_registers.h  20
-rw-r--r--  drivers/dma/iop-adma.c  53
-rw-r--r--  drivers/dma/mv_xor.c  1375
-rw-r--r--  drivers/dma/mv_xor.h  183
-rw-r--r--  drivers/firmware/dcdbas.c  3
-rw-r--r--  drivers/hid/hid-core.c  10
-rw-r--r--  drivers/hid/hid-input-quirks.c  40
-rw-r--r--  drivers/hid/hid-input.c  3
-rw-r--r--  drivers/hid/hidraw.c  48
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c  22
-rw-r--r--  drivers/hid/usbhid/hiddev.c  14
-rw-r--r--  drivers/hid/usbhid/usbkbd.c  10
-rw-r--r--  drivers/hid/usbhid/usbmouse.c  8
-rw-r--r--  drivers/ide/Kconfig  2
-rw-r--r--  drivers/ide/arm/icside.c  71
-rw-r--r--  drivers/ide/arm/ide_arm.c  14
-rw-r--r--  drivers/ide/arm/palm_bk3710.c  30
-rw-r--r--  drivers/ide/arm/rapide.c  24
-rw-r--r--  drivers/ide/h8300/ide-h8300.c  48
-rw-r--r--  drivers/ide/ide-atapi.c  58
-rw-r--r--  drivers/ide/ide-cd.c  157
-rw-r--r--  drivers/ide/ide-cd.h  38
-rw-r--r--  drivers/ide/ide-cd_ioctl.c  35
-rw-r--r--  drivers/ide/ide-disk.c  2
-rw-r--r--  drivers/ide/ide-dma.c  103
-rw-r--r--  drivers/ide/ide-floppy.c  90
-rw-r--r--  drivers/ide/ide-generic.c  73
-rw-r--r--  drivers/ide/ide-io.c  42
-rw-r--r--  drivers/ide/ide-iops.c  230
-rw-r--r--  drivers/ide/ide-lib.c  17
-rw-r--r--  drivers/ide/ide-pnp.c  29
-rw-r--r--  drivers/ide/ide-probe.c  366
-rw-r--r--  drivers/ide/ide-proc.c  4
-rw-r--r--  drivers/ide/ide-tape.c  127
-rw-r--r--  drivers/ide/ide-taskfile.c  38
-rw-r--r--  drivers/ide/ide.c  49
-rw-r--r--  drivers/ide/legacy/buddha.c  24
-rw-r--r--  drivers/ide/legacy/falconide.c  56
-rw-r--r--  drivers/ide/legacy/gayle.c  39
-rw-r--r--  drivers/ide/legacy/ide-4drives.c  20
-rw-r--r--  drivers/ide/legacy/ide-cs.c  54
-rw-r--r--  drivers/ide/legacy/ide_platform.c  32
-rw-r--r--  drivers/ide/legacy/macide.c  15
-rw-r--r--  drivers/ide/legacy/q40ide.c  47
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c  56
-rw-r--r--  drivers/ide/mips/swarm.c  24
-rw-r--r--  drivers/ide/pci/aec62xx.c  5
-rw-r--r--  drivers/ide/pci/alim15x3.c  12
-rw-r--r--  drivers/ide/pci/amd74xx.c  1
-rw-r--r--  drivers/ide/pci/cmd640.c  29
-rw-r--r--  drivers/ide/pci/cmd64x.c  12
-rw-r--r--  drivers/ide/pci/cs5520.c  41
-rw-r--r--  drivers/ide/pci/cs5535.c  3
-rw-r--r--  drivers/ide/pci/delkin_cb.c  25
-rw-r--r--  drivers/ide/pci/hpt34x.c  1
-rw-r--r--  drivers/ide/pci/hpt366.c  23
-rw-r--r--  drivers/ide/pci/ns87415.c  115
-rw-r--r--  drivers/ide/pci/pdc202xx_old.c  3
-rw-r--r--  drivers/ide/pci/piix.c  4
-rw-r--r--  drivers/ide/pci/scc_pata.c  139
-rw-r--r--  drivers/ide/pci/serverworks.c  4
-rw-r--r--  drivers/ide/pci/sgiioc4.c  65
-rw-r--r--  drivers/ide/pci/siimage.c  6
-rw-r--r--  drivers/ide/pci/sl82c105.c  4
-rw-r--r--  drivers/ide/pci/tc86c001.c  16
-rw-r--r--  drivers/ide/pci/via82cxxx.c  1
-rw-r--r--  drivers/ide/ppc/pmac.c  222
-rw-r--r--  drivers/ide/setup-pci.c  109
-rw-r--r--  drivers/ieee1394/iso.c  1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c  4
-rw-r--r--  drivers/input/keyboard/tosakbd.c  2
-rw-r--r--  drivers/isdn/gigaset/asyncdata.c  3
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c  12
-rw-r--r--  drivers/isdn/gigaset/common.c  2
-rw-r--r--  drivers/isdn/gigaset/gigaset.h  3
-rw-r--r--  drivers/isdn/gigaset/i4l.c  56
-rw-r--r--  drivers/isdn/gigaset/interface.c  25
-rw-r--r--  drivers/isdn/gigaset/usb-gigaset.c  7
-rw-r--r--  drivers/leds/Kconfig  16
-rw-r--r--  drivers/leds/Makefile  2
-rw-r--r--  drivers/leds/led-triggers.c  3
-rw-r--r--  drivers/leds/leds-atmel-pwm.c  2
-rw-r--r--  drivers/leds/leds-h1940.c  9
-rw-r--r--  drivers/leds/leds-pca9532.c  337
-rw-r--r--  drivers/leds/leds-pca955x.c  384
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-dvb.c  1
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-ioread.c  1
-rw-r--r--  drivers/media/video/uvc/uvc_queue.c  1
-rw-r--r--  drivers/media/video/videobuf-core.c  1
-rw-r--r--  drivers/message/i2o/device.c  54
-rw-r--r--  drivers/mfd/Kconfig  11
-rw-r--r--  drivers/mfd/Makefile  4
-rw-r--r--  drivers/mfd/mfd-core.c  114
-rw-r--r--  drivers/mfd/tc6393xb.c  600
-rw-r--r--  drivers/misc/atmel_pwm.c  3
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c  3
-rw-r--r--  drivers/mmc/card/mmc_test.c  225
-rw-r--r--  drivers/mmc/card/queue.c  97
-rw-r--r--  drivers/mmc/host/au1xmmc.c  54
-rw-r--r--  drivers/mmc/host/pxamci.c  2
-rw-r--r--  drivers/mmc/host/s3cmci.c  50
-rw-r--r--  drivers/mmc/host/sdhci.c  167
-rw-r--r--  drivers/mmc/host/sdhci.h  7
-rw-r--r--  drivers/mtd/maps/uclinux.c  1
-rw-r--r--  drivers/mtd/nand/cmx270_nand.c  79
-rw-r--r--  drivers/net/e1000e/netdev.c  2
-rw-r--r--  drivers/net/mlx4/eq.c  1
-rw-r--r--  drivers/net/sky2.c  5
-rw-r--r--  drivers/net/smc91x.c  94
-rw-r--r--  drivers/net/smc91x.h  76
-rw-r--r--  drivers/pcmcia/Kconfig  3
-rw-r--r--  drivers/pcmcia/Makefile  1
-rw-r--r--  drivers/pcmcia/electra_cf.c  1
-rw-r--r--  drivers/pcmcia/pxa2xx_cm_x270.c  93
-rw-r--r--  drivers/pcmcia/pxa2xx_palmtx.c  118
-rw-r--r--  drivers/power/Kconfig  6
-rw-r--r--  drivers/power/Makefile  1
-rw-r--r--  drivers/power/palmtx_battery.c  198
-rw-r--r--  drivers/rtc/Kconfig  19
-rw-r--r--  drivers/rtc/Makefile  2
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c  16
-rw-r--r--  drivers/rtc/rtc-cmos.c  294
-rw-r--r--  drivers/rtc/rtc-dev.c  58
-rw-r--r--  drivers/rtc/rtc-ds1305.c  847
-rw-r--r--  drivers/rtc/rtc-m41t80.c  20
-rw-r--r--  drivers/rtc/rtc-m41t94.c  173
-rw-r--r--  drivers/rtc/rtc-omap.c  21
-rw-r--r--  drivers/rtc/rtc-pcf8583.c  129
-rw-r--r--  drivers/rtc/rtc-s3c.c  89
-rw-r--r--  drivers/rtc/rtc-vr41xx.c  65
-rw-r--r--  drivers/scsi/ide-scsi.c  32
-rw-r--r--  drivers/scsi/sun_esp.c  1
-rw-r--r--  drivers/serial/8250.c  17
-rw-r--r--  drivers/serial/8250_gsc.c  2
-rw-r--r--  drivers/serial/8250_pci.c  17
-rw-r--r--  drivers/serial/Kconfig  16
-rw-r--r--  drivers/serial/cpm_uart/cpm_uart_core.c  116
-rw-r--r--  drivers/serial/dz.c  24
-rw-r--r--  drivers/serial/mpsc.c  148
-rw-r--r--  drivers/serial/zs.c  21
-rw-r--r--  drivers/spi/Kconfig  45
-rw-r--r--  drivers/spi/au1550_spi.c  207
-rw-r--r--  drivers/spi/spi.c  4
-rw-r--r--  drivers/spi/spi_mpc83xx.c  29
-rw-r--r--  drivers/spi/spidev.c  19
-rw-r--r--  drivers/spi/xilinx_spi.c  5
-rw-r--r--  drivers/usb/gadget/pxa25x_udc.c  6
-rw-r--r--  drivers/video/Kconfig  53
-rw-r--r--  drivers/video/Makefile  5
-rw-r--r--  drivers/video/acornfb.c  1
-rw-r--r--  drivers/video/amifb.c  24
-rw-r--r--  drivers/video/atafb.c  7
-rw-r--r--  drivers/video/atmel_lcdfb.c  92
-rw-r--r--  drivers/video/aty/aty128fb.c  6
-rw-r--r--  drivers/video/aty/atyfb_base.c  100
-rw-r--r--  drivers/video/aty/radeon_base.c  20
-rw-r--r--  drivers/video/backlight/Kconfig  36
-rw-r--r--  drivers/video/backlight/Makefile  6
-rw-r--r--  drivers/video/backlight/atmel-pwm-bl.c  244
-rw-r--r--  drivers/video/backlight/ili9320.c  330
-rw-r--r--  drivers/video/backlight/ili9320.h  80
-rw-r--r--  drivers/video/backlight/lcd.c  2
-rw-r--r--  drivers/video/backlight/platform_lcd.c  172
-rw-r--r--  drivers/video/backlight/vgg2432a4.c  284
-rw-r--r--  drivers/video/bf54x-lq043fb.c  2
-rw-r--r--  drivers/video/bfin-t350mcqb-fb.c  2
-rw-r--r--  drivers/video/carminefb.c  790
-rw-r--r--  drivers/video/carminefb.h  64
-rw-r--r--  drivers/video/carminefb_regs.h  159
-rw-r--r--  drivers/video/cobalt_lcdfb.c  371
-rw-r--r--  drivers/video/console/fbcon.c  5
-rw-r--r--  drivers/video/console/fbcon.h  8
-rw-r--r--  drivers/video/console/mdacon.c  4
-rw-r--r--  drivers/video/fbmem.c  10
-rw-r--r--  drivers/video/fbmon.c  2
-rw-r--r--  drivers/video/fsl-diu-fb.c  60
-rw-r--r--  drivers/video/geode/lxfb.h  2
-rw-r--r--  drivers/video/geode/lxfb_ops.c  28
-rw-r--r--  drivers/video/hgafb.c  36
-rw-r--r--  drivers/video/imxfb.c  1
-rw-r--r--  drivers/video/neofb.c  215
-rw-r--r--  drivers/video/omap/dispc.c  1
-rw-r--r--  drivers/video/omap/omapfb_main.c  1
-rw-r--r--  drivers/video/pxafb.c  72
-rw-r--r--  drivers/video/pxafb.h  2
-rw-r--r--  drivers/video/sa1100fb.c  8
-rw-r--r--  drivers/video/sa1100fb.h  2
-rw-r--r--  drivers/video/sh7760fb.c  658
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c  725
-rw-r--r--  drivers/video/sis/init.h  1
-rw-r--r--  drivers/video/sis/init301.h  1
-rw-r--r--  drivers/video/sis/initextlfb.c  1
-rw-r--r--  drivers/video/sis/osdef.h  1
-rw-r--r--  drivers/video/sis/sis.h  22
-rw-r--r--  drivers/video/sis/sis_accel.c  1
-rw-r--r--  drivers/video/sis/sis_main.c  44
-rw-r--r--  drivers/video/sis/sis_main.h  4
-rw-r--r--  drivers/video/sis/vgatypes.h  4
-rw-r--r--  drivers/video/skeletonfb.c  37
-rw-r--r--  drivers/video/sm501fb.c  329
-rw-r--r--  drivers/video/tdfxfb.c  8
-rw-r--r--  drivers/video/tridentfb.c  1350
-rw-r--r--  drivers/video/uvesafb.c  4
-rw-r--r--  drivers/video/vfb.c  14
-rw-r--r--  drivers/video/vga16fb.c  122
-rw-r--r--  drivers/watchdog/Kconfig  2
-rw-r--r--  drivers/watchdog/Makefile  2
-rw-r--r--  drivers/zorro/zorro-sysfs.c  1
245 files changed, 15859 insertions, 4493 deletions
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0622ace05220..a2c3f9cfa549 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
827static int acpi_processor_get_throttling(struct acpi_processor *pr) 827static int acpi_processor_get_throttling(struct acpi_processor *pr)
828{ 828{
829 cpumask_t saved_mask; 829 cpumask_t saved_mask;
830 cpumask_of_cpu_ptr_declare(new_mask);
830 int ret; 831 int ret;
831 832
832 if (!pr) 833 if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
838 * Migrate task to the cpu pointed by pr. 839 * Migrate task to the cpu pointed by pr.
839 */ 840 */
840 saved_mask = current->cpus_allowed; 841 saved_mask = current->cpus_allowed;
841 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 842 cpumask_of_cpu_ptr_next(new_mask, pr->id);
843 set_cpus_allowed_ptr(current, new_mask);
842 ret = pr->throttling.acpi_processor_get_throttling(pr); 844 ret = pr->throttling.acpi_processor_get_throttling(pr);
843 /* restore the previous state */ 845 /* restore the previous state */
844 set_cpus_allowed_ptr(current, &saved_mask); 846 set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
987int acpi_processor_set_throttling(struct acpi_processor *pr, int state) 989int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
988{ 990{
989 cpumask_t saved_mask; 991 cpumask_t saved_mask;
992 cpumask_of_cpu_ptr_declare(new_mask);
990 int ret = 0; 993 int ret = 0;
991 unsigned int i; 994 unsigned int i;
992 struct acpi_processor *match_pr; 995 struct acpi_processor *match_pr;
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1013 * affected cpu in order to get one proper T-state. 1016 * affected cpu in order to get one proper T-state.
1014 * The notifier event is THROTTLING_PRECHANGE. 1017 * The notifier event is THROTTLING_PRECHANGE.
1015 */ 1018 */
1016 for_each_cpu_mask(i, online_throttling_cpus) { 1019 for_each_cpu_mask_nr(i, online_throttling_cpus) {
1017 t_state.cpu = i; 1020 t_state.cpu = i;
1018 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, 1021 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
1019 &t_state); 1022 &t_state);
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1025 * it can be called only for the cpu pointed by pr. 1028 * it can be called only for the cpu pointed by pr.
1026 */ 1029 */
1027 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1030 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1028 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); 1031 cpumask_of_cpu_ptr_next(new_mask, pr->id);
1032 set_cpus_allowed_ptr(current, new_mask);
1029 ret = p_throttling->acpi_processor_set_throttling(pr, 1033 ret = p_throttling->acpi_processor_set_throttling(pr,
1030 t_state.target_state); 1034 t_state.target_state);
1031 } else { 1035 } else {
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1034 * it is necessary to set T-state for every affected 1038 * it is necessary to set T-state for every affected
1035 * cpus. 1039 * cpus.
1036 */ 1040 */
1037 for_each_cpu_mask(i, online_throttling_cpus) { 1041 for_each_cpu_mask_nr(i, online_throttling_cpus) {
1038 match_pr = per_cpu(processors, i); 1042 match_pr = per_cpu(processors, i);
1039 /* 1043 /*
1040 * If the pointer is invalid, we will report the 1044 * If the pointer is invalid, we will report the
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1056 continue; 1060 continue;
1057 } 1061 }
1058 t_state.cpu = i; 1062 t_state.cpu = i;
1059 set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); 1063 cpumask_of_cpu_ptr_next(new_mask, i);
1064 set_cpus_allowed_ptr(current, new_mask);
1060 ret = match_pr->throttling. 1065 ret = match_pr->throttling.
1061 acpi_processor_set_throttling( 1066 acpi_processor_set_throttling(
1062 match_pr, t_state.target_state); 1067 match_pr, t_state.target_state);
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
1068 * affected cpu to update the T-states. 1073 * affected cpu to update the T-states.
1069 * The notifier event is THROTTLING_POSTCHANGE 1074 * The notifier event is THROTTLING_POSTCHANGE
1070 */ 1075 */
1071 for_each_cpu_mask(i, online_throttling_cpus) { 1076 for_each_cpu_mask_nr(i, online_throttling_cpus) {
1072 t_state.cpu = i; 1077 t_state.cpu = i;
1073 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, 1078 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
1074 &t_state); 1079 &t_state);
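For context on the hunks above: the throttling code temporarily migrates the current task onto the CPU whose T-state interface it needs to touch, does the per-CPU work, then restores the old affinity. A minimal sketch of that save/migrate/restore idiom, written against the plain cpumask API this patch is moving away from; the helper name do_on_cpu is invented for illustration, and the cpumask_of_cpu_ptr_* helpers adopted above serve the same purpose while avoiding a full cpumask_t temporary on the stack.

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Invented helper: run fn() with the current task bound to one CPU. */
static int do_on_cpu(int cpu, int (*fn)(void *data), void *data)
{
	cpumask_t saved_mask = current->cpus_allowed;	/* remember old affinity */
	cpumask_t new_mask = cpumask_of_cpu(cpu);	/* single-CPU mask */
	int ret;

	set_cpus_allowed_ptr(current, &new_mask);	/* migrate to the target CPU */
	ret = fn(data);					/* per-CPU work happens here */
	set_cpus_allowed_ptr(current, &saved_mask);	/* put the old affinity back */
	return ret;
}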
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 0489a7d1d42c..d13194a031bf 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -280,9 +280,36 @@ static struct platform_suspend_ops acpi_suspend_ops_old = {
280 .end = acpi_pm_end, 280 .end = acpi_pm_end,
281 .recover = acpi_pm_finish, 281 .recover = acpi_pm_finish,
282}; 282};
283
284static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
285{
286 old_suspend_ordering = true;
287 return 0;
288}
289
290static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
291 {
292 .callback = init_old_suspend_ordering,
293 .ident = "Abit KN9 (nForce4 variant)",
294 .matches = {
295 DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
296 DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
297 },
298 },
299 {},
300};
283#endif /* CONFIG_SUSPEND */ 301#endif /* CONFIG_SUSPEND */
284 302
285#ifdef CONFIG_HIBERNATION 303#ifdef CONFIG_HIBERNATION
304static unsigned long s4_hardware_signature;
305static struct acpi_table_facs *facs;
306static bool nosigcheck;
307
308void __init acpi_no_s4_hw_signature(void)
309{
310 nosigcheck = true;
311}
312
286static int acpi_hibernation_begin(void) 313static int acpi_hibernation_begin(void)
287{ 314{
288 acpi_target_sleep_state = ACPI_STATE_S4; 315 acpi_target_sleep_state = ACPI_STATE_S4;
@@ -316,6 +343,12 @@ static void acpi_hibernation_leave(void)
316 acpi_enable(); 343 acpi_enable();
317 /* Reprogram control registers and execute _BFS */ 344 /* Reprogram control registers and execute _BFS */
318 acpi_leave_sleep_state_prep(ACPI_STATE_S4); 345 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
346 /* Check the hardware signature */
347 if (facs && s4_hardware_signature != facs->hardware_signature) {
348 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
349 "cannot resume!\n");
350 panic("ACPI S4 hardware signature mismatch");
351 }
319} 352}
320 353
321static void acpi_pm_enable_gpes(void) 354static void acpi_pm_enable_gpes(void)
@@ -516,6 +549,8 @@ int __init acpi_sleep_init(void)
516 u8 type_a, type_b; 549 u8 type_a, type_b;
517#ifdef CONFIG_SUSPEND 550#ifdef CONFIG_SUSPEND
518 int i = 0; 551 int i = 0;
552
553 dmi_check_system(acpisleep_dmi_table);
519#endif 554#endif
520 555
521 if (acpi_disabled) 556 if (acpi_disabled)
@@ -544,6 +579,13 @@ int __init acpi_sleep_init(void)
544 &acpi_hibernation_ops_old : &acpi_hibernation_ops); 579 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
545 sleep_states[ACPI_STATE_S4] = 1; 580 sleep_states[ACPI_STATE_S4] = 1;
546 printk(" S4"); 581 printk(" S4");
582 if (!nosigcheck) {
583 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
584 (struct acpi_table_header **)&facs);
585 if (facs)
586 s4_hardware_signature =
587 facs->hardware_signature;
588 }
547 } 589 }
548#endif 590#endif
549 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); 591 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
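The sleep/main.c addition registers a DMI quirk table so that a board known to need the old suspend ordering gets old_suspend_ordering set automatically at boot. A minimal sketch of the same dmi_check_system() pattern, with placeholder board strings and an invented example_quirk flag.

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/kernel.h>

static bool example_quirk;	/* hypothetical flag set by the quirk */

static int __init enable_example_quirk(const struct dmi_system_id *d)
{
	example_quirk = true;
	printk(KERN_INFO "Applying quirk for %s\n", d->ident);
	return 0;
}

static struct dmi_system_id __initdata example_dmi_table[] = {
	{
		.callback = enable_example_quirk,
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "Example Model"),
		},
	},
	{ }	/* terminating entry */
};

/* called once during subsystem init, as acpi_sleep_init() does above */
static void __init example_check_dmi(void)
{
	dmi_check_system(example_dmi_table);	/* runs callbacks for matching boards */
}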
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index d8e3f153b295..91dec448b3ed 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -26,6 +26,7 @@
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/seq_file.h> 27#include <linux/seq_file.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/string.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30 31
31#include <acpi/acpi_drivers.h> 32#include <acpi/acpi_drivers.h>
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index d5c1bbfbe79d..73338d231db9 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2562,7 +2562,8 @@ fore200e_load_and_start_fw(struct fore200e* fore200e)
2562 const struct firmware *firmware; 2562 const struct firmware *firmware;
2563 struct device *device; 2563 struct device *device;
2564 struct fw_header *fw_header; 2564 struct fw_header *fw_header;
2565 u32 *fw_data, fw_size; 2565 const __le32 *fw_data;
2566 u32 fw_size;
2566 u32 __iomem *load_addr; 2567 u32 __iomem *load_addr;
2567 char buf[48]; 2568 char buf[48];
2568 int err = -ENODEV; 2569 int err = -ENODEV;
@@ -2582,7 +2583,7 @@ fore200e_load_and_start_fw(struct fore200e* fore200e)
2582 return err; 2583 return err;
2583 } 2584 }
2584 2585
2585 fw_data = (u32 *) firmware->data; 2586 fw_data = (__le32 *) firmware->data;
2586 fw_size = firmware->size / sizeof(u32); 2587 fw_size = firmware->size / sizeof(u32);
2587 fw_header = (struct fw_header *) firmware->data; 2588 fw_header = (struct fw_header *) firmware->data;
2588 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset); 2589 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
@@ -3199,6 +3200,14 @@ static const struct fore200e_bus fore200e_bus[] = {
3199 {} 3200 {}
3200}; 3201};
3201 3202
3202#ifdef MODULE_LICENSE
3203MODULE_LICENSE("GPL"); 3203MODULE_LICENSE("GPL");
3204#ifdef CONFIG_PCI
3205#ifdef __LITTLE_ENDIAN__
3206MODULE_FIRMWARE("pca200e.bin");
3207#else
3208MODULE_FIRMWARE("pca200e_ecd.bin2");
3209#endif
3210#endif /* CONFIG_PCI */
3211#ifdef CONFIG_SBUS
3212MODULE_FIRMWARE("sba200e_ecd.bin2");
3204#endif 3213#endif
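The fore200e hunk adds MODULE_FIRMWARE() annotations, which advertise the firmware images a driver may request at runtime so packaging and initramfs tools can pick them up. A hedged sketch of the request_firmware()/release_firmware() sequence such annotations normally pair with; the device pointer and image name are illustrative.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

MODULE_FIRMWARE("example_fw.bin");	/* advertise the image we may load */

static int example_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* ask userspace (udev firmware loader) for the image by name */
	err = request_firmware(&fw, "example_fw.bin", dev);
	if (err)
		return err;

	/* fw->data / fw->size now hold the image; upload it to the device */
	/* ... device-specific copy of fw->data, fw->size bytes ... */

	release_firmware(fw);	/* always drop the firmware when done */
	return 0;
}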
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 683509f013ab..eacb175f6bd3 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -336,16 +336,9 @@ static int __init cfag12864b_init(void)
336 "ks0108 is not initialized\n"); 336 "ks0108 is not initialized\n");
337 goto none; 337 goto none;
338 } 338 }
339 BUILD_BUG_ON(PAGE_SIZE < CFAG12864B_SIZE);
339 340
340 if (PAGE_SIZE < CFAG12864B_SIZE) { 341 cfag12864b_buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
341 printk(KERN_ERR CFAG12864B_NAME ": ERROR: "
342 "page size (%i) < cfag12864b size (%i)\n",
343 (unsigned int)PAGE_SIZE, CFAG12864B_SIZE);
344 ret = -ENOMEM;
345 goto none;
346 }
347
348 cfag12864b_buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
349 if (cfag12864b_buffer == NULL) { 342 if (cfag12864b_buffer == NULL) {
350 printk(KERN_ERR CFAG12864B_NAME ": ERROR: " 343 printk(KERN_ERR CFAG12864B_NAME ": ERROR: "
351 "can't get a free page\n"); 344 "can't get a free page\n");
@@ -367,8 +360,6 @@ static int __init cfag12864b_init(void)
367 if (cfag12864b_workqueue == NULL) 360 if (cfag12864b_workqueue == NULL)
368 goto cachealloced; 361 goto cachealloced;
369 362
370 memset(cfag12864b_buffer, 0, CFAG12864B_SIZE);
371
372 cfag12864b_clear(); 363 cfag12864b_clear();
373 cfag12864b_on(); 364 cfag12864b_on();
374 365
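The cfag12864b change replaces a runtime page-size check with a compile-time BUILD_BUG_ON() and switches to get_zeroed_page(), which makes the later memset() redundant. A small sketch of that allocation idiom; MY_BUFFER_SIZE is a placeholder constant.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/kernel.h>

#define MY_BUFFER_SIZE 1024	/* placeholder; must fit in one page */

static unsigned char *my_buffer;

static int __init my_alloc_buffer(void)
{
	/* fails the build, rather than the boot, if the buffer cannot fit */
	BUILD_BUG_ON(PAGE_SIZE < MY_BUFFER_SIZE);

	/* the page comes back already zeroed, so no memset() is needed */
	my_buffer = (unsigned char *)get_zeroed_page(GFP_KERNEL);
	if (!my_buffer)
		return -ENOMEM;
	return 0;
}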
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 20537d507909..64f5d54f7edc 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -121,14 +121,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
121{ \ 121{ \
122 return print_cpus_map(buf, &cpu_##type##_map); \ 122 return print_cpus_map(buf, &cpu_##type##_map); \
123} \ 123} \
124struct sysdev_class_attribute attr_##type##_map = \ 124static struct sysdev_class_attribute attr_##type##_map = \
125 _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) 125 _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
126 126
127print_cpus_func(online); 127print_cpus_func(online);
128print_cpus_func(possible); 128print_cpus_func(possible);
129print_cpus_func(present); 129print_cpus_func(present);
130 130
131struct sysdev_class_attribute *cpu_state_attr[] = { 131static struct sysdev_class_attribute *cpu_state_attr[] = {
132 &attr_online_map, 132 &attr_online_map,
133 &attr_possible_map, 133 &attr_possible_map,
134 &attr_present_map, 134 &attr_present_map,
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 4d4e0e7b6e92..855ed1a9f97b 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -101,6 +101,21 @@ static ssize_t show_mem_phys_index(struct sys_device *dev,
101} 101}
102 102
103/* 103/*
104 * Show whether the section of memory is likely to be hot-removable
105 */
106static ssize_t show_mem_removable(struct sys_device *dev, char *buf)
107{
108 unsigned long start_pfn;
109 int ret;
110 struct memory_block *mem =
111 container_of(dev, struct memory_block, sysdev);
112
113 start_pfn = section_nr_to_pfn(mem->phys_index);
114 ret = is_mem_section_removable(start_pfn, PAGES_PER_SECTION);
115 return sprintf(buf, "%d\n", ret);
116}
117
118/*
104 * online, offline, going offline, etc. 119 * online, offline, going offline, etc.
105 */ 120 */
106static ssize_t show_mem_state(struct sys_device *dev, 121static ssize_t show_mem_state(struct sys_device *dev,
@@ -262,6 +277,7 @@ static ssize_t show_phys_device(struct sys_device *dev,
262static SYSDEV_ATTR(phys_index, 0444, show_mem_phys_index, NULL); 277static SYSDEV_ATTR(phys_index, 0444, show_mem_phys_index, NULL);
263static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state); 278static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
264static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL); 279static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
280static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
265 281
266#define mem_create_simple_file(mem, attr_name) \ 282#define mem_create_simple_file(mem, attr_name) \
267 sysdev_create_file(&mem->sysdev, &attr_##attr_name) 283 sysdev_create_file(&mem->sysdev, &attr_##attr_name)
@@ -350,6 +366,8 @@ static int add_memory_block(unsigned long node_id, struct mem_section *section,
350 ret = mem_create_simple_file(mem, state); 366 ret = mem_create_simple_file(mem, state);
351 if (!ret) 367 if (!ret)
352 ret = mem_create_simple_file(mem, phys_device); 368 ret = mem_create_simple_file(mem, phys_device);
369 if (!ret)
370 ret = mem_create_simple_file(mem, removable);
353 371
354 return ret; 372 return ret;
355} 373}
@@ -394,6 +412,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
394 mem_remove_simple_file(mem, phys_index); 412 mem_remove_simple_file(mem, phys_index);
395 mem_remove_simple_file(mem, state); 413 mem_remove_simple_file(mem, state);
396 mem_remove_simple_file(mem, phys_device); 414 mem_remove_simple_file(mem, phys_device);
415 mem_remove_simple_file(mem, removable);
397 unregister_memory(mem, section); 416 unregister_memory(mem, section);
398 417
399 return 0; 418 return 0;
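The memory.c hunks export a new per-section removable attribute through the sysdev interface in three steps: a show() routine, a SYSDEV_ATTR() declaration, and a sysdev_create_file() call at registration time. A generic sketch of the same pattern; every example_ name is invented.

#include <linux/kernel.h>
#include <linux/sysdev.h>

/* 1) the show() routine: report a single value as text */
static ssize_t example_show_flag(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%d\n", 1);	/* placeholder value */
}

/* 2) declare the attribute: read-only, so no store() routine */
static SYSDEV_ATTR(flag, 0444, example_show_flag, NULL);

/* 3) attach it when the sysdev is registered */
static int example_add_files(struct sys_device *sysdev)
{
	return sysdev_create_file(sysdev, &attr_flag);
}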
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e0bbbfb6a36b..67b07576f8bf 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -857,13 +857,6 @@ config DS1302
857 857
858endif # RTC_LIB 858endif # RTC_LIB
859 859
860config COBALT_LCD
861 bool "Support for Cobalt LCD"
862 depends on MIPS_COBALT
863 help
864 This option enables support for the LCD display and buttons found
865 on Cobalt systems through a misc device.
866
867config DTLK 860config DTLK
868 tristate "Double Talk PC internal speech card support" 861 tristate "Double Talk PC internal speech card support"
869 depends on ISA 862 depends on ISA
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index dc5a327d72d5..4b6e736cfa02 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -88,7 +88,6 @@ obj-$(CONFIG_TOSHIBA) += toshiba.o
88obj-$(CONFIG_I8K) += i8k.o 88obj-$(CONFIG_I8K) += i8k.o
89obj-$(CONFIG_DS1620) += ds1620.o 89obj-$(CONFIG_DS1620) += ds1620.o
90obj-$(CONFIG_HW_RANDOM) += hw_random/ 90obj-$(CONFIG_HW_RANDOM) += hw_random/
91obj-$(CONFIG_COBALT_LCD) += lcd.o
92obj-$(CONFIG_PPDEV) += ppdev.o 91obj-$(CONFIG_PPDEV) += ppdev.o
93obj-$(CONFIG_NWBUTTON) += nwbutton.o 92obj-$(CONFIG_NWBUTTON) += nwbutton.o
94obj-$(CONFIG_NWFLASH) += nwflash.o 93obj-$(CONFIG_NWFLASH) += nwflash.o
diff --git a/drivers/char/lcd.c b/drivers/char/lcd.c
deleted file mode 100644
index 1c29b20e4f4c..000000000000
--- a/drivers/char/lcd.c
+++ /dev/null
@@ -1,516 +0,0 @@
1/*
2 * LCD, LED and Button interface for Cobalt
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1997 by Andrew Bose
9 *
10 * Linux kernel version history:
11 * March 2001: Ported from 2.0.34 by Liam Davies
12 *
13 */
14#include <linux/types.h>
15#include <linux/errno.h>
16#include <linux/miscdevice.h>
17#include <linux/slab.h>
18#include <linux/ioport.h>
19#include <linux/fcntl.h>
20#include <linux/mc146818rtc.h>
21#include <linux/netdevice.h>
22#include <linux/sched.h>
23#include <linux/smp_lock.h>
24#include <linux/delay.h>
25
26#include <asm/io.h>
27#include <asm/uaccess.h>
28#include <asm/system.h>
29
30#include "lcd.h"
31
32static int lcd_ioctl(struct inode *inode, struct file *file,
33 unsigned int cmd, unsigned long arg);
34
35static unsigned int lcd_present = 1;
36
37/* used in arch/mips/cobalt/reset.c */
38int led_state = 0;
39
40#if defined(CONFIG_TULIP) && 0
41
42#define MAX_INTERFACES 8
43static linkcheck_func_t linkcheck_callbacks[MAX_INTERFACES];
44static void *linkcheck_cookies[MAX_INTERFACES];
45
46int lcd_register_linkcheck_func(int iface_num, void *func, void *cookie)
47{
48 if (iface_num < 0 ||
49 iface_num >= MAX_INTERFACES ||
50 linkcheck_callbacks[iface_num] != NULL)
51 return -1;
52 linkcheck_callbacks[iface_num] = (linkcheck_func_t) func;
53 linkcheck_cookies[iface_num] = cookie;
54 return 0;
55}
56#endif
57
58static int lcd_ioctl(struct inode *inode, struct file *file,
59 unsigned int cmd, unsigned long arg)
60{
61 struct lcd_display button_display;
62 unsigned long address, a;
63
64 switch (cmd) {
65 case LCD_On:
66 udelay(150);
67 BusyCheck();
68 LCDWriteInst(0x0F);
69 break;
70
71 case LCD_Off:
72 udelay(150);
73 BusyCheck();
74 LCDWriteInst(0x08);
75 break;
76
77 case LCD_Reset:
78 udelay(150);
79 LCDWriteInst(0x3F);
80 udelay(150);
81 LCDWriteInst(0x3F);
82 udelay(150);
83 LCDWriteInst(0x3F);
84 udelay(150);
85 LCDWriteInst(0x3F);
86 udelay(150);
87 LCDWriteInst(0x01);
88 udelay(150);
89 LCDWriteInst(0x06);
90 break;
91
92 case LCD_Clear:
93 udelay(150);
94 BusyCheck();
95 LCDWriteInst(0x01);
96 break;
97
98 case LCD_Cursor_Left:
99 udelay(150);
100 BusyCheck();
101 LCDWriteInst(0x10);
102 break;
103
104 case LCD_Cursor_Right:
105 udelay(150);
106 BusyCheck();
107 LCDWriteInst(0x14);
108 break;
109
110 case LCD_Cursor_Off:
111 udelay(150);
112 BusyCheck();
113 LCDWriteInst(0x0C);
114 break;
115
116 case LCD_Cursor_On:
117 udelay(150);
118 BusyCheck();
119 LCDWriteInst(0x0F);
120 break;
121
122 case LCD_Blink_Off:
123 udelay(150);
124 BusyCheck();
125 LCDWriteInst(0x0E);
126 break;
127
128 case LCD_Get_Cursor_Pos:{
129 struct lcd_display display;
130
131 udelay(150);
132 BusyCheck();
133 display.cursor_address = (LCDReadInst);
134 display.cursor_address =
135 (display.cursor_address & 0x07F);
136 if (copy_to_user
137 ((struct lcd_display *) arg, &display,
138 sizeof(struct lcd_display)))
139 return -EFAULT;
140
141 break;
142 }
143
144
145 case LCD_Set_Cursor_Pos:{
146 struct lcd_display display;
147
148 if (copy_from_user
149 (&display, (struct lcd_display *) arg,
150 sizeof(struct lcd_display)))
151 return -EFAULT;
152
153 a = (display.cursor_address | kLCD_Addr);
154
155 udelay(150);
156 BusyCheck();
157 LCDWriteInst(a);
158
159 break;
160 }
161
162 case LCD_Get_Cursor:{
163 struct lcd_display display;
164
165 udelay(150);
166 BusyCheck();
167 display.character = LCDReadData;
168
169 if (copy_to_user
170 ((struct lcd_display *) arg, &display,
171 sizeof(struct lcd_display)))
172 return -EFAULT;
173 udelay(150);
174 BusyCheck();
175 LCDWriteInst(0x10);
176
177 break;
178 }
179
180 case LCD_Set_Cursor:{
181 struct lcd_display display;
182
183 if (copy_from_user
184 (&display, (struct lcd_display *) arg,
185 sizeof(struct lcd_display)))
186 return -EFAULT;
187
188 udelay(150);
189 BusyCheck();
190 LCDWriteData(display.character);
191 udelay(150);
192 BusyCheck();
193 LCDWriteInst(0x10);
194
195 break;
196 }
197
198
199 case LCD_Disp_Left:
200 udelay(150);
201 BusyCheck();
202 LCDWriteInst(0x18);
203 break;
204
205 case LCD_Disp_Right:
206 udelay(150);
207 BusyCheck();
208 LCDWriteInst(0x1C);
209 break;
210
211 case LCD_Home:
212 udelay(150);
213 BusyCheck();
214 LCDWriteInst(0x02);
215 break;
216
217 case LCD_Write:{
218 struct lcd_display display;
219 unsigned int index;
220
221
222 if (copy_from_user
223 (&display, (struct lcd_display *) arg,
224 sizeof(struct lcd_display)))
225 return -EFAULT;
226
227 udelay(150);
228 BusyCheck();
229 LCDWriteInst(0x80);
230 udelay(150);
231 BusyCheck();
232
233 for (index = 0; index < (display.size1); index++) {
234 udelay(150);
235 BusyCheck();
236 LCDWriteData(display.line1[index]);
237 BusyCheck();
238 }
239
240 udelay(150);
241 BusyCheck();
242 LCDWriteInst(0xC0);
243 udelay(150);
244 BusyCheck();
245 for (index = 0; index < (display.size2); index++) {
246 udelay(150);
247 BusyCheck();
248 LCDWriteData(display.line2[index]);
249 }
250
251 break;
252 }
253
254 case LCD_Read:{
255 struct lcd_display display;
256
257 BusyCheck();
258 for (address = kDD_R00; address <= kDD_R01;
259 address++) {
260 a = (address | kLCD_Addr);
261
262 udelay(150);
263 BusyCheck();
264 LCDWriteInst(a);
265 udelay(150);
266 BusyCheck();
267 display.line1[address] = LCDReadData;
268 }
269
270 display.line1[0x27] = '\0';
271
272 for (address = kDD_R10; address <= kDD_R11;
273 address++) {
274 a = (address | kLCD_Addr);
275
276 udelay(150);
277 BusyCheck();
278 LCDWriteInst(a);
279
280 udelay(150);
281 BusyCheck();
282 display.line2[address - 0x40] =
283 LCDReadData;
284 }
285
286 display.line2[0x27] = '\0';
287
288 if (copy_to_user
289 ((struct lcd_display *) arg, &display,
290 sizeof(struct lcd_display)))
291 return -EFAULT;
292 break;
293 }
294
295// set all GPIO leds to led_display.leds
296
297 case LED_Set:{
298 struct lcd_display led_display;
299
300
301 if (copy_from_user
302 (&led_display, (struct lcd_display *) arg,
303 sizeof(struct lcd_display)))
304 return -EFAULT;
305
306 led_state = led_display.leds;
307 LEDSet(led_state);
308
309 break;
310 }
311
312
313// set only bit led_display.leds
314
315 case LED_Bit_Set:{
316 unsigned int i;
317 int bit = 1;
318 struct lcd_display led_display;
319
320
321 if (copy_from_user
322 (&led_display, (struct lcd_display *) arg,
323 sizeof(struct lcd_display)))
324 return -EFAULT;
325
326 for (i = 0; i < (int) led_display.leds; i++) {
327 bit = 2 * bit;
328 }
329
330 led_state = led_state | bit;
331 LEDSet(led_state);
332 break;
333 }
334
335// clear only bit led_display.leds
336
337 case LED_Bit_Clear:{
338 unsigned int i;
339 int bit = 1;
340 struct lcd_display led_display;
341
342
343 if (copy_from_user
344 (&led_display, (struct lcd_display *) arg,
345 sizeof(struct lcd_display)))
346 return -EFAULT;
347
348 for (i = 0; i < (int) led_display.leds; i++) {
349 bit = 2 * bit;
350 }
351
352 led_state = led_state & ~bit;
353 LEDSet(led_state);
354 break;
355 }
356
357
358 case BUTTON_Read:{
359 button_display.buttons = GPIRead;
360 if (copy_to_user
361 ((struct lcd_display *) arg, &button_display,
362 sizeof(struct lcd_display)))
363 return -EFAULT;
364 break;
365 }
366
367 case LINK_Check:{
368 button_display.buttons =
369 *((volatile unsigned long *) (0xB0100060));
370 if (copy_to_user
371 ((struct lcd_display *) arg, &button_display,
372 sizeof(struct lcd_display)))
373 return -EFAULT;
374 break;
375 }
376
377 case LINK_Check_2:{
378 int iface_num;
379
380 /* panel-utils should pass in the desired interface status is wanted for
381 * in "buttons" of the structure. We will set this to non-zero if the
382 * link is in fact up for the requested interface. --DaveM
383 */
384 if (copy_from_user
385 (&button_display, (struct lcd_display *) arg,
386 sizeof(button_display)))
387 return -EFAULT;
388 iface_num = button_display.buttons;
389#if defined(CONFIG_TULIP) && 0
390 if (iface_num >= 0 &&
391 iface_num < MAX_INTERFACES &&
392 linkcheck_callbacks[iface_num] != NULL) {
393 button_display.buttons =
394 linkcheck_callbacks[iface_num]
395 (linkcheck_cookies[iface_num]);
396 } else
397#endif
398 button_display.buttons = 0;
399
400 if (__copy_to_user
401 ((struct lcd_display *) arg, &button_display,
402 sizeof(struct lcd_display)))
403 return -EFAULT;
404 break;
405 }
406
407 default:
408 return -EINVAL;
409
410 }
411
412 return 0;
413
414}
415
416static int lcd_open(struct inode *inode, struct file *file)
417{
418 cycle_kernel_lock();
419
420 if (!lcd_present)
421 return -ENXIO;
422 else
423 return 0;
424}
425
426/* Only RESET or NEXT counts as button pressed */
427
428static inline int button_pressed(void)
429{
430 unsigned long buttons = GPIRead;
431
432 if ((buttons == BUTTON_Next) || (buttons == BUTTON_Next_B)
433 || (buttons == BUTTON_Reset_B))
434 return buttons;
435 return 0;
436}
437
438/* LED daemon sits on this and we wake him up once a key is pressed. */
439
440static int lcd_waiters = 0;
441
442static ssize_t lcd_read(struct file *file, char *buf,
443 size_t count, loff_t *ofs)
444{
445 long buttons_now;
446
447 if (lcd_waiters > 0)
448 return -EINVAL;
449
450 lcd_waiters++;
451 while (((buttons_now = (long) button_pressed()) == 0) &&
452 !(signal_pending(current))) {
453 msleep_interruptible(2000);
454 }
455 lcd_waiters--;
456
457 if (signal_pending(current))
458 return -ERESTARTSYS;
459 return buttons_now;
460}
461
462/*
463 * The various file operations we support.
464 */
465
466static const struct file_operations lcd_fops = {
467 .read = lcd_read,
468 .ioctl = lcd_ioctl,
469 .open = lcd_open,
470};
471
472static struct miscdevice lcd_dev = {
473 MISC_DYNAMIC_MINOR,
474 "lcd",
475 &lcd_fops
476};
477
478static int lcd_init(void)
479{
480 int ret;
481 unsigned long data;
482
483 pr_info("%s\n", LCD_DRIVER);
484 ret = misc_register(&lcd_dev);
485 if (ret) {
486 printk(KERN_WARNING LCD "Unable to register misc device.\n");
487 return ret;
488 }
489
490 /* Check region? Naaah! Just snarf it up. */
491/* request_region(RTC_PORT(0), RTC_IO_EXTENT, "lcd");*/
492
493 udelay(150);
494 data = LCDReadData;
495 if ((data & 0x000000FF) == (0x00)) {
496 lcd_present = 0;
497 pr_info(LCD "LCD Not Present\n");
498 } else {
499 lcd_present = 1;
500 WRITE_GAL(kGal_DevBank2PReg, kGal_DevBank2Cfg);
501 WRITE_GAL(kGal_DevBank3PReg, kGal_DevBank3Cfg);
502 }
503
504 return 0;
505}
506
507static void __exit lcd_exit(void)
508{
509 misc_deregister(&lcd_dev);
510}
511
512module_init(lcd_init);
513module_exit(lcd_exit);
514
515MODULE_AUTHOR("Andrew Bose");
516MODULE_LICENSE("GPL");
diff --git a/drivers/char/lcd.h b/drivers/char/lcd.h
deleted file mode 100644
index 290b3ff23b03..000000000000
--- a/drivers/char/lcd.h
+++ /dev/null
@@ -1,154 +0,0 @@
1/*
2 * LED, LCD and Button panel driver for Cobalt
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1996, 1997 by Andrew Bose
9 *
10 * Linux kernel version history:
11 * March 2001: Ported from 2.0.34 by Liam Davies
12 *
13 */
14
15// function headers
16
17#define LCD_CHARS_PER_LINE 40
18#define MAX_IDLE_TIME 120
19
20struct lcd_display {
21 unsigned buttons;
22 int size1;
23 int size2;
24 unsigned char line1[LCD_CHARS_PER_LINE];
25 unsigned char line2[LCD_CHARS_PER_LINE];
26 unsigned char cursor_address;
27 unsigned char character;
28 unsigned char leds;
29 unsigned char *RomImage;
30};
31
32
33
34#define LCD_DRIVER "Cobalt LCD Driver v2.10"
35
36#define LCD "lcd: "
37
38#define kLCD_IR 0x0F000000
39#define kLCD_DR 0x0F000010
40#define kGPI 0x0D000000
41#define kLED 0x0C000000
42
43#define kDD_R00 0x00
44#define kDD_R01 0x27
45#define kDD_R10 0x40
46#define kDD_R11 0x67
47
48#define kLCD_Addr 0x00000080
49
50#define LCDTimeoutValue 0xfff
51
52
53// Macros
54
55#define LCDWriteData(x) outl((x << 24), kLCD_DR)
56#define LCDWriteInst(x) outl((x << 24), kLCD_IR)
57
58#define LCDReadData (inl(kLCD_DR) >> 24)
59#define LCDReadInst (inl(kLCD_IR) >> 24)
60
61#define GPIRead (inl(kGPI) >> 24)
62
63#define LEDSet(x) outb((char)x, kLED)
64
65#define WRITE_GAL(x,y) outl(y, 0x04000000 | (x))
66#define BusyCheck() while ((LCDReadInst & 0x80) == 0x80)
67
68
69
70/*
71 * Function command codes for io_ctl.
72 */
73#define LCD_On 1
74#define LCD_Off 2
75#define LCD_Clear 3
76#define LCD_Reset 4
77#define LCD_Cursor_Left 5
78#define LCD_Cursor_Right 6
79#define LCD_Disp_Left 7
80#define LCD_Disp_Right 8
81#define LCD_Get_Cursor 9
82#define LCD_Set_Cursor 10
83#define LCD_Home 11
84#define LCD_Read 12
85#define LCD_Write 13
86#define LCD_Cursor_Off 14
87#define LCD_Cursor_On 15
88#define LCD_Get_Cursor_Pos 16
89#define LCD_Set_Cursor_Pos 17
90#define LCD_Blink_Off 18
91
92#define LED_Set 40
93#define LED_Bit_Set 41
94#define LED_Bit_Clear 42
95
96
97// Button defs
98#define BUTTON_Read 50
99
100
101// Ethernet LINK check hackaroo
102#define LINK_Check 90
103#define LINK_Check_2 91
104
105// Button patterns _B - single layer lcd boards
106
107#define BUTTON_NONE 0x3F
108#define BUTTON_NONE_B 0xFE
109
110#define BUTTON_Left 0x3B
111#define BUTTON_Left_B 0xFA
112
113#define BUTTON_Right 0x37
114#define BUTTON_Right_B 0xDE
115
116#define BUTTON_Up 0x2F
117#define BUTTON_Up_B 0xF6
118
119#define BUTTON_Down 0x1F
120#define BUTTON_Down_B 0xEE
121
122#define BUTTON_Next 0x3D
123#define BUTTON_Next_B 0x7E
124
125#define BUTTON_Enter 0x3E
126#define BUTTON_Enter_B 0xBE
127
128#define BUTTON_Reset_B 0xFC
129
130
131// debounce constants
132
133#define BUTTON_SENSE 160000
134#define BUTTON_DEBOUNCE 5000
135
136
137// Galileo register stuff
138
139#define kGal_DevBank2Cfg 0x1466DB33
140#define kGal_DevBank2PReg 0x464
141#define kGal_DevBank3Cfg 0x146FDFFB
142#define kGal_DevBank3PReg 0x468
143
144// Network
145
146#define kIPADDR 1
147#define kNETMASK 2
148#define kGATEWAY 3
149#define kDNS 4
150
151#define kClassA 5
152#define kClassB 6
153#define kClassC 7
154
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c2dba82eb5f7..672b08e694d0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -327,7 +327,10 @@ static void mmap_mem_close(struct vm_area_struct *vma)
327 327
328static struct vm_operations_struct mmap_mem_ops = { 328static struct vm_operations_struct mmap_mem_ops = {
329 .open = mmap_mem_open, 329 .open = mmap_mem_open,
330 .close = mmap_mem_close 330 .close = mmap_mem_close,
331#ifdef CONFIG_HAVE_IOREMAP_PROT
332 .access = generic_access_phys
333#endif
331}; 334};
332 335
333static int mmap_mem(struct file * file, struct vm_area_struct * vma) 336static int mmap_mem(struct file * file, struct vm_area_struct * vma)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index fe2a95b5d3c0..30f095a8c2d4 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -193,25 +193,23 @@ mspec_close(struct vm_area_struct *vma)
193} 193}
194 194
195/* 195/*
196 * mspec_nopfn 196 * mspec_fault
197 * 197 *
198 * Creates a mspec page and maps it to user space. 198 * Creates a mspec page and maps it to user space.
199 */ 199 */
200static unsigned long 200static int
201mspec_nopfn(struct vm_area_struct *vma, unsigned long address) 201mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
202{ 202{
203 unsigned long paddr, maddr; 203 unsigned long paddr, maddr;
204 unsigned long pfn; 204 unsigned long pfn;
205 int index; 205 pgoff_t index = vmf->pgoff;
206 struct vma_data *vdata = vma->vm_private_data; 206 struct vma_data *vdata = vma->vm_private_data;
207 207
208 BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
209 index = (address - vdata->vm_start) >> PAGE_SHIFT;
210 maddr = (volatile unsigned long) vdata->maddr[index]; 208 maddr = (volatile unsigned long) vdata->maddr[index];
211 if (maddr == 0) { 209 if (maddr == 0) {
212 maddr = uncached_alloc_page(numa_node_id(), 1); 210 maddr = uncached_alloc_page(numa_node_id(), 1);
213 if (maddr == 0) 211 if (maddr == 0)
214 return NOPFN_OOM; 212 return VM_FAULT_OOM;
215 213
216 spin_lock(&vdata->lock); 214 spin_lock(&vdata->lock);
217 if (vdata->maddr[index] == 0) { 215 if (vdata->maddr[index] == 0) {
@@ -231,13 +229,20 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
231 229
232 pfn = paddr >> PAGE_SHIFT; 230 pfn = paddr >> PAGE_SHIFT;
233 231
234 return pfn; 232 /*
233 * vm_insert_pfn can fail with -EBUSY, but in that case it will
234 * be because another thread has installed the pte first, so it
235 * is no problem.
236 */
237 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
238
239 return VM_FAULT_NOPAGE;
235} 240}
236 241
237static struct vm_operations_struct mspec_vm_ops = { 242static struct vm_operations_struct mspec_vm_ops = {
238 .open = mspec_open, 243 .open = mspec_open,
239 .close = mspec_close, 244 .close = mspec_close,
240 .nopfn = mspec_nopfn 245 .fault = mspec_fault,
241}; 246};
242 247
243/* 248/*
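The mspec conversion moves from the old ->nopfn hook to the ->fault interface: the faulting offset arrives in vmf->pgoff and the handler installs the PTE itself with vm_insert_pfn(), returning VM_FAULT_NOPAGE because no struct page is involved. A stripped-down sketch of that shape; example_lookup_pfn() is a hypothetical driver helper.

#include <linux/mm.h>

/* hypothetical driver-specific translation from VMA offset to frame number */
static unsigned long example_lookup_pfn(void *priv, pgoff_t index);

static int example_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn;

	pfn = example_lookup_pfn(vma->vm_private_data, vmf->pgoff);
	if (!pfn)
		return VM_FAULT_OOM;

	/* install the mapping ourselves; -EBUSY only means another thread
	 * raced us and already inserted the pte, which is harmless */
	vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	return VM_FAULT_NOPAGE;	/* tell the core no struct page is involved */
}

static struct vm_operations_struct example_vm_ops = {
	.fault = example_vm_fault,
};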
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index a22662b6a1a5..39f6357e3b5d 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -107,7 +107,6 @@
107#include <linux/init.h> 107#include <linux/init.h>
108#include <linux/proc_fs.h> 108#include <linux/proc_fs.h>
109#include <linux/spinlock.h> 109#include <linux/spinlock.h>
110#include <linux/smp_lock.h>
111 110
112#include <asm/io.h> 111#include <asm/io.h>
113#include <asm/uaccess.h> 112#include <asm/uaccess.h>
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0cf98bd4f2d2..e0d0e371909c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -236,6 +236,7 @@
236#include <linux/fs.h> 236#include <linux/fs.h>
237#include <linux/genhd.h> 237#include <linux/genhd.h>
238#include <linux/interrupt.h> 238#include <linux/interrupt.h>
239#include <linux/mm.h>
239#include <linux/spinlock.h> 240#include <linux/spinlock.h>
240#include <linux/percpu.h> 241#include <linux/percpu.h>
241#include <linux/cryptohash.h> 242#include <linux/cryptohash.h>
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index fa92a8af5a5a..dbefbb30ed44 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -78,9 +78,10 @@
78#include <linux/wait.h> 78#include <linux/wait.h>
79#include <linux/bcd.h> 79#include <linux/bcd.h>
80#include <linux/delay.h> 80#include <linux/delay.h>
81#include <linux/smp_lock.h>
82#include <linux/uaccess.h>
81 83
82#include <asm/current.h> 84#include <asm/current.h>
83#include <asm/uaccess.h>
84#include <asm/system.h> 85#include <asm/system.h>
85 86
86#ifdef CONFIG_X86 87#ifdef CONFIG_X86
@@ -120,8 +121,6 @@ static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
120 return 0; 121 return 0;
121} 122}
122#endif 123#endif
123#else
124extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
125#endif 124#endif
126 125
127/* 126/*
@@ -144,8 +143,7 @@ static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0);
144static ssize_t rtc_read(struct file *file, char __user *buf, 143static ssize_t rtc_read(struct file *file, char __user *buf,
145 size_t count, loff_t *ppos); 144 size_t count, loff_t *ppos);
146 145
147static int rtc_ioctl(struct inode *inode, struct file *file, 146static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
148 unsigned int cmd, unsigned long arg);
149 147
150#ifdef RTC_IRQ 148#ifdef RTC_IRQ
151static unsigned int rtc_poll(struct file *file, poll_table *wait); 149static unsigned int rtc_poll(struct file *file, poll_table *wait);
@@ -719,10 +717,13 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
719 &wtime, sizeof wtime) ? -EFAULT : 0; 717 &wtime, sizeof wtime) ? -EFAULT : 0;
720} 718}
721 719
722static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 720static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
723 unsigned long arg)
724{ 721{
725 return rtc_do_ioctl(cmd, arg, 0); 722 long ret;
723 lock_kernel();
724 ret = rtc_do_ioctl(cmd, arg, 0);
725 unlock_kernel();
726 return ret;
726} 727}
727 728
728/* 729/*
@@ -915,7 +916,7 @@ static const struct file_operations rtc_fops = {
915#ifdef RTC_IRQ 916#ifdef RTC_IRQ
916 .poll = rtc_poll, 917 .poll = rtc_poll,
917#endif 918#endif
918 .ioctl = rtc_ioctl, 919 .unlocked_ioctl = rtc_ioctl,
919 .open = rtc_open, 920 .open = rtc_open,
920 .release = rtc_release, 921 .release = rtc_release,
921 .fasync = rtc_fasync, 922 .fasync = rtc_fasync,
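The rtc change is the usual conversion from the old ->ioctl hook to ->unlocked_ioctl: the new entry point is not called under the Big Kernel Lock, so a driver that still depends on BKL semantics takes lock_kernel() explicitly around its existing worker. A generic sketch of the pattern; example_do_ioctl() stands in for the driver's real ioctl body. Once no BKL-dependent state remains, the lock_kernel()/unlock_kernel() pair can simply be dropped.

#include <linux/fs.h>
#include <linux/smp_lock.h>

/* the driver's existing ioctl worker, unchanged by the conversion */
static int example_do_ioctl(unsigned int cmd, unsigned long arg);

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	long ret;

	lock_kernel();			/* keep the old BKL semantics for now */
	ret = example_do_ioctl(cmd, arg);
	unlock_kernel();
	return ret;
}

static const struct file_operations example_fops = {
	.unlocked_ioctl	= example_unlocked_ioctl,
};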
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index b976248e1072..19db1eb87c26 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -1256,7 +1256,6 @@ static int stl_tiocmset(struct tty_struct *tty, struct file *file,
1256static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) 1256static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
1257{ 1257{
1258 struct stlport *portp; 1258 struct stlport *portp;
1259 unsigned int ival;
1260 int rc; 1259 int rc;
1261 void __user *argp = (void __user *)arg; 1260 void __user *argp = (void __user *)arg;
1262 1261
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 15e597d03002..6f4d856df987 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -915,7 +915,7 @@ static void tty_reset_termios(struct tty_struct *tty)
915 * do_tty_hangup - actual handler for hangup events 915 * do_tty_hangup - actual handler for hangup events
916 * @work: tty device 916 * @work: tty device
917 * 917 *
918k * This can be called by the "eventd" kernel thread. That is process 918 * This can be called by the "eventd" kernel thread. That is process
919 * synchronous but doesn't hold any locks, so we need to make sure we 919 * synchronous but doesn't hold any locks, so we need to make sure we
920 * have the appropriate locks for what we're doing. 920 * have the appropriate locks for what we're doing.
921 * 921 *
@@ -1119,19 +1119,6 @@ int tty_hung_up_p(struct file *filp)
1119 1119
1120EXPORT_SYMBOL(tty_hung_up_p); 1120EXPORT_SYMBOL(tty_hung_up_p);
1121 1121
1122/**
1123 * is_tty - checker whether file is a TTY
1124 * @filp: file handle that may be a tty
1125 *
1126 * Check if the file handle is a tty handle.
1127 */
1128
1129int is_tty(struct file *filp)
1130{
1131 return filp->f_op->read == tty_read
1132 || filp->f_op->read == hung_up_tty_read;
1133}
1134
1135static void session_clear_tty(struct pid *session) 1122static void session_clear_tty(struct pid *session)
1136{ 1123{
1137 struct task_struct *p; 1124 struct task_struct *p;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index e32a076d5f1f..cb8c90da3934 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -261,7 +261,7 @@ static void notify_update(struct vc_data *vc)
261#ifdef VT_BUF_VRAM_ONLY 261#ifdef VT_BUF_VRAM_ONLY
262#define DO_UPDATE(vc) 0 262#define DO_UPDATE(vc) 0
263#else 263#else
264#define DO_UPDATE(vc) CON_IS_VISIBLE(vc) 264#define DO_UPDATE(vc) (CON_IS_VISIBLE(vc) && !console_blanked)
265#endif 265#endif
266 266
267static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed) 267static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed)
@@ -2749,8 +2749,8 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2749 tty->termios->c_iflag |= IUTF8; 2749 tty->termios->c_iflag |= IUTF8;
2750 else 2750 else
2751 tty->termios->c_iflag &= ~IUTF8; 2751 tty->termios->c_iflag &= ~IUTF8;
2752 release_console_sem();
2753 vcs_make_sysfs(tty); 2752 vcs_make_sysfs(tty);
2753 release_console_sem();
2754 return ret; 2754 return ret;
2755 } 2755 }
2756 } 2756 }
@@ -2775,8 +2775,8 @@ static void con_close(struct tty_struct *tty, struct file *filp)
2775 if (vc) 2775 if (vc)
2776 vc->vc_tty = NULL; 2776 vc->vc_tty = NULL;
2777 tty->driver_data = NULL; 2777 tty->driver_data = NULL;
2778 release_console_sem();
2779 vcs_remove_sysfs(tty); 2778 vcs_remove_sysfs(tty);
2779 release_console_sem();
2780 mutex_unlock(&tty_mutex); 2780 mutex_unlock(&tty_mutex);
2781 /* 2781 /*
2782 * tty_mutex is released, but we still hold BKL, so there is 2782 * tty_mutex is released, but we still hold BKL, so there is
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ee1df0d45e81..8d6a3ff02672 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
589 ssize_t i = 0; 589 ssize_t i = 0;
590 unsigned int cpu; 590 unsigned int cpu;
591 591
592 for_each_cpu_mask(cpu, mask) { 592 for_each_cpu_mask_nr(cpu, mask) {
593 if (i) 593 if (i)
594 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 594 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
835 } 835 }
836#endif 836#endif
837 837
838 for_each_cpu_mask(j, policy->cpus) { 838 for_each_cpu_mask_nr(j, policy->cpus) {
839 if (cpu == j) 839 if (cpu == j)
840 continue; 840 continue;
841 841
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
898 } 898 }
899 899
900 spin_lock_irqsave(&cpufreq_driver_lock, flags); 900 spin_lock_irqsave(&cpufreq_driver_lock, flags);
901 for_each_cpu_mask(j, policy->cpus) { 901 for_each_cpu_mask_nr(j, policy->cpus) {
902 per_cpu(cpufreq_cpu_data, j) = policy; 902 per_cpu(cpufreq_cpu_data, j) = policy;
903 per_cpu(policy_cpu, j) = policy->cpu; 903 per_cpu(policy_cpu, j) = policy->cpu;
904 } 904 }
905 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 905 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
906 906
907 /* symlink affected CPUs */ 907 /* symlink affected CPUs */
908 for_each_cpu_mask(j, policy->cpus) { 908 for_each_cpu_mask_nr(j, policy->cpus) {
909 if (j == cpu) 909 if (j == cpu)
910 continue; 910 continue;
911 if (!cpu_online(j)) 911 if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
945 945
946err_out_unregister: 946err_out_unregister:
947 spin_lock_irqsave(&cpufreq_driver_lock, flags); 947 spin_lock_irqsave(&cpufreq_driver_lock, flags);
948 for_each_cpu_mask(j, policy->cpus) 948 for_each_cpu_mask_nr(j, policy->cpus)
949 per_cpu(cpufreq_cpu_data, j) = NULL; 949 per_cpu(cpufreq_cpu_data, j) = NULL;
950 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 950 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
951 951
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1028 * the sysfs links afterwards. 1028 * the sysfs links afterwards.
1029 */ 1029 */
1030 if (unlikely(cpus_weight(data->cpus) > 1)) { 1030 if (unlikely(cpus_weight(data->cpus) > 1)) {
1031 for_each_cpu_mask(j, data->cpus) { 1031 for_each_cpu_mask_nr(j, data->cpus) {
1032 if (j == cpu) 1032 if (j == cpu)
1033 continue; 1033 continue;
1034 per_cpu(cpufreq_cpu_data, j) = NULL; 1034 per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1038 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1038 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1039 1039
1040 if (unlikely(cpus_weight(data->cpus) > 1)) { 1040 if (unlikely(cpus_weight(data->cpus) > 1)) {
1041 for_each_cpu_mask(j, data->cpus) { 1041 for_each_cpu_mask_nr(j, data->cpus) {
1042 if (j == cpu) 1042 if (j == cpu)
1043 continue; 1043 continue;
1044 dprintk("removing link for cpu %u\n", j); 1044 dprintk("removing link for cpu %u\n", j);
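Several cpufreq hunks switch from for_each_cpu_mask() to for_each_cpu_mask_nr(), the variant that stops scanning at nr_cpu_ids rather than walking all NR_CPUS bit positions. Usage is otherwise identical; a small illustrative sketch (the printk is only there to show the loop body).

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_walk_cpus(const cpumask_t *mask)
{
	int cpu;

	/* visits only set bits below nr_cpu_ids, not all NR_CPUS positions */
	for_each_cpu_mask_nr(cpu, *mask)
		printk(KERN_DEBUG "cpu %d is set in the mask\n", cpu);
}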
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2..fe565ee43757 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
497 return rc; 497 return rc;
498 } 498 }
499 499
500 for_each_cpu_mask(j, policy->cpus) { 500 for_each_cpu_mask_nr(j, policy->cpus) {
501 struct cpu_dbs_info_s *j_dbs_info; 501 struct cpu_dbs_info_s *j_dbs_info;
502 j_dbs_info = &per_cpu(cpu_dbs_info, j); 502 j_dbs_info = &per_cpu(cpu_dbs_info, j);
503 j_dbs_info->cur_policy = policy; 503 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda382..33855cb3cf16 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
367 367
368 /* Get Idle Time */ 368 /* Get Idle Time */
369 idle_ticks = UINT_MAX; 369 idle_ticks = UINT_MAX;
370 for_each_cpu_mask(j, policy->cpus) { 370 for_each_cpu_mask_nr(j, policy->cpus) {
371 cputime64_t total_idle_ticks; 371 cputime64_t total_idle_ticks;
372 unsigned int tmp_idle_ticks; 372 unsigned int tmp_idle_ticks;
373 struct cpu_dbs_info_s *j_dbs_info; 373 struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
521 return rc; 521 return rc;
522 } 522 }
523 523
524 for_each_cpu_mask(j, policy->cpus) { 524 for_each_cpu_mask_nr(j, policy->cpus) {
525 struct cpu_dbs_info_s *j_dbs_info; 525 struct cpu_dbs_info_s *j_dbs_info;
526 j_dbs_info = &per_cpu(cpu_dbs_info, j); 526 j_dbs_info = &per_cpu(cpu_dbs_info, j);
527 j_dbs_info->cur_policy = policy; 527 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01a41a1..32244aa7cc0c 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -30,16 +30,18 @@
30/** 30/**
31 * A few values needed by the userspace governor 31 * A few values needed by the userspace governor
32 */ 32 */
33static unsigned int cpu_max_freq[NR_CPUS]; 33static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
34static unsigned int cpu_min_freq[NR_CPUS]; 34static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
35static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */ 35static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
36static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */ 36static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
37static unsigned int cpu_is_managed[NR_CPUS]; 37 userspace */
38static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
38 39
39static DEFINE_MUTEX (userspace_mutex); 40static DEFINE_MUTEX (userspace_mutex);
40static int cpus_using_userspace_governor; 41static int cpus_using_userspace_governor;
41 42
42#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) 43#define dprintk(msg...) \
44 cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
43 45
44/* keep track of frequency transitions */ 46/* keep track of frequency transitions */
45static int 47static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
48{ 50{
49 struct cpufreq_freqs *freq = data; 51 struct cpufreq_freqs *freq = data;
50 52
51 if (!cpu_is_managed[freq->cpu]) 53 if (!per_cpu(cpu_is_managed, freq->cpu))
52 return 0; 54 return 0;
53 55
54 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", 56 dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
55 freq->cpu, freq->new); 57 freq->cpu, freq->new);
56 cpu_cur_freq[freq->cpu] = freq->new; 58 per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
57 59
58 return 0; 60 return 0;
59} 61}
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
77 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); 79 dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
78 80
79 mutex_lock(&userspace_mutex); 81 mutex_lock(&userspace_mutex);
80 if (!cpu_is_managed[policy->cpu]) 82 if (!per_cpu(cpu_is_managed, policy->cpu))
81 goto err; 83 goto err;
82 84
83 cpu_set_freq[policy->cpu] = freq; 85 per_cpu(cpu_set_freq, policy->cpu) = freq;
84 86
85 if (freq < cpu_min_freq[policy->cpu]) 87 if (freq < per_cpu(cpu_min_freq, policy->cpu))
86 freq = cpu_min_freq[policy->cpu]; 88 freq = per_cpu(cpu_min_freq, policy->cpu);
87 if (freq > cpu_max_freq[policy->cpu]) 89 if (freq > per_cpu(cpu_max_freq, policy->cpu))
88 freq = cpu_max_freq[policy->cpu]; 90 freq = per_cpu(cpu_max_freq, policy->cpu);
89 91
90 /* 92 /*
91 * We're safe from concurrent calls to ->target() here 93 * We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
104 106
105static ssize_t show_speed(struct cpufreq_policy *policy, char *buf) 107static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
106{ 108{
107 return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]); 109 return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
108} 110}
109 111
110static int cpufreq_governor_userspace(struct cpufreq_policy *policy, 112static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
127 } 129 }
128 cpus_using_userspace_governor++; 130 cpus_using_userspace_governor++;
129 131
130 cpu_is_managed[cpu] = 1; 132 per_cpu(cpu_is_managed, cpu) = 1;
131 cpu_min_freq[cpu] = policy->min; 133 per_cpu(cpu_min_freq, cpu) = policy->min;
132 cpu_max_freq[cpu] = policy->max; 134 per_cpu(cpu_max_freq, cpu) = policy->max;
133 cpu_cur_freq[cpu] = policy->cur; 135 per_cpu(cpu_cur_freq, cpu) = policy->cur;
134 cpu_set_freq[cpu] = policy->cur; 136 per_cpu(cpu_set_freq, cpu) = policy->cur;
135 dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]); 137 dprintk("managing cpu %u started "
138 "(%u - %u kHz, currently %u kHz)\n",
139 cpu,
140 per_cpu(cpu_min_freq, cpu),
141 per_cpu(cpu_max_freq, cpu),
142 per_cpu(cpu_cur_freq, cpu));
136 143
137 mutex_unlock(&userspace_mutex); 144 mutex_unlock(&userspace_mutex);
138 break; 145 break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
145 CPUFREQ_TRANSITION_NOTIFIER); 152 CPUFREQ_TRANSITION_NOTIFIER);
146 } 153 }
147 154
148 cpu_is_managed[cpu] = 0; 155 per_cpu(cpu_is_managed, cpu) = 0;
149 cpu_min_freq[cpu] = 0; 156 per_cpu(cpu_min_freq, cpu) = 0;
150 cpu_max_freq[cpu] = 0; 157 per_cpu(cpu_max_freq, cpu) = 0;
151 cpu_set_freq[cpu] = 0; 158 per_cpu(cpu_set_freq, cpu) = 0;
152 dprintk("managing cpu %u stopped\n", cpu); 159 dprintk("managing cpu %u stopped\n", cpu);
153 mutex_unlock(&userspace_mutex); 160 mutex_unlock(&userspace_mutex);
154 break; 161 break;
155 case CPUFREQ_GOV_LIMITS: 162 case CPUFREQ_GOV_LIMITS:
156 mutex_lock(&userspace_mutex); 163 mutex_lock(&userspace_mutex);
157 dprintk("limit event for cpu %u: %u - %u kHz," 164 dprintk("limit event for cpu %u: %u - %u kHz, "
158 "currently %u kHz, last set to %u kHz\n", 165 "currently %u kHz, last set to %u kHz\n",
159 cpu, policy->min, policy->max, 166 cpu, policy->min, policy->max,
160 cpu_cur_freq[cpu], cpu_set_freq[cpu]); 167 per_cpu(cpu_cur_freq, cpu),
161 if (policy->max < cpu_set_freq[cpu]) { 168 per_cpu(cpu_set_freq, cpu));
169 if (policy->max < per_cpu(cpu_set_freq, cpu)) {
162 __cpufreq_driver_target(policy, policy->max, 170 __cpufreq_driver_target(policy, policy->max,
163 CPUFREQ_RELATION_H); 171 CPUFREQ_RELATION_H);
164 } 172 } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
165 else if (policy->min > cpu_set_freq[cpu]) {
166 __cpufreq_driver_target(policy, policy->min, 173 __cpufreq_driver_target(policy, policy->min,
167 CPUFREQ_RELATION_L); 174 CPUFREQ_RELATION_L);
168 } 175 } else {
169 else { 176 __cpufreq_driver_target(policy,
170 __cpufreq_driver_target(policy, cpu_set_freq[cpu], 177 per_cpu(cpu_set_freq, cpu),
171 CPUFREQ_RELATION_L); 178 CPUFREQ_RELATION_L);
172 } 179 }
173 cpu_min_freq[cpu] = policy->min; 180 per_cpu(cpu_min_freq, cpu) = policy->min;
174 cpu_max_freq[cpu] = policy->max; 181 per_cpu(cpu_max_freq, cpu) = policy->max;
175 cpu_cur_freq[cpu] = policy->cur; 182 per_cpu(cpu_cur_freq, cpu) = policy->cur;
176 mutex_unlock(&userspace_mutex); 183 mutex_unlock(&userspace_mutex);
177 break; 184 break;
178 } 185 }
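The conversion above swaps fixed NR_CPUS-sized arrays for DEFINE_PER_CPU() variables, which are instantiated only for possible CPUs and addressed with per_cpu(var, cpu). A minimal sketch of the idiom, with illustrative names rather than the governor's own:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, example_cur_freq);	/* kHz */

/* Record the frequency last observed on @cpu. */
static void example_save_freq(unsigned int cpu, unsigned int khz)
{
	per_cpu(example_cur_freq, cpu) = khz;
}

/* Read it back, e.g. from a sysfs show() handler. */
static unsigned int example_read_freq(unsigned int cpu)
{
	return per_cpu(example_cur_freq, cpu);
}
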
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index bf5b92f86df7..ec249d2db633 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,13 +28,29 @@
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/dca.h> 29#include <linux/dca.h>
30 30
31MODULE_LICENSE("GPL"); 31#define DCA_VERSION "1.4"
32 32
33/* For now we're assuming a single, global, DCA provider for the system. */ 33MODULE_VERSION(DCA_VERSION);
34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Intel Corporation");
34 36
35static DEFINE_SPINLOCK(dca_lock); 37static DEFINE_SPINLOCK(dca_lock);
36 38
37static struct dca_provider *global_dca = NULL; 39static LIST_HEAD(dca_providers);
40
41static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
42{
43 struct dca_provider *dca, *ret = NULL;
44
45 list_for_each_entry(dca, &dca_providers, node) {
46 if ((!dev) || (dca->ops->dev_managed(dca, dev))) {
47 ret = dca;
48 break;
49 }
50 }
51
52 return ret;
53}
38 54
39/** 55/**
40 * dca_add_requester - add a dca client to the list 56 * dca_add_requester - add a dca client to the list
@@ -42,25 +58,39 @@ static struct dca_provider *global_dca = NULL;
42 */ 58 */
43int dca_add_requester(struct device *dev) 59int dca_add_requester(struct device *dev)
44{ 60{
45 int err, slot; 61 struct dca_provider *dca;
62 int err, slot = -ENODEV;
46 63
47 if (!global_dca) 64 if (!dev)
48 return -ENODEV; 65 return -EFAULT;
49 66
50 spin_lock(&dca_lock); 67 spin_lock(&dca_lock);
51 slot = global_dca->ops->add_requester(global_dca, dev); 68
52 spin_unlock(&dca_lock); 69 /* check if the requester has not been added already */
53 if (slot < 0) 70 dca = dca_find_provider_by_dev(dev);
71 if (dca) {
72 spin_unlock(&dca_lock);
73 return -EEXIST;
74 }
75
76 list_for_each_entry(dca, &dca_providers, node) {
77 slot = dca->ops->add_requester(dca, dev);
78 if (slot >= 0)
79 break;
80 }
81 if (slot < 0) {
82 spin_unlock(&dca_lock);
54 return slot; 83 return slot;
84 }
55 85
56 err = dca_sysfs_add_req(global_dca, dev, slot); 86 err = dca_sysfs_add_req(dca, dev, slot);
57 if (err) { 87 if (err) {
58 spin_lock(&dca_lock); 88 dca->ops->remove_requester(dca, dev);
59 global_dca->ops->remove_requester(global_dca, dev);
60 spin_unlock(&dca_lock); 89 spin_unlock(&dca_lock);
61 return err; 90 return err;
62 } 91 }
63 92
93 spin_unlock(&dca_lock);
64 return 0; 94 return 0;
65} 95}
66EXPORT_SYMBOL_GPL(dca_add_requester); 96EXPORT_SYMBOL_GPL(dca_add_requester);
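dca_add_requester() now rejects devices that are already managed and then offers the new requester to each registered provider until one returns a valid slot. The core of that loop, sketched against the structures this series uses (the helper name is hypothetical, and the node member of struct dca_provider is the one this series adds):

#include <linux/dca.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>

/* Sketch: offer @dev to each provider until one accepts it.  The real
 * function runs under dca_lock and has already checked for duplicates
 * via dca_find_provider_by_dev().
 */
static int example_offer_requester(struct list_head *providers,
				   struct device *dev)
{
	struct dca_provider *dca;
	int slot = -ENODEV;

	list_for_each_entry(dca, providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;	/* this provider manages the device */
	}
	return slot;		/* slot number, or a negative errno */
}
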
@@ -71,30 +101,78 @@ EXPORT_SYMBOL_GPL(dca_add_requester);
71 */ 101 */
72int dca_remove_requester(struct device *dev) 102int dca_remove_requester(struct device *dev)
73{ 103{
104 struct dca_provider *dca;
74 int slot; 105 int slot;
75 if (!global_dca) 106
76 return -ENODEV; 107 if (!dev)
108 return -EFAULT;
77 109
78 spin_lock(&dca_lock); 110 spin_lock(&dca_lock);
79 slot = global_dca->ops->remove_requester(global_dca, dev); 111 dca = dca_find_provider_by_dev(dev);
80 spin_unlock(&dca_lock); 112 if (!dca) {
81 if (slot < 0) 113 spin_unlock(&dca_lock);
114 return -ENODEV;
115 }
116 slot = dca->ops->remove_requester(dca, dev);
117 if (slot < 0) {
118 spin_unlock(&dca_lock);
82 return slot; 119 return slot;
120 }
83 121
84 dca_sysfs_remove_req(global_dca, slot); 122 dca_sysfs_remove_req(dca, slot);
123
124 spin_unlock(&dca_lock);
85 return 0; 125 return 0;
86} 126}
87EXPORT_SYMBOL_GPL(dca_remove_requester); 127EXPORT_SYMBOL_GPL(dca_remove_requester);
88 128
89/** 129/**
90 * dca_get_tag - return the dca tag for the given cpu 130 * dca_common_get_tag - return the dca tag (serves both new and old api)
131 * @dev - the device that wants dca service
91 * @cpu - the cpuid as returned by get_cpu() 132 * @cpu - the cpuid as returned by get_cpu()
92 */ 133 */
93u8 dca_get_tag(int cpu) 134u8 dca_common_get_tag(struct device *dev, int cpu)
94{ 135{
95 if (!global_dca) 136 struct dca_provider *dca;
137 u8 tag;
138
139 spin_lock(&dca_lock);
140
141 dca = dca_find_provider_by_dev(dev);
142 if (!dca) {
143 spin_unlock(&dca_lock);
96 return -ENODEV; 144 return -ENODEV;
97 return global_dca->ops->get_tag(global_dca, cpu); 145 }
146 tag = dca->ops->get_tag(dca, dev, cpu);
147
148 spin_unlock(&dca_lock);
149 return tag;
150}
151
152/**
153 * dca3_get_tag - return the dca tag to the requester device
154 * for the given cpu (new api)
155 * @dev - the device that wants dca service
156 * @cpu - the cpuid as returned by get_cpu()
157 */
158u8 dca3_get_tag(struct device *dev, int cpu)
159{
160 if (!dev)
161 return -EFAULT;
162
163 return dca_common_get_tag(dev, cpu);
164}
165EXPORT_SYMBOL_GPL(dca3_get_tag);
166
167/**
168 * dca_get_tag - return the dca tag for the given cpu (old api)
169 * @cpu - the cpuid as returned by get_cpu()
170 */
171u8 dca_get_tag(int cpu)
172{
173 struct device *dev = NULL;
174
175 return dca_common_get_tag(dev, cpu);
98} 176}
99EXPORT_SYMBOL_GPL(dca_get_tag); 177EXPORT_SYMBOL_GPL(dca_get_tag);
100 178
@@ -140,12 +218,10 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
140{ 218{
141 int err; 219 int err;
142 220
143 if (global_dca)
144 return -EEXIST;
145 err = dca_sysfs_add_provider(dca, dev); 221 err = dca_sysfs_add_provider(dca, dev);
146 if (err) 222 if (err)
147 return err; 223 return err;
148 global_dca = dca; 224 list_add(&dca->node, &dca_providers);
149 blocking_notifier_call_chain(&dca_provider_chain, 225 blocking_notifier_call_chain(&dca_provider_chain,
150 DCA_PROVIDER_ADD, NULL); 226 DCA_PROVIDER_ADD, NULL);
151 return 0; 227 return 0;
@@ -158,11 +234,9 @@ EXPORT_SYMBOL_GPL(register_dca_provider);
158 */ 234 */
159void unregister_dca_provider(struct dca_provider *dca) 235void unregister_dca_provider(struct dca_provider *dca)
160{ 236{
161 if (!global_dca)
162 return;
163 blocking_notifier_call_chain(&dca_provider_chain, 237 blocking_notifier_call_chain(&dca_provider_chain,
164 DCA_PROVIDER_REMOVE, NULL); 238 DCA_PROVIDER_REMOVE, NULL);
165 global_dca = NULL; 239 list_del(&dca->node);
166 dca_sysfs_remove_provider(dca); 240 dca_sysfs_remove_provider(dca);
167} 241}
168EXPORT_SYMBOL_GPL(unregister_dca_provider); 242EXPORT_SYMBOL_GPL(unregister_dca_provider);
@@ -187,6 +261,7 @@ EXPORT_SYMBOL_GPL(dca_unregister_notify);
187 261
188static int __init dca_init(void) 262static int __init dca_init(void)
189{ 263{
264 printk(KERN_ERR "dca service started, version %s\n", DCA_VERSION);
190 return dca_sysfs_init(); 265 return dca_sysfs_init();
191} 266}
192 267
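With the single global_dca pointer gone, providers live on a list and are resolved per device through dca_find_provider_by_dev(). Registration then reduces to ordinary list handling; a minimal sketch with illustrative names (the sketch takes the same spinlock the requester paths use, for clarity):

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_provider {
	struct list_head node;
	/* ops, private data, ... */
};

static LIST_HEAD(example_providers);
static DEFINE_SPINLOCK(example_lock);

static void example_register_provider(struct example_provider *p)
{
	spin_lock(&example_lock);
	list_add(&p->node, &example_providers);
	spin_unlock(&example_lock);
}

static void example_unregister_provider(struct example_provider *p)
{
	spin_lock(&example_lock);
	list_del(&p->node);
	spin_unlock(&example_lock);
}
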
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 9a70377bfb34..7af4b403bd2d 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -13,10 +13,11 @@ static spinlock_t dca_idr_lock;
13int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) 13int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
14{ 14{
15 struct device *cd; 15 struct device *cd;
16 static int req_count;
16 17
17 cd = device_create_drvdata(dca_class, dca->cd, 18 cd = device_create_drvdata(dca_class, dca->cd,
18 MKDEV(0, slot + 1), NULL, 19 MKDEV(0, slot + 1), NULL,
19 "requester%d", slot); 20 "requester%d", req_count++);
20 if (IS_ERR(cd)) 21 if (IS_ERR(cd))
21 return PTR_ERR(cd); 22 return PTR_ERR(cd);
22 return 0; 23 return 0;
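The sysfs change names requester devices after a private, ever-increasing counter instead of the provider's slot number, so two providers that both hand out slot 0 cannot collide in the class namespace. The naming idiom in isolation, with hypothetical names (in the driver the counter is bumped while dca_lock is held by the caller):

#include <linux/kernel.h>

static int example_req_count;	/* serialized by the caller's lock */

static void example_requester_name(char *buf, size_t len)
{
	snprintf(buf, len, "requester%d", example_req_count++);
}
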
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6239c3df30ac..cd303901eb5b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,13 +4,14 @@
4 4
5menuconfig DMADEVICES 5menuconfig DMADEVICES
6 bool "DMA Engine support" 6 bool "DMA Engine support"
7 depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC 7 depends on !HIGHMEM64G && HAS_DMA
8 depends on !HIGHMEM64G
9 help 8 help
10 DMA engines can do asynchronous data transfers without 9 DMA engines can do asynchronous data transfers without
11 involving the host CPU. Currently, this framework can be 10 involving the host CPU. Currently, this framework can be
12 used to offload memory copies in the network stack and 11 used to offload memory copies in the network stack and
13 RAID operations in the MD driver. 12 RAID operations in the MD driver. This menu only presents
13 DMA Device drivers supported by the configured arch, it may
14 be empty in some cases.
14 15
15if DMADEVICES 16if DMADEVICES
16 17
@@ -37,6 +38,15 @@ config INTEL_IOP_ADMA
37 help 38 help
38 Enable support for the Intel(R) IOP Series RAID engines. 39 Enable support for the Intel(R) IOP Series RAID engines.
39 40
41config DW_DMAC
42 tristate "Synopsys DesignWare AHB DMA support"
43 depends on AVR32
44 select DMA_ENGINE
45 default y if CPU_AT32AP7000
46 help
47 Support the Synopsys DesignWare AHB DMA controller. This
48 can be integrated in chips such as the Atmel AT32ap7000.
49
40config FSL_DMA 50config FSL_DMA
41 bool "Freescale MPC85xx/MPC83xx DMA support" 51 bool "Freescale MPC85xx/MPC83xx DMA support"
42 depends on PPC 52 depends on PPC
@@ -46,6 +56,14 @@ config FSL_DMA
46 MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. 56 MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
47 The MPC8349, MPC8360 is also supported. 57 The MPC8349, MPC8360 is also supported.
48 58
59config MV_XOR
60 bool "Marvell XOR engine support"
61 depends on PLAT_ORION
62 select ASYNC_CORE
63 select DMA_ENGINE
64 ---help---
65 Enable support for the Marvell XOR engine.
66
49config DMA_ENGINE 67config DMA_ENGINE
50 bool 68 bool
51 69
@@ -55,10 +73,19 @@ comment "DMA Clients"
55config NET_DMA 73config NET_DMA
56 bool "Network: TCP receive copy offload" 74 bool "Network: TCP receive copy offload"
57 depends on DMA_ENGINE && NET 75 depends on DMA_ENGINE && NET
76 default (INTEL_IOATDMA || FSL_DMA)
58 help 77 help
59 This enables the use of DMA engines in the network stack to 78 This enables the use of DMA engines in the network stack to
60 offload receive copy-to-user operations, freeing CPU cycles. 79 offload receive copy-to-user operations, freeing CPU cycles.
61 Since this is the main user of the DMA engine, it should be enabled; 80
62 say Y here. 81 Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
82 say N.
83
84config DMATEST
85 tristate "DMA Test client"
86 depends on DMA_ENGINE
87 help
88 Simple DMA test client. Say N unless you're debugging a
89 DMA Device driver.
63 90
64endif 91endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c8036d945902..14f59527d4f6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,6 +1,9 @@
1obj-$(CONFIG_DMA_ENGINE) += dmaengine.o 1obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
2obj-$(CONFIG_NET_DMA) += iovlock.o 2obj-$(CONFIG_NET_DMA) += iovlock.o
3obj-$(CONFIG_DMATEST) += dmatest.o
3obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o 4obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
4ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o 5ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
5obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o 6obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
6obj-$(CONFIG_FSL_DMA) += fsldma.o 7obj-$(CONFIG_FSL_DMA) += fsldma.o
8obj-$(CONFIG_MV_XOR) += mv_xor.o
9obj-$(CONFIG_DW_DMAC) += dw_dmac.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 97b329e76798..dc003a3a787d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -169,12 +169,18 @@ static void dma_client_chan_alloc(struct dma_client *client)
169 enum dma_state_client ack; 169 enum dma_state_client ack;
170 170
171 /* Find a channel */ 171 /* Find a channel */
172 list_for_each_entry(device, &dma_device_list, global_node) 172 list_for_each_entry(device, &dma_device_list, global_node) {
173 /* Does the client require a specific DMA controller? */
174 if (client->slave && client->slave->dma_dev
175 && client->slave->dma_dev != device->dev)
176 continue;
177
173 list_for_each_entry(chan, &device->channels, device_node) { 178 list_for_each_entry(chan, &device->channels, device_node) {
174 if (!dma_chan_satisfies_mask(chan, client->cap_mask)) 179 if (!dma_chan_satisfies_mask(chan, client->cap_mask))
175 continue; 180 continue;
176 181
177 desc = chan->device->device_alloc_chan_resources(chan); 182 desc = chan->device->device_alloc_chan_resources(
183 chan, client);
178 if (desc >= 0) { 184 if (desc >= 0) {
179 ack = client->event_callback(client, 185 ack = client->event_callback(client,
180 chan, 186 chan,
@@ -183,12 +189,14 @@ static void dma_client_chan_alloc(struct dma_client *client)
183 /* we are done once this client rejects 189 /* we are done once this client rejects
184 * an available resource 190 * an available resource
185 */ 191 */
186 if (ack == DMA_ACK) 192 if (ack == DMA_ACK) {
187 dma_chan_get(chan); 193 dma_chan_get(chan);
188 else if (ack == DMA_NAK) 194 chan->client_count++;
195 } else if (ack == DMA_NAK)
189 return; 196 return;
190 } 197 }
191 } 198 }
199 }
192} 200}
193 201
194enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) 202enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -272,8 +280,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
272 /* client was holding resources for this channel so 280 /* client was holding resources for this channel so
273 * free it 281 * free it
274 */ 282 */
275 if (ack == DMA_ACK) 283 if (ack == DMA_ACK) {
276 dma_chan_put(chan); 284 dma_chan_put(chan);
285 chan->client_count--;
286 }
277 } 287 }
278 288
279 mutex_unlock(&dma_list_mutex); 289 mutex_unlock(&dma_list_mutex);
@@ -285,6 +295,10 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
285 */ 295 */
286void dma_async_client_register(struct dma_client *client) 296void dma_async_client_register(struct dma_client *client)
287{ 297{
298 /* validate client data */
299 BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
300 !client->slave);
301
288 mutex_lock(&dma_list_mutex); 302 mutex_lock(&dma_list_mutex);
289 list_add_tail(&client->global_node, &dma_client_list); 303 list_add_tail(&client->global_node, &dma_client_list);
290 mutex_unlock(&dma_list_mutex); 304 mutex_unlock(&dma_list_mutex);
@@ -313,8 +327,10 @@ void dma_async_client_unregister(struct dma_client *client)
313 ack = client->event_callback(client, chan, 327 ack = client->event_callback(client, chan,
314 DMA_RESOURCE_REMOVED); 328 DMA_RESOURCE_REMOVED);
315 329
316 if (ack == DMA_ACK) 330 if (ack == DMA_ACK) {
317 dma_chan_put(chan); 331 dma_chan_put(chan);
332 chan->client_count--;
333 }
318 } 334 }
319 335
320 list_del(&client->global_node); 336 list_del(&client->global_node);
@@ -359,6 +375,10 @@ int dma_async_device_register(struct dma_device *device)
359 !device->device_prep_dma_memset); 375 !device->device_prep_dma_memset);
360 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 376 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
361 !device->device_prep_dma_interrupt); 377 !device->device_prep_dma_interrupt);
378 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
379 !device->device_prep_slave_sg);
380 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
381 !device->device_terminate_all);
362 382
363 BUG_ON(!device->device_alloc_chan_resources); 383 BUG_ON(!device->device_alloc_chan_resources);
364 BUG_ON(!device->device_free_chan_resources); 384 BUG_ON(!device->device_free_chan_resources);
@@ -378,7 +398,7 @@ int dma_async_device_register(struct dma_device *device)
378 398
379 chan->chan_id = chancnt++; 399 chan->chan_id = chancnt++;
380 chan->dev.class = &dma_devclass; 400 chan->dev.class = &dma_devclass;
381 chan->dev.parent = NULL; 401 chan->dev.parent = device->dev;
382 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d", 402 snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
383 device->dev_id, chan->chan_id); 403 device->dev_id, chan->chan_id);
384 404
@@ -394,6 +414,7 @@ int dma_async_device_register(struct dma_device *device)
394 kref_get(&device->refcount); 414 kref_get(&device->refcount);
395 kref_get(&device->refcount); 415 kref_get(&device->refcount);
396 kref_init(&chan->refcount); 416 kref_init(&chan->refcount);
417 chan->client_count = 0;
397 chan->slow_ref = 0; 418 chan->slow_ref = 0;
398 INIT_RCU_HEAD(&chan->rcu); 419 INIT_RCU_HEAD(&chan->rcu);
399 } 420 }
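dmaengine now counts clients per channel and, at registration time, insists that a DMA_SLAVE-capable device implements device_prep_slave_sg()/device_terminate_all() and that a DMA_SLAVE client supplies a struct dma_slave. A sketch of what a slave-aware client looks like against this API; names prefixed example_ are hypothetical:

#include <linux/dmaengine.h>

static enum dma_state_client example_event(struct dma_client *client,
		struct dma_chan *chan, enum dma_state state)
{
	/* A real client would track which channels it actually took. */
	return state == DMA_RESOURCE_AVAILABLE ? DMA_ACK : DMA_DUP;
}

static struct dma_client example_client = {
	.event_callback	= example_event,
};

/* Request channels bound to the one controller described by @slave. */
static void example_request_channels(struct dma_slave *slave)
{
	dma_cap_set(DMA_SLAVE, example_client.cap_mask);
	example_client.slave = slave;	/* required, or registration BUG()s */
	dma_async_client_register(&example_client);
	dma_async_client_chan_request(&example_client);
}
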
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
new file mode 100644
index 000000000000..a08d19704743
--- /dev/null
+++ b/drivers/dma/dmatest.c
@@ -0,0 +1,444 @@
1/*
2 * DMA Engine test module
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/delay.h>
11#include <linux/dmaengine.h>
12#include <linux/init.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/random.h>
17#include <linux/wait.h>
18
19static unsigned int test_buf_size = 16384;
20module_param(test_buf_size, uint, S_IRUGO);
21MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
22
23static char test_channel[BUS_ID_SIZE];
24module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
25MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
26
27static char test_device[BUS_ID_SIZE];
28module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
29MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
30
31static unsigned int threads_per_chan = 1;
32module_param(threads_per_chan, uint, S_IRUGO);
33MODULE_PARM_DESC(threads_per_chan,
34 "Number of threads to start per channel (default: 1)");
35
36static unsigned int max_channels;
37module_param(max_channels, uint, S_IRUGO);
38MODULE_PARM_DESC(nr_channels,
39 "Maximum number of channels to use (default: all)");
40
41/*
42 * Initialization patterns. All bytes in the source buffer has bit 7
43 * set, all bytes in the destination buffer has bit 7 cleared.
44 *
45 * Bit 6 is set for all bytes which are to be copied by the DMA
46 * engine. Bit 5 is set for all bytes which are to be overwritten by
47 * the DMA engine.
48 *
49 * The remaining bits are the inverse of a counter which increments by
50 * one for each byte address.
51 */
52#define PATTERN_SRC 0x80
53#define PATTERN_DST 0x00
54#define PATTERN_COPY 0x40
55#define PATTERN_OVERWRITE 0x20
56#define PATTERN_COUNT_MASK 0x1f
57
58struct dmatest_thread {
59 struct list_head node;
60 struct task_struct *task;
61 struct dma_chan *chan;
62 u8 *srcbuf;
63 u8 *dstbuf;
64};
65
66struct dmatest_chan {
67 struct list_head node;
68 struct dma_chan *chan;
69 struct list_head threads;
70};
71
72/*
73 * These are protected by dma_list_mutex since they're only used by
74 * the DMA client event callback
75 */
76static LIST_HEAD(dmatest_channels);
77static unsigned int nr_channels;
78
79static bool dmatest_match_channel(struct dma_chan *chan)
80{
81 if (test_channel[0] == '\0')
82 return true;
83 return strcmp(chan->dev.bus_id, test_channel) == 0;
84}
85
86static bool dmatest_match_device(struct dma_device *device)
87{
88 if (test_device[0] == '\0')
89 return true;
90 return strcmp(device->dev->bus_id, test_device) == 0;
91}
92
93static unsigned long dmatest_random(void)
94{
95 unsigned long buf;
96
97 get_random_bytes(&buf, sizeof(buf));
98 return buf;
99}
100
101static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
102{
103 unsigned int i;
104
105 for (i = 0; i < start; i++)
106 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
107 for ( ; i < start + len; i++)
108 buf[i] = PATTERN_SRC | PATTERN_COPY
109 | (~i & PATTERN_COUNT_MASK);;
110 for ( ; i < test_buf_size; i++)
111 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
112}
113
114static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
115{
116 unsigned int i;
117
118 for (i = 0; i < start; i++)
119 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
120 for ( ; i < start + len; i++)
121 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
122 | (~i & PATTERN_COUNT_MASK);
123 for ( ; i < test_buf_size; i++)
124 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
125}
126
127static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
128 unsigned int counter, bool is_srcbuf)
129{
130 u8 diff = actual ^ pattern;
131 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
132 const char *thread_name = current->comm;
133
134 if (is_srcbuf)
135 pr_warning("%s: srcbuf[0x%x] overwritten!"
136 " Expected %02x, got %02x\n",
137 thread_name, index, expected, actual);
138 else if ((pattern & PATTERN_COPY)
139 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
140 pr_warning("%s: dstbuf[0x%x] not copied!"
141 " Expected %02x, got %02x\n",
142 thread_name, index, expected, actual);
143 else if (diff & PATTERN_SRC)
144 pr_warning("%s: dstbuf[0x%x] was copied!"
145 " Expected %02x, got %02x\n",
146 thread_name, index, expected, actual);
147 else
148 pr_warning("%s: dstbuf[0x%x] mismatch!"
149 " Expected %02x, got %02x\n",
150 thread_name, index, expected, actual);
151}
152
153static unsigned int dmatest_verify(u8 *buf, unsigned int start,
154 unsigned int end, unsigned int counter, u8 pattern,
155 bool is_srcbuf)
156{
157 unsigned int i;
158 unsigned int error_count = 0;
159 u8 actual;
160
161 for (i = start; i < end; i++) {
162 actual = buf[i];
163 if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
164 if (error_count < 32)
165 dmatest_mismatch(actual, pattern, i, counter,
166 is_srcbuf);
167 error_count++;
168 }
169 counter++;
170 }
171
172 if (error_count > 32)
173 pr_warning("%s: %u errors suppressed\n",
174 current->comm, error_count - 32);
175
176 return error_count;
177}
178
179/*
180 * This function repeatedly tests DMA transfers of various lengths and
181 * offsets until it is told to exit by kthread_stop(). There may be
182 * multiple threads running this function in parallel for a single
183 * channel, and there may be multiple channels being tested in
184 * parallel.
185 *
186 * Before each test, the source and destination buffer is initialized
187 * with a known pattern. This pattern is different depending on
188 * whether it's in an area which is supposed to be copied or
189 * overwritten, and different in the source and destination buffers.
190 * So if the DMA engine doesn't copy exactly what we tell it to copy,
191 * we'll notice.
192 */
193static int dmatest_func(void *data)
194{
195 struct dmatest_thread *thread = data;
196 struct dma_chan *chan;
197 const char *thread_name;
198 unsigned int src_off, dst_off, len;
199 unsigned int error_count;
200 unsigned int failed_tests = 0;
201 unsigned int total_tests = 0;
202 dma_cookie_t cookie;
203 enum dma_status status;
204 int ret;
205
206 thread_name = current->comm;
207
208 ret = -ENOMEM;
209 thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
210 if (!thread->srcbuf)
211 goto err_srcbuf;
212 thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
213 if (!thread->dstbuf)
214 goto err_dstbuf;
215
216 smp_rmb();
217 chan = thread->chan;
218 dma_chan_get(chan);
219
220 while (!kthread_should_stop()) {
221 total_tests++;
222
223 len = dmatest_random() % test_buf_size + 1;
224 src_off = dmatest_random() % (test_buf_size - len + 1);
225 dst_off = dmatest_random() % (test_buf_size - len + 1);
226
227 dmatest_init_srcbuf(thread->srcbuf, src_off, len);
228 dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
229
230 cookie = dma_async_memcpy_buf_to_buf(chan,
231 thread->dstbuf + dst_off,
232 thread->srcbuf + src_off,
233 len);
234 if (dma_submit_error(cookie)) {
235 pr_warning("%s: #%u: submit error %d with src_off=0x%x "
236 "dst_off=0x%x len=0x%x\n",
237 thread_name, total_tests - 1, cookie,
238 src_off, dst_off, len);
239 msleep(100);
240 failed_tests++;
241 continue;
242 }
243 dma_async_memcpy_issue_pending(chan);
244
245 do {
246 msleep(1);
247 status = dma_async_memcpy_complete(
248 chan, cookie, NULL, NULL);
249 } while (status == DMA_IN_PROGRESS);
250
251 if (status == DMA_ERROR) {
252 pr_warning("%s: #%u: error during copy\n",
253 thread_name, total_tests - 1);
254 failed_tests++;
255 continue;
256 }
257
258 error_count = 0;
259
260 pr_debug("%s: verifying source buffer...\n", thread_name);
261 error_count += dmatest_verify(thread->srcbuf, 0, src_off,
262 0, PATTERN_SRC, true);
263 error_count += dmatest_verify(thread->srcbuf, src_off,
264 src_off + len, src_off,
265 PATTERN_SRC | PATTERN_COPY, true);
266 error_count += dmatest_verify(thread->srcbuf, src_off + len,
267 test_buf_size, src_off + len,
268 PATTERN_SRC, true);
269
270 pr_debug("%s: verifying dest buffer...\n",
271 thread->task->comm);
272 error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
273 0, PATTERN_DST, false);
274 error_count += dmatest_verify(thread->dstbuf, dst_off,
275 dst_off + len, src_off,
276 PATTERN_SRC | PATTERN_COPY, false);
277 error_count += dmatest_verify(thread->dstbuf, dst_off + len,
278 test_buf_size, dst_off + len,
279 PATTERN_DST, false);
280
281 if (error_count) {
282 pr_warning("%s: #%u: %u errors with "
283 "src_off=0x%x dst_off=0x%x len=0x%x\n",
284 thread_name, total_tests - 1, error_count,
285 src_off, dst_off, len);
286 failed_tests++;
287 } else {
288 pr_debug("%s: #%u: No errors with "
289 "src_off=0x%x dst_off=0x%x len=0x%x\n",
290 thread_name, total_tests - 1,
291 src_off, dst_off, len);
292 }
293 }
294
295 ret = 0;
296 dma_chan_put(chan);
297 kfree(thread->dstbuf);
298err_dstbuf:
299 kfree(thread->srcbuf);
300err_srcbuf:
301 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
302 thread_name, total_tests, failed_tests, ret);
303 return ret;
304}
305
306static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
307{
308 struct dmatest_thread *thread;
309 struct dmatest_thread *_thread;
310 int ret;
311
312 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
313 ret = kthread_stop(thread->task);
314 pr_debug("dmatest: thread %s exited with status %d\n",
315 thread->task->comm, ret);
316 list_del(&thread->node);
317 kfree(thread);
318 }
319 kfree(dtc);
320}
321
322static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
323{
324 struct dmatest_chan *dtc;
325 struct dmatest_thread *thread;
326 unsigned int i;
327
328 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC);
329 if (!dtc) {
330 pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
331 return DMA_NAK;
332 }
333
334 dtc->chan = chan;
335 INIT_LIST_HEAD(&dtc->threads);
336
337 for (i = 0; i < threads_per_chan; i++) {
338 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
339 if (!thread) {
340 pr_warning("dmatest: No memory for %s-test%u\n",
341 chan->dev.bus_id, i);
342 break;
343 }
344 thread->chan = dtc->chan;
345 smp_wmb();
346 thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
347 chan->dev.bus_id, i);
348 if (IS_ERR(thread->task)) {
349 pr_warning("dmatest: Failed to run thread %s-test%u\n",
350 chan->dev.bus_id, i);
351 kfree(thread);
352 break;
353 }
354
355 /* srcbuf and dstbuf are allocated by the thread itself */
356
357 list_add_tail(&thread->node, &dtc->threads);
358 }
359
360 pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);
361
362 list_add_tail(&dtc->node, &dmatest_channels);
363 nr_channels++;
364
365 return DMA_ACK;
366}
367
368static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
369{
370 struct dmatest_chan *dtc, *_dtc;
371
372 list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
373 if (dtc->chan == chan) {
374 list_del(&dtc->node);
375 dmatest_cleanup_channel(dtc);
376 pr_debug("dmatest: lost channel %s\n",
377 chan->dev.bus_id);
378 return DMA_ACK;
379 }
380 }
381
382 return DMA_DUP;
383}
384
385/*
386 * Start testing threads as new channels are assigned to us, and kill
387 * them when the channels go away.
388 *
389 * When we unregister the client, all channels are removed so this
390 * will also take care of cleaning things up when the module is
391 * unloaded.
392 */
393static enum dma_state_client
394dmatest_event(struct dma_client *client, struct dma_chan *chan,
395 enum dma_state state)
396{
397 enum dma_state_client ack = DMA_NAK;
398
399 switch (state) {
400 case DMA_RESOURCE_AVAILABLE:
401 if (!dmatest_match_channel(chan)
402 || !dmatest_match_device(chan->device))
403 ack = DMA_DUP;
404 else if (max_channels && nr_channels >= max_channels)
405 ack = DMA_NAK;
406 else
407 ack = dmatest_add_channel(chan);
408 break;
409
410 case DMA_RESOURCE_REMOVED:
411 ack = dmatest_remove_channel(chan);
412 break;
413
414 default:
415 pr_info("dmatest: Unhandled event %u (%s)\n",
416 state, chan->dev.bus_id);
417 break;
418 }
419
420 return ack;
421}
422
423static struct dma_client dmatest_client = {
424 .event_callback = dmatest_event,
425};
426
427static int __init dmatest_init(void)
428{
429 dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
430 dma_async_client_register(&dmatest_client);
431 dma_async_client_chan_request(&dmatest_client);
432
433 return 0;
434}
435module_init(dmatest_init);
436
437static void __exit dmatest_exit(void)
438{
439 dma_async_client_unregister(&dmatest_client);
440}
441module_exit(dmatest_exit);
442
443MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
444MODULE_LICENSE("GPL v2");
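dmatest's trick is that every byte's expected value is computable from its position alone: bit 7 distinguishes source from destination, bits 6 and 5 mark the copied and overwritten regions, and the low five bits carry an inverted counter. That lets the verifier classify a corruption, not just detect it. A small user-space model of the expected destination byte, using the same pattern constants (expected_dst() itself is not part of the module):

#include <stdint.h>
#include <stdio.h>

#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f

/* Expected destination byte at absolute offset i, for a copy of
 * [src_off, src_off + len) onto [dst_off, dst_off + len).
 */
static uint8_t expected_dst(unsigned int i, unsigned int dst_off,
			    unsigned int src_off, unsigned int len)
{
	if (i < dst_off || i >= dst_off + len)
		return PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	/* the copied region carries the source counter, rebased at src_off */
	return PATTERN_SRC | PATTERN_COPY |
	       (~(src_off + (i - dst_off)) & PATTERN_COUNT_MASK);
}

int main(void)
{
	printf("0x%02x\n", expected_dst(10, 8, 4, 16));
	return 0;
}
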
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
new file mode 100644
index 000000000000..94df91771243
--- /dev/null
+++ b/drivers/dma/dw_dmac.c
@@ -0,0 +1,1122 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
3 * AVR32 systems.)
4 *
5 * Copyright (C) 2007-2008 Atmel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/clk.h>
12#include <linux/delay.h>
13#include <linux/dmaengine.h>
14#include <linux/dma-mapping.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22
23#include "dw_dmac_regs.h"
24
25/*
26 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
27 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
28 * of which use ARM any more). See the "Databook" from Synopsys for
29 * information beyond what licensees probably provide.
30 *
31 * The driver has currently been tested only with the Atmel AT32AP7000,
32 * which does not support descriptor writeback.
33 */
34
35/* NOTE: DMS+SMS is system-specific. We should get this information
36 * from the platform code somehow.
37 */
38#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \
39 | DWC_CTLL_SRC_MSIZE(0) \
40 | DWC_CTLL_DMS(0) \
41 | DWC_CTLL_SMS(1) \
42 | DWC_CTLL_LLP_D_EN \
43 | DWC_CTLL_LLP_S_EN)
44
45/*
46 * This is configuration-dependent and usually a funny size like 4095.
47 * Let's round it down to the nearest power of two.
48 *
49 * Note that this is a transfer count, i.e. if we transfer 32-bit
50 * words, we can do 8192 bytes per descriptor.
51 *
52 * This parameter is also system-specific.
53 */
54#define DWC_MAX_COUNT 2048U
55
56/*
57 * Number of descriptors to allocate for each channel. This should be
58 * made configurable somehow; preferably, the clients (at least the
59 * ones using slave transfers) should be able to give us a hint.
60 */
61#define NR_DESCS_PER_CHANNEL 64
62
63/*----------------------------------------------------------------------*/
64
65/*
66 * Because we're not relying on writeback from the controller (it may not
67 * even be configured into the core!) we don't need to use dma_pool. These
68 * descriptors -- and associated data -- are cacheable. We do need to make
69 * sure their dcache entries are written back before handing them off to
70 * the controller, though.
71 */
72
73static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
74{
75 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
76}
77
78static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
79{
80 return list_entry(dwc->queue.next, struct dw_desc, desc_node);
81}
82
83static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
84{
85 struct dw_desc *desc, *_desc;
86 struct dw_desc *ret = NULL;
87 unsigned int i = 0;
88
89 spin_lock_bh(&dwc->lock);
90 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
91 if (async_tx_test_ack(&desc->txd)) {
92 list_del(&desc->desc_node);
93 ret = desc;
94 break;
95 }
96 dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
97 i++;
98 }
99 spin_unlock_bh(&dwc->lock);
100
101 dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
102
103 return ret;
104}
105
106static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
107{
108 struct dw_desc *child;
109
110 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
111 dma_sync_single_for_cpu(dwc->chan.dev.parent,
112 child->txd.phys, sizeof(child->lli),
113 DMA_TO_DEVICE);
114 dma_sync_single_for_cpu(dwc->chan.dev.parent,
115 desc->txd.phys, sizeof(desc->lli),
116 DMA_TO_DEVICE);
117}
118
119/*
120 * Move a descriptor, including any children, to the free list.
121 * `desc' must not be on any lists.
122 */
123static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124{
125 if (desc) {
126 struct dw_desc *child;
127
128 dwc_sync_desc_for_cpu(dwc, desc);
129
130 spin_lock_bh(&dwc->lock);
131 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
132 dev_vdbg(&dwc->chan.dev,
133 "moving child desc %p to freelist\n",
134 child);
135 list_splice_init(&desc->txd.tx_list, &dwc->free_list);
136 dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
137 list_add(&desc->desc_node, &dwc->free_list);
138 spin_unlock_bh(&dwc->lock);
139 }
140}
141
142/* Called with dwc->lock held and bh disabled */
143static dma_cookie_t
144dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
145{
146 dma_cookie_t cookie = dwc->chan.cookie;
147
148 if (++cookie < 0)
149 cookie = 1;
150
151 dwc->chan.cookie = cookie;
152 desc->txd.cookie = cookie;
153
154 return cookie;
155}
156
157/*----------------------------------------------------------------------*/
158
159/* Called with dwc->lock held and bh disabled */
160static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
161{
162 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
163
164 /* ASSERT: channel is idle */
165 if (dma_readl(dw, CH_EN) & dwc->mask) {
166 dev_err(&dwc->chan.dev,
167 "BUG: Attempted to start non-idle channel\n");
168 dev_err(&dwc->chan.dev,
169 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
170 channel_readl(dwc, SAR),
171 channel_readl(dwc, DAR),
172 channel_readl(dwc, LLP),
173 channel_readl(dwc, CTL_HI),
174 channel_readl(dwc, CTL_LO));
175
176 /* The tasklet will hopefully advance the queue... */
177 return;
178 }
179
180 channel_writel(dwc, LLP, first->txd.phys);
181 channel_writel(dwc, CTL_LO,
182 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
183 channel_writel(dwc, CTL_HI, 0);
184 channel_set_bit(dw, CH_EN, dwc->mask);
185}
186
187/*----------------------------------------------------------------------*/
188
189static void
190dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
191{
192 dma_async_tx_callback callback;
193 void *param;
194 struct dma_async_tx_descriptor *txd = &desc->txd;
195
196 dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
197
198 dwc->completed = txd->cookie;
199 callback = txd->callback;
200 param = txd->callback_param;
201
202 dwc_sync_desc_for_cpu(dwc, desc);
203 list_splice_init(&txd->tx_list, &dwc->free_list);
204 list_move(&desc->desc_node, &dwc->free_list);
205
206 /*
207 * We use dma_unmap_page() regardless of how the buffers were
208 * mapped before they were submitted...
209 */
210 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
211 dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
212 DMA_FROM_DEVICE);
213 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
214 dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
215 DMA_TO_DEVICE);
216
217 /*
218 * The API requires that no submissions are done from a
219 * callback, so we don't need to drop the lock here
220 */
221 if (callback)
222 callback(param);
223}
224
225static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
226{
227 struct dw_desc *desc, *_desc;
228 LIST_HEAD(list);
229
230 if (dma_readl(dw, CH_EN) & dwc->mask) {
231 dev_err(&dwc->chan.dev,
232 "BUG: XFER bit set, but channel not idle!\n");
233
234 /* Try to continue after resetting the channel... */
235 channel_clear_bit(dw, CH_EN, dwc->mask);
236 while (dma_readl(dw, CH_EN) & dwc->mask)
237 cpu_relax();
238 }
239
240 /*
241 * Submit queued descriptors ASAP, i.e. before we go through
242 * the completed ones.
243 */
244 if (!list_empty(&dwc->queue))
245 dwc_dostart(dwc, dwc_first_queued(dwc));
246 list_splice_init(&dwc->active_list, &list);
247 list_splice_init(&dwc->queue, &dwc->active_list);
248
249 list_for_each_entry_safe(desc, _desc, &list, desc_node)
250 dwc_descriptor_complete(dwc, desc);
251}
252
253static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
254{
255 dma_addr_t llp;
256 struct dw_desc *desc, *_desc;
257 struct dw_desc *child;
258 u32 status_xfer;
259
260 /*
261 * Clear block interrupt flag before scanning so that we don't
262 * miss any, and read LLP before RAW_XFER to ensure it is
263 * valid if we decide to scan the list.
264 */
265 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
266 llp = channel_readl(dwc, LLP);
267 status_xfer = dma_readl(dw, RAW.XFER);
268
269 if (status_xfer & dwc->mask) {
270 /* Everything we've submitted is done */
271 dma_writel(dw, CLEAR.XFER, dwc->mask);
272 dwc_complete_all(dw, dwc);
273 return;
274 }
275
276 dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
277
278 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
279 if (desc->lli.llp == llp)
280 /* This one is currently in progress */
281 return;
282
283 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
284 if (child->lli.llp == llp)
285 /* Currently in progress */
286 return;
287
288 /*
289 * No descriptors so far seem to be in progress, i.e.
290 * this one must be done.
291 */
292 dwc_descriptor_complete(dwc, desc);
293 }
294
295 dev_err(&dwc->chan.dev,
296 "BUG: All descriptors done, but channel not idle!\n");
297
298 /* Try to continue after resetting the channel... */
299 channel_clear_bit(dw, CH_EN, dwc->mask);
300 while (dma_readl(dw, CH_EN) & dwc->mask)
301 cpu_relax();
302
303 if (!list_empty(&dwc->queue)) {
304 dwc_dostart(dwc, dwc_first_queued(dwc));
305 list_splice_init(&dwc->queue, &dwc->active_list);
306 }
307}
308
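dwc_scan_descriptors() infers completion from the channel's LLP register: descriptors finish in submission order, so everything queued ahead of the entry whose llp field matches the register's current value must be done. The same walk over a plain array, as a small model with illustrative names:

#include <stddef.h>

/* Return how many leading descriptors are complete, given the hardware's
 * current link pointer (i.e. the llp field of the in-flight entry).
 */
static size_t count_complete(const unsigned long *desc_llp, size_t n,
			     unsigned long current_llp)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (desc_llp[i] == current_llp)
			break;		/* still in progress */
	}
	return i;			/* everything before it is done */
}
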
309static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
310{
311 dev_printk(KERN_CRIT, &dwc->chan.dev,
312 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
313 lli->sar, lli->dar, lli->llp,
314 lli->ctlhi, lli->ctllo);
315}
316
317static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
318{
319 struct dw_desc *bad_desc;
320 struct dw_desc *child;
321
322 dwc_scan_descriptors(dw, dwc);
323
324 /*
325 * The descriptor currently at the head of the active list is
326 * borked. Since we don't have any way to report errors, we'll
327 * just have to scream loudly and try to carry on.
328 */
329 bad_desc = dwc_first_active(dwc);
330 list_del_init(&bad_desc->desc_node);
331 list_splice_init(&dwc->queue, dwc->active_list.prev);
332
333 /* Clear the error flag and try to restart the controller */
334 dma_writel(dw, CLEAR.ERROR, dwc->mask);
335 if (!list_empty(&dwc->active_list))
336 dwc_dostart(dwc, dwc_first_active(dwc));
337
338 /*
339 * KERN_CRITICAL may seem harsh, but since this only happens
340 * when someone submits a bad physical address in a
341 * descriptor, we should consider ourselves lucky that the
342 * controller flagged an error instead of scribbling over
343 * random memory locations.
344 */
345 dev_printk(KERN_CRIT, &dwc->chan.dev,
346 "Bad descriptor submitted for DMA!\n");
347 dev_printk(KERN_CRIT, &dwc->chan.dev,
348 " cookie: %d\n", bad_desc->txd.cookie);
349 dwc_dump_lli(dwc, &bad_desc->lli);
350 list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
351 dwc_dump_lli(dwc, &child->lli);
352
353 /* Pretend the descriptor completed successfully */
354 dwc_descriptor_complete(dwc, bad_desc);
355}
356
357static void dw_dma_tasklet(unsigned long data)
358{
359 struct dw_dma *dw = (struct dw_dma *)data;
360 struct dw_dma_chan *dwc;
361 u32 status_block;
362 u32 status_xfer;
363 u32 status_err;
364 int i;
365
366 status_block = dma_readl(dw, RAW.BLOCK);
367 status_xfer = dma_readl(dw, RAW.BLOCK);
368 status_err = dma_readl(dw, RAW.ERROR);
369
370 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
371 status_block, status_err);
372
373 for (i = 0; i < dw->dma.chancnt; i++) {
374 dwc = &dw->chan[i];
375 spin_lock(&dwc->lock);
376 if (status_err & (1 << i))
377 dwc_handle_error(dw, dwc);
378 else if ((status_block | status_xfer) & (1 << i))
379 dwc_scan_descriptors(dw, dwc);
380 spin_unlock(&dwc->lock);
381 }
382
383 /*
384 * Re-enable interrupts. Block Complete interrupts are only
385 * enabled if the INT_EN bit in the descriptor is set. This
386 * will trigger a scan before the whole list is done.
387 */
388 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
389 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
390 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
391}
392
393static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
394{
395 struct dw_dma *dw = dev_id;
396 u32 status;
397
398 dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
399 dma_readl(dw, STATUS_INT));
400
401 /*
402 * Just disable the interrupts. We'll turn them back on in the
403 * softirq handler.
404 */
405 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
406 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
407 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
408
409 status = dma_readl(dw, STATUS_INT);
410 if (status) {
411 dev_err(dw->dma.dev,
412 "BUG: Unexpected interrupts pending: 0x%x\n",
413 status);
414
415 /* Try to recover */
416 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
417 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
418 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
419 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
420 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
421 }
422
423 tasklet_schedule(&dw->tasklet);
424
425 return IRQ_HANDLED;
426}
427
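The interrupt handler itself only masks the controller's interrupt sources and schedules a tasklet; the heavy descriptor scanning happens in dw_dma_tasklet(), which unmasks them again when done. A generic sketch of that top-half/bottom-half split (the structure and names are illustrative, not the driver's):

#include <linux/interrupt.h>

struct example_dmac {
	struct tasklet_struct tasklet;
	/* register mappings, channels, ... */
};

static void example_tasklet(unsigned long data)
{
	struct example_dmac *ed = (struct example_dmac *)data;

	/* scan completed descriptors here, outside hard-IRQ context,
	 * then unmask the device's interrupt sources again */
	(void)ed;
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_dmac *ed = dev_id;

	/* mask further interrupts; the tasklet will unmask when it is done */
	tasklet_schedule(&ed->tasklet);
	return IRQ_HANDLED;
}

static void example_init(struct example_dmac *ed)
{
	tasklet_init(&ed->tasklet, example_tasklet, (unsigned long)ed);
}
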
428/*----------------------------------------------------------------------*/
429
430static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
431{
432 struct dw_desc *desc = txd_to_dw_desc(tx);
433 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
434 dma_cookie_t cookie;
435
436 spin_lock_bh(&dwc->lock);
437 cookie = dwc_assign_cookie(dwc, desc);
438
439 /*
440 * REVISIT: We should attempt to chain as many descriptors as
441 * possible, perhaps even appending to those already submitted
442 * for DMA. But this is hard to do in a race-free manner.
443 */
444 if (list_empty(&dwc->active_list)) {
445 dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
446 desc->txd.cookie);
447 dwc_dostart(dwc, desc);
448 list_add_tail(&desc->desc_node, &dwc->active_list);
449 } else {
450 dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
451 desc->txd.cookie);
452
453 list_add_tail(&desc->desc_node, &dwc->queue);
454 }
455
456 spin_unlock_bh(&dwc->lock);
457
458 return cookie;
459}
460
461static struct dma_async_tx_descriptor *
462dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
463 size_t len, unsigned long flags)
464{
465 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
466 struct dw_desc *desc;
467 struct dw_desc *first;
468 struct dw_desc *prev;
469 size_t xfer_count;
470 size_t offset;
471 unsigned int src_width;
472 unsigned int dst_width;
473 u32 ctllo;
474
475 dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
476 dest, src, len, flags);
477
478 if (unlikely(!len)) {
479 dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
480 return NULL;
481 }
482
483 /*
484 * We can be a lot more clever here, but this should take care
485 * of the most common optimization.
486 */
487 if (!((src | dest | len) & 3))
488 src_width = dst_width = 2;
489 else if (!((src | dest | len) & 1))
490 src_width = dst_width = 1;
491 else
492 src_width = dst_width = 0;
493
494 ctllo = DWC_DEFAULT_CTLLO
495 | DWC_CTLL_DST_WIDTH(dst_width)
496 | DWC_CTLL_SRC_WIDTH(src_width)
497 | DWC_CTLL_DST_INC
498 | DWC_CTLL_SRC_INC
499 | DWC_CTLL_FC_M2M;
500 prev = first = NULL;
501
502 for (offset = 0; offset < len; offset += xfer_count << src_width) {
503 xfer_count = min_t(size_t, (len - offset) >> src_width,
504 DWC_MAX_COUNT);
505
506 desc = dwc_desc_get(dwc);
507 if (!desc)
508 goto err_desc_get;
509
510 desc->lli.sar = src + offset;
511 desc->lli.dar = dest + offset;
512 desc->lli.ctllo = ctllo;
513 desc->lli.ctlhi = xfer_count;
514
515 if (!first) {
516 first = desc;
517 } else {
518 prev->lli.llp = desc->txd.phys;
519 dma_sync_single_for_device(chan->dev.parent,
520 prev->txd.phys, sizeof(prev->lli),
521 DMA_TO_DEVICE);
522 list_add_tail(&desc->desc_node,
523 &first->txd.tx_list);
524 }
525 prev = desc;
526 }
527
528
529 if (flags & DMA_PREP_INTERRUPT)
530 /* Trigger interrupt after last block */
531 prev->lli.ctllo |= DWC_CTLL_INT_EN;
532
533 prev->lli.llp = 0;
534 dma_sync_single_for_device(chan->dev.parent,
535 prev->txd.phys, sizeof(prev->lli),
536 DMA_TO_DEVICE);
537
538 first->txd.flags = flags;
539 first->len = len;
540
541 return &first->txd;
542
543err_desc_get:
544 dwc_desc_put(dwc, first);
545 return NULL;
546}
547
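The memcpy preparation picks the widest transfer width that source, destination and length all allow (32-bit when everything is 4-byte aligned, 16-bit when only 2-byte aligned, bytes otherwise) and then splits the copy into blocks of at most DWC_MAX_COUNT transfers. That arithmetic in isolation, as plain C with helper names of our own:

#include <stddef.h>

#define EXAMPLE_MAX_COUNT 2048u		/* transfers per descriptor, as above */

/* log2 of the transfer width in bytes: 2 => 32-bit, 1 => 16-bit, 0 => bytes */
static unsigned int width_shift(unsigned long src, unsigned long dst, size_t len)
{
	if (!((src | dst | len) & 3))
		return 2;
	if (!((src | dst | len) & 1))
		return 1;
	return 0;
}

/* Number of hardware descriptors a copy of @len bytes will need. */
static size_t descriptors_needed(unsigned long src, unsigned long dst, size_t len)
{
	unsigned int shift = width_shift(src, dst, len);
	size_t transfers = len >> shift;	/* exact: len matches the alignment */

	return (transfers + EXAMPLE_MAX_COUNT - 1) / EXAMPLE_MAX_COUNT;
}
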
548static struct dma_async_tx_descriptor *
549dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
550 unsigned int sg_len, enum dma_data_direction direction,
551 unsigned long flags)
552{
553 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
554 struct dw_dma_slave *dws = dwc->dws;
555 struct dw_desc *prev;
556 struct dw_desc *first;
557 u32 ctllo;
558 dma_addr_t reg;
559 unsigned int reg_width;
560 unsigned int mem_width;
561 unsigned int i;
562 struct scatterlist *sg;
563 size_t total_len = 0;
564
565 dev_vdbg(&chan->dev, "prep_dma_slave\n");
566
567 if (unlikely(!dws || !sg_len))
568 return NULL;
569
570 reg_width = dws->slave.reg_width;
571 prev = first = NULL;
572
573 sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
574
575 switch (direction) {
576 case DMA_TO_DEVICE:
577 ctllo = (DWC_DEFAULT_CTLLO
578 | DWC_CTLL_DST_WIDTH(reg_width)
579 | DWC_CTLL_DST_FIX
580 | DWC_CTLL_SRC_INC
581 | DWC_CTLL_FC_M2P);
582 reg = dws->slave.tx_reg;
583 for_each_sg(sgl, sg, sg_len, i) {
584 struct dw_desc *desc;
585 u32 len;
586 u32 mem;
587
588 desc = dwc_desc_get(dwc);
589 if (!desc) {
590 dev_err(&chan->dev,
591 "not enough descriptors available\n");
592 goto err_desc_get;
593 }
594
595 mem = sg_phys(sg);
596 len = sg_dma_len(sg);
597 mem_width = 2;
598 if (unlikely(mem & 3 || len & 3))
599 mem_width = 0;
600
601 desc->lli.sar = mem;
602 desc->lli.dar = reg;
603 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
604 desc->lli.ctlhi = len >> mem_width;
605
606 if (!first) {
607 first = desc;
608 } else {
609 prev->lli.llp = desc->txd.phys;
610 dma_sync_single_for_device(chan->dev.parent,
611 prev->txd.phys,
612 sizeof(prev->lli),
613 DMA_TO_DEVICE);
614 list_add_tail(&desc->desc_node,
615 &first->txd.tx_list);
616 }
617 prev = desc;
618 total_len += len;
619 }
620 break;
621 case DMA_FROM_DEVICE:
622 ctllo = (DWC_DEFAULT_CTLLO
623 | DWC_CTLL_SRC_WIDTH(reg_width)
624 | DWC_CTLL_DST_INC
625 | DWC_CTLL_SRC_FIX
626 | DWC_CTLL_FC_P2M);
627
628 reg = dws->slave.rx_reg;
629 for_each_sg(sgl, sg, sg_len, i) {
630 struct dw_desc *desc;
631 u32 len;
632 u32 mem;
633
634 desc = dwc_desc_get(dwc);
635 if (!desc) {
636 dev_err(&chan->dev,
637 "not enough descriptors available\n");
638 goto err_desc_get;
639 }
640
641 mem = sg_phys(sg);
642 len = sg_dma_len(sg);
643 mem_width = 2;
644 if (unlikely(mem & 3 || len & 3))
645 mem_width = 0;
646
647 desc->lli.sar = reg;
648 desc->lli.dar = mem;
649 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
650 desc->lli.ctlhi = len >> reg_width;
651
652 if (!first) {
653 first = desc;
654 } else {
655 prev->lli.llp = desc->txd.phys;
656 dma_sync_single_for_device(chan->dev.parent,
657 prev->txd.phys,
658 sizeof(prev->lli),
659 DMA_TO_DEVICE);
660 list_add_tail(&desc->desc_node,
661 &first->txd.tx_list);
662 }
663 prev = desc;
664 total_len += len;
665 }
666 break;
667 default:
668 return NULL;
669 }
670
671 if (flags & DMA_PREP_INTERRUPT)
672 /* Trigger interrupt after last block */
673 prev->lli.ctllo |= DWC_CTLL_INT_EN;
674
675 prev->lli.llp = 0;
676 dma_sync_single_for_device(chan->dev.parent,
677 prev->txd.phys, sizeof(prev->lli),
678 DMA_TO_DEVICE);
679
680 first->len = total_len;
681
682 return &first->txd;
683
684err_desc_get:
685 dwc_desc_put(dwc, first);
686 return NULL;
687}
688
689static void dwc_terminate_all(struct dma_chan *chan)
690{
691 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
692 struct dw_dma *dw = to_dw_dma(chan->device);
693 struct dw_desc *desc, *_desc;
694 LIST_HEAD(list);
695
696 /*
697 * This is only called when something went wrong elsewhere, so
698 * we don't really care about the data. Just disable the
699 * channel. We still have to poll the channel enable bit due
700 * to AHB/HSB limitations.
701 */
702 spin_lock_bh(&dwc->lock);
703
704 channel_clear_bit(dw, CH_EN, dwc->mask);
705
706 while (dma_readl(dw, CH_EN) & dwc->mask)
707 cpu_relax();
708
709 /* active_list entries will end up before queued entries */
710 list_splice_init(&dwc->queue, &list);
711 list_splice_init(&dwc->active_list, &list);
712
713 spin_unlock_bh(&dwc->lock);
714
715 /* Flush all pending and queued descriptors */
716 list_for_each_entry_safe(desc, _desc, &list, desc_node)
717 dwc_descriptor_complete(dwc, desc);
718}
719
720static enum dma_status
721dwc_is_tx_complete(struct dma_chan *chan,
722 dma_cookie_t cookie,
723 dma_cookie_t *done, dma_cookie_t *used)
724{
725 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
726 dma_cookie_t last_used;
727 dma_cookie_t last_complete;
728 int ret;
729
730 last_complete = dwc->completed;
731 last_used = chan->cookie;
732
733 ret = dma_async_is_complete(cookie, last_complete, last_used);
734 if (ret != DMA_SUCCESS) {
735 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
736
737 last_complete = dwc->completed;
738 last_used = chan->cookie;
739
740 ret = dma_async_is_complete(cookie, last_complete, last_used);
741 }
742
743 if (done)
744 *done = last_complete;
745 if (used)
746 *used = last_used;
747
748 return ret;
749}
750
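dwc_is_tx_complete() ends up as a comparison of the queried cookie against the last completed and last issued cookies; dma_async_is_complete() in the dmaengine core amounts to roughly the window test below, which also copes with the cookie counter wrapping back to 1 (as dwc_assign_cookie() does). A plain-C model with illustrative names:

typedef int example_cookie_t;

enum example_status { EXAMPLE_IN_PROGRESS, EXAMPLE_SUCCESS };

/* @cookie is complete if it falls inside the completed window formed by
 * last_complete/last_used, taking counter wrap-around into account.
 */
static enum example_status example_cookie_status(example_cookie_t cookie,
		example_cookie_t last_complete, example_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if (cookie <= last_complete || cookie > last_used)
			return EXAMPLE_SUCCESS;
	} else {
		if (cookie <= last_complete && cookie > last_used)
			return EXAMPLE_SUCCESS;
	}
	return EXAMPLE_IN_PROGRESS;
}
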
751static void dwc_issue_pending(struct dma_chan *chan)
752{
753 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
754
755 spin_lock_bh(&dwc->lock);
756 if (!list_empty(&dwc->queue))
757 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
758 spin_unlock_bh(&dwc->lock);
759}
760
761static int dwc_alloc_chan_resources(struct dma_chan *chan,
762 struct dma_client *client)
763{
764 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
765 struct dw_dma *dw = to_dw_dma(chan->device);
766 struct dw_desc *desc;
767 struct dma_slave *slave;
768 struct dw_dma_slave *dws;
769 int i;
770 u32 cfghi;
771 u32 cfglo;
772
773 dev_vdbg(&chan->dev, "alloc_chan_resources\n");
774
775 /* Channels doing slave DMA can only handle one client. */
776 if (dwc->dws || client->slave) {
777 if (chan->client_count)
778 return -EBUSY;
779 }
780
781 /* ASSERT: channel is idle */
782 if (dma_readl(dw, CH_EN) & dwc->mask) {
783 dev_dbg(&chan->dev, "DMA channel not idle?\n");
784 return -EIO;
785 }
786
787 dwc->completed = chan->cookie = 1;
788
789 cfghi = DWC_CFGH_FIFO_MODE;
790 cfglo = 0;
791
792 slave = client->slave;
793 if (slave) {
794 /*
795 * We need controller-specific data to set up slave
796 * transfers.
797 */
798 BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
799
800 dws = container_of(slave, struct dw_dma_slave, slave);
801
802 dwc->dws = dws;
803 cfghi = dws->cfg_hi;
804 cfglo = dws->cfg_lo;
805 } else {
806 dwc->dws = NULL;
807 }
808
809 channel_writel(dwc, CFG_LO, cfglo);
810 channel_writel(dwc, CFG_HI, cfghi);
811
812 /*
813 * NOTE: some controllers may have additional features that we
814 * need to initialize here, like "scatter-gather" (which
815 * doesn't mean what you think it means), and status writeback.
816 */
817
818 spin_lock_bh(&dwc->lock);
819 i = dwc->descs_allocated;
820 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
821 spin_unlock_bh(&dwc->lock);
822
823 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
824 if (!desc) {
825 dev_info(&chan->dev,
826 "only allocated %d descriptors\n", i);
827 spin_lock_bh(&dwc->lock);
828 break;
829 }
830
831 dma_async_tx_descriptor_init(&desc->txd, chan);
832 desc->txd.tx_submit = dwc_tx_submit;
833 desc->txd.flags = DMA_CTRL_ACK;
834 INIT_LIST_HEAD(&desc->txd.tx_list);
835 desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
836 sizeof(desc->lli), DMA_TO_DEVICE);
837 dwc_desc_put(dwc, desc);
838
839 spin_lock_bh(&dwc->lock);
840 i = ++dwc->descs_allocated;
841 }
842
843 /* Enable interrupts */
844 channel_set_bit(dw, MASK.XFER, dwc->mask);
845 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
846 channel_set_bit(dw, MASK.ERROR, dwc->mask);
847
848 spin_unlock_bh(&dwc->lock);
849
850 dev_dbg(&chan->dev,
851 "alloc_chan_resources allocated %d descriptors\n", i);
852
853 return i;
854}
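/*
 * A hedged sketch (assumptions, not part of this patch): board code that
 * wants slave DMA is expected to wrap the generic struct dma_slave in a
 * struct dw_dma_slave so that dwc_alloc_chan_resources() above can pick
 * up the controller-specific CFG_LO/CFG_HI values.  The values below are
 * placeholders; the real cfg_hi/cfg_lo encoding and the dma_dev pointer
 * come from the platform, not from this driver.
 */
static struct dw_dma_slave example_dws __maybe_unused = {
	.slave = {
		.dma_dev = NULL,	/* would point at the dw_dmac platform device's dev */
	},
	.cfg_hi = 0,			/* peripheral handshake routing for CFG_HI */
	.cfg_lo = 0,
};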
855
856static void dwc_free_chan_resources(struct dma_chan *chan)
857{
858 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
859 struct dw_dma *dw = to_dw_dma(chan->device);
860 struct dw_desc *desc, *_desc;
861 LIST_HEAD(list);
862
863 dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
864 dwc->descs_allocated);
865
866 /* ASSERT: channel is idle */
867 BUG_ON(!list_empty(&dwc->active_list));
868 BUG_ON(!list_empty(&dwc->queue));
869 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
870
871 spin_lock_bh(&dwc->lock);
872 list_splice_init(&dwc->free_list, &list);
873 dwc->descs_allocated = 0;
874 dwc->dws = NULL;
875
876 /* Disable interrupts */
877 channel_clear_bit(dw, MASK.XFER, dwc->mask);
878 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
879 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
880
881 spin_unlock_bh(&dwc->lock);
882
883 list_for_each_entry_safe(desc, _desc, &list, desc_node) {
884 dev_vdbg(&chan->dev, " freeing descriptor %p\n", desc);
885 dma_unmap_single(chan->dev.parent, desc->txd.phys,
886 sizeof(desc->lli), DMA_TO_DEVICE);
887 kfree(desc);
888 }
889
890 dev_vdbg(&chan->dev, "free_chan_resources done\n");
891}
892
893/*----------------------------------------------------------------------*/
894
895static void dw_dma_off(struct dw_dma *dw)
896{
897 dma_writel(dw, CFG, 0);
898
899 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
900 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
901 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
902 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
903 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
904
905 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
906 cpu_relax();
907}
908
909static int __init dw_probe(struct platform_device *pdev)
910{
911 struct dw_dma_platform_data *pdata;
912 struct resource *io;
913 struct dw_dma *dw;
914 size_t size;
915 int irq;
916 int err;
917 int i;
918
919 pdata = pdev->dev.platform_data;
920 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
921 return -EINVAL;
922
923 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
924 if (!io)
925 return -EINVAL;
926
927 irq = platform_get_irq(pdev, 0);
928 if (irq < 0)
929 return irq;
930
931 size = sizeof(struct dw_dma);
932 size += pdata->nr_channels * sizeof(struct dw_dma_chan);
933 dw = kzalloc(size, GFP_KERNEL);
934 if (!dw)
935 return -ENOMEM;
936
937 if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
938 err = -EBUSY;
939 goto err_kfree;
940 }
941
942 memset(dw, 0, sizeof *dw);
943
944 dw->regs = ioremap(io->start, DW_REGLEN);
945 if (!dw->regs) {
946 err = -ENOMEM;
947 goto err_release_r;
948 }
949
950 dw->clk = clk_get(&pdev->dev, "hclk");
951 if (IS_ERR(dw->clk)) {
952 err = PTR_ERR(dw->clk);
953 goto err_clk;
954 }
955 clk_enable(dw->clk);
956
957 /* force dma off, just in case */
958 dw_dma_off(dw);
959
960 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
961 if (err)
962 goto err_irq;
963
964 platform_set_drvdata(pdev, dw);
965
966 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
967
968 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
969
970 INIT_LIST_HEAD(&dw->dma.channels);
971 for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
972 struct dw_dma_chan *dwc = &dw->chan[i];
973
974 dwc->chan.device = &dw->dma;
975 dwc->chan.cookie = dwc->completed = 1;
976 dwc->chan.chan_id = i;
977 list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
978
979 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
980 spin_lock_init(&dwc->lock);
981 dwc->mask = 1 << i;
982
983 INIT_LIST_HEAD(&dwc->active_list);
984 INIT_LIST_HEAD(&dwc->queue);
985 INIT_LIST_HEAD(&dwc->free_list);
986
987 channel_clear_bit(dw, CH_EN, dwc->mask);
988 }
989
990 /* Clear/disable all interrupts on all channels. */
991 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
992 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
993 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
994 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
995 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
996
997 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
998 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
999 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1000 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1001 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1002
1003 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1004 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1005 dw->dma.dev = &pdev->dev;
1006 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1007 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1008
1009 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1010
1011 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1012 dw->dma.device_terminate_all = dwc_terminate_all;
1013
1014 dw->dma.device_is_tx_complete = dwc_is_tx_complete;
1015 dw->dma.device_issue_pending = dwc_issue_pending;
1016
1017 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1018
1019 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
1020 pdev->dev.bus_id, dw->dma.chancnt);
1021
1022 dma_async_device_register(&dw->dma);
1023
1024 return 0;
1025
1026err_irq:
1027 clk_disable(dw->clk);
1028 clk_put(dw->clk);
1029err_clk:
1030 iounmap(dw->regs);
1031 dw->regs = NULL;
1032err_release_r:
1033 release_resource(io);
1034err_kfree:
1035 kfree(dw);
1036 return err;
1037}
1038
1039static int __exit dw_remove(struct platform_device *pdev)
1040{
1041 struct dw_dma *dw = platform_get_drvdata(pdev);
1042 struct dw_dma_chan *dwc, *_dwc;
1043 struct resource *io;
1044
1045 dw_dma_off(dw);
1046 dma_async_device_unregister(&dw->dma);
1047
1048 free_irq(platform_get_irq(pdev, 0), dw);
1049 tasklet_kill(&dw->tasklet);
1050
1051 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1052 chan.device_node) {
1053 list_del(&dwc->chan.device_node);
1054 channel_clear_bit(dw, CH_EN, dwc->mask);
1055 }
1056
1057 clk_disable(dw->clk);
1058 clk_put(dw->clk);
1059
1060 iounmap(dw->regs);
1061 dw->regs = NULL;
1062
1063 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1064 release_mem_region(io->start, DW_REGLEN);
1065
1066 kfree(dw);
1067
1068 return 0;
1069}
1070
1071static void dw_shutdown(struct platform_device *pdev)
1072{
1073 struct dw_dma *dw = platform_get_drvdata(pdev);
1074
1075 dw_dma_off(platform_get_drvdata(pdev));
1076 clk_disable(dw->clk);
1077}
1078
1079static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
1080{
1081 struct dw_dma *dw = platform_get_drvdata(pdev);
1082
1083 dw_dma_off(platform_get_drvdata(pdev));
1084 clk_disable(dw->clk);
1085 return 0;
1086}
1087
1088static int dw_resume_early(struct platform_device *pdev)
1089{
1090 struct dw_dma *dw = platform_get_drvdata(pdev);
1091
1092 clk_enable(dw->clk);
1093 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1094 return 0;
1095
1096}
1097
1098static struct platform_driver dw_driver = {
1099 .remove = __exit_p(dw_remove),
1100 .shutdown = dw_shutdown,
1101 .suspend_late = dw_suspend_late,
1102 .resume_early = dw_resume_early,
1103 .driver = {
1104 .name = "dw_dmac",
1105 },
1106};
1107
1108static int __init dw_init(void)
1109{
1110 return platform_driver_probe(&dw_driver, dw_probe);
1111}
1112module_init(dw_init);
1113
1114static void __exit dw_exit(void)
1115{
1116 platform_driver_unregister(&dw_driver);
1117}
1118module_exit(dw_exit);
1119
1120MODULE_LICENSE("GPL v2");
1121MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1122MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
new file mode 100644
index 000000000000..00fdd187bb0c
--- /dev/null
+++ b/drivers/dma/dw_dmac_regs.h
@@ -0,0 +1,225 @@
1/*
2 * Driver for the Synopsys DesignWare AHB DMA Controller
3 *
4 * Copyright (C) 2005-2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/dw_dmac.h>
12
13#define DW_DMA_MAX_NR_CHANNELS 8
14
15/*
16 * Redefine this macro to handle differences between 32- and 64-bit
17 * addressing, big vs. little endian, etc.
18 */
19#define DW_REG(name) u32 name; u32 __pad_##name
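/*
 * Illustration only (not part of this patch): with the 32-bit definition
 * above, DW_REG(SAR) inside a struct expands to
 *
 *	u32 SAR; u32 __pad_SAR;
 *
 * so every register declared this way occupies an 8-byte slot, which is
 * what lets the same struct layout describe the controller's register
 * map below.
 */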
20
21/* Hardware register definitions. */
22struct dw_dma_chan_regs {
23 DW_REG(SAR); /* Source Address Register */
24 DW_REG(DAR); /* Destination Address Register */
25 DW_REG(LLP); /* Linked List Pointer */
26 u32 CTL_LO; /* Control Register Low */
27 u32 CTL_HI; /* Control Register High */
28 DW_REG(SSTAT);
29 DW_REG(DSTAT);
30 DW_REG(SSTATAR);
31 DW_REG(DSTATAR);
32 u32 CFG_LO; /* Configuration Register Low */
33 u32 CFG_HI; /* Configuration Register High */
34 DW_REG(SGR);
35 DW_REG(DSR);
36};
37
38struct dw_dma_irq_regs {
39 DW_REG(XFER);
40 DW_REG(BLOCK);
41 DW_REG(SRC_TRAN);
42 DW_REG(DST_TRAN);
43 DW_REG(ERROR);
44};
45
46struct dw_dma_regs {
47 /* per-channel registers */
48 struct dw_dma_chan_regs CHAN[DW_DMA_MAX_NR_CHANNELS];
49
50 /* irq handling */
51 struct dw_dma_irq_regs RAW; /* r */
52 struct dw_dma_irq_regs STATUS; /* r (raw & mask) */
53 struct dw_dma_irq_regs MASK; /* rw (set = irq enabled) */
54 struct dw_dma_irq_regs CLEAR; /* w (ack, affects "raw") */
55
56 DW_REG(STATUS_INT); /* r */
57
58 /* software handshaking */
59 DW_REG(REQ_SRC);
60 DW_REG(REQ_DST);
61 DW_REG(SGL_REQ_SRC);
62 DW_REG(SGL_REQ_DST);
63 DW_REG(LAST_SRC);
64 DW_REG(LAST_DST);
65
66 /* miscellaneous */
67 DW_REG(CFG);
68 DW_REG(CH_EN);
69 DW_REG(ID);
70 DW_REG(TEST);
71
72 /* optional encoded params, 0x3c8..0x3 */
73};
74
75/* Bitfields in CTL_LO */
76#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
77#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
78#define DWC_CTLL_SRC_WIDTH(n) ((n)<<4)
79#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
80#define DWC_CTLL_DST_DEC (1<<7)
81#define DWC_CTLL_DST_FIX (2<<7)
 82#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */
83#define DWC_CTLL_SRC_DEC (1<<9)
84#define DWC_CTLL_SRC_FIX (2<<9)
85#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
90#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
91#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
92#define DWC_CTLL_FC_P2P (3 << 20) /* periph-to-periph */
93/* plus 4 transfer types for peripheral-as-flow-controller */
94#define DWC_CTLL_DMS(n) ((n)<<23) /* dst master select */
95#define DWC_CTLL_SMS(n) ((n)<<25) /* src master select */
96#define DWC_CTLL_LLP_D_EN (1 << 27) /* dest block chain */
97#define DWC_CTLL_LLP_S_EN (1 << 28) /* src block chain */
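/*
 * Worked example (illustration only, not part of this patch): a CTL_LO
 * value for a simple memory-to-memory block with incrementing source and
 * destination, linked-list chaining on both sides and the per-block
 * interrupt armed would be OR'ed together from the bits above, e.g.
 *
 *	DWC_CTLL_INT_EN | DWC_CTLL_DST_INC | DWC_CTLL_SRC_INC
 *		| DWC_CTLL_FC_M2M | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN
 *
 * with the transfer width and burst size fields added via
 * DWC_CTLL_{DST,SRC}_WIDTH() and DWC_CTLL_{DST,SRC}_MSIZE() as needed.
 */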
98
99/* Bitfields in CTL_HI */
100#define DWC_CTLH_DONE 0x00001000
101#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
102
103/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
104#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
105#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
106#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
107#define DWC_CFGL_HS_SRC (1 << 11) /* handshake w/src */
108#define DWC_CFGL_MAX_BURST(x) ((x) << 20)
109#define DWC_CFGL_RELOAD_SAR (1 << 30)
110#define DWC_CFGL_RELOAD_DAR (1 << 31)
111
112/* Bitfields in CFG_HI. Platform-configurable bits are in <linux/dw_dmac.h> */
113#define DWC_CFGH_DS_UPD_EN (1 << 5)
114#define DWC_CFGH_SS_UPD_EN (1 << 6)
115
116/* Bitfields in SGR */
117#define DWC_SGR_SGI(x) ((x) << 0)
118#define DWC_SGR_SGC(x) ((x) << 20)
119
120/* Bitfields in DSR */
121#define DWC_DSR_DSI(x) ((x) << 0)
122#define DWC_DSR_DSC(x) ((x) << 20)
123
124/* Bitfields in CFG */
125#define DW_CFG_DMA_EN (1 << 0)
126
127#define DW_REGLEN 0x400
128
129struct dw_dma_chan {
130 struct dma_chan chan;
131 void __iomem *ch_regs;
132 u8 mask;
133
134 spinlock_t lock;
135
136 /* these other elements are all protected by lock */
137 dma_cookie_t completed;
138 struct list_head active_list;
139 struct list_head queue;
140 struct list_head free_list;
141
142 struct dw_dma_slave *dws;
143
144 unsigned int descs_allocated;
145};
146
147static inline struct dw_dma_chan_regs __iomem *
148__dwc_regs(struct dw_dma_chan *dwc)
149{
150 return dwc->ch_regs;
151}
152
153#define channel_readl(dwc, name) \
154 __raw_readl(&(__dwc_regs(dwc)->name))
155#define channel_writel(dwc, name, val) \
156 __raw_writel((val), &(__dwc_regs(dwc)->name))
157
158static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
159{
160 return container_of(chan, struct dw_dma_chan, chan);
161}
162
163
164struct dw_dma {
165 struct dma_device dma;
166 void __iomem *regs;
167 struct tasklet_struct tasklet;
168 struct clk *clk;
169
170 u8 all_chan_mask;
171
172 struct dw_dma_chan chan[0];
173};
174
175static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
176{
177 return dw->regs;
178}
179
180#define dma_readl(dw, name) \
181 __raw_readl(&(__dw_regs(dw)->name))
182#define dma_writel(dw, name, val) \
183 __raw_writel((val), &(__dw_regs(dw)->name))
184
185#define channel_set_bit(dw, reg, mask) \
186 dma_writel(dw, reg, ((mask) << 8) | (mask))
187#define channel_clear_bit(dw, reg, mask) \
188 dma_writel(dw, reg, ((mask) << 8) | 0)
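/*
 * Illustration only (not part of this patch): as the two macros above
 * imply, CH_EN and the MASK.* registers use a write-enable scheme --
 * bits [15:8] of the written value select which of bits [7:0] actually
 * get updated, so a single channel can be flipped without a
 * read-modify-write.  With mask = 0x04 (channel 2):
 *
 *	channel_set_bit(dw, CH_EN, 0x04)	writes 0x0404 (enable ch2)
 *	channel_clear_bit(dw, CH_EN, 0x04)	writes 0x0400 (disable ch2)
 *
 * and the other channels' bits are left untouched.
 */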
189
190static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
191{
192 return container_of(ddev, struct dw_dma, dma);
193}
194
195/* LLI == Linked List Item; a.k.a. DMA block descriptor */
196struct dw_lli {
197 /* values that are not changed by hardware */
198 dma_addr_t sar;
199 dma_addr_t dar;
200 dma_addr_t llp; /* chain to next lli */
201 u32 ctllo;
202 /* values that may get written back: */
203 u32 ctlhi;
204 /* sstat and dstat can snapshot peripheral register state.
205 * silicon config may discard either or both...
206 */
207 u32 sstat;
208 u32 dstat;
209};
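/*
 * A sketch (not part of this patch): multi-block transfers are built by
 * chaining dw_lli blocks through ->llp -- each block's llp holds the DMA
 * address of the next block's lli (desc->txd.phys, since the lli is the
 * first member of struct dw_desc below), and the prep routines in
 * dw_dmac.c write 0 into the last block's llp:
 *
 *	desc0->lli.llp = desc1->txd.phys;
 *	desc1->lli.llp = desc2->txd.phys;
 *	desc2->lli.llp = 0;		(end of chain)
 */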
210
211struct dw_desc {
212 /* FIRST values the hardware uses */
213 struct dw_lli lli;
214
215 /* THEN values for driver housekeeping */
216 struct list_head desc_node;
217 struct dma_async_tx_descriptor txd;
218 size_t len;
219};
220
221static inline struct dw_desc *
222txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
223{
224 return container_of(txd, struct dw_desc, txd);
225}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 054eabffc185..c0059ca58340 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -366,7 +366,8 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
366 * 366 *
367 * Return - The number of descriptors allocated. 367 * Return - The number of descriptors allocated.
368 */ 368 */
369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) 369static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
370 struct dma_client *client)
370{ 371{
371 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 372 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
372 LIST_HEAD(tmp_list); 373 LIST_HEAD(tmp_list);
@@ -809,8 +810,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
809 if (!src) { 810 if (!src) {
810 dev_err(fsl_chan->dev, 811 dev_err(fsl_chan->dev,
811 "selftest: Cannot alloc memory for test!\n"); 812 "selftest: Cannot alloc memory for test!\n");
812 err = -ENOMEM; 813 return -ENOMEM;
813 goto out;
814 } 814 }
815 815
816 dest = src + test_size; 816 dest = src + test_size;
@@ -820,7 +820,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
820 820
821 chan = &fsl_chan->common; 821 chan = &fsl_chan->common;
822 822
823 if (fsl_dma_alloc_chan_resources(chan) < 1) { 823 if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
824 dev_err(fsl_chan->dev, 824 dev_err(fsl_chan->dev,
825 "selftest: Cannot alloc resources for DMA\n"); 825 "selftest: Cannot alloc resources for DMA\n");
826 err = -ENODEV; 826 err = -ENODEV;
@@ -842,13 +842,13 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
842 if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { 842 if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
843 dev_err(fsl_chan->dev, "selftest: Time out!\n"); 843 dev_err(fsl_chan->dev, "selftest: Time out!\n");
844 err = -ENODEV; 844 err = -ENODEV;
845 goto out; 845 goto free_resources;
846 } 846 }
847 847
848 /* Test free and re-alloc channel resources */ 848 /* Test free and re-alloc channel resources */
849 fsl_dma_free_chan_resources(chan); 849 fsl_dma_free_chan_resources(chan);
850 850
851 if (fsl_dma_alloc_chan_resources(chan) < 1) { 851 if (fsl_dma_alloc_chan_resources(chan, NULL) < 1) {
852 dev_err(fsl_chan->dev, 852 dev_err(fsl_chan->dev,
853 "selftest: Cannot alloc resources for DMA\n"); 853 "selftest: Cannot alloc resources for DMA\n");
854 err = -ENODEV; 854 err = -ENODEV;
@@ -927,8 +927,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
927 if (!new_fsl_chan) { 927 if (!new_fsl_chan) {
928 dev_err(&dev->dev, "No free memory for allocating " 928 dev_err(&dev->dev, "No free memory for allocating "
929 "dma channels!\n"); 929 "dma channels!\n");
930 err = -ENOMEM; 930 return -ENOMEM;
931 goto err;
932 } 931 }
933 932
934 /* get dma channel register base */ 933 /* get dma channel register base */
@@ -936,7 +935,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
936 if (err) { 935 if (err) {
937 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 936 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
938 dev->node->full_name); 937 dev->node->full_name);
939 goto err; 938 goto err_no_reg;
940 } 939 }
941 940
942 new_fsl_chan->feature = *(u32 *)match->data; 941 new_fsl_chan->feature = *(u32 *)match->data;
@@ -958,7 +957,7 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
958 dev_err(&dev->dev, "There is no %d channel!\n", 957 dev_err(&dev->dev, "There is no %d channel!\n",
959 new_fsl_chan->id); 958 new_fsl_chan->id);
960 err = -EINVAL; 959 err = -EINVAL;
961 goto err; 960 goto err_no_chan;
962 } 961 }
963 fdev->chan[new_fsl_chan->id] = new_fsl_chan; 962 fdev->chan[new_fsl_chan->id] = new_fsl_chan;
964 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, 963 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
@@ -997,23 +996,26 @@ static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
997 if (err) { 996 if (err) {
998 dev_err(&dev->dev, "DMA channel %s request_irq error " 997 dev_err(&dev->dev, "DMA channel %s request_irq error "
999 "with return %d\n", dev->node->full_name, err); 998 "with return %d\n", dev->node->full_name, err);
1000 goto err; 999 goto err_no_irq;
1001 } 1000 }
1002 } 1001 }
1003 1002
1004 err = fsl_dma_self_test(new_fsl_chan); 1003 err = fsl_dma_self_test(new_fsl_chan);
1005 if (err) 1004 if (err)
1006 goto err; 1005 goto err_self_test;
1007 1006
1008 dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, 1007 dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
1009 match->compatible, new_fsl_chan->irq); 1008 match->compatible, new_fsl_chan->irq);
1010 1009
1011 return 0; 1010 return 0;
1012err: 1011
1013 dma_halt(new_fsl_chan); 1012err_self_test:
1014 iounmap(new_fsl_chan->reg_base);
1015 free_irq(new_fsl_chan->irq, new_fsl_chan); 1013 free_irq(new_fsl_chan->irq, new_fsl_chan);
1014err_no_irq:
1016 list_del(&new_fsl_chan->common.device_node); 1015 list_del(&new_fsl_chan->common.device_node);
1016err_no_chan:
1017 iounmap(new_fsl_chan->reg_base);
1018err_no_reg:
1017 kfree(new_fsl_chan); 1019 kfree(new_fsl_chan);
1018 return err; 1020 return err;
1019} 1021}
@@ -1054,8 +1056,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1054 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); 1056 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
1055 if (!fdev) { 1057 if (!fdev) {
1056 dev_err(&dev->dev, "No enough memory for 'priv'\n"); 1058 dev_err(&dev->dev, "No enough memory for 'priv'\n");
1057 err = -ENOMEM; 1059 return -ENOMEM;
1058 goto err;
1059 } 1060 }
1060 fdev->dev = &dev->dev; 1061 fdev->dev = &dev->dev;
1061 INIT_LIST_HEAD(&fdev->common.channels); 1062 INIT_LIST_HEAD(&fdev->common.channels);
@@ -1065,7 +1066,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1065 if (err) { 1066 if (err) {
1066 dev_err(&dev->dev, "Can't get %s property 'reg'\n", 1067 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
1067 dev->node->full_name); 1068 dev->node->full_name);
1068 goto err; 1069 goto err_no_reg;
1069 } 1070 }
1070 1071
1071 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " 1072 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
@@ -1103,6 +1104,7 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
1103 1104
1104err: 1105err:
1105 iounmap(fdev->reg_base); 1106 iounmap(fdev->reg_base);
1107err_no_reg:
1106 kfree(fdev); 1108 kfree(fdev);
1107 return err; 1109 return err;
1108} 1110}
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 16e0fd8facfb..9b16a3af9a0a 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -47,6 +47,16 @@ static struct pci_device_id ioat_pci_tbl[] = {
47 47
48 /* I/OAT v2 platforms */ 48 /* I/OAT v2 platforms */
49 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, 49 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
50
51 /* I/OAT v3 platforms */
52 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
53 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
54 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
55 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
56 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
57 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
58 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
59 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
50 { 0, } 60 { 0, }
51}; 61};
52 62
@@ -83,6 +93,11 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
83 if (device->dma && ioat_dca_enabled) 93 if (device->dma && ioat_dca_enabled)
84 device->dca = ioat2_dca_init(pdev, iobase); 94 device->dca = ioat2_dca_init(pdev, iobase);
85 break; 95 break;
96 case IOAT_VER_3_0:
97 device->dma = ioat_dma_probe(pdev, iobase);
98 if (device->dma && ioat_dca_enabled)
99 device->dca = ioat3_dca_init(pdev, iobase);
100 break;
86 default: 101 default:
87 err = -ENODEV; 102 err = -ENODEV;
88 break; 103 break;
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 9e922760b7ff..6cf622da0286 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -37,12 +37,18 @@
37#include "ioatdma_registers.h" 37#include "ioatdma_registers.h"
38 38
39/* 39/*
40 * Bit 16 of a tag map entry is the "valid" bit, if it is set then bits 0:15 40 * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
41 * contain the bit number of the APIC ID to map into the DCA tag. If the valid 41 * contain the bit number of the APIC ID to map into the DCA tag. If the valid
42 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag. 42 * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
43 */ 43 */
44#define DCA_TAG_MAP_VALID 0x80 44#define DCA_TAG_MAP_VALID 0x80
45 45
46#define DCA3_TAG_MAP_BIT_TO_INV 0x80
47#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
48#define DCA3_TAG_MAP_LITERAL_VAL 0x1
49
50#define DCA_TAG_MAP_MASK 0xDF
51
46/* 52/*
47 * "Legacy" DCA systems do not implement the DCA register set in the 53 * "Legacy" DCA systems do not implement the DCA register set in the
48 * I/OAT device. Software needs direct support for their tag mappings. 54 * I/OAT device. Software needs direct support for their tag mappings.
@@ -95,6 +101,7 @@ struct ioat_dca_slot {
95}; 101};
96 102
97#define IOAT_DCA_MAX_REQ 6 103#define IOAT_DCA_MAX_REQ 6
104#define IOAT3_DCA_MAX_REQ 2
98 105
99struct ioat_dca_priv { 106struct ioat_dca_priv {
100 void __iomem *iobase; 107 void __iomem *iobase;
@@ -171,7 +178,9 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
171 return -ENODEV; 178 return -ENODEV;
172} 179}
173 180
174static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu) 181static u8 ioat_dca_get_tag(struct dca_provider *dca,
182 struct device *dev,
183 int cpu)
175{ 184{
176 struct ioat_dca_priv *ioatdca = dca_priv(dca); 185 struct ioat_dca_priv *ioatdca = dca_priv(dca);
177 int i, apic_id, bit, value; 186 int i, apic_id, bit, value;
@@ -193,10 +202,26 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca, int cpu)
193 return tag; 202 return tag;
194} 203}
195 204
205static int ioat_dca_dev_managed(struct dca_provider *dca,
206 struct device *dev)
207{
208 struct ioat_dca_priv *ioatdca = dca_priv(dca);
209 struct pci_dev *pdev;
210 int i;
211
212 pdev = to_pci_dev(dev);
213 for (i = 0; i < ioatdca->max_requesters; i++) {
214 if (ioatdca->req_slots[i].pdev == pdev)
215 return 1;
216 }
217 return 0;
218}
219
196static struct dca_ops ioat_dca_ops = { 220static struct dca_ops ioat_dca_ops = {
197 .add_requester = ioat_dca_add_requester, 221 .add_requester = ioat_dca_add_requester,
198 .remove_requester = ioat_dca_remove_requester, 222 .remove_requester = ioat_dca_remove_requester,
199 .get_tag = ioat_dca_get_tag, 223 .get_tag = ioat_dca_get_tag,
224 .dev_managed = ioat_dca_dev_managed,
200}; 225};
201 226
202 227
@@ -207,6 +232,8 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
207 u8 *tag_map = NULL; 232 u8 *tag_map = NULL;
208 int i; 233 int i;
209 int err; 234 int err;
235 u8 version;
236 u8 max_requesters;
210 237
211 if (!system_has_dca_enabled(pdev)) 238 if (!system_has_dca_enabled(pdev))
212 return NULL; 239 return NULL;
@@ -237,15 +264,20 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
237 if (tag_map == NULL) 264 if (tag_map == NULL)
238 return NULL; 265 return NULL;
239 266
267 version = readb(iobase + IOAT_VER_OFFSET);
268 if (version == IOAT_VER_3_0)
269 max_requesters = IOAT3_DCA_MAX_REQ;
270 else
271 max_requesters = IOAT_DCA_MAX_REQ;
272
240 dca = alloc_dca_provider(&ioat_dca_ops, 273 dca = alloc_dca_provider(&ioat_dca_ops,
241 sizeof(*ioatdca) + 274 sizeof(*ioatdca) +
242 (sizeof(struct ioat_dca_slot) * IOAT_DCA_MAX_REQ)); 275 (sizeof(struct ioat_dca_slot) * max_requesters));
243 if (!dca) 276 if (!dca)
244 return NULL; 277 return NULL;
245 278
246 ioatdca = dca_priv(dca); 279 ioatdca = dca_priv(dca);
247 ioatdca->max_requesters = IOAT_DCA_MAX_REQ; 280 ioatdca->max_requesters = max_requesters;
248
249 ioatdca->dca_base = iobase + 0x54; 281 ioatdca->dca_base = iobase + 0x54;
250 282
251 /* copy over the APIC ID to DCA tag mapping */ 283 /* copy over the APIC ID to DCA tag mapping */
@@ -323,11 +355,13 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
323 return -ENODEV; 355 return -ENODEV;
324} 356}
325 357
326static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu) 358static u8 ioat2_dca_get_tag(struct dca_provider *dca,
359 struct device *dev,
360 int cpu)
327{ 361{
328 u8 tag; 362 u8 tag;
329 363
330 tag = ioat_dca_get_tag(dca, cpu); 364 tag = ioat_dca_get_tag(dca, dev, cpu);
331 tag = (~tag) & 0x1F; 365 tag = (~tag) & 0x1F;
332 return tag; 366 return tag;
333} 367}
@@ -336,6 +370,7 @@ static struct dca_ops ioat2_dca_ops = {
336 .add_requester = ioat2_dca_add_requester, 370 .add_requester = ioat2_dca_add_requester,
337 .remove_requester = ioat2_dca_remove_requester, 371 .remove_requester = ioat2_dca_remove_requester,
338 .get_tag = ioat2_dca_get_tag, 372 .get_tag = ioat2_dca_get_tag,
373 .dev_managed = ioat_dca_dev_managed,
339}; 374};
340 375
341static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) 376static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
@@ -425,3 +460,198 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
425 460
426 return dca; 461 return dca;
427} 462}
463
464static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
465{
466 struct ioat_dca_priv *ioatdca = dca_priv(dca);
467 struct pci_dev *pdev;
468 int i;
469 u16 id;
470 u16 global_req_table;
471
472 /* This implementation only supports PCI-Express */
473 if (dev->bus != &pci_bus_type)
474 return -ENODEV;
475 pdev = to_pci_dev(dev);
476 id = dcaid_from_pcidev(pdev);
477
478 if (ioatdca->requester_count == ioatdca->max_requesters)
479 return -ENODEV;
480
481 for (i = 0; i < ioatdca->max_requesters; i++) {
482 if (ioatdca->req_slots[i].pdev == NULL) {
483 /* found an empty slot */
484 ioatdca->requester_count++;
485 ioatdca->req_slots[i].pdev = pdev;
486 ioatdca->req_slots[i].rid = id;
487 global_req_table =
488 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
489 writel(id | IOAT_DCA_GREQID_VALID,
490 ioatdca->iobase + global_req_table + (i * 4));
491 return i;
492 }
493 }
 494 /* Error, ioatdca->requester_count is out of whack */
495 return -EFAULT;
496}
497
498static int ioat3_dca_remove_requester(struct dca_provider *dca,
499 struct device *dev)
500{
501 struct ioat_dca_priv *ioatdca = dca_priv(dca);
502 struct pci_dev *pdev;
503 int i;
504 u16 global_req_table;
505
506 /* This implementation only supports PCI-Express */
507 if (dev->bus != &pci_bus_type)
508 return -ENODEV;
509 pdev = to_pci_dev(dev);
510
511 for (i = 0; i < ioatdca->max_requesters; i++) {
512 if (ioatdca->req_slots[i].pdev == pdev) {
513 global_req_table =
514 readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
515 writel(0, ioatdca->iobase + global_req_table + (i * 4));
516 ioatdca->req_slots[i].pdev = NULL;
517 ioatdca->req_slots[i].rid = 0;
518 ioatdca->requester_count--;
519 return i;
520 }
521 }
522 return -ENODEV;
523}
524
525static u8 ioat3_dca_get_tag(struct dca_provider *dca,
526 struct device *dev,
527 int cpu)
528{
529 u8 tag;
530
531 struct ioat_dca_priv *ioatdca = dca_priv(dca);
532 int i, apic_id, bit, value;
533 u8 entry;
534
535 tag = 0;
536 apic_id = cpu_physical_id(cpu);
537
538 for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
539 entry = ioatdca->tag_map[i];
540 if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
541 bit = entry &
542 ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
543 value = (apic_id & (1 << bit)) ? 1 : 0;
544 } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
545 bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
546 value = (apic_id & (1 << bit)) ? 0 : 1;
547 } else {
548 value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
549 }
550 tag |= (value << i);
551 }
552
553 return tag;
554}
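/*
 * Worked example (illustration only, not part of this patch): with the
 * DCA3_* encodings above, a single tag-map byte decodes as follows for
 * tag bit position i:
 *
 *	0x43  ->  BIT_TO_SEL set, bit 3  ->  tag bit i = APIC ID bit 3
 *	0x82  ->  BIT_TO_INV set, bit 2  ->  tag bit i = inverse of APIC ID bit 2
 *	0x01  ->  literal                ->  tag bit i = 1
 *	0x00  ->  literal                ->  tag bit i = 0
 *
 * so the 8-entry tag map assembles the DCA tag bit by bit from selected
 * (optionally inverted) APIC ID bits and literal values.
 */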
555
556static struct dca_ops ioat3_dca_ops = {
557 .add_requester = ioat3_dca_add_requester,
558 .remove_requester = ioat3_dca_remove_requester,
559 .get_tag = ioat3_dca_get_tag,
560 .dev_managed = ioat_dca_dev_managed,
561};
562
563static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
564{
565 int slots = 0;
566 u32 req;
567 u16 global_req_table;
568
569 global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
570 if (global_req_table == 0)
571 return 0;
572
573 do {
574 req = readl(iobase + global_req_table + (slots * sizeof(u32)));
575 slots++;
576 } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
577
578 return slots;
579}
580
581struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
582{
583 struct dca_provider *dca;
584 struct ioat_dca_priv *ioatdca;
585 int slots;
586 int i;
587 int err;
588 u16 dca_offset;
589 u16 csi_fsb_control;
590 u16 pcie_control;
591 u8 bit;
592
593 union {
594 u64 full;
595 struct {
596 u32 low;
597 u32 high;
598 };
599 } tag_map;
600
601 if (!system_has_dca_enabled(pdev))
602 return NULL;
603
604 dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
605 if (dca_offset == 0)
606 return NULL;
607
608 slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
609 if (slots == 0)
610 return NULL;
611
612 dca = alloc_dca_provider(&ioat3_dca_ops,
613 sizeof(*ioatdca)
614 + (sizeof(struct ioat_dca_slot) * slots));
615 if (!dca)
616 return NULL;
617
618 ioatdca = dca_priv(dca);
619 ioatdca->iobase = iobase;
620 ioatdca->dca_base = iobase + dca_offset;
621 ioatdca->max_requesters = slots;
622
623 /* some bios might not know to turn these on */
624 csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
625 if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
626 csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
627 writew(csi_fsb_control,
628 ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
629 }
630 pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
631 if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
632 pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
633 writew(pcie_control,
634 ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
635 }
636
637
638 /* TODO version, compatibility and configuration checks */
639
640 /* copy out the APIC to DCA tag map */
641 tag_map.low =
642 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
643 tag_map.high =
644 readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
645 for (i = 0; i < 8; i++) {
646 bit = tag_map.full >> (8 * i);
647 ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
648 }
649
650 err = register_dca_provider(dca, &pdev->dev);
651 if (err) {
652 free_dca_provider(dca);
653 return NULL;
654 }
655
656 return dca;
657}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 318e8a22d814..a52156e56886 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -32,6 +32,7 @@
32#include <linux/dmaengine.h> 32#include <linux/dmaengine.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <linux/workqueue.h>
35#include "ioatdma.h" 36#include "ioatdma.h"
36#include "ioatdma_registers.h" 37#include "ioatdma_registers.h"
37#include "ioatdma_hw.h" 38#include "ioatdma_hw.h"
@@ -41,11 +42,23 @@
41#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) 42#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
42#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) 43#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
43 44
45#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
44static int ioat_pending_level = 4; 46static int ioat_pending_level = 4;
45module_param(ioat_pending_level, int, 0644); 47module_param(ioat_pending_level, int, 0644);
46MODULE_PARM_DESC(ioat_pending_level, 48MODULE_PARM_DESC(ioat_pending_level,
47 "high-water mark for pushing ioat descriptors (default: 4)"); 49 "high-water mark for pushing ioat descriptors (default: 4)");
48 50
51#define RESET_DELAY msecs_to_jiffies(100)
52#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000))
53static void ioat_dma_chan_reset_part2(struct work_struct *work);
54static void ioat_dma_chan_watchdog(struct work_struct *work);
55
56/*
57 * workaround for IOAT ver.3.0 null descriptor issue
58 * (channel returns error when size is 0)
59 */
60#define NULL_DESC_BUFFER_SIZE 1
61
49/* internal functions */ 62/* internal functions */
50static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); 63static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); 64static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
@@ -122,6 +135,38 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
122 int i; 135 int i;
123 struct ioat_dma_chan *ioat_chan; 136 struct ioat_dma_chan *ioat_chan;
124 137
138 /*
139 * IOAT ver.3 workarounds
140 */
141 if (device->version == IOAT_VER_3_0) {
142 u32 chan_err_mask;
143 u16 dev_id;
144 u32 dmauncerrsts;
145
146 /*
147 * Write CHANERRMSK_INT with 3E07h to mask out the errors
148 * that can cause stability issues for IOAT ver.3
149 */
150 chan_err_mask = 0x3E07;
151 pci_write_config_dword(device->pdev,
152 IOAT_PCI_CHANERRMASK_INT_OFFSET,
153 chan_err_mask);
154
155 /*
156 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
157 * (workaround for spurious config parity error after restart)
158 */
159 pci_read_config_word(device->pdev,
160 IOAT_PCI_DEVICE_ID_OFFSET,
161 &dev_id);
162 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
163 dmauncerrsts = 0x10;
164 pci_write_config_dword(device->pdev,
165 IOAT_PCI_DMAUNCERRSTS_OFFSET,
166 dmauncerrsts);
167 }
168 }
169
125 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); 170 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); 171 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
127 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); 172 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
@@ -137,6 +182,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
137 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); 182 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138 ioat_chan->xfercap = xfercap; 183 ioat_chan->xfercap = xfercap;
139 ioat_chan->desccount = 0; 184 ioat_chan->desccount = 0;
185 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
140 if (ioat_chan->device->version != IOAT_VER_1_2) { 186 if (ioat_chan->device->version != IOAT_VER_1_2) {
141 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE 187 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142 | IOAT_DMA_DCA_ANY_CPU, 188 | IOAT_DMA_DCA_ANY_CPU,
@@ -175,7 +221,7 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
175{ 221{
176 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 222 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
177 223
178 if (ioat_chan->pending != 0) { 224 if (ioat_chan->pending > 0) {
179 spin_lock_bh(&ioat_chan->desc_lock); 225 spin_lock_bh(&ioat_chan->desc_lock);
180 __ioat1_dma_memcpy_issue_pending(ioat_chan); 226 __ioat1_dma_memcpy_issue_pending(ioat_chan);
181 spin_unlock_bh(&ioat_chan->desc_lock); 227 spin_unlock_bh(&ioat_chan->desc_lock);
@@ -194,13 +240,228 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
194{ 240{
195 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 241 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
196 242
197 if (ioat_chan->pending != 0) { 243 if (ioat_chan->pending > 0) {
198 spin_lock_bh(&ioat_chan->desc_lock); 244 spin_lock_bh(&ioat_chan->desc_lock);
199 __ioat2_dma_memcpy_issue_pending(ioat_chan); 245 __ioat2_dma_memcpy_issue_pending(ioat_chan);
200 spin_unlock_bh(&ioat_chan->desc_lock); 246 spin_unlock_bh(&ioat_chan->desc_lock);
201 } 247 }
202} 248}
203 249
250
251/**
252 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
253 */
254static void ioat_dma_chan_reset_part2(struct work_struct *work)
255{
256 struct ioat_dma_chan *ioat_chan =
257 container_of(work, struct ioat_dma_chan, work.work);
258 struct ioat_desc_sw *desc;
259
260 spin_lock_bh(&ioat_chan->cleanup_lock);
261 spin_lock_bh(&ioat_chan->desc_lock);
262
263 ioat_chan->completion_virt->low = 0;
264 ioat_chan->completion_virt->high = 0;
265 ioat_chan->pending = 0;
266
267 /*
268 * count the descriptors waiting, and be sure to do it
269 * right for both the CB1 line and the CB2 ring
270 */
271 ioat_chan->dmacount = 0;
272 if (ioat_chan->used_desc.prev) {
273 desc = to_ioat_desc(ioat_chan->used_desc.prev);
274 do {
275 ioat_chan->dmacount++;
276 desc = to_ioat_desc(desc->node.next);
277 } while (&desc->node != ioat_chan->used_desc.next);
278 }
279
280 /*
281 * write the new starting descriptor address
282 * this puts channel engine into ARMED state
283 */
284 desc = to_ioat_desc(ioat_chan->used_desc.prev);
285 switch (ioat_chan->device->version) {
286 case IOAT_VER_1_2:
287 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
288 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
289 writel(((u64) desc->async_tx.phys) >> 32,
290 ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
291
292 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
293 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
294 break;
295 case IOAT_VER_2_0:
296 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
297 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
298 writel(((u64) desc->async_tx.phys) >> 32,
299 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
300
301 /* tell the engine to go with what's left to be done */
302 writew(ioat_chan->dmacount,
303 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
304
305 break;
306 }
307 dev_err(&ioat_chan->device->pdev->dev,
308 "chan%d reset - %d descs waiting, %d total desc\n",
309 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
310
311 spin_unlock_bh(&ioat_chan->desc_lock);
312 spin_unlock_bh(&ioat_chan->cleanup_lock);
313}
314
315/**
316 * ioat_dma_reset_channel - restart a channel
317 * @ioat_chan: IOAT DMA channel handle
318 */
319static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
320{
321 u32 chansts, chanerr;
322
323 if (!ioat_chan->used_desc.prev)
324 return;
325
326 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
327 chansts = (ioat_chan->completion_virt->low
328 & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
329 if (chanerr) {
330 dev_err(&ioat_chan->device->pdev->dev,
331 "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
332 chan_num(ioat_chan), chansts, chanerr);
333 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
334 }
335
336 /*
337 * whack it upside the head with a reset
338 * and wait for things to settle out.
339 * force the pending count to a really big negative
340 * to make sure no one forces an issue_pending
341 * while we're waiting.
342 */
343
344 spin_lock_bh(&ioat_chan->desc_lock);
345 ioat_chan->pending = INT_MIN;
346 writeb(IOAT_CHANCMD_RESET,
347 ioat_chan->reg_base
348 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
349 spin_unlock_bh(&ioat_chan->desc_lock);
350
351 /* schedule the 2nd half instead of sleeping a long time */
352 schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
353}
354
355/**
356 * ioat_dma_chan_watchdog - watch for stuck channels
357 */
358static void ioat_dma_chan_watchdog(struct work_struct *work)
359{
360 struct ioatdma_device *device =
361 container_of(work, struct ioatdma_device, work.work);
362 struct ioat_dma_chan *ioat_chan;
363 int i;
364
365 union {
366 u64 full;
367 struct {
368 u32 low;
369 u32 high;
370 };
371 } completion_hw;
372 unsigned long compl_desc_addr_hw;
373
374 for (i = 0; i < device->common.chancnt; i++) {
375 ioat_chan = ioat_lookup_chan_by_index(device, i);
376
377 if (ioat_chan->device->version == IOAT_VER_1_2
378 /* have we started processing anything yet */
379 && ioat_chan->last_completion
 380 /* has nothing completed since the last watchdog cycle? */
381 && (ioat_chan->last_completion ==
382 ioat_chan->watchdog_completion)
383 /* has TCP stuck on one cookie since last watchdog? */
384 && (ioat_chan->watchdog_tcp_cookie ==
385 ioat_chan->watchdog_last_tcp_cookie)
386 && (ioat_chan->watchdog_tcp_cookie !=
387 ioat_chan->completed_cookie)
388 /* is there something in the chain to be processed? */
389 /* CB1 chain always has at least the last one processed */
390 && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
391 && ioat_chan->pending == 0) {
392
 393 /*
 394 * Check the CHANSTS register for the completed
 395 * descriptor address.
 396 * If it differs from the completion writeback,
 397 * is non-zero,
 398 * and has changed since the last watchdog pass,
 399 * the channel can be assumed to be
 400 * working correctly
 401 * and the problem lies in the completion writeback;
 402 * in that case update the completion writeback
 403 * with the actual CHANSTS value.
 404 * Otherwise,
 405 * try resetting the channel.
 406 */
407
408 completion_hw.low = readl(ioat_chan->reg_base +
409 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
410 completion_hw.high = readl(ioat_chan->reg_base +
411 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
412#if (BITS_PER_LONG == 64)
413 compl_desc_addr_hw =
414 completion_hw.full
415 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
416#else
417 compl_desc_addr_hw =
418 completion_hw.low & IOAT_LOW_COMPLETION_MASK;
419#endif
420
421 if ((compl_desc_addr_hw != 0)
422 && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
423 && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
424 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
425 ioat_chan->completion_virt->low = completion_hw.low;
426 ioat_chan->completion_virt->high = completion_hw.high;
427 } else {
428 ioat_dma_reset_channel(ioat_chan);
429 ioat_chan->watchdog_completion = 0;
430 ioat_chan->last_compl_desc_addr_hw = 0;
431 }
432
 433 /*
 434 * For version 2.0: if there are descriptors yet to be processed
 435 * and the last completed descriptor hasn't changed since the last
 436 * watchdog pass, then issue the pending descriptors to push them
 437 * through if they haven't reached the pending level yet;
 438 * otherwise,
 439 * try resetting the channel.
 440 */
441 } else if (ioat_chan->device->version == IOAT_VER_2_0
442 && ioat_chan->used_desc.prev
443 && ioat_chan->last_completion
444 && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
445
446 if (ioat_chan->pending < ioat_pending_level)
447 ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
448 else {
449 ioat_dma_reset_channel(ioat_chan);
450 ioat_chan->watchdog_completion = 0;
451 }
452 } else {
453 ioat_chan->last_compl_desc_addr_hw = 0;
454 ioat_chan->watchdog_completion
455 = ioat_chan->last_completion;
456 }
457
458 ioat_chan->watchdog_last_tcp_cookie =
459 ioat_chan->watchdog_tcp_cookie;
460 }
461
462 schedule_delayed_work(&device->work, WATCHDOG_DELAY);
463}
464
204static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) 465static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
205{ 466{
206 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); 467 struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
@@ -250,6 +511,13 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
250 prev = new; 511 prev = new;
251 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); 512 } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
252 513
514 if (!new) {
515 dev_err(&ioat_chan->device->pdev->dev,
516 "tx submit failed\n");
517 spin_unlock_bh(&ioat_chan->desc_lock);
518 return -ENOMEM;
519 }
520
253 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 521 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
254 if (new->async_tx.callback) { 522 if (new->async_tx.callback) {
255 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 523 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
@@ -335,7 +603,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
335 desc_count++; 603 desc_count++;
336 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); 604 } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
337 605
338 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 606 if (!new) {
607 dev_err(&ioat_chan->device->pdev->dev,
608 "tx submit failed\n");
609 spin_unlock_bh(&ioat_chan->desc_lock);
610 return -ENOMEM;
611 }
612
613 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
339 if (new->async_tx.callback) { 614 if (new->async_tx.callback) {
340 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; 615 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
341 if (first != new) { 616 if (first != new) {
@@ -406,6 +681,7 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
406 desc_sw->async_tx.tx_submit = ioat1_tx_submit; 681 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
407 break; 682 break;
408 case IOAT_VER_2_0: 683 case IOAT_VER_2_0:
684 case IOAT_VER_3_0:
409 desc_sw->async_tx.tx_submit = ioat2_tx_submit; 685 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
410 break; 686 break;
411 } 687 }
@@ -452,7 +728,8 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
452 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors 728 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
453 * @chan: the channel to be filled out 729 * @chan: the channel to be filled out
454 */ 730 */
455static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) 731static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
732 struct dma_client *client)
456{ 733{
457 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); 734 struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
458 struct ioat_desc_sw *desc; 735 struct ioat_desc_sw *desc;
@@ -555,6 +832,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
555 } 832 }
556 break; 833 break;
557 case IOAT_VER_2_0: 834 case IOAT_VER_2_0:
835 case IOAT_VER_3_0:
558 list_for_each_entry_safe(desc, _desc, 836 list_for_each_entry_safe(desc, _desc,
559 ioat_chan->free_desc.next, node) { 837 ioat_chan->free_desc.next, node) {
560 list_del(&desc->node); 838 list_del(&desc->node);
@@ -585,6 +863,10 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
585 ioat_chan->last_completion = ioat_chan->completion_addr = 0; 863 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
586 ioat_chan->pending = 0; 864 ioat_chan->pending = 0;
587 ioat_chan->dmacount = 0; 865 ioat_chan->dmacount = 0;
866 ioat_chan->watchdog_completion = 0;
867 ioat_chan->last_compl_desc_addr_hw = 0;
868 ioat_chan->watchdog_tcp_cookie =
869 ioat_chan->watchdog_last_tcp_cookie = 0;
588} 870}
589 871
590/** 872/**
@@ -640,7 +922,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
640 922
641 /* set up the noop descriptor */ 923 /* set up the noop descriptor */
642 noop_desc = to_ioat_desc(ioat_chan->used_desc.next); 924 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
643 noop_desc->hw->size = 0; 925 /* set size to non-zero value (channel returns error when size is 0) */
926 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
644 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; 927 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
645 noop_desc->hw->src_addr = 0; 928 noop_desc->hw->src_addr = 0;
646 noop_desc->hw->dst_addr = 0; 929 noop_desc->hw->dst_addr = 0;
@@ -690,6 +973,7 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
690 return ioat1_dma_get_next_descriptor(ioat_chan); 973 return ioat1_dma_get_next_descriptor(ioat_chan);
691 break; 974 break;
692 case IOAT_VER_2_0: 975 case IOAT_VER_2_0:
976 case IOAT_VER_3_0:
693 return ioat2_dma_get_next_descriptor(ioat_chan); 977 return ioat2_dma_get_next_descriptor(ioat_chan);
694 break; 978 break;
695 } 979 }
@@ -716,8 +1000,12 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
716 new->src = dma_src; 1000 new->src = dma_src;
717 new->async_tx.flags = flags; 1001 new->async_tx.flags = flags;
718 return &new->async_tx; 1002 return &new->async_tx;
719 } else 1003 } else {
1004 dev_err(&ioat_chan->device->pdev->dev,
1005 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1006 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
720 return NULL; 1007 return NULL;
1008 }
721} 1009}
722 1010
723static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( 1011static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -744,8 +1032,13 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
744 new->src = dma_src; 1032 new->src = dma_src;
745 new->async_tx.flags = flags; 1033 new->async_tx.flags = flags;
746 return &new->async_tx; 1034 return &new->async_tx;
747 } else 1035 } else {
1036 spin_unlock_bh(&ioat_chan->desc_lock);
1037 dev_err(&ioat_chan->device->pdev->dev,
1038 "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1039 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
748 return NULL; 1040 return NULL;
1041 }
749} 1042}
750 1043
751static void ioat_dma_cleanup_tasklet(unsigned long data) 1044static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -756,6 +1049,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
756 chan->reg_base + IOAT_CHANCTRL_OFFSET); 1049 chan->reg_base + IOAT_CHANCTRL_OFFSET);
757} 1050}
758 1051
1052static void
1053ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
1054{
1055 /*
1056 * yes we are unmapping both _page and _single
1057 * alloc'd regions with unmap_page. Is this
1058 * *really* that bad?
1059 */
1060 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
1061 pci_unmap_page(ioat_chan->device->pdev,
1062 pci_unmap_addr(desc, dst),
1063 pci_unmap_len(desc, len),
1064 PCI_DMA_FROMDEVICE);
1065
1066 if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
1067 pci_unmap_page(ioat_chan->device->pdev,
1068 pci_unmap_addr(desc, src),
1069 pci_unmap_len(desc, len),
1070 PCI_DMA_TODEVICE);
1071}
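/*
 * A sketch (illustration only, not from this patch): a client that keeps
 * its buffers mapped across several operations can ask the cleanup path
 * above to leave them alone by setting the skip flags when preparing the
 * descriptor, e.g.
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP);
 */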
1072
759/** 1073/**
760 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors 1074 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
761 * @chan: ioat channel to be cleaned up 1075 * @chan: ioat channel to be cleaned up
@@ -799,11 +1113,27 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
799 1113
800 if (phys_complete == ioat_chan->last_completion) { 1114 if (phys_complete == ioat_chan->last_completion) {
801 spin_unlock_bh(&ioat_chan->cleanup_lock); 1115 spin_unlock_bh(&ioat_chan->cleanup_lock);
1116 /*
1117 * perhaps we're stuck so hard that the watchdog can't go off?
1118 * try to catch it after 2 seconds
1119 */
1120 if (ioat_chan->device->version != IOAT_VER_3_0) {
1121 if (time_after(jiffies,
 1122 ioat_chan->last_completion_time + WATCHDOG_DELAY)) {
1123 ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
1124 ioat_chan->last_completion_time = jiffies;
1125 }
1126 }
802 return; 1127 return;
803 } 1128 }
1129 ioat_chan->last_completion_time = jiffies;
804 1130
805 cookie = 0; 1131 cookie = 0;
806 spin_lock_bh(&ioat_chan->desc_lock); 1132 if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
1133 spin_unlock_bh(&ioat_chan->cleanup_lock);
1134 return;
1135 }
1136
807 switch (ioat_chan->device->version) { 1137 switch (ioat_chan->device->version) {
808 case IOAT_VER_1_2: 1138 case IOAT_VER_1_2:
809 list_for_each_entry_safe(desc, _desc, 1139 list_for_each_entry_safe(desc, _desc,
@@ -816,21 +1146,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
816 */ 1146 */
817 if (desc->async_tx.cookie) { 1147 if (desc->async_tx.cookie) {
818 cookie = desc->async_tx.cookie; 1148 cookie = desc->async_tx.cookie;
819 1149 ioat_dma_unmap(ioat_chan, desc);
820 /*
821 * yes we are unmapping both _page and _single
822 * alloc'd regions with unmap_page. Is this
823 * *really* that bad?
824 */
825 pci_unmap_page(ioat_chan->device->pdev,
826 pci_unmap_addr(desc, dst),
827 pci_unmap_len(desc, len),
828 PCI_DMA_FROMDEVICE);
829 pci_unmap_page(ioat_chan->device->pdev,
830 pci_unmap_addr(desc, src),
831 pci_unmap_len(desc, len),
832 PCI_DMA_TODEVICE);
833
834 if (desc->async_tx.callback) { 1150 if (desc->async_tx.callback) {
835 desc->async_tx.callback(desc->async_tx.callback_param); 1151 desc->async_tx.callback(desc->async_tx.callback_param);
836 desc->async_tx.callback = NULL; 1152 desc->async_tx.callback = NULL;
@@ -862,6 +1178,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
862 } 1178 }
863 break; 1179 break;
864 case IOAT_VER_2_0: 1180 case IOAT_VER_2_0:
1181 case IOAT_VER_3_0:
865 /* has some other thread has already cleaned up? */ 1182 /* has some other thread has already cleaned up? */
866 if (ioat_chan->used_desc.prev == NULL) 1183 if (ioat_chan->used_desc.prev == NULL)
867 break; 1184 break;
@@ -889,16 +1206,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
889 if (desc->async_tx.cookie) { 1206 if (desc->async_tx.cookie) {
890 cookie = desc->async_tx.cookie; 1207 cookie = desc->async_tx.cookie;
891 desc->async_tx.cookie = 0; 1208 desc->async_tx.cookie = 0;
892 1209 ioat_dma_unmap(ioat_chan, desc);
893 pci_unmap_page(ioat_chan->device->pdev,
894 pci_unmap_addr(desc, dst),
895 pci_unmap_len(desc, len),
896 PCI_DMA_FROMDEVICE);
897 pci_unmap_page(ioat_chan->device->pdev,
898 pci_unmap_addr(desc, src),
899 pci_unmap_len(desc, len),
900 PCI_DMA_TODEVICE);
901
902 if (desc->async_tx.callback) { 1210 if (desc->async_tx.callback) {
903 desc->async_tx.callback(desc->async_tx.callback_param); 1211 desc->async_tx.callback(desc->async_tx.callback_param);
904 desc->async_tx.callback = NULL; 1212 desc->async_tx.callback = NULL;
@@ -943,6 +1251,7 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
943 1251
944 last_used = chan->cookie; 1252 last_used = chan->cookie;
945 last_complete = ioat_chan->completed_cookie; 1253 last_complete = ioat_chan->completed_cookie;
1254 ioat_chan->watchdog_tcp_cookie = cookie;
946 1255
947 if (done) 1256 if (done)
948 *done = last_complete; 1257 *done = last_complete;
@@ -973,10 +1282,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
973 spin_lock_bh(&ioat_chan->desc_lock); 1282 spin_lock_bh(&ioat_chan->desc_lock);
974 1283
975 desc = ioat_dma_get_next_descriptor(ioat_chan); 1284 desc = ioat_dma_get_next_descriptor(ioat_chan);
1285
1286 if (!desc) {
1287 dev_err(&ioat_chan->device->pdev->dev,
1288 "Unable to start null desc - get next desc failed\n");
1289 spin_unlock_bh(&ioat_chan->desc_lock);
1290 return;
1291 }
1292
976 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL 1293 desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
977 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN 1294 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
978 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; 1295 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
979 desc->hw->size = 0; 1296 /* set size to non-zero value (channel returns error when size is 0) */
1297 desc->hw->size = NULL_DESC_BUFFER_SIZE;
980 desc->hw->src_addr = 0; 1298 desc->hw->src_addr = 0;
981 desc->hw->dst_addr = 0; 1299 desc->hw->dst_addr = 0;
982 async_tx_ack(&desc->async_tx); 1300 async_tx_ack(&desc->async_tx);
@@ -994,6 +1312,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
994 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); 1312 + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
995 break; 1313 break;
996 case IOAT_VER_2_0: 1314 case IOAT_VER_2_0:
1315 case IOAT_VER_3_0:
997 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, 1316 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
998 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); 1317 ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
999 writel(((u64) desc->async_tx.phys) >> 32, 1318 writel(((u64) desc->async_tx.phys) >> 32,
@@ -1049,7 +1368,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
1049 dma_chan = container_of(device->common.channels.next, 1368 dma_chan = container_of(device->common.channels.next,
1050 struct dma_chan, 1369 struct dma_chan,
1051 device_node); 1370 device_node);
1052 if (device->common.device_alloc_chan_resources(dma_chan) < 1) { 1371 if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
1053 dev_err(&device->pdev->dev, 1372 dev_err(&device->pdev->dev,
1054 "selftest cannot allocate chan resource\n"); 1373 "selftest cannot allocate chan resource\n");
1055 err = -ENODEV; 1374 err = -ENODEV;
@@ -1312,6 +1631,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1312 ioat1_dma_memcpy_issue_pending; 1631 ioat1_dma_memcpy_issue_pending;
1313 break; 1632 break;
1314 case IOAT_VER_2_0: 1633 case IOAT_VER_2_0:
1634 case IOAT_VER_3_0:
1315 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; 1635 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1316 device->common.device_issue_pending = 1636 device->common.device_issue_pending =
1317 ioat2_dma_memcpy_issue_pending; 1637 ioat2_dma_memcpy_issue_pending;
@@ -1331,8 +1651,16 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1331 if (err) 1651 if (err)
1332 goto err_self_test; 1652 goto err_self_test;
1333 1653
1654 ioat_set_tcp_copy_break(device);
1655
1334 dma_async_device_register(&device->common); 1656 dma_async_device_register(&device->common);
1335 1657
1658 if (device->version != IOAT_VER_3_0) {
1659 INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1660 schedule_delayed_work(&device->work,
1661 WATCHDOG_DELAY);
1662 }
1663
1336 return device; 1664 return device;
1337 1665
1338err_self_test: 1666err_self_test:
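The probe path now arms a per-device delayed work item for hardware other than version 3.0; the handler, ioat_dma_chan_watchdog(), is referenced here but defined elsewhere in this patch, and is expected to re-arm itself. The self re-arming shape, as a sketch:

static void example_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);

	/* ... inspect each channel, reset any that look stuck ... */

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);	/* re-arm */
}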
@@ -1365,6 +1693,10 @@ void ioat_dma_remove(struct ioatdma_device *device)
1365 pci_release_regions(device->pdev); 1693 pci_release_regions(device->pdev);
1366 pci_disable_device(device->pdev); 1694 pci_disable_device(device->pdev);
1367 1695
1696 if (device->version != IOAT_VER_3_0) {
1697 cancel_delayed_work(&device->work);
1698 }
1699
1368 list_for_each_entry_safe(chan, _chan, 1700 list_for_each_entry_safe(chan, _chan,
1369 &device->common.channels, device_node) { 1701 &device->common.channels, device_node) {
1370 ioat_chan = to_ioat_chan(chan); 1702 ioat_chan = to_ioat_chan(chan);
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index f2c7fedbf009..a3306d0e1372 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -27,8 +27,9 @@
27#include <linux/dmapool.h> 27#include <linux/dmapool.h>
28#include <linux/cache.h> 28#include <linux/cache.h>
29#include <linux/pci_ids.h> 29#include <linux/pci_ids.h>
30#include <net/tcp.h>
30 31
31#define IOAT_DMA_VERSION "2.04" 32#define IOAT_DMA_VERSION "3.30"
32 33
33enum ioat_interrupt { 34enum ioat_interrupt {
34 none = 0, 35 none = 0,
@@ -40,6 +41,7 @@ enum ioat_interrupt {
40 41
41#define IOAT_LOW_COMPLETION_MASK 0xffffffc0 42#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
42#define IOAT_DMA_DCA_ANY_CPU ~0 43#define IOAT_DMA_DCA_ANY_CPU ~0
44#define IOAT_WATCHDOG_PERIOD (2 * HZ)
43 45
44 46
45/** 47/**
@@ -62,6 +64,7 @@ struct ioatdma_device {
62 struct dma_device common; 64 struct dma_device common;
63 u8 version; 65 u8 version;
64 enum ioat_interrupt irq_mode; 66 enum ioat_interrupt irq_mode;
67 struct delayed_work work;
65 struct msix_entry msix_entries[4]; 68 struct msix_entry msix_entries[4];
66 struct ioat_dma_chan *idx[4]; 69 struct ioat_dma_chan *idx[4];
67}; 70};
@@ -75,6 +78,7 @@ struct ioat_dma_chan {
75 78
76 dma_cookie_t completed_cookie; 79 dma_cookie_t completed_cookie;
77 unsigned long last_completion; 80 unsigned long last_completion;
81 unsigned long last_completion_time;
78 82
79 size_t xfercap; /* XFERCAP register value expanded out */ 83 size_t xfercap; /* XFERCAP register value expanded out */
80 84
@@ -82,6 +86,10 @@ struct ioat_dma_chan {
82 spinlock_t desc_lock; 86 spinlock_t desc_lock;
83 struct list_head free_desc; 87 struct list_head free_desc;
84 struct list_head used_desc; 88 struct list_head used_desc;
89 unsigned long watchdog_completion;
90 int watchdog_tcp_cookie;
91 u32 watchdog_last_tcp_cookie;
92 struct delayed_work work;
85 93
86 int pending; 94 int pending;
87 int dmacount; 95 int dmacount;
@@ -98,6 +106,7 @@ struct ioat_dma_chan {
98 u32 high; 106 u32 high;
99 }; 107 };
100 } *completion_virt; 108 } *completion_virt;
109 unsigned long last_compl_desc_addr_hw;
101 struct tasklet_struct cleanup_task; 110 struct tasklet_struct cleanup_task;
102}; 111};
103 112
@@ -121,17 +130,34 @@ struct ioat_desc_sw {
121 struct dma_async_tx_descriptor async_tx; 130 struct dma_async_tx_descriptor async_tx;
122}; 131};
123 132
133static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
134{
135 #ifdef CONFIG_NET_DMA
136 switch (dev->version) {
137 case IOAT_VER_1_2:
138 case IOAT_VER_3_0:
139 sysctl_tcp_dma_copybreak = 4096;
140 break;
141 case IOAT_VER_2_0:
142 sysctl_tcp_dma_copybreak = 2048;
143 break;
144 }
145 #endif
146}
147
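ioat_set_tcp_copy_break() only tunes a threshold; the NET_DMA receive path decides per copy whether offload is worthwhile. Roughly, and purely as an illustration (dma_copy(), cpu_copy() and dma_chan_available are hypothetical stand-ins for the real TCP receive helpers):

	if (copy_len > sysctl_tcp_dma_copybreak && dma_chan_available)
		dma_copy(chan, dst, src, copy_len);	/* large copy: offload to the engine */
	else
		cpu_copy(dst, src, copy_len);		/* small copy: cheaper on the CPU */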
124#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) 148#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
125struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, 149struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
126 void __iomem *iobase); 150 void __iomem *iobase);
127void ioat_dma_remove(struct ioatdma_device *device); 151void ioat_dma_remove(struct ioatdma_device *device);
128struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); 152struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
129struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 153struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
154struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
130#else 155#else
131#define ioat_dma_probe(pdev, iobase) NULL 156#define ioat_dma_probe(pdev, iobase) NULL
132#define ioat_dma_remove(device) do { } while (0) 157#define ioat_dma_remove(device) do { } while (0)
133#define ioat_dca_init(pdev, iobase) NULL 158#define ioat_dca_init(pdev, iobase) NULL
134#define ioat2_dca_init(pdev, iobase) NULL 159#define ioat2_dca_init(pdev, iobase) NULL
160#define ioat3_dca_init(pdev, iobase) NULL
135#endif 161#endif
136 162
137#endif /* IOATDMA_H */ 163#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index dd470fa91d86..f1ae2c776f74 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -35,6 +35,7 @@
35#define IOAT_PCI_SID 0x8086 35#define IOAT_PCI_SID 0x8086
36#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 36#define IOAT_VER_1_2 0x12 /* Version 1.2 */
37#define IOAT_VER_2_0 0x20 /* Version 2.0 */ 37#define IOAT_VER_2_0 0x20 /* Version 2.0 */
38#define IOAT_VER_3_0 0x30 /* Version 3.0 */
38 39
39struct ioat_dma_descriptor { 40struct ioat_dma_descriptor {
40 uint32_t size; 41 uint32_t size;
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index 9832d7ebd931..827cb503cac6 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -25,6 +25,10 @@
25#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001 25#define IOAT_PCI_DMACTRL_DMA_EN 0x00000001
26#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002 26#define IOAT_PCI_DMACTRL_MSI_EN 0x00000002
27 27
28#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
29#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
30#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
31
28/* MMIO Device Registers */ 32/* MMIO Device Registers */
29#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */ 33#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */
30 34
@@ -149,7 +153,23 @@
149#define IOAT_DCA_GREQID_VALID 0x20000000 153#define IOAT_DCA_GREQID_VALID 0x20000000
150#define IOAT_DCA_GREQID_LASTID 0x80000000 154#define IOAT_DCA_GREQID_LASTID 0x80000000
151 155
156#define IOAT3_CSI_CAPABILITY_OFFSET 0x08
157#define IOAT3_CSI_CAPABILITY_PREFETCH 0x1
158
159#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A
160#define IOAT3_PCI_CAPABILITY_MEMWR 0x1
161
162#define IOAT3_CSI_CONTROL_OFFSET 0x0C
163#define IOAT3_CSI_CONTROL_PREFETCH 0x1
164
165#define IOAT3_PCI_CONTROL_OFFSET 0x0E
166#define IOAT3_PCI_CONTROL_MEMWR 0x1
167
168#define IOAT3_APICID_TAG_MAP_OFFSET 0x10
169#define IOAT3_APICID_TAG_MAP_OFFSET_LOW 0x10
170#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14
152 171
172#define IOAT3_DCA_GREQID_OFFSET 0x02
153 173
154#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ 174#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
155#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ 175#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 0ec0f431e6a1..85bfeba4d85e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
82 struct device *dev = 82 struct device *dev =
83 &iop_chan->device->pdev->dev; 83 &iop_chan->device->pdev->dev;
84 u32 len = unmap->unmap_len; 84 u32 len = unmap->unmap_len;
85 u32 src_cnt = unmap->unmap_src_cnt; 85 enum dma_ctrl_flags flags = desc->async_tx.flags;
86 dma_addr_t addr = iop_desc_get_dest_addr(unmap, 86 u32 src_cnt;
87 iop_chan); 87 dma_addr_t addr;
88 88
89 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE); 89 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
90 while (src_cnt--) { 90 addr = iop_desc_get_dest_addr(unmap, iop_chan);
91 addr = iop_desc_get_src_addr(unmap, 91 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
92 iop_chan, 92 }
93 src_cnt); 93
94 dma_unmap_page(dev, addr, len, 94 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
95 DMA_TO_DEVICE); 95 src_cnt = unmap->unmap_src_cnt;
96 while (src_cnt--) {
97 addr = iop_desc_get_src_addr(unmap,
98 iop_chan,
99 src_cnt);
100 dma_unmap_page(dev, addr, len,
101 DMA_TO_DEVICE);
102 }
96 } 103 }
97 desc->group_head = NULL; 104 desc->group_head = NULL;
98 } 105 }
@@ -366,8 +373,8 @@ retry:
366 if (!retry++) 373 if (!retry++)
367 goto retry; 374 goto retry;
368 375
369 /* try to free some slots if the allocation fails */ 376 /* perform direct reclaim if the allocation fails */
370 tasklet_schedule(&iop_chan->irq_tasklet); 377 __iop_adma_slot_cleanup(iop_chan);
371 378
372 return NULL; 379 return NULL;
373} 380}
@@ -443,8 +450,18 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
443static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan); 450static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
444static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan); 451static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
445 452
446/* returns the number of allocated descriptors */ 453/**
447static int iop_adma_alloc_chan_resources(struct dma_chan *chan) 454 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
455 * @chan - allocate descriptor resources for this channel
456 * @client - current client requesting the channel be ready for requests
457 *
458 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
459 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
460 * greater than 2x the number of slots needed to satisfy a device->max_xor
461 * request.
462 */
463static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
464 struct dma_client *client)
448{ 465{
449 char *hw_desc; 466 char *hw_desc;
450 int idx; 467 int idx;
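A worked example of the sizing rule in the note above, with hypothetical numbers: if a device->max_xor request consumes 4 descriptor slots, num_descs_in_pool must exceed 2 * 4 = 8, i.e. the platform's descriptor pool must hold at least 9 slots; otherwise a chain of dependent XOR operations could deadlock waiting for slots that only its own completion would free.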
@@ -838,7 +855,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
838 dma_chan = container_of(device->common.channels.next, 855 dma_chan = container_of(device->common.channels.next,
839 struct dma_chan, 856 struct dma_chan,
840 device_node); 857 device_node);
841 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 858 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
842 err = -ENODEV; 859 err = -ENODEV;
843 goto out; 860 goto out;
844 } 861 }
@@ -936,7 +953,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
936 dma_chan = container_of(device->common.channels.next, 953 dma_chan = container_of(device->common.channels.next,
937 struct dma_chan, 954 struct dma_chan,
938 device_node); 955 device_node);
939 if (iop_adma_alloc_chan_resources(dma_chan) < 1) { 956 if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
940 err = -ENODEV; 957 err = -ENODEV;
941 goto out; 958 goto out;
942 } 959 }
@@ -1387,6 +1404,8 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1387 spin_unlock_bh(&iop_chan->lock); 1404 spin_unlock_bh(&iop_chan->lock);
1388} 1405}
1389 1406
1407MODULE_ALIAS("platform:iop-adma");
1408
1390static struct platform_driver iop_adma_driver = { 1409static struct platform_driver iop_adma_driver = {
1391 .probe = iop_adma_probe, 1410 .probe = iop_adma_probe,
1392 .remove = iop_adma_remove, 1411 .remove = iop_adma_remove,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
new file mode 100644
index 000000000000..a4e4494663bf
--- /dev/null
+++ b/drivers/dma/mv_xor.c
@@ -0,0 +1,1375 @@
1/*
2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/async_tx.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/spinlock.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/memory.h>
28#include <asm/plat-orion/mv_xor.h>
29#include "mv_xor.h"
30
31static void mv_xor_issue_pending(struct dma_chan *chan);
32
33#define to_mv_xor_chan(chan) \
34 container_of(chan, struct mv_xor_chan, common)
35
36#define to_mv_xor_device(dev) \
37 container_of(dev, struct mv_xor_device, common)
38
39#define to_mv_xor_slot(tx) \
40 container_of(tx, struct mv_xor_desc_slot, async_tx)
41
42static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
43{
44 struct mv_xor_desc *hw_desc = desc->hw_desc;
45
46 hw_desc->status = (1 << 31);
47 hw_desc->phy_next_desc = 0;
48 hw_desc->desc_command = (1 << 31);
49}
50
51static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
52{
53 struct mv_xor_desc *hw_desc = desc->hw_desc;
54 return hw_desc->phy_dest_addr;
55}
56
57static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
58 int src_idx)
59{
60 struct mv_xor_desc *hw_desc = desc->hw_desc;
61 return hw_desc->phy_src_addr[src_idx];
62}
63
64
65static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
66 u32 byte_count)
67{
68 struct mv_xor_desc *hw_desc = desc->hw_desc;
69 hw_desc->byte_count = byte_count;
70}
71
72static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
73 u32 next_desc_addr)
74{
75 struct mv_xor_desc *hw_desc = desc->hw_desc;
76 BUG_ON(hw_desc->phy_next_desc);
77 hw_desc->phy_next_desc = next_desc_addr;
78}
79
80static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
81{
82 struct mv_xor_desc *hw_desc = desc->hw_desc;
83 hw_desc->phy_next_desc = 0;
84}
85
86static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
87{
88 desc->value = val;
89}
90
91static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
92 dma_addr_t addr)
93{
94 struct mv_xor_desc *hw_desc = desc->hw_desc;
95 hw_desc->phy_dest_addr = addr;
96}
97
98static int mv_chan_memset_slot_count(size_t len)
99{
100 return 1;
101}
102
103#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
104
105static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
106 int index, dma_addr_t addr)
107{
108 struct mv_xor_desc *hw_desc = desc->hw_desc;
109 hw_desc->phy_src_addr[index] = addr;
110 if (desc->type == DMA_XOR)
111 hw_desc->desc_command |= (1 << index);
112}
113
114static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
115{
116 return __raw_readl(XOR_CURR_DESC(chan));
117}
118
119static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
120 u32 next_desc_addr)
121{
122 __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
123}
124
125static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
126{
127 __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
128}
129
130static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
131{
132 __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
133}
134
135static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
136{
137 __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
138 __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
139}
140
141static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
142{
143 u32 val = __raw_readl(XOR_INTR_MASK(chan));
144 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
145 __raw_writel(val, XOR_INTR_MASK(chan));
146}
147
148static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
149{
150 u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
151 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
152 return intr_cause;
153}
154
155static int mv_is_err_intr(u32 intr_cause)
156{
157 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
158 return 1;
159
160 return 0;
161}
162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{
165 u32 val = (1 << (1 + (chan->idx * 16)));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168}
169
170static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
171{
172 u32 val = 0xFFFF0000 >> (chan->idx * 16);
173 __raw_writel(val, XOR_INTR_CAUSE(chan));
174}
175
176static int mv_can_chain(struct mv_xor_desc_slot *desc)
177{
178 struct mv_xor_desc_slot *chain_old_tail = list_entry(
179 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
180
181 if (chain_old_tail->type != desc->type)
182 return 0;
183 if (desc->type == DMA_MEMSET)
184 return 0;
185
186 return 1;
187}
188
189static void mv_set_mode(struct mv_xor_chan *chan,
190 enum dma_transaction_type type)
191{
192 u32 op_mode;
193 u32 config = __raw_readl(XOR_CONFIG(chan));
194
195 switch (type) {
196 case DMA_XOR:
197 op_mode = XOR_OPERATION_MODE_XOR;
198 break;
199 case DMA_MEMCPY:
200 op_mode = XOR_OPERATION_MODE_MEMCPY;
201 break;
202 case DMA_MEMSET:
203 op_mode = XOR_OPERATION_MODE_MEMSET;
204 break;
205 default:
206 dev_printk(KERN_ERR, chan->device->common.dev,
207 "error: unsupported operation %d.\n",
208 type);
209 BUG();
210 return;
211 }
212
213 config &= ~0x7;
214 config |= op_mode;
215 __raw_writel(config, XOR_CONFIG(chan));
216 chan->current_type = type;
217}
218
219static void mv_chan_activate(struct mv_xor_chan *chan)
220{
221 u32 activation;
222
223 dev_dbg(chan->device->common.dev, " activate chan.\n");
224 activation = __raw_readl(XOR_ACTIVATION(chan));
225 activation |= 0x1;
226 __raw_writel(activation, XOR_ACTIVATION(chan));
227}
228
229static char mv_chan_is_busy(struct mv_xor_chan *chan)
230{
231 u32 state = __raw_readl(XOR_ACTIVATION(chan));
232
233 state = (state >> 4) & 0x3;
234
235 return (state == 1) ? 1 : 0;
236}
237
238static int mv_chan_xor_slot_count(size_t len, int src_cnt)
239{
240 return 1;
241}
242
243/**
244 * mv_xor_free_slots - flags descriptor slots for reuse
245 * @slot: Slot to free
246 * Caller must hold &mv_chan->lock while calling this function
247 */
248static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
249 struct mv_xor_desc_slot *slot)
250{
251 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
252 __func__, __LINE__, slot);
253
254 slot->slots_per_op = 0;
255
256}
257
258/*
259 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
260 * sw_desc
261 * Caller must hold &mv_chan->lock while calling this function
262 */
263static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
264 struct mv_xor_desc_slot *sw_desc)
265{
266 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
267 __func__, __LINE__, sw_desc);
268 if (sw_desc->type != mv_chan->current_type)
269 mv_set_mode(mv_chan, sw_desc->type);
270
271 if (sw_desc->type == DMA_MEMSET) {
272 /* for memset requests we need to program the engine, no
273 * descriptors used.
274 */
275 struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
276 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
277 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
278 mv_chan_set_value(mv_chan, sw_desc->value);
279 } else {
280 /* set the hardware chain */
281 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
282 }
283 mv_chan->pending += sw_desc->slot_cnt;
284 mv_xor_issue_pending(&mv_chan->common);
285}
286
287static dma_cookie_t
288mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
289 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
290{
291 BUG_ON(desc->async_tx.cookie < 0);
292
293 if (desc->async_tx.cookie > 0) {
294 cookie = desc->async_tx.cookie;
295
296 /* call the callback (must not sleep or submit new
297 * operations to this channel)
298 */
299 if (desc->async_tx.callback)
300 desc->async_tx.callback(
301 desc->async_tx.callback_param);
302
303 /* unmap dma addresses
304 * (unmap_single vs unmap_page?)
305 */
306 if (desc->group_head && desc->unmap_len) {
307 struct mv_xor_desc_slot *unmap = desc->group_head;
308 struct device *dev =
309 &mv_chan->device->pdev->dev;
310 u32 len = unmap->unmap_len;
311 enum dma_ctrl_flags flags = desc->async_tx.flags;
312 u32 src_cnt;
313 dma_addr_t addr;
314
315 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
316 addr = mv_desc_get_dest_addr(unmap);
317 dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
318 }
319
320 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
321 src_cnt = unmap->unmap_src_cnt;
322 while (src_cnt--) {
323 addr = mv_desc_get_src_addr(unmap,
324 src_cnt);
325 dma_unmap_page(dev, addr, len,
326 DMA_TO_DEVICE);
327 }
328 }
329 desc->group_head = NULL;
330 }
331 }
332
333 /* run dependent operations */
334 async_tx_run_dependencies(&desc->async_tx);
335
336 return cookie;
337}
338
339static int
340mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
341{
342 struct mv_xor_desc_slot *iter, *_iter;
343
344 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
345 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
346 completed_node) {
347
348 if (async_tx_test_ack(&iter->async_tx)) {
349 list_del(&iter->completed_node);
350 mv_xor_free_slots(mv_chan, iter);
351 }
352 }
353 return 0;
354}
355
356static int
357mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
358 struct mv_xor_chan *mv_chan)
359{
360 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
361 __func__, __LINE__, desc, desc->async_tx.flags);
362 list_del(&desc->chain_node);
363 /* the client is allowed to attach dependent operations
364 * until 'ack' is set
365 */
366 if (!async_tx_test_ack(&desc->async_tx)) {
367 /* move this slot to the completed_slots */
368 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
369 return 0;
370 }
371
372 mv_xor_free_slots(mv_chan, desc);
373 return 0;
374}
375
376static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
377{
378 struct mv_xor_desc_slot *iter, *_iter;
379 dma_cookie_t cookie = 0;
380 int busy = mv_chan_is_busy(mv_chan);
381 u32 current_desc = mv_chan_get_current_desc(mv_chan);
382 int seen_current = 0;
383
384 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
385 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
386 mv_xor_clean_completed_slots(mv_chan);
387
388 /* free completed slots from the chain starting with
389 * the oldest descriptor
390 */
391
392 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
393 chain_node) {
394 prefetch(_iter);
395 prefetch(&_iter->async_tx);
396
397 /* do not advance past the current descriptor loaded into the
398 * hardware channel, subsequent descriptors are either in
399 * process or have not been submitted
400 */
401 if (seen_current)
402 break;
403
404 /* stop the search if we reach the current descriptor and the
405 * channel is busy
406 */
407 if (iter->async_tx.phys == current_desc) {
408 seen_current = 1;
409 if (busy)
410 break;
411 }
412
413 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
414
415 if (mv_xor_clean_slot(iter, mv_chan))
416 break;
417 }
418
419 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
420 struct mv_xor_desc_slot *chain_head;
421 chain_head = list_entry(mv_chan->chain.next,
422 struct mv_xor_desc_slot,
423 chain_node);
424
425 mv_xor_start_new_chain(mv_chan, chain_head);
426 }
427
428 if (cookie > 0)
429 mv_chan->completed_cookie = cookie;
430}
431
432static void
433mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
434{
435 spin_lock_bh(&mv_chan->lock);
436 __mv_xor_slot_cleanup(mv_chan);
437 spin_unlock_bh(&mv_chan->lock);
438}
439
440static void mv_xor_tasklet(unsigned long data)
441{
442 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
443 __mv_xor_slot_cleanup(chan);
444}
445
446static struct mv_xor_desc_slot *
447mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
448 int slots_per_op)
449{
450 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
451 LIST_HEAD(chain);
452 int slots_found, retry = 0;
453
454 /* start search from the last allocated descriptor
455 * if a contiguous allocation cannot be found, start searching
456 * from the beginning of the list
457 */
458retry:
459 slots_found = 0;
460 if (retry == 0)
461 iter = mv_chan->last_used;
462 else
463 iter = list_entry(&mv_chan->all_slots,
464 struct mv_xor_desc_slot,
465 slot_node);
466
467 list_for_each_entry_safe_continue(
468 iter, _iter, &mv_chan->all_slots, slot_node) {
469 prefetch(_iter);
470 prefetch(&_iter->async_tx);
471 if (iter->slots_per_op) {
472 /* give up after finding the first busy slot
473 * on the second pass through the list
474 */
475 if (retry)
476 break;
477
478 slots_found = 0;
479 continue;
480 }
481
482 /* start the allocation if the slot is correctly aligned */
483 if (!slots_found++)
484 alloc_start = iter;
485
486 if (slots_found == num_slots) {
487 struct mv_xor_desc_slot *alloc_tail = NULL;
488 struct mv_xor_desc_slot *last_used = NULL;
489 iter = alloc_start;
490 while (num_slots) {
491 int i;
492
493 /* pre-ack all but the last descriptor */
494 async_tx_ack(&iter->async_tx);
495
496 list_add_tail(&iter->chain_node, &chain);
497 alloc_tail = iter;
498 iter->async_tx.cookie = 0;
499 iter->slot_cnt = num_slots;
500 iter->xor_check_result = NULL;
501 for (i = 0; i < slots_per_op; i++) {
502 iter->slots_per_op = slots_per_op - i;
503 last_used = iter;
504 iter = list_entry(iter->slot_node.next,
505 struct mv_xor_desc_slot,
506 slot_node);
507 }
508 num_slots -= slots_per_op;
509 }
510 alloc_tail->group_head = alloc_start;
511 alloc_tail->async_tx.cookie = -EBUSY;
512 list_splice(&chain, &alloc_tail->async_tx.tx_list);
513 mv_chan->last_used = last_used;
514 mv_desc_clear_next_desc(alloc_start);
515 mv_desc_clear_next_desc(alloc_tail);
516 return alloc_tail;
517 }
518 }
519 if (!retry++)
520 goto retry;
521
522 /* try to free some slots if the allocation fails */
523 tasklet_schedule(&mv_chan->irq_tasklet);
524
525 return NULL;
526}
527
528static dma_cookie_t
529mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
530 struct mv_xor_desc_slot *desc)
531{
532 dma_cookie_t cookie = mv_chan->common.cookie;
533
534 if (++cookie < 0)
535 cookie = 1;
536 mv_chan->common.cookie = desc->async_tx.cookie = cookie;
537 return cookie;
538}
539
540/************************ DMA engine API functions ****************************/
541static dma_cookie_t
542mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
543{
544 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
545 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
546 struct mv_xor_desc_slot *grp_start, *old_chain_tail;
547 dma_cookie_t cookie;
548 int new_hw_chain = 1;
549
550 dev_dbg(mv_chan->device->common.dev,
551 "%s sw_desc %p: async_tx %p\n",
552 __func__, sw_desc, &sw_desc->async_tx);
553
554 grp_start = sw_desc->group_head;
555
556 spin_lock_bh(&mv_chan->lock);
557 cookie = mv_desc_assign_cookie(mv_chan, sw_desc);
558
559 if (list_empty(&mv_chan->chain))
560 list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
561 else {
562 new_hw_chain = 0;
563
564 old_chain_tail = list_entry(mv_chan->chain.prev,
565 struct mv_xor_desc_slot,
566 chain_node);
567 list_splice_init(&grp_start->async_tx.tx_list,
568 &old_chain_tail->chain_node);
569
570 if (!mv_can_chain(grp_start))
571 goto submit_done;
572
573 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
574 old_chain_tail->async_tx.phys);
575
576 /* fix up the hardware chain */
577 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
578
579 /* if the channel is not busy */
580 if (!mv_chan_is_busy(mv_chan)) {
581 u32 current_desc = mv_chan_get_current_desc(mv_chan);
582 /*
583 * and the current desc is the end of the chain before
584 * the append, then we need to start the channel
585 */
586 if (current_desc == old_chain_tail->async_tx.phys)
587 new_hw_chain = 1;
588 }
589 }
590
591 if (new_hw_chain)
592 mv_xor_start_new_chain(mv_chan, grp_start);
593
594submit_done:
595 spin_unlock_bh(&mv_chan->lock);
596
597 return cookie;
598}
599
600/* returns the number of allocated descriptors */
601static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
602 struct dma_client *client)
603{
604 char *hw_desc;
605 int idx;
606 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
607 struct mv_xor_desc_slot *slot = NULL;
608 struct mv_xor_platform_data *plat_data =
609 mv_chan->device->pdev->dev.platform_data;
610 int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
611
612 /* Allocate descriptor slots */
613 idx = mv_chan->slots_allocated;
614 while (idx < num_descs_in_pool) {
615 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
616 if (!slot) {
617 printk(KERN_INFO "MV XOR Channel only initialized"
618 " %d descriptor slots\n", idx);
619 break;
620 }
621 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
622 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
623
624 dma_async_tx_descriptor_init(&slot->async_tx, chan);
625 slot->async_tx.tx_submit = mv_xor_tx_submit;
626 INIT_LIST_HEAD(&slot->chain_node);
627 INIT_LIST_HEAD(&slot->slot_node);
628 INIT_LIST_HEAD(&slot->async_tx.tx_list);
629 hw_desc = (char *) mv_chan->device->dma_desc_pool;
630 slot->async_tx.phys =
631 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
632 slot->idx = idx++;
633
634 spin_lock_bh(&mv_chan->lock);
635 mv_chan->slots_allocated = idx;
636 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
637 spin_unlock_bh(&mv_chan->lock);
638 }
639
640 if (mv_chan->slots_allocated && !mv_chan->last_used)
641 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
642 struct mv_xor_desc_slot,
643 slot_node);
644
645 dev_dbg(mv_chan->device->common.dev,
646 "allocated %d descriptor slots last_used: %p\n",
647 mv_chan->slots_allocated, mv_chan->last_used);
648
649 return mv_chan->slots_allocated ? : -ENOMEM;
650}
651
652static struct dma_async_tx_descriptor *
653mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
654 size_t len, unsigned long flags)
655{
656 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
657 struct mv_xor_desc_slot *sw_desc, *grp_start;
658 int slot_cnt;
659
660 dev_dbg(mv_chan->device->common.dev,
661 "%s dest: %x src %x len: %u flags: %ld\n",
662 __func__, dest, src, len, flags);
663 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
664 return NULL;
665
666 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
667
668 spin_lock_bh(&mv_chan->lock);
669 slot_cnt = mv_chan_memcpy_slot_count(len);
670 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
671 if (sw_desc) {
672 sw_desc->type = DMA_MEMCPY;
673 sw_desc->async_tx.flags = flags;
674 grp_start = sw_desc->group_head;
675 mv_desc_init(grp_start, flags);
676 mv_desc_set_byte_count(grp_start, len);
677 mv_desc_set_dest_addr(sw_desc->group_head, dest);
678 mv_desc_set_src_addr(grp_start, 0, src);
679 sw_desc->unmap_src_cnt = 1;
680 sw_desc->unmap_len = len;
681 }
682 spin_unlock_bh(&mv_chan->lock);
683
684 dev_dbg(mv_chan->device->common.dev,
685 "%s sw_desc %p async_tx %p\n",
686 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
687
688 return sw_desc ? &sw_desc->async_tx : NULL;
689}
690
691static struct dma_async_tx_descriptor *
692mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
693 size_t len, unsigned long flags)
694{
695 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
696 struct mv_xor_desc_slot *sw_desc, *grp_start;
697 int slot_cnt;
698
699 dev_dbg(mv_chan->device->common.dev,
700 "%s dest: %x len: %u flags: %ld\n",
701 __func__, dest, len, flags);
702 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
703 return NULL;
704
705 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
706
707 spin_lock_bh(&mv_chan->lock);
708 slot_cnt = mv_chan_memset_slot_count(len);
709 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
710 if (sw_desc) {
711 sw_desc->type = DMA_MEMSET;
712 sw_desc->async_tx.flags = flags;
713 grp_start = sw_desc->group_head;
714 mv_desc_init(grp_start, flags);
715 mv_desc_set_byte_count(grp_start, len);
716 mv_desc_set_dest_addr(sw_desc->group_head, dest);
717 mv_desc_set_block_fill_val(grp_start, value);
718 sw_desc->unmap_src_cnt = 1;
719 sw_desc->unmap_len = len;
720 }
721 spin_unlock_bh(&mv_chan->lock);
722 dev_dbg(mv_chan->device->common.dev,
723 "%s sw_desc %p async_tx %p \n",
724 __func__, sw_desc, &sw_desc->async_tx);
725 return sw_desc ? &sw_desc->async_tx : NULL;
726}
727
728static struct dma_async_tx_descriptor *
729mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
730 unsigned int src_cnt, size_t len, unsigned long flags)
731{
732 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
733 struct mv_xor_desc_slot *sw_desc, *grp_start;
734 int slot_cnt;
735
736 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
737 return NULL;
738
739 BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));
740
741 dev_dbg(mv_chan->device->common.dev,
742 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
743 __func__, src_cnt, len, dest, flags);
744
745 spin_lock_bh(&mv_chan->lock);
746 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
747 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
748 if (sw_desc) {
749 sw_desc->type = DMA_XOR;
750 sw_desc->async_tx.flags = flags;
751 grp_start = sw_desc->group_head;
752 mv_desc_init(grp_start, flags);
753 /* the byte count field is the same as in memcpy desc*/
754 mv_desc_set_byte_count(grp_start, len);
755 mv_desc_set_dest_addr(sw_desc->group_head, dest);
756 sw_desc->unmap_src_cnt = src_cnt;
757 sw_desc->unmap_len = len;
758 while (src_cnt--)
759 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
760 }
761 spin_unlock_bh(&mv_chan->lock);
762 dev_dbg(mv_chan->device->common.dev,
763 "%s sw_desc %p async_tx %p \n",
764 __func__, sw_desc, &sw_desc->async_tx);
765 return sw_desc ? &sw_desc->async_tx : NULL;
766}
767
768static void mv_xor_free_chan_resources(struct dma_chan *chan)
769{
770 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
771 struct mv_xor_desc_slot *iter, *_iter;
772 int in_use_descs = 0;
773
774 mv_xor_slot_cleanup(mv_chan);
775
776 spin_lock_bh(&mv_chan->lock);
777 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
778 chain_node) {
779 in_use_descs++;
780 list_del(&iter->chain_node);
781 }
782 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
783 completed_node) {
784 in_use_descs++;
785 list_del(&iter->completed_node);
786 }
787 list_for_each_entry_safe_reverse(
788 iter, _iter, &mv_chan->all_slots, slot_node) {
789 list_del(&iter->slot_node);
790 kfree(iter);
791 mv_chan->slots_allocated--;
792 }
793 mv_chan->last_used = NULL;
794
795 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
796 __func__, mv_chan->slots_allocated);
797 spin_unlock_bh(&mv_chan->lock);
798
799 if (in_use_descs)
800 dev_err(mv_chan->device->common.dev,
801 "freeing %d in use descriptors!\n", in_use_descs);
802}
803
804/**
805 * mv_xor_is_complete - poll the status of an XOR transaction
806 * @chan: XOR channel handle
807 * @cookie: XOR transaction identifier
808 */
809static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
810 dma_cookie_t cookie,
811 dma_cookie_t *done,
812 dma_cookie_t *used)
813{
814 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
815 dma_cookie_t last_used;
816 dma_cookie_t last_complete;
817 enum dma_status ret;
818
819 last_used = chan->cookie;
820 last_complete = mv_chan->completed_cookie;
821 mv_chan->is_complete_cookie = cookie;
822 if (done)
823 *done = last_complete;
824 if (used)
825 *used = last_used;
826
827 ret = dma_async_is_complete(cookie, last_complete, last_used);
828 if (ret == DMA_SUCCESS) {
829 mv_xor_clean_completed_slots(mv_chan);
830 return ret;
831 }
832 mv_xor_slot_cleanup(mv_chan);
833
834 last_used = chan->cookie;
835 last_complete = mv_chan->completed_cookie;
836
837 if (done)
838 *done = last_complete;
839 if (used)
840 *used = last_used;
841
842 return dma_async_is_complete(cookie, last_complete, last_used);
843}
844
845static void mv_dump_xor_regs(struct mv_xor_chan *chan)
846{
847 u32 val;
848
849 val = __raw_readl(XOR_CONFIG(chan));
850 dev_printk(KERN_ERR, chan->device->common.dev,
851 "config 0x%08x.\n", val);
852
853 val = __raw_readl(XOR_ACTIVATION(chan));
854 dev_printk(KERN_ERR, chan->device->common.dev,
855 "activation 0x%08x.\n", val);
856
857 val = __raw_readl(XOR_INTR_CAUSE(chan));
858 dev_printk(KERN_ERR, chan->device->common.dev,
859 "intr cause 0x%08x.\n", val);
860
861 val = __raw_readl(XOR_INTR_MASK(chan));
862 dev_printk(KERN_ERR, chan->device->common.dev,
863 "intr mask 0x%08x.\n", val);
864
865 val = __raw_readl(XOR_ERROR_CAUSE(chan));
866 dev_printk(KERN_ERR, chan->device->common.dev,
867 "error cause 0x%08x.\n", val);
868
869 val = __raw_readl(XOR_ERROR_ADDR(chan));
870 dev_printk(KERN_ERR, chan->device->common.dev,
871 "error addr 0x%08x.\n", val);
872}
873
874static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
875 u32 intr_cause)
876{
877 if (intr_cause & (1 << 4)) {
878 dev_dbg(chan->device->common.dev,
879 "ignore this error\n");
880 return;
881 }
882
883 dev_printk(KERN_ERR, chan->device->common.dev,
884 "error on chan %d. intr cause 0x%08x.\n",
885 chan->idx, intr_cause);
886
887 mv_dump_xor_regs(chan);
888 BUG();
889}
890
891static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
892{
893 struct mv_xor_chan *chan = data;
894 u32 intr_cause = mv_chan_get_intr_cause(chan);
895
896 dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
897
898 if (mv_is_err_intr(intr_cause))
899 mv_xor_err_interrupt_handler(chan, intr_cause);
900
901 tasklet_schedule(&chan->irq_tasklet);
902
903 mv_xor_device_clear_eoc_cause(chan);
904
905 return IRQ_HANDLED;
906}
907
908static void mv_xor_issue_pending(struct dma_chan *chan)
909{
910 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
911
912 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
913 mv_chan->pending = 0;
914 mv_chan_activate(mv_chan);
915 }
916}
917
918/*
919 * Perform a transaction to verify the HW works.
920 */
921#define MV_XOR_TEST_SIZE 2000
922
923static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
924{
925 int i;
926 void *src, *dest;
927 dma_addr_t src_dma, dest_dma;
928 struct dma_chan *dma_chan;
929 dma_cookie_t cookie;
930 struct dma_async_tx_descriptor *tx;
931 int err = 0;
932 struct mv_xor_chan *mv_chan;
933
934 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
935 if (!src)
936 return -ENOMEM;
937
938 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
939 if (!dest) {
940 kfree(src);
941 return -ENOMEM;
942 }
943
944 /* Fill in src buffer */
945 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
946 ((u8 *) src)[i] = (u8)i;
947
948 /* Start copy, using first DMA channel */
949 dma_chan = container_of(device->common.channels.next,
950 struct dma_chan,
951 device_node);
952 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
953 err = -ENODEV;
954 goto out;
955 }
956
957 dest_dma = dma_map_single(dma_chan->device->dev, dest,
958 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
959
960 src_dma = dma_map_single(dma_chan->device->dev, src,
961 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
962
963 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
964 MV_XOR_TEST_SIZE, 0);
965 cookie = mv_xor_tx_submit(tx);
966 mv_xor_issue_pending(dma_chan);
967 async_tx_ack(tx);
968 msleep(1);
969
970 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
971 DMA_SUCCESS) {
972 dev_printk(KERN_ERR, dma_chan->device->dev,
973 "Self-test copy timed out, disabling\n");
974 err = -ENODEV;
975 goto free_resources;
976 }
977
978 mv_chan = to_mv_xor_chan(dma_chan);
979 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
980 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
981 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
982 dev_printk(KERN_ERR, dma_chan->device->dev,
983 "Self-test copy failed compare, disabling\n");
984 err = -ENODEV;
985 goto free_resources;
986 }
987
988free_resources:
989 mv_xor_free_chan_resources(dma_chan);
990out:
991 kfree(src);
992 kfree(dest);
993 return err;
994}
995
996#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
997static int __devinit
998mv_xor_xor_self_test(struct mv_xor_device *device)
999{
1000 int i, src_idx;
1001 struct page *dest;
1002 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
1003 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
1004 dma_addr_t dest_dma;
1005 struct dma_async_tx_descriptor *tx;
1006 struct dma_chan *dma_chan;
1007 dma_cookie_t cookie;
1008 u8 cmp_byte = 0;
1009 u32 cmp_word;
1010 int err = 0;
1011 struct mv_xor_chan *mv_chan;
1012
1013 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1014 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1015 if (!xor_srcs[src_idx]) {
1016 while (src_idx--)
1017 __free_page(xor_srcs[src_idx]);
1018 return -ENOMEM;
1019 }
1020 }
1021
1022 dest = alloc_page(GFP_KERNEL);
1023 if (!dest) {
1024 while (src_idx--)
1025 __free_page(xor_srcs[src_idx]);
1026 return -ENOMEM;
1027 }
1028
1029 /* Fill in src buffers */
1030 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1031 u8 *ptr = page_address(xor_srcs[src_idx]);
1032 for (i = 0; i < PAGE_SIZE; i++)
1033 ptr[i] = (1 << src_idx);
1034 }
1035
1036 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1037 cmp_byte ^= (u8) (1 << src_idx);
1038
1039 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1040 (cmp_byte << 8) | cmp_byte;
1041
1042 memset(page_address(dest), 0, PAGE_SIZE);
1043
1044 dma_chan = container_of(device->common.channels.next,
1045 struct dma_chan,
1046 device_node);
1047 if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
1048 err = -ENODEV;
1049 goto out;
1050 }
1051
1052 /* test xor */
1053 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1054 DMA_FROM_DEVICE);
1055
1056 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1057 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1058 0, PAGE_SIZE, DMA_TO_DEVICE);
1059
1060 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1061 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1062
1063 cookie = mv_xor_tx_submit(tx);
1064 mv_xor_issue_pending(dma_chan);
1065 async_tx_ack(tx);
1066 msleep(8);
1067
1068 if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
1069 DMA_SUCCESS) {
1070 dev_printk(KERN_ERR, dma_chan->device->dev,
1071 "Self-test xor timed out, disabling\n");
1072 err = -ENODEV;
1073 goto free_resources;
1074 }
1075
1076 mv_chan = to_mv_xor_chan(dma_chan);
1077 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1078 PAGE_SIZE, DMA_FROM_DEVICE);
1079 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1080 u32 *ptr = page_address(dest);
1081 if (ptr[i] != cmp_word) {
1082 dev_printk(KERN_ERR, dma_chan->device->dev,
1083 "Self-test xor failed compare, disabling."
1084 " index %d, data %x, expected %x\n", i,
1085 ptr[i], cmp_word);
1086 err = -ENODEV;
1087 goto free_resources;
1088 }
1089 }
1090
1091free_resources:
1092 mv_xor_free_chan_resources(dma_chan);
1093out:
1094 src_idx = MV_XOR_NUM_SRC_TEST;
1095 while (src_idx--)
1096 __free_page(xor_srcs[src_idx]);
1097 __free_page(dest);
1098 return err;
1099}
1100
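The pass criterion in mv_xor_xor_self_test() follows directly from how the sources are filled: with MV_XOR_NUM_SRC_TEST = 4, source page i holds the byte (1 << i), so every destination byte must equal 0x01 ^ 0x02 ^ 0x04 ^ 0x08 = 0x0f, and every 32-bit destination word must equal cmp_word = 0x0f0f0f0f.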
1101static int __devexit mv_xor_remove(struct platform_device *dev)
1102{
1103 struct mv_xor_device *device = platform_get_drvdata(dev);
1104 struct dma_chan *chan, *_chan;
1105 struct mv_xor_chan *mv_chan;
1106 struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
1107
1108 dma_async_device_unregister(&device->common);
1109
1110 dma_free_coherent(&dev->dev, plat_data->pool_size,
1111 device->dma_desc_pool_virt, device->dma_desc_pool);
1112
1113 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1114 device_node) {
1115 mv_chan = to_mv_xor_chan(chan);
1116 list_del(&chan->device_node);
1117 }
1118
1119 return 0;
1120}
1121
1122static int __devinit mv_xor_probe(struct platform_device *pdev)
1123{
1124 int ret = 0;
1125 int irq;
1126 struct mv_xor_device *adev;
1127 struct mv_xor_chan *mv_chan;
1128 struct dma_device *dma_dev;
1129 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1130
1131
1132 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1133 if (!adev)
1134 return -ENOMEM;
1135
1136 dma_dev = &adev->common;
1137
1138 /* allocate coherent memory for hardware descriptors
1139 * note: writecombine gives slightly better performance, but
1140 * requires that we explicitly flush the writes
1141 */
1142 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1143 plat_data->pool_size,
1144 &adev->dma_desc_pool,
1145 GFP_KERNEL);
1146 if (!adev->dma_desc_pool_virt)
1147 return -ENOMEM;
1148
1149 adev->id = plat_data->hw_id;
1150
1151 /* discover transaction capabilites from the platform data */
1152 dma_dev->cap_mask = plat_data->cap_mask;
1153 adev->pdev = pdev;
1154 platform_set_drvdata(pdev, adev);
1155
1156 adev->shared = platform_get_drvdata(plat_data->shared);
1157
1158 INIT_LIST_HEAD(&dma_dev->channels);
1159
1160 /* set base routines */
1161 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1162 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1163 dma_dev->device_is_tx_complete = mv_xor_is_complete;
1164 dma_dev->device_issue_pending = mv_xor_issue_pending;
1165 dma_dev->dev = &pdev->dev;
1166
1167 /* set prep routines based on capability */
1168 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1169 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1170 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1171 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1172 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1173 dma_dev->max_xor = 8;
1174 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1175 }
1176
1177 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1178 if (!mv_chan) {
1179 ret = -ENOMEM;
1180 goto err_free_dma;
1181 }
1182 mv_chan->device = adev;
1183 mv_chan->idx = plat_data->hw_id;
1184 mv_chan->mmr_base = adev->shared->xor_base;
1185
1186 if (!mv_chan->mmr_base) {
1187 ret = -ENOMEM;
1188 goto err_free_dma;
1189 }
1190 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1191 mv_chan);
1192
1193 /* clear errors before enabling interrupts */
1194 mv_xor_device_clear_err_status(mv_chan);
1195
1196 irq = platform_get_irq(pdev, 0);
1197 if (irq < 0) {
1198 ret = irq;
1199 goto err_free_dma;
1200 }
1201 ret = devm_request_irq(&pdev->dev, irq,
1202 mv_xor_interrupt_handler,
1203 0, dev_name(&pdev->dev), mv_chan);
1204 if (ret)
1205 goto err_free_dma;
1206
1207 mv_chan_unmask_interrupts(mv_chan);
1208
1209 mv_set_mode(mv_chan, DMA_MEMCPY);
1210
1211 spin_lock_init(&mv_chan->lock);
1212 INIT_LIST_HEAD(&mv_chan->chain);
1213 INIT_LIST_HEAD(&mv_chan->completed_slots);
1214 INIT_LIST_HEAD(&mv_chan->all_slots);
1215 INIT_RCU_HEAD(&mv_chan->common.rcu);
1216 mv_chan->common.device = dma_dev;
1217
1218 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1219
1220 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1221 ret = mv_xor_memcpy_self_test(adev);
1222 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1223 if (ret)
1224 goto err_free_dma;
1225 }
1226
1227 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1228 ret = mv_xor_xor_self_test(adev);
1229 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1230 if (ret)
1231 goto err_free_dma;
1232 }
1233
1234 dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
1235 "( %s%s%s%s)\n",
1236 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1237 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1238 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1239 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1240
1241 dma_async_device_register(dma_dev);
1242 goto out;
1243
1244 err_free_dma:
1245 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1246 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1247 out:
1248 return ret;
1249}
1250
1251static void
1252mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
1253 struct mbus_dram_target_info *dram)
1254{
1255 void __iomem *base = msp->xor_base;
1256 u32 win_enable = 0;
1257 int i;
1258
1259 for (i = 0; i < 8; i++) {
1260 writel(0, base + WINDOW_BASE(i));
1261 writel(0, base + WINDOW_SIZE(i));
1262 if (i < 4)
1263 writel(0, base + WINDOW_REMAP_HIGH(i));
1264 }
1265
1266 for (i = 0; i < dram->num_cs; i++) {
1267 struct mbus_dram_window *cs = dram->cs + i;
1268
1269 writel((cs->base & 0xffff0000) |
1270 (cs->mbus_attr << 8) |
1271 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1272 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1273
1274 win_enable |= (1 << i);
1275 win_enable |= 3 << (16 + (2 * i));
1276 }
1277
1278 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1279 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1280}
1281
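mv_xor_conf_mbus_windows() consumes the same mbus_dram_target_info that the Orion platform code builds for its other DMA-capable peripherals. A hypothetical single-chip-select description, just to show which fields the loop above reads (the target id and attribute values are made up):

static struct mbus_dram_target_info example_dram = {
	.mbus_dram_target_id	= 0x0,		/* hypothetical target id */
	.num_cs			= 1,
	.cs[0] = {
		.mbus_attr	= 0x0e,		/* hypothetical window attribute */
		.base		= 0x00000000,
		.size		= 0x10000000,	/* 256 MB chip select */
	},
};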
1282static struct platform_driver mv_xor_driver = {
1283 .probe = mv_xor_probe,
1284 .remove = mv_xor_remove,
1285 .driver = {
1286 .owner = THIS_MODULE,
1287 .name = MV_XOR_NAME,
1288 },
1289};
1290
1291static int mv_xor_shared_probe(struct platform_device *pdev)
1292{
1293 struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
1294 struct mv_xor_shared_private *msp;
1295 struct resource *res;
1296
1297 dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
1298
1299 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
1300 if (!msp)
1301 return -ENOMEM;
1302
1303 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1304 if (!res)
1305 return -ENODEV;
1306
1307 msp->xor_base = devm_ioremap(&pdev->dev, res->start,
1308 res->end - res->start + 1);
1309 if (!msp->xor_base)
1310 return -EBUSY;
1311
1312 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1313 if (!res)
1314 return -ENODEV;
1315
1316 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1317 res->end - res->start + 1);
1318 if (!msp->xor_high_base)
1319 return -EBUSY;
1320
1321 platform_set_drvdata(pdev, msp);
1322
1323 /*
1324 * (Re-)program MBUS remapping windows if we are asked to.
1325 */
1326 if (msd != NULL && msd->dram != NULL)
1327 mv_xor_conf_mbus_windows(msp, msd->dram);
1328
1329 return 0;
1330}
1331
1332static int mv_xor_shared_remove(struct platform_device *pdev)
1333{
1334 return 0;
1335}
1336
1337static struct platform_driver mv_xor_shared_driver = {
1338 .probe = mv_xor_shared_probe,
1339 .remove = mv_xor_shared_remove,
1340 .driver = {
1341 .owner = THIS_MODULE,
1342 .name = MV_XOR_SHARED_NAME,
1343 },
1344};
1345
1346
1347static int __init mv_xor_init(void)
1348{
1349 int rc;
1350
1351 rc = platform_driver_register(&mv_xor_shared_driver);
1352 if (!rc) {
1353 rc = platform_driver_register(&mv_xor_driver);
1354 if (rc)
1355 platform_driver_unregister(&mv_xor_shared_driver);
1356 }
1357 return rc;
1358}
1359module_init(mv_xor_init);
1360
1361/* it's currently unsafe to unload this module */
1362#if 0
1363static void __exit mv_xor_exit(void)
1364{
1365 platform_driver_unregister(&mv_xor_driver);
1366 platform_driver_unregister(&mv_xor_shared_driver);
1367 return;
1368}
1369
1370module_exit(mv_xor_exit);
1371#endif
1372
1373MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1374MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1375MODULE_LICENSE("GPL");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
new file mode 100644
index 000000000000..06cafe1ef521
--- /dev/null
+++ b/drivers/dma/mv_xor.h
@@ -0,0 +1,183 @@
1/*
2 * Copyright (C) 2007, 2008, Marvell International Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18#ifndef MV_XOR_H
19#define MV_XOR_H
20
21#include <linux/types.h>
22#include <linux/io.h>
23#include <linux/dmaengine.h>
24#include <linux/interrupt.h>
25
26#define USE_TIMER
27#define MV_XOR_SLOT_SIZE 64
28#define MV_XOR_THRESHOLD 1
29
30#define XOR_OPERATION_MODE_XOR 0
31#define XOR_OPERATION_MODE_MEMCPY 2
32#define XOR_OPERATION_MODE_MEMSET 4
33
34#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
35#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
36#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
37#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
38#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
39#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
40#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
41
42#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
43#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
44#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30)
45#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
46#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
47#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
48#define XOR_INTR_MASK_VALUE 0x3F5
49
50#define WINDOW_BASE(w) (0x250 + ((w) << 2))
51#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
52#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
53#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
54
55struct mv_xor_shared_private {
56 void __iomem *xor_base;
57 void __iomem *xor_high_base;
58};
59
60
61/**
62 * struct mv_xor_device - internal representation of a XOR device
63 * @pdev: Platform device
64 * @id: HW XOR Device selector
65 * @dma_desc_pool: base of DMA descriptor region (DMA address)
66 * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
67 * @common: embedded struct dma_device
68 */
69struct mv_xor_device {
70 struct platform_device *pdev;
71 int id;
72 dma_addr_t dma_desc_pool;
73 void *dma_desc_pool_virt;
74 struct dma_device common;
75 struct mv_xor_shared_private *shared;
76};
77
78/**
79 * struct mv_xor_chan - internal representation of a XOR channel
80 * @pending: allows batching of hardware operations
81 * @completed_cookie: identifier for the most recently completed operation
82 * @lock: serializes enqueue/dequeue operations to the descriptors pool
83 * @mmr_base: memory mapped register base
84 * @idx: the index of the xor channel
85 * @chain: device chain view of the descriptors
86 * @completed_slots: slots completed by HW but still need to be acked
87 * @device: parent device
88 * @common: common dmaengine channel object members
89 * @last_used: place holder for allocation to continue from where it left off
90 * @all_slots: complete domain of slots usable by the channel
91 * @slots_allocated: records the actual size of the descriptor slot pool
92 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
93 */
94struct mv_xor_chan {
95 int pending;
96 dma_cookie_t completed_cookie;
97 spinlock_t lock; /* protects the descriptor slot pool */
98 void __iomem *mmr_base;
99 unsigned int idx;
100 enum dma_transaction_type current_type;
101 struct list_head chain;
102 struct list_head completed_slots;
103 struct mv_xor_device *device;
104 struct dma_chan common;
105 struct mv_xor_desc_slot *last_used;
106 struct list_head all_slots;
107 int slots_allocated;
108 struct tasklet_struct irq_tasklet;
109#ifdef USE_TIMER
110 unsigned long cleanup_time;
111 u32 current_on_last_cleanup;
112 dma_cookie_t is_complete_cookie;
113#endif
114};
115
116/**
117 * struct mv_xor_desc_slot - software descriptor
118 * @slot_node: node on the mv_xor_chan.all_slots list
119 * @chain_node: node on the mv_xor_chan.chain list
120 * @completed_node: node on the mv_xor_chan.completed_slots list
121 * @hw_desc: virtual address of the hardware descriptor chain
122 * @phys: hardware address of the hardware descriptor chain
123 * @group_head: first operation in a transaction
124 * @slot_cnt: total slots used in a transaction (group of operations)
125 * @slots_per_op: number of slots per operation
126 * @idx: pool index
127 * @unmap_src_cnt: number of xor sources
128 * @unmap_len: transaction bytecount
129 * @async_tx: support for the async_tx api
130 * @group_list: list of slots that make up a multi-descriptor transaction
131 * for example transfer lengths larger than the supported hw max
132 * @xor_check_result: result of the zero-sum check
133 * @crc32_result: result of the CRC-32 calculation
134 */
135struct mv_xor_desc_slot {
136 struct list_head slot_node;
137 struct list_head chain_node;
138 struct list_head completed_node;
139 enum dma_transaction_type type;
140 void *hw_desc;
141 struct mv_xor_desc_slot *group_head;
142 u16 slot_cnt;
143 u16 slots_per_op;
144 u16 idx;
145 u16 unmap_src_cnt;
146 u32 value;
147 size_t unmap_len;
148 struct dma_async_tx_descriptor async_tx;
149 union {
150 u32 *xor_check_result;
151 u32 *crc32_result;
152 };
153#ifdef USE_TIMER
154 unsigned long arrival_time;
155 struct timer_list timeout;
156#endif
157};
158
159/* This structure describes the hardware XOR descriptor (64 bytes) */
160struct mv_xor_desc {
161 u32 status; /* descriptor execution status */
162 u32 crc32_result; /* result of CRC-32 calculation */
163 u32 desc_command; /* type of operation to be carried out */
164 u32 phy_next_desc; /* next descriptor address pointer */
165 u32 byte_count; /* size of src/dst blocks in bytes */
166 u32 phy_dest_addr; /* destination block address */
167 u32 phy_src_addr[8]; /* source block addresses */
168 u32 reserved0;
169 u32 reserved1;
170};
171
172#define to_mv_sw_desc(addr_hw_desc) \
173 container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
174
175#define mv_hw_desc_slot_idx(hw_desc, idx) \
176 ((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
177
178#define MV_XOR_MIN_BYTE_COUNT (128)
179#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1)
180#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT
181
182
183#endif
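To make the 64-byte hardware descriptor above concrete, a single memory-to-memory copy could be described roughly as below. This is only a sketch built from the field names and byte-count limits in this header; the bits that go into desc_command (operation mode, end-of-chain interrupt and so on) are not visible here and are deliberately left out.

/* Sketch: fill one mv_xor_desc for a plain copy (command bits omitted). */
static void sketch_fill_memcpy_desc(struct mv_xor_desc *hw,
				    u32 dest, u32 src, u32 len)
{
	/* the header bounds transfers to MV_XOR_MIN/MAX_BYTE_COUNT */
	BUG_ON(len < MV_XOR_MIN_BYTE_COUNT || len > MV_XOR_MAX_BYTE_COUNT);

	hw->status		= 0;	/* hardware reports completion status here */
	hw->desc_command	= 0;	/* the real driver sets mode/interrupt bits */
	hw->phy_next_desc	= 0;	/* single descriptor, no chaining */
	hw->byte_count		= len;
	hw->phy_dest_addr	= dest;
	hw->phy_src_addr[0]	= src;	/* a plain copy uses only source slot 0 */
}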
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7dfd0f..0b624e927a6f 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
254static int smi_request(struct smi_cmd *smi_cmd) 254static int smi_request(struct smi_cmd *smi_cmd)
255{ 255{
256 cpumask_t old_mask; 256 cpumask_t old_mask;
257 cpumask_of_cpu_ptr(new_mask, 0);
257 int ret = 0; 258 int ret = 0;
258 259
259 if (smi_cmd->magic != SMI_CMD_MAGIC) { 260 if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
264 265
265 /* SMI requires CPU 0 */ 266 /* SMI requires CPU 0 */
266 old_mask = current->cpus_allowed; 267 old_mask = current->cpus_allowed;
267 set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); 268 set_cpus_allowed_ptr(current, new_mask);
268 if (smp_processor_id() != 0) { 269 if (smp_processor_id() != 0) {
269 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", 270 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
270 __func__); 271 __func__);
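The dcdbas hunk shows only the two lines that change; the surrounding logic is: remember the task's CPU affinity, pin it to CPU 0 (the SMI must be issued from the boot processor), double-check with smp_processor_id(), run the SMI, then restore the saved mask. In outline, and using the pre-patch &cpumask_of_cpu(0) spelling that this patch replaces with the cpumask_of_cpu_ptr() helper, the pattern is roughly:

#include <linux/sched.h>	/* current, cpus_allowed, set_cpus_allowed_ptr() */
#include <linux/smp.h>		/* smp_processor_id() */

/* Sketch only: error handling and the actual SMI call are the caller's job. */
static int sketch_run_on_cpu0(void (*fn)(void))
{
	cpumask_t old_mask = current->cpus_allowed;	/* remember affinity */
	int ret = 0;

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));	/* pin to CPU 0 */
	if (smp_processor_id() != 0) {
		ret = -EBUSY;			/* never made it onto the boot CPU */
		goto out;
	}

	fn();					/* caller-supplied work, e.g. the SMI */
out:
	set_cpus_allowed_ptr(current, &old_mask);	/* restore affinity */
	return ret;
}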
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f43d6d3cf2fa..426ac5add585 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -780,7 +780,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
780 */ 780 */
781static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value) 781static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value)
782{ 782{
783 __le64 x; 783 u64 x;
784 u64 m = (1ULL << n) - 1; 784 u64 m = (1ULL << n) - 1;
785 785
786 if (n > 32) 786 if (n > 32)
@@ -796,10 +796,10 @@ static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u3
796 report += offset >> 3; 796 report += offset >> 3;
797 offset &= 7; 797 offset &= 7;
798 798
799 x = get_unaligned((__le64 *)report); 799 x = get_unaligned_le64(report);
800 x &= cpu_to_le64(~(m << offset)); 800 x &= ~(m << offset);
801 x |= cpu_to_le64(((u64) value) << offset); 801 x |= ((u64)value) << offset;
802 put_unaligned(x, (__le64 *) report); 802 put_unaligned_le64(x, report);
803} 803}
804 804
805/* 805/*
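The implement() change above drops the __le64 casts in favour of get_unaligned_le64()/put_unaligned_le64(): the report is read as a little-endian 64-bit window starting at the byte that holds the field, the old bits are masked out, and the new value is OR-ed in at the bit offset. The standalone sketch below does the same splice with explicit byte arithmetic, purely to illustrate the operation; the function name and the open-coded load/store loops are mine, not part of the patch.

#include <stdint.h>
#include <stddef.h>

/* Write an n-bit little-endian field (n <= 32, as in the kernel code) at bit
 * 'offset' of 'report'; the caller must guarantee 8 addressable bytes there. */
static void put_bits_le(uint8_t *report, unsigned offset, unsigned n, uint32_t value)
{
	uint64_t mask = (1ULL << n) - 1;
	uint64_t x = 0;
	size_t i;

	report += offset >> 3;		/* byte that contains the first bit */
	offset &= 7;			/* bit position inside that byte */

	for (i = 0; i < 8; i++)		/* load 64 bits, little-endian */
		x |= (uint64_t)report[i] << (8 * i);

	x &= ~(mask << offset);				/* clear the old field */
	x |= ((uint64_t)value & mask) << offset;	/* splice in the new one */

	for (i = 0; i < 8; i++)		/* store back, little-endian */
		report[i] = x >> (8 * i);
}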
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index 4c2052c658f1..16feea014494 100644
--- a/drivers/hid/hid-input-quirks.c
+++ b/drivers/hid/hid-input-quirks.c
@@ -89,6 +89,29 @@ static int quirk_logitech_ultrax_remote(struct hid_usage *usage, struct input_de
89 return 1; 89 return 1;
90} 90}
91 91
92static int quirk_gyration_remote(struct hid_usage *usage, struct input_dev *input,
93 unsigned long **bit, int *max)
94{
95 if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
96 return 0;
97
98 set_bit(EV_REP, input->evbit);
99 switch(usage->hid & HID_USAGE) {
100 /* Reported on Gyration MCE Remote */
101 case 0x00d: map_key_clear(KEY_HOME); break;
102 case 0x024: map_key_clear(KEY_DVD); break;
103 case 0x025: map_key_clear(KEY_PVR); break;
104 case 0x046: map_key_clear(KEY_MEDIA); break;
105 case 0x047: map_key_clear(KEY_MP3); break;
106 case 0x049: map_key_clear(KEY_CAMERA); break;
107 case 0x04a: map_key_clear(KEY_VIDEO); break;
108
109 default:
110 return 0;
111 }
112 return 1;
113}
114
92static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input, 115static int quirk_chicony_tactical_pad(struct hid_usage *usage, struct input_dev *input,
93 unsigned long **bit, int *max) 116 unsigned long **bit, int *max)
94{ 117{
@@ -303,6 +326,9 @@ static int quirk_sunplus_wdesktop(struct hid_usage *usage, struct input_dev *inp
303#define VENDOR_ID_EZKEY 0x0518 326#define VENDOR_ID_EZKEY 0x0518
304#define DEVICE_ID_BTC_8193 0x0002 327#define DEVICE_ID_BTC_8193 0x0002
305 328
329#define VENDOR_ID_GYRATION 0x0c16
330#define DEVICE_ID_GYRATION_REMOTE 0x0002
331
306#define VENDOR_ID_LOGITECH 0x046d 332#define VENDOR_ID_LOGITECH 0x046d
307#define DEVICE_ID_LOGITECH_RECEIVER 0xc101 333#define DEVICE_ID_LOGITECH_RECEIVER 0xc101
308#define DEVICE_ID_S510_RECEIVER 0xc50c 334#define DEVICE_ID_S510_RECEIVER 0xc50c
@@ -337,6 +363,8 @@ static const struct hid_input_blacklist {
337 363
338 { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 }, 364 { VENDOR_ID_EZKEY, DEVICE_ID_BTC_8193, quirk_btc_8193 },
339 365
366 { VENDOR_ID_GYRATION, DEVICE_ID_GYRATION_REMOTE, quirk_gyration_remote },
367
340 { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote }, 368 { VENDOR_ID_LOGITECH, DEVICE_ID_LOGITECH_RECEIVER, quirk_logitech_ultrax_remote },
341 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless }, 369 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER, quirk_logitech_wireless },
342 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless }, 370 { VENDOR_ID_LOGITECH, DEVICE_ID_S510_RECEIVER_2, quirk_logitech_wireless },
@@ -438,6 +466,18 @@ int hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struc
438 input_event(input, usage->type, REL_WHEEL, -value); 466 input_event(input, usage->type, REL_WHEEL, -value);
439 return 1; 467 return 1;
440 } 468 }
469
470 /* Gyration MCE remote "Sleep" key */
471 if (hid->vendor == VENDOR_ID_GYRATION &&
472 hid->product == DEVICE_ID_GYRATION_REMOTE &&
473 (usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
474 (usage->hid & 0xff) == 0x82) {
475 input_event(input, usage->type, usage->code, 1);
476 input_sync(input);
477 input_event(input, usage->type, usage->code, 0);
478 input_sync(input);
479 return 1;
480 }
441 return 0; 481 return 0;
442} 482}
443 483
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 5c52a20ad344..1b2e8dc3398d 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -100,6 +100,8 @@ static struct hidinput_key_translation apple_fn_keys[] = {
100 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, 100 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
101 { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */ 101 { KEY_F3, KEY_FN_F5, APPLE_FLAG_FKEY }, /* Exposé */
102 { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */ 102 { KEY_F4, KEY_FN_F4, APPLE_FLAG_FKEY }, /* Dashboard */
103 { KEY_F5, KEY_KBDILLUMDOWN, APPLE_FLAG_FKEY },
104 { KEY_F6, KEY_KBDILLUMUP, APPLE_FLAG_FKEY },
103 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY }, 105 { KEY_F7, KEY_PREVIOUSSONG, APPLE_FLAG_FKEY },
104 { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY }, 106 { KEY_F8, KEY_PLAYPAUSE, APPLE_FLAG_FKEY },
105 { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY }, 107 { KEY_F9, KEY_NEXTSONG, APPLE_FLAG_FKEY },
@@ -612,6 +614,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
612 case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break; 614 case 0x0b6: map_key_clear(KEY_PREVIOUSSONG); break;
613 case 0x0b7: map_key_clear(KEY_STOPCD); break; 615 case 0x0b7: map_key_clear(KEY_STOPCD); break;
614 case 0x0b8: map_key_clear(KEY_EJECTCD); break; 616 case 0x0b8: map_key_clear(KEY_EJECTCD); break;
617 case 0x0bc: map_key_clear(KEY_MEDIA_REPEAT); break;
615 618
616 case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; 619 case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
617 case 0x0e0: map_abs_clear(ABS_VOLUME); break; 620 case 0x0e0: map_abs_clear(ABS_VOLUME); break;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 0c6b4d4e7e27..c40f0403edaf 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -105,6 +105,7 @@ out:
105static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) 105static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
106{ 106{
107 unsigned int minor = iminor(file->f_path.dentry->d_inode); 107 unsigned int minor = iminor(file->f_path.dentry->d_inode);
108 /* FIXME: What stops hidraw_table[minor] from going NULL? */
108 struct hid_device *dev = hidraw_table[minor]->hid; 109 struct hid_device *dev = hidraw_table[minor]->hid;
109 __u8 *buf; 110 __u8 *buf;
110 int ret = 0; 111 int ret = 0;
@@ -211,38 +212,43 @@ static int hidraw_release(struct inode * inode, struct file * file)
211 kfree(list->hidraw); 212 kfree(list->hidraw);
212 } 213 }
213 214
215 kfree(list);
216
214 return 0; 217 return 0;
215} 218}
216 219
217static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 220static long hidraw_ioctl(struct file *file, unsigned int cmd,
221 unsigned long arg)
218{ 222{
223 struct inode *inode = file->f_path.dentry->d_inode;
219 unsigned int minor = iminor(inode); 224 unsigned int minor = iminor(inode);
225 long ret = 0;
226 /* FIXME: What stops hidraw_table going NULL */
220 struct hidraw *dev = hidraw_table[minor]; 227 struct hidraw *dev = hidraw_table[minor];
221 void __user *user_arg = (void __user*) arg; 228 void __user *user_arg = (void __user*) arg;
222 229
230 lock_kernel();
223 switch (cmd) { 231 switch (cmd) {
224 case HIDIOCGRDESCSIZE: 232 case HIDIOCGRDESCSIZE:
225 if (put_user(dev->hid->rsize, (int __user *)arg)) 233 if (put_user(dev->hid->rsize, (int __user *)arg))
226 return -EFAULT; 234 ret = -EFAULT;
227 return 0; 235 break;
228 236
229 case HIDIOCGRDESC: 237 case HIDIOCGRDESC:
230 { 238 {
231 __u32 len; 239 __u32 len;
232 240
233 if (get_user(len, (int __user *)arg)) 241 if (get_user(len, (int __user *)arg))
234 return -EFAULT; 242 ret = -EFAULT;
235 243 else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
236 if (len > HID_MAX_DESCRIPTOR_SIZE - 1) 244 ret = -EINVAL;
237 return -EINVAL; 245 else if (copy_to_user(user_arg + offsetof(
238 246 struct hidraw_report_descriptor,
239 if (copy_to_user(user_arg + offsetof( 247 value[0]),
240 struct hidraw_report_descriptor, 248 dev->hid->rdesc,
241 value[0]), 249 min(dev->hid->rsize, len)))
242 dev->hid->rdesc, 250 ret = -EFAULT;
243 min(dev->hid->rsize, len))) 251 break;
244 return -EFAULT;
245 return 0;
246 } 252 }
247 case HIDIOCGRAWINFO: 253 case HIDIOCGRAWINFO:
248 { 254 {
@@ -252,15 +258,13 @@ static int hidraw_ioctl(struct inode *inode, struct file *file, unsigned int cmd
252 dinfo.vendor = dev->hid->vendor; 258 dinfo.vendor = dev->hid->vendor;
253 dinfo.product = dev->hid->product; 259 dinfo.product = dev->hid->product;
254 if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) 260 if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
255 return -EFAULT; 261 ret = -EFAULT;
256 262 break;
257 return 0;
258 } 263 }
259 default: 264 default:
260 printk(KERN_EMERG "hidraw: unsupported ioctl() %x\n", 265 ret = -ENOTTY;
261 cmd);
262 } 266 }
263 return -EINVAL; 267 return ret;
264} 268}
265 269
266static const struct file_operations hidraw_ops = { 270static const struct file_operations hidraw_ops = {
@@ -270,7 +274,7 @@ static const struct file_operations hidraw_ops = {
270 .poll = hidraw_poll, 274 .poll = hidraw_poll,
271 .open = hidraw_open, 275 .open = hidraw_open,
272 .release = hidraw_release, 276 .release = hidraw_release,
273 .ioctl = hidraw_ioctl, 277 .unlocked_ioctl = hidraw_ioctl,
274}; 278};
275 279
276void hidraw_report_event(struct hid_device *hid, u8 *data, int len) 280void hidraw_report_event(struct hid_device *hid, u8 *data, int len)
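The hidraw conversion above (and the hiddev one further down) follows the usual recipe for retiring the old .ioctl hook: switch the file_operations entry to .unlocked_ioctl, change the handler to take (file, cmd, arg), funnel every case through a single return path, return -ENOTTY for unknown commands, and take the BKL explicitly for now. Stripped to its skeleton for a made-up character device (lock_kernel()/unlock_kernel() still exist in this kernel generation; later kernels drop the BKL entirely):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel(), this era only */
#include <linux/uaccess.h>

#define FOO_GET_VERSION	_IOR('f', 0x01, int)	/* hypothetical ioctl number */

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long ret = 0;

	lock_kernel();		/* keep the old BKL semantics during conversion */
	switch (cmd) {
	case FOO_GET_VERSION:
		if (put_user(1, (int __user *)argp))
			ret = -EFAULT;
		break;
	default:
		ret = -ENOTTY;	/* unknown ioctl: not -EINVAL */
	}
	unlock_kernel();
	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,
};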
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 1df832a8fcbc..61e78a4369b9 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -69,12 +69,18 @@
69#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220 69#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220
70#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221 70#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221
71#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222 71#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222
72#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
73#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
74#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
72#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229 75#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229
73#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a 76#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a
74#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b 77#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b
75#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c 78#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c
76#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d 79#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d
77#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e 80#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e
81#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
82#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
83#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
78#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 84#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
79#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 85#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
80#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 86#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
@@ -241,6 +247,8 @@
241#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 247#define USB_DEVICE_ID_LD_MACHINETEST 0x2040
242 248
243#define USB_VENDOR_ID_LOGITECH 0x046d 249#define USB_VENDOR_ID_LOGITECH 0x046d
250#define USB_DEVICE_ID_LOGITECH_LX3 0xc044
251#define USB_DEVICE_ID_LOGITECH_V150 0xc047
244#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 252#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
245#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110 253#define USB_DEVICE_ID_LOGITECH_HARMONY 0xc110
246#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111 254#define USB_DEVICE_ID_LOGITECH_HARMONY_2 0xc111
@@ -314,6 +322,7 @@
314#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 322#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
315#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 323#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
316#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 324#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
325#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
317#define USB_DEVICE_ID_DINOVO_EDGE 0xc714 326#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
318#define USB_DEVICE_ID_DINOVO_MINI 0xc71f 327#define USB_DEVICE_ID_DINOVO_MINI 0xc71f
319 328
@@ -443,7 +452,8 @@ static const struct hid_blacklist {
443 { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD }, 452 { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
444 { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD }, 453 { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
445 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, 454 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
446 455
456 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP, HID_QUIRK_DUPLICATE_USAGES },
447 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES }, 457 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
448 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES }, 458 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI, HID_QUIRK_DUPLICATE_USAGES },
449 459
@@ -593,6 +603,8 @@ static const struct hid_blacklist {
593 603
594 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, 604 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
595 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP }, 605 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
606 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_LX3, HID_QUIRK_INVERT_HWHEEL },
607 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_V150, HID_QUIRK_INVERT_HWHEEL },
596 608
597 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS }, 609 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K, HID_QUIRK_MICROSOFT_KEYS },
598 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS }, 610 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K, HID_QUIRK_MICROSOFT_KEYS },
@@ -642,6 +654,12 @@ static const struct hid_blacklist {
642 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, 654 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
643 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, 655 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
644 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, 656 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
657 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN },
658 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
659 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN },
660 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN },
661 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
662 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN },
645 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 663 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
646 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 664 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
647 665
@@ -1128,7 +1146,7 @@ static void usbhid_fixup_microsoft_descriptor(unsigned char *rdesc, int rsize)
1128 && rdesc[557] == 0x19 1146 && rdesc[557] == 0x19
1129 && rdesc[559] == 0x29) { 1147 && rdesc[559] == 0x29) {
1130 printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n"); 1148 printk(KERN_INFO "Fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
1131 rdesc[284] = rdesc[304] = rdesc[558] = 0x35; 1149 rdesc[284] = rdesc[304] = rdesc[557] = 0x35;
1132 rdesc[352] = 0x36; 1150 rdesc[352] = 0x36;
1133 rdesc[286] = rdesc[355] = 0x46; 1151 rdesc[286] = rdesc[355] = 0x46;
1134 rdesc[306] = rdesc[559] = 0x45; 1152 rdesc[306] = rdesc[559] = 0x45;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 95cc192bc7af..842e9edb888e 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -406,6 +406,7 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
406 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL); 406 uref_multi = kmalloc(sizeof(struct hiddev_usage_ref_multi), GFP_KERNEL);
407 if (!uref_multi) 407 if (!uref_multi)
408 return -ENOMEM; 408 return -ENOMEM;
409 lock_kernel();
409 uref = &uref_multi->uref; 410 uref = &uref_multi->uref;
410 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) { 411 if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
411 if (copy_from_user(uref_multi, user_arg, 412 if (copy_from_user(uref_multi, user_arg,
@@ -501,12 +502,15 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
501 } 502 }
502 503
503goodreturn: 504goodreturn:
505 unlock_kernel();
504 kfree(uref_multi); 506 kfree(uref_multi);
505 return 0; 507 return 0;
506fault: 508fault:
509 unlock_kernel();
507 kfree(uref_multi); 510 kfree(uref_multi);
508 return -EFAULT; 511 return -EFAULT;
509inval: 512inval:
513 unlock_kernel();
510 kfree(uref_multi); 514 kfree(uref_multi);
511 return -EINVAL; 515 return -EINVAL;
512 } 516 }
@@ -540,7 +544,7 @@ static noinline int hiddev_ioctl_string(struct hiddev *hiddev, unsigned int cmd,
540 return len; 544 return len;
541} 545}
542 546
543static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 547static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
544{ 548{
545 struct hiddev_list *list = file->private_data; 549 struct hiddev_list *list = file->private_data;
546 struct hiddev *hiddev = list->hiddev; 550 struct hiddev *hiddev = list->hiddev;
@@ -555,7 +559,10 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
555 struct usbhid_device *usbhid = hid->driver_data; 559 struct usbhid_device *usbhid = hid->driver_data;
556 void __user *user_arg = (void __user *)arg; 560 void __user *user_arg = (void __user *)arg;
557 int i; 561 int i;
562
563 /* Called without BKL by compat methods so no BKL taken */
558 564
565 /* FIXME: Who or what stops this racing with a disconnect? */
559 if (!hiddev->exist) 566 if (!hiddev->exist)
560 return -EIO; 567 return -EIO;
561 568
@@ -756,8 +763,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
756#ifdef CONFIG_COMPAT 763#ifdef CONFIG_COMPAT
757static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 764static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
758{ 765{
759 struct inode *inode = file->f_path.dentry->d_inode; 766 return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
760 return hiddev_ioctl(inode, file, cmd, (unsigned long)compat_ptr(arg));
761} 767}
762#endif 768#endif
763 769
@@ -768,7 +774,7 @@ static const struct file_operations hiddev_fops = {
768 .poll = hiddev_poll, 774 .poll = hiddev_poll,
769 .open = hiddev_open, 775 .open = hiddev_open,
770 .release = hiddev_release, 776 .release = hiddev_release,
771 .ioctl = hiddev_ioctl, 777 .unlocked_ioctl = hiddev_ioctl,
772 .fasync = hiddev_fasync, 778 .fasync = hiddev_fasync,
773#ifdef CONFIG_COMPAT 779#ifdef CONFIG_COMPAT
774 .compat_ioctl = hiddev_compat_ioctl, 780 .compat_ioctl = hiddev_compat_ioctl,
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 3cd46d2e53c1..0caaafe01843 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -43,7 +43,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
43MODULE_DESCRIPTION(DRIVER_DESC); 43MODULE_DESCRIPTION(DRIVER_DESC);
44MODULE_LICENSE(DRIVER_LICENSE); 44MODULE_LICENSE(DRIVER_LICENSE);
45 45
46static unsigned char usb_kbd_keycode[256] = { 46static const unsigned char usb_kbd_keycode[256] = {
47 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 47 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
48 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 48 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
49 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 49 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
@@ -233,14 +233,6 @@ static int usb_kbd_probe(struct usb_interface *iface,
233 if (!usb_endpoint_is_int_in(endpoint)) 233 if (!usb_endpoint_is_int_in(endpoint))
234 return -ENODEV; 234 return -ENODEV;
235 235
236#ifdef CONFIG_USB_HID
237 if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
238 le16_to_cpu(dev->descriptor.idProduct))
239 & HID_QUIRK_IGNORE) {
240 return -ENODEV;
241 }
242#endif
243
244 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); 236 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
245 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); 237 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
246 238
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 703e9d0e8714..35689ef172cc 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -129,14 +129,6 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
129 if (!usb_endpoint_is_int_in(endpoint)) 129 if (!usb_endpoint_is_int_in(endpoint))
130 return -ENODEV; 130 return -ENODEV;
131 131
132#ifdef CONFIG_USB_HID
133 if (usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
134 le16_to_cpu(dev->descriptor.idProduct))
135 & (HID_QUIRK_IGNORE|HID_QUIRK_IGNORE_MOUSE)) {
136 return -ENODEV;
137 }
138#endif
139
140 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); 132 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
141 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); 133 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
142 134
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 15b09b89588a..04d9c4d459d0 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -510,6 +510,7 @@ config BLK_DEV_TRIFLEX
510 510
511config BLK_DEV_CY82C693 511config BLK_DEV_CY82C693
512 tristate "CY82C693 chipset support" 512 tristate "CY82C693 chipset support"
513 depends on ALPHA
513 select IDE_TIMINGS 514 select IDE_TIMINGS
514 select BLK_DEV_IDEDMA_PCI 515 select BLK_DEV_IDEDMA_PCI
515 help 516 help
@@ -548,6 +549,7 @@ config BLK_DEV_CS5535
548 549
549config BLK_DEV_HPT34X 550config BLK_DEV_HPT34X
550 tristate "HPT34X chipset support" 551 tristate "HPT34X chipset support"
552 depends on BROKEN
551 select BLK_DEV_IDEDMA_PCI 553 select BLK_DEV_IDEDMA_PCI
552 help 554 help
553 This driver adds up to 4 more EIDE devices sharing a single 555 This driver adds up to 4 more EIDE devices sharing a single
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 52f58c885783..f575e8341aec 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -72,7 +72,7 @@ struct icside_state {
72 void __iomem *ioc_base; 72 void __iomem *ioc_base;
73 unsigned int sel; 73 unsigned int sel;
74 unsigned int type; 74 unsigned int type;
75 ide_hwif_t *hwif[2]; 75 struct ide_host *host;
76}; 76};
77 77
78#define ICS_TYPE_A3IN 0 78#define ICS_TYPE_A3IN 0
@@ -375,12 +375,14 @@ static int icside_dma_test_irq(ide_drive_t *drive)
375 375
376static void icside_dma_timeout(ide_drive_t *drive) 376static void icside_dma_timeout(ide_drive_t *drive)
377{ 377{
378 ide_hwif_t *hwif = drive->hwif;
379
378 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); 380 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
379 381
380 if (icside_dma_test_irq(drive)) 382 if (icside_dma_test_irq(drive))
381 return; 383 return;
382 384
383 ide_dump_status(drive, "DMA timeout", ide_read_status(drive)); 385 ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
384 386
385 icside_dma_end(drive); 387 icside_dma_end(drive);
386} 388}
@@ -440,10 +442,10 @@ static void icside_setup_ports(hw_regs_t *hw, void __iomem *base,
440static int __init 442static int __init
441icside_register_v5(struct icside_state *state, struct expansion_card *ec) 443icside_register_v5(struct icside_state *state, struct expansion_card *ec)
442{ 444{
443 ide_hwif_t *hwif;
444 void __iomem *base; 445 void __iomem *base;
445 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 446 struct ide_host *host;
446 hw_regs_t hw; 447 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
448 int ret;
447 449
448 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); 450 base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
449 if (!base) 451 if (!base)
@@ -463,22 +465,23 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
463 465
464 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); 466 icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
465 467
466 hwif = ide_find_port(); 468 host = ide_host_alloc(NULL, hws);
467 if (!hwif) 469 if (host == NULL)
468 return -ENODEV; 470 return -ENODEV;
469 471
470 ide_init_port_hw(hwif, &hw); 472 state->host = host;
471 default_hwif_mmiops(hwif);
472
473 state->hwif[0] = hwif;
474 473
475 ecard_set_drvdata(ec, state); 474 ecard_set_drvdata(ec, state);
476 475
477 idx[0] = hwif->index; 476 ret = ide_host_register(host, NULL, hws);
478 477 if (ret)
479 ide_device_add(idx, NULL); 478 goto err_free;
480 479
481 return 0; 480 return 0;
481err_free:
482 ide_host_free(host);
483 ecard_set_drvdata(ec, NULL);
484 return ret;
482} 485}
483 486
484static const struct ide_port_info icside_v6_port_info __initdata = { 487static const struct ide_port_info icside_v6_port_info __initdata = {
@@ -493,13 +496,12 @@ static const struct ide_port_info icside_v6_port_info __initdata = {
493static int __init 496static int __init
494icside_register_v6(struct icside_state *state, struct expansion_card *ec) 497icside_register_v6(struct icside_state *state, struct expansion_card *ec)
495{ 498{
496 ide_hwif_t *hwif, *mate;
497 void __iomem *ioc_base, *easi_base; 499 void __iomem *ioc_base, *easi_base;
500 struct ide_host *host;
498 unsigned int sel = 0; 501 unsigned int sel = 0;
499 int ret; 502 int ret;
500 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 503 hw_regs_t hw[2], *hws[] = { &hw[0], NULL, NULL, NULL };
501 struct ide_port_info d = icside_v6_port_info; 504 struct ide_port_info d = icside_v6_port_info;
502 hw_regs_t hw[2];
503 505
504 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); 506 ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
505 if (!ioc_base) { 507 if (!ioc_base) {
@@ -538,28 +540,11 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
538 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); 540 icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
539 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); 541 icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
540 542
541 /* 543 host = ide_host_alloc(&d, hws);
542 * Find and register the interfaces. 544 if (host == NULL)
543 */
544 hwif = ide_find_port();
545 if (hwif == NULL)
546 return -ENODEV; 545 return -ENODEV;
547 546
548 ide_init_port_hw(hwif, &hw[0]); 547 state->host = host;
549 default_hwif_mmiops(hwif);
550
551 idx[0] = hwif->index;
552
553 mate = ide_find_port();
554 if (mate) {
555 ide_init_port_hw(mate, &hw[1]);
556 default_hwif_mmiops(mate);
557
558 idx[1] = mate->index;
559 }
560
561 state->hwif[0] = hwif;
562 state->hwif[1] = mate;
563 548
564 ecard_set_drvdata(ec, state); 549 ecard_set_drvdata(ec, state);
565 550
@@ -569,11 +554,17 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
569 d.dma_ops = NULL; 554 d.dma_ops = NULL;
570 } 555 }
571 556
572 ide_device_add(idx, &d); 557 ret = ide_host_register(host, NULL, hws);
558 if (ret)
559 goto err_free;
573 560
574 return 0; 561 return 0;
575 562err_free:
576 out: 563 ide_host_free(host);
564 if (d.dma_ops)
565 free_dma(ec->dma);
566 ecard_set_drvdata(ec, NULL);
567out:
577 return ret; 568 return ret;
578} 569}
579 570
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index 2f311da4c963..176532ffae0e 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -28,10 +28,8 @@
28 28
29static int __init ide_arm_init(void) 29static int __init ide_arm_init(void)
30{ 30{
31 ide_hwif_t *hwif;
32 hw_regs_t hw;
33 unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206; 31 unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
34 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 32 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 33
36 if (!request_region(base, 8, DRV_NAME)) { 34 if (!request_region(base, 8, DRV_NAME)) {
37 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", 35 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -51,15 +49,7 @@ static int __init ide_arm_init(void)
51 hw.irq = IDE_ARM_IRQ; 49 hw.irq = IDE_ARM_IRQ;
52 hw.chipset = ide_generic; 50 hw.chipset = ide_generic;
53 51
54 hwif = ide_find_port(); 52 return ide_host_add(NULL, hws, NULL);
55 if (hwif) {
56 ide_init_port_hw(hwif, &hw);
57 idx[0] = hwif->index;
58
59 ide_device_add(idx, NULL);
60 }
61
62 return 0;
63} 53}
64 54
65module_init(ide_arm_init); 55module_init(ide_arm_init);
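The icside and ide_arm conversions above, and the palm_bk3710, rapide and h8300 ones that follow, all replace the old ide_find_port()/ide_init_port_hw()/ide_device_add() sequence with the new host interface: fill a hw_regs_t, put it in a four-slot pointer array, and hand it to ide_host_add() (or ide_host_alloc() plus ide_host_register() when the driver needs the ide_host for later teardown). Reduced to a skeleton, with placeholder port and IRQ numbers:

#include <linux/ide.h>
#include <linux/init.h>
#include <linux/string.h>

static int __init sketch_ide_probe(void)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* placeholder data/ctl ports */
	hw.irq = 14;				/* placeholder IRQ */
	hw.chipset = ide_generic;

	/* a NULL port_info and a NULL "struct ide_host **" are both accepted */
	return ide_host_add(NULL, hws, NULL);
}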
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index c79b85b6e4a3..65bb4b8fd570 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -316,15 +316,14 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
316static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif, 316static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
317 const struct ide_port_info *d) 317 const struct ide_port_info *d)
318{ 318{
319 unsigned long base =
320 hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
321
322 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name); 319 printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
323 320
324 if (ide_allocate_dma_engine(hwif)) 321 if (ide_allocate_dma_engine(hwif))
325 return -1; 322 return -1;
326 323
327 ide_setup_dma(hwif, base); 324 hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
325
326 hwif->dma_ops = &sff_dma_ops;
328 327
329 return 0; 328 return 0;
330} 329}
@@ -348,11 +347,10 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
348{ 347{
349 struct clk *clk; 348 struct clk *clk;
350 struct resource *mem, *irq; 349 struct resource *mem, *irq;
351 ide_hwif_t *hwif; 350 struct ide_host *host;
352 unsigned long base, rate; 351 unsigned long base, rate;
353 int i; 352 int i, rc;
354 hw_regs_t hw; 353 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
355 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
356 354
357 clk = clk_get(NULL, "IDECLK"); 355 clk = clk_get(NULL, "IDECLK");
358 if (IS_ERR(clk)) 356 if (IS_ERR(clk))
@@ -394,24 +392,14 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
394 hw.irq = irq->start; 392 hw.irq = irq->start;
395 hw.chipset = ide_palm3710; 393 hw.chipset = ide_palm3710;
396 394
397 hwif = ide_find_port(); 395 rc = ide_host_add(&palm_bk3710_port_info, hws, NULL);
398 if (hwif == NULL) 396 if (rc)
399 goto out; 397 goto out;
400 398
401 i = hwif->index;
402
403 ide_init_port_hw(hwif, &hw);
404
405 default_hwif_mmiops(hwif);
406
407 idx[0] = i;
408
409 ide_device_add(idx, &palm_bk3710_port_info);
410
411 return 0; 399 return 0;
412out: 400out:
413 printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n"); 401 printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
414 return -ENODEV; 402 return rc;
415} 403}
416 404
417/* work with hotplug and coldplug */ 405/* work with hotplug and coldplug */
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index 43057e0303c8..2bdd8b734afb 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -32,11 +32,10 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
32static int __devinit 32static int __devinit
33rapide_probe(struct expansion_card *ec, const struct ecard_id *id) 33rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
34{ 34{
35 ide_hwif_t *hwif;
36 void __iomem *base; 35 void __iomem *base;
36 struct ide_host *host;
37 int ret; 37 int ret;
38 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 38 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
39 hw_regs_t hw;
40 39
41 ret = ecard_request_resources(ec); 40 ret = ecard_request_resources(ec);
42 if (ret) 41 if (ret)
@@ -53,20 +52,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
53 hw.chipset = ide_generic; 52 hw.chipset = ide_generic;
54 hw.dev = &ec->dev; 53 hw.dev = &ec->dev;
55 54
56 hwif = ide_find_port(); 55 ret = ide_host_add(&rapide_port_info, hws, &host);
57 if (hwif == NULL) { 56 if (ret)
58 ret = -ENOENT;
59 goto release; 57 goto release;
60 }
61
62 ide_init_port_hw(hwif, &hw);
63 default_hwif_mmiops(hwif);
64
65 idx[0] = hwif->index;
66
67 ide_device_add(idx, &rapide_port_info);
68 58
69 ecard_set_drvdata(ec, hwif); 59 ecard_set_drvdata(ec, host);
70 goto out; 60 goto out;
71 61
72 release: 62 release:
@@ -77,11 +67,11 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
77 67
78static void __devexit rapide_remove(struct expansion_card *ec) 68static void __devexit rapide_remove(struct expansion_card *ec)
79{ 69{
80 ide_hwif_t *hwif = ecard_get_drvdata(ec); 70 struct ide_host *host = ecard_get_drvdata(ec);
81 71
82 ecard_set_drvdata(ec, NULL); 72 ecard_set_drvdata(ec, NULL);
83 73
84 ide_unregister(hwif); 74 ide_host_remove(host);
85 75
86 ecard_release_resources(ec); 76 ecard_release_resources(ec);
87} 77}
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 20fad6d542cc..bde7a585f198 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -100,6 +100,8 @@ static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task)
100 /* be sure we're looking at the low order bits */ 100 /* be sure we're looking at the low order bits */
101 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 101 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
102 102
103 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
104 tf->feature = inb(io_ports->feature_addr);
103 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 105 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
104 tf->nsect = inb(io_ports->nsect_addr); 106 tf->nsect = inb(io_ports->nsect_addr);
105 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 107 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -153,6 +155,21 @@ static void h8300_output_data(ide_drive_t *drive, struct request *rq,
153 mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2); 155 mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
154} 156}
155 157
158static const struct ide_tp_ops h8300_tp_ops = {
159 .exec_command = ide_exec_command,
160 .read_status = ide_read_status,
161 .read_altstatus = ide_read_altstatus,
162 .read_sff_dma_status = ide_read_sff_dma_status,
163
164 .set_irq = ide_set_irq,
165
166 .tf_load = h8300_tf_load,
167 .tf_read = h8300_tf_read,
168
169 .input_data = h8300_input_data,
170 .output_data = h8300_output_data,
171};
172
156#define H8300_IDE_GAP (2) 173#define H8300_IDE_GAP (2)
157 174
158static inline void hw_setup(hw_regs_t *hw) 175static inline void hw_setup(hw_regs_t *hw)
@@ -167,27 +184,14 @@ static inline void hw_setup(hw_regs_t *hw)
167 hw->chipset = ide_generic; 184 hw->chipset = ide_generic;
168} 185}
169 186
170static inline void hwif_setup(ide_hwif_t *hwif)
171{
172 default_hwif_iops(hwif);
173
174 hwif->tf_load = h8300_tf_load;
175 hwif->tf_read = h8300_tf_read;
176
177 hwif->input_data = h8300_input_data;
178 hwif->output_data = h8300_output_data;
179}
180
181static const struct ide_port_info h8300_port_info = { 187static const struct ide_port_info h8300_port_info = {
188 .tp_ops = &h8300_tp_ops,
182 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, 189 .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
183}; 190};
184 191
185static int __init h8300_ide_init(void) 192static int __init h8300_ide_init(void)
186{ 193{
187 hw_regs_t hw; 194 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
188 ide_hwif_t *hwif;
189 int index;
190 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
191 195
192 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); 196 printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
193 197
@@ -200,19 +204,7 @@ static int __init h8300_ide_init(void)
200 204
201 hw_setup(&hw); 205 hw_setup(&hw);
202 206
203 hwif = ide_find_port_slot(&h8300_port_info); 207 return ide_host_add(&h8300_port_info, hws, NULL);
204 if (hwif == NULL)
205 return -ENOENT;
206
207 index = hwif->index;
208 ide_init_port_hw(hwif, &hw);
209 hwif_setup(hwif);
210
211 idx[0] = index;
212
213 ide_device_add(idx, &h8300_port_info);
214
215 return 0;
216 208
217out_busy: 209out_busy:
218 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); 210 printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2802031de670..adf04f99cdeb 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -22,6 +22,8 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
22 void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int)) 22 void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned, int))
23{ 23{
24 ide_hwif_t *hwif = drive->hwif; 24 ide_hwif_t *hwif = drive->hwif;
25 struct request *rq = hwif->hwgroup->rq;
26 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
25 xfer_func_t *xferfunc; 27 xfer_func_t *xferfunc;
26 unsigned int temp; 28 unsigned int temp;
27 u16 bcount; 29 u16 bcount;
@@ -30,12 +32,12 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
30 debug_log("Enter %s - interrupt handler\n", __func__); 32 debug_log("Enter %s - interrupt handler\n", __func__);
31 33
32 if (pc->flags & PC_FLAG_TIMEDOUT) { 34 if (pc->flags & PC_FLAG_TIMEDOUT) {
33 pc->callback(drive); 35 drive->pc_callback(drive);
34 return ide_stopped; 36 return ide_stopped;
35 } 37 }
36 38
37 /* Clear the interrupt */ 39 /* Clear the interrupt */
38 stat = ide_read_status(drive); 40 stat = tp_ops->read_status(hwif);
39 41
40 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { 42 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
41 if (hwif->dma_ops->dma_end(drive) || 43 if (hwif->dma_ops->dma_end(drive) ||
@@ -63,8 +65,9 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
63 local_irq_enable_in_hardirq(); 65 local_irq_enable_in_hardirq();
64 66
65 if (drive->media == ide_tape && !scsi && 67 if (drive->media == ide_tape && !scsi &&
66 (stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE) 68 (stat & ERR_STAT) && rq->cmd[0] == REQUEST_SENSE)
67 stat &= ~ERR_STAT; 69 stat &= ~ERR_STAT;
70
68 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) { 71 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
69 /* Error detected */ 72 /* Error detected */
70 debug_log("%s: I/O error\n", drive->name); 73 debug_log("%s: I/O error\n", drive->name);
@@ -75,16 +78,17 @@ ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
75 goto cmd_finished; 78 goto cmd_finished;
76 } 79 }
77 80
78 if (pc->c[0] == REQUEST_SENSE) { 81 if (rq->cmd[0] == REQUEST_SENSE) {
79 printk(KERN_ERR "%s: I/O error in request sense" 82 printk(KERN_ERR "%s: I/O error in request sense"
80 " command\n", drive->name); 83 " command\n", drive->name);
81 return ide_do_reset(drive); 84 return ide_do_reset(drive);
82 } 85 }
83 86
84 debug_log("[cmd %x]: check condition\n", pc->c[0]); 87 debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
85 88
86 /* Retry operation */ 89 /* Retry operation */
87 retry_pc(drive); 90 retry_pc(drive);
91
88 /* queued, but not started */ 92 /* queued, but not started */
89 return ide_stopped; 93 return ide_stopped;
90 } 94 }
@@ -95,8 +99,10 @@ cmd_finished:
95 dsc_handle(drive); 99 dsc_handle(drive);
96 return ide_stopped; 100 return ide_stopped;
97 } 101 }
102
98 /* Command finished - Call the callback function */ 103 /* Command finished - Call the callback function */
99 pc->callback(drive); 104 drive->pc_callback(drive);
105
100 return ide_stopped; 106 return ide_stopped;
101 } 107 }
102 108
@@ -107,16 +113,15 @@ cmd_finished:
107 ide_dma_off(drive); 113 ide_dma_off(drive);
108 return ide_do_reset(drive); 114 return ide_do_reset(drive);
109 } 115 }
110 /* Get the number of bytes to transfer on this interrupt. */
111 bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
112 hwif->INB(hwif->io_ports.lbam_addr);
113 116
114 ireason = hwif->INB(hwif->io_ports.nsect_addr); 117 /* Get the number of bytes to transfer on this interrupt. */
118 ide_read_bcount_and_ireason(drive, &bcount, &ireason);
115 119
116 if (ireason & CD) { 120 if (ireason & CD) {
117 printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__); 121 printk(KERN_ERR "%s: CoD != 0 in %s\n", drive->name, __func__);
118 return ide_do_reset(drive); 122 return ide_do_reset(drive);
119 } 123 }
124
120 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) { 125 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
121 /* Hopefully, we will never get here */ 126 /* Hopefully, we will never get here */
122 printk(KERN_ERR "%s: We wanted to %s, but the device wants us " 127 printk(KERN_ERR "%s: We wanted to %s, but the device wants us "
@@ -125,6 +130,7 @@ cmd_finished:
125 (ireason & IO) ? "Read" : "Write"); 130 (ireason & IO) ? "Read" : "Write");
126 return ide_do_reset(drive); 131 return ide_do_reset(drive);
127 } 132 }
133
128 if (!(pc->flags & PC_FLAG_WRITING)) { 134 if (!(pc->flags & PC_FLAG_WRITING)) {
129 /* Reading - Check that we have enough space */ 135 /* Reading - Check that we have enough space */
130 temp = pc->xferred + bcount; 136 temp = pc->xferred + bcount;
@@ -142,7 +148,7 @@ cmd_finished:
142 if (pc->sg) 148 if (pc->sg)
143 io_buffers(drive, pc, temp, 0); 149 io_buffers(drive, pc, temp, 0);
144 else 150 else
145 hwif->input_data(drive, NULL, 151 tp_ops->input_data(drive, NULL,
146 pc->cur_pos, temp); 152 pc->cur_pos, temp);
147 printk(KERN_ERR "%s: transferred %d of " 153 printk(KERN_ERR "%s: transferred %d of "
148 "%d bytes\n", 154 "%d bytes\n",
@@ -159,9 +165,9 @@ cmd_finished:
159 debug_log("The device wants to send us more data than " 165 debug_log("The device wants to send us more data than "
160 "expected - allowing transfer\n"); 166 "expected - allowing transfer\n");
161 } 167 }
162 xferfunc = hwif->input_data; 168 xferfunc = tp_ops->input_data;
163 } else 169 } else
164 xferfunc = hwif->output_data; 170 xferfunc = tp_ops->output_data;
165 171
166 if ((drive->media == ide_floppy && !scsi && !pc->buf) || 172 if ((drive->media == ide_floppy && !scsi && !pc->buf) ||
167 (drive->media == ide_tape && !scsi && pc->bh) || 173 (drive->media == ide_tape && !scsi && pc->bh) ||
@@ -175,7 +181,7 @@ cmd_finished:
175 pc->cur_pos += bcount; 181 pc->cur_pos += bcount;
176 182
177 debug_log("[cmd %x] transferred %d bytes on that intr.\n", 183 debug_log("[cmd %x] transferred %d bytes on that intr.\n",
178 pc->c[0], bcount); 184 rq->cmd[0], bcount);
179 185
180 /* And set the interrupt handler again */ 186 /* And set the interrupt handler again */
181 ide_set_handler(drive, handler, timeout, expiry); 187 ide_set_handler(drive, handler, timeout, expiry);
@@ -183,16 +189,27 @@ cmd_finished:
183} 189}
184EXPORT_SYMBOL_GPL(ide_pc_intr); 190EXPORT_SYMBOL_GPL(ide_pc_intr);
185 191
192static u8 ide_read_ireason(ide_drive_t *drive)
193{
194 ide_task_t task;
195
196 memset(&task, 0, sizeof(task));
197 task.tf_flags = IDE_TFLAG_IN_NSECT;
198
199 drive->hwif->tp_ops->tf_read(drive, &task);
200
201 return task.tf.nsect & 3;
202}
203
186static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) 204static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason)
187{ 205{
188 ide_hwif_t *hwif = drive->hwif;
189 int retries = 100; 206 int retries = 100;
190 207
191 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { 208 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
192 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing " 209 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
193 "a packet command, retrying\n", drive->name); 210 "a packet command, retrying\n", drive->name);
194 udelay(100); 211 udelay(100);
195 ireason = hwif->INB(hwif->io_ports.nsect_addr); 212 ireason = ide_read_ireason(drive);
196 if (retries == 0) { 213 if (retries == 0) {
197 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing " 214 printk(KERN_ERR "%s: (IO,CoD != (0,1) while issuing "
198 "a packet command, ignoring\n", 215 "a packet command, ignoring\n",
@@ -210,6 +227,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
210 ide_expiry_t *expiry) 227 ide_expiry_t *expiry)
211{ 228{
212 ide_hwif_t *hwif = drive->hwif; 229 ide_hwif_t *hwif = drive->hwif;
230 struct request *rq = hwif->hwgroup->rq;
213 ide_startstop_t startstop; 231 ide_startstop_t startstop;
214 u8 ireason; 232 u8 ireason;
215 233
@@ -219,7 +237,7 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
219 return startstop; 237 return startstop;
220 } 238 }
221 239
222 ireason = hwif->INB(hwif->io_ports.nsect_addr); 240 ireason = ide_read_ireason(drive);
223 if (drive->media == ide_tape && !drive->scsi) 241 if (drive->media == ide_tape && !drive->scsi)
224 ireason = ide_wait_ireason(drive, ireason); 242 ireason = ide_wait_ireason(drive, ireason);
225 243
@@ -239,8 +257,8 @@ ide_startstop_t ide_transfer_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
239 } 257 }
240 258
241 /* Send the actual packet */ 259 /* Send the actual packet */
242 if ((pc->flags & PC_FLAG_ZIP_DRIVE) == 0) 260 if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
243 hwif->output_data(drive, NULL, pc->c, 12); 261 hwif->tp_ops->output_data(drive, NULL, rq->cmd, 12);
244 262
245 return ide_started; 263 return ide_started;
246} 264}
@@ -284,7 +302,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_atapi_pc *pc,
284 bcount, dma); 302 bcount, dma);
285 303
286 /* Issue the packet command */ 304 /* Issue the packet command */
287 if (pc->flags & PC_FLAG_DRQ_INTERRUPT) { 305 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
288 ide_execute_command(drive, WIN_PACKETCMD, handler, 306 ide_execute_command(drive, WIN_PACKETCMD, handler,
289 timeout, NULL); 307 timeout, NULL);
290 return ide_started; 308 return ide_started;
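ide_read_ireason() above is the template for reading any taskfile register through the new tp_ops hook: set the matching IDE_TFLAG_IN_* bit, call tp_ops->tf_read(), and pull the value out of the task structure. Reading the ATAPI byte count from LBAm/LBAh follows the same pattern, which is roughly what the ide_read_bcount_and_ireason() helper used earlier in this hunk must do internally -- a sketch only, since the helper's real body is not part of this excerpt:

#include <linux/ide.h>
#include <linux/string.h>

/* Sketch: read the ATAPI byte count via the tp_ops taskfile interface. */
static u16 sketch_read_bcount(ide_drive_t *drive)
{
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM;

	drive->hwif->tp_ops->tf_read(drive, &task);

	return (task.tf.lbah << 8) | task.tf.lbam;
}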
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6e29dd532090..4e73aeee4053 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -85,10 +85,8 @@ static void ide_cd_put(struct cdrom_info *cd)
85/* Mark that we've seen a media change and invalidate our internal buffers. */ 85/* Mark that we've seen a media change and invalidate our internal buffers. */
86static void cdrom_saw_media_change(ide_drive_t *drive) 86static void cdrom_saw_media_change(ide_drive_t *drive)
87{ 87{
88 struct cdrom_info *cd = drive->driver_data; 88 drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
89 89 drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
90 cd->cd_flags |= IDE_CD_FLAG_MEDIA_CHANGED;
91 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID;
92} 90}
93 91
94static int cdrom_log_sense(ide_drive_t *drive, struct request *rq, 92static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
@@ -280,11 +278,12 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
280 */ 278 */
281static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 279static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
282{ 280{
283 struct request *rq = HWGROUP(drive)->rq; 281 ide_hwif_t *hwif = drive->hwif;
282 struct request *rq = hwif->hwgroup->rq;
284 int stat, err, sense_key; 283 int stat, err, sense_key;
285 284
286 /* check for errors */ 285 /* check for errors */
287 stat = ide_read_status(drive); 286 stat = hwif->tp_ops->read_status(hwif);
288 287
289 if (stat_ret) 288 if (stat_ret)
290 *stat_ret = stat; 289 *stat_ret = stat;
@@ -528,7 +527,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
528 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL, 527 ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL,
529 xferlen, info->dma); 528 xferlen, info->dma);
530 529
531 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { 530 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
532 /* waiting for CDB interrupt, not DMA yet. */ 531 /* waiting for CDB interrupt, not DMA yet. */
533 if (info->dma) 532 if (info->dma)
534 drive->waiting_for_dma = 0; 533 drive->waiting_for_dma = 0;
@@ -560,7 +559,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
560 struct cdrom_info *info = drive->driver_data; 559 struct cdrom_info *info = drive->driver_data;
561 ide_startstop_t startstop; 560 ide_startstop_t startstop;
562 561
563 if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) { 562 if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
564 /* 563 /*
565 * Here we should have been called after receiving an interrupt 564 * Here we should have been called after receiving an interrupt
566 * from the device. DRQ should now be set. 565
@@ -589,7 +588,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
589 cmd_len = ATAPI_MIN_CDB_BYTES; 588 cmd_len = ATAPI_MIN_CDB_BYTES;
590 589
591 /* send the command to the device */ 590 /* send the command to the device */
592 hwif->output_data(drive, NULL, rq->cmd, cmd_len); 591 hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
593 592
594 /* start the DMA if need be */ 593 /* start the DMA if need be */
595 if (info->dma) 594 if (info->dma)
@@ -606,6 +605,8 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
606static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, 605static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
607 int len, int ireason, int rw) 606 int len, int ireason, int rw)
608{ 607{
608 ide_hwif_t *hwif = drive->hwif;
609
609 /* 610 /*
610 * ireason == 0: the drive wants to receive data from us 611 * ireason == 0: the drive wants to receive data from us
611 * ireason == 2: the drive is expecting to transfer data to us 612 * ireason == 2: the drive is expecting to transfer data to us
@@ -624,7 +625,7 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
624 * Some drives (ASUS) seem to tell us that status info is 625 * Some drives (ASUS) seem to tell us that status info is
625 * available. Just get it and ignore. 626 * available. Just get it and ignore.
626 */ 627 */
627 (void)ide_read_status(drive); 628 (void)hwif->tp_ops->read_status(hwif);
628 return 0; 629 return 0;
629 } else { 630 } else {
630 /* drive wants a command packet, or invalid ireason... */ 631 /* drive wants a command packet, or invalid ireason... */
@@ -645,20 +646,18 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
645 */ 646 */
646static int ide_cd_check_transfer_size(ide_drive_t *drive, int len) 647static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
647{ 648{
648 struct cdrom_info *cd = drive->driver_data;
649
650 if ((len % SECTOR_SIZE) == 0) 649 if ((len % SECTOR_SIZE) == 0)
651 return 0; 650 return 0;
652 651
653 printk(KERN_ERR "%s: %s: Bad transfer size %d\n", 652 printk(KERN_ERR "%s: %s: Bad transfer size %d\n",
654 drive->name, __func__, len); 653 drive->name, __func__, len);
655 654
656 if (cd->cd_flags & IDE_CD_FLAG_LIMIT_NFRAMES) 655 if (drive->atapi_flags & IDE_AFLAG_LIMIT_NFRAMES)
657 printk(KERN_ERR " This drive is not supported by " 656 printk(KERN_ERR " This drive is not supported by "
658 "this version of the driver\n"); 657 "this version of the driver\n");
659 else { 658 else {
660 printk(KERN_ERR " Trying to limit transfer sizes\n"); 659 printk(KERN_ERR " Trying to limit transfer sizes\n");
661 cd->cd_flags |= IDE_CD_FLAG_LIMIT_NFRAMES; 660 drive->atapi_flags |= IDE_AFLAG_LIMIT_NFRAMES;
662 } 661 }
663 662
664 return 1; 663 return 1;
@@ -735,7 +734,7 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
735 if (cdrom_decode_status(drive, 0, &stat)) 734 if (cdrom_decode_status(drive, 0, &stat))
736 return ide_stopped; 735 return ide_stopped;
737 736
738 info->cd_flags |= IDE_CD_FLAG_SEEKING; 737 drive->atapi_flags |= IDE_AFLAG_SEEKING;
739 738
740 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) { 739 if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
741 if (--retry == 0) 740 if (--retry == 0)
@@ -892,10 +891,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
892 struct request *rq = HWGROUP(drive)->rq; 891 struct request *rq = HWGROUP(drive)->rq;
893 xfer_func_t *xferfunc; 892 xfer_func_t *xferfunc;
894 ide_expiry_t *expiry = NULL; 893 ide_expiry_t *expiry = NULL;
895 int dma_error = 0, dma, stat, ireason, len, thislen, uptodate = 0; 894 int dma_error = 0, dma, stat, thislen, uptodate = 0;
896 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0; 895 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0;
897 unsigned int timeout; 896 unsigned int timeout;
898 u8 lowcyl, highcyl; 897 u16 len;
898 u8 ireason;
899 899
900 /* check for errors */ 900 /* check for errors */
901 dma = info->dma; 901 dma = info->dma;
@@ -923,12 +923,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
923 goto end_request; 923 goto end_request;
924 } 924 }
925 925
926 /* ok we fall to pio :/ */ 926 ide_read_bcount_and_ireason(drive, &len, &ireason);
927 ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
928 lowcyl = hwif->INB(hwif->io_ports.lbam_addr);
929 highcyl = hwif->INB(hwif->io_ports.lbah_addr);
930
931 len = lowcyl + (256 * highcyl);
932 927
933 thislen = blk_fs_request(rq) ? len : rq->data_len; 928 thislen = blk_fs_request(rq) ? len : rq->data_len;
934 if (thislen > len) 929 if (thislen > len)
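cdrom_newpc_intr() above now calls ide_read_bcount_and_ireason() instead of open-coding three taskfile reads. What the helper has to compute can be read off the lines being removed; the sketch below simply mirrors them (the real helper presumably goes through the new tp_ops accessors rather than hwif->INB()).

    static void read_bcount_and_ireason_sketch(ide_drive_t *drive,
                                               u16 *bcount, u8 *ireason)
    {
        ide_hwif_t *hwif = drive->hwif;
        u8 lowcyl, highcyl;

        /* interrupt reason is in the low two bits of the sector count reg */
        *ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;

        /* the byte count is split across the LBA mid/high registers */
        lowcyl  = hwif->INB(hwif->io_ports.lbam_addr);
        highcyl = hwif->INB(hwif->io_ports.lbah_addr);

        *bcount = lowcyl + (256 * highcyl);
    }

The caller's declarations change to match: len becomes a u16 and ireason a u8, the natural widths of what the registers hold.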
@@ -991,10 +986,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
991 986
992 if (ireason == 0) { 987 if (ireason == 0) {
993 write = 1; 988 write = 1;
994 xferfunc = hwif->output_data; 989 xferfunc = hwif->tp_ops->output_data;
995 } else { 990 } else {
996 write = 0; 991 write = 0;
997 xferfunc = hwif->input_data; 992 xferfunc = hwif->tp_ops->input_data;
998 } 993 }
999 994
1000 /* transfer data */ 995 /* transfer data */
@@ -1198,9 +1193,10 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1198 int xferlen; 1193 int xferlen;
1199 1194
1200 if (blk_fs_request(rq)) { 1195 if (blk_fs_request(rq)) {
1201 if (info->cd_flags & IDE_CD_FLAG_SEEKING) { 1196 if (drive->atapi_flags & IDE_AFLAG_SEEKING) {
1197 ide_hwif_t *hwif = drive->hwif;
1202 unsigned long elapsed = jiffies - info->start_seek; 1198 unsigned long elapsed = jiffies - info->start_seek;
1203 int stat = ide_read_status(drive); 1199 int stat = hwif->tp_ops->read_status(hwif);
1204 1200
1205 if ((stat & SEEK_STAT) != SEEK_STAT) { 1201 if ((stat & SEEK_STAT) != SEEK_STAT) {
1206 if (elapsed < IDECD_SEEK_TIMEOUT) { 1202 if (elapsed < IDECD_SEEK_TIMEOUT) {
@@ -1211,7 +1207,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1211 printk(KERN_ERR "%s: DSC timeout\n", 1207 printk(KERN_ERR "%s: DSC timeout\n",
1212 drive->name); 1208 drive->name);
1213 } 1209 }
1214 info->cd_flags &= ~IDE_CD_FLAG_SEEKING; 1210 drive->atapi_flags &= ~IDE_AFLAG_SEEKING;
1215 } 1211 }
1216 if (rq_data_dir(rq) == READ && 1212 if (rq_data_dir(rq) == READ &&
1217 IDE_LARGE_SEEK(info->last_block, block, 1213 IDE_LARGE_SEEK(info->last_block, block,
@@ -1288,7 +1284,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
1288 */ 1284 */
1289 cmd[7] = cdi->sanyo_slot % 3; 1285 cmd[7] = cdi->sanyo_slot % 3;
1290 1286
1291 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, REQ_QUIET); 1287 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
1292} 1288}
1293 1289
1294static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, 1290static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -1296,8 +1292,8 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
1296 struct request_sense *sense) 1292 struct request_sense *sense)
1297{ 1293{
1298 struct { 1294 struct {
1299 __u32 lba; 1295 __be32 lba;
1300 __u32 blocklen; 1296 __be32 blocklen;
1301 } capbuf; 1297 } capbuf;
1302 1298
1303 int stat; 1299 int stat;
@@ -1369,7 +1365,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1369 */ 1365 */
1370 (void) cdrom_check_status(drive, sense); 1366 (void) cdrom_check_status(drive, sense);
1371 1367
1372 if (info->cd_flags & IDE_CD_FLAG_TOC_VALID) 1368 if (drive->atapi_flags & IDE_AFLAG_TOC_VALID)
1373 return 0; 1369 return 0;
1374 1370
1375 /* try to get the total cdrom capacity and sector size */ 1371 /* try to get the total cdrom capacity and sector size */
@@ -1391,7 +1387,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1391 if (stat) 1387 if (stat)
1392 return stat; 1388 return stat;
1393 1389
1394 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1390 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1395 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1391 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
1396 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1392 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
1397 } 1393 }
@@ -1432,7 +1428,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1432 if (stat) 1428 if (stat)
1433 return stat; 1429 return stat;
1434 1430
1435 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1431 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1436 toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT); 1432 toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT);
1437 toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT); 1433 toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT);
1438 } else { 1434 } else {
@@ -1446,14 +1442,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1446 1442
1447 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); 1443 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
1448 1444
1449 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1445 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1450 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1446 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
1451 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1447 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track);
1452 } 1448 }
1453 1449
1454 for (i = 0; i <= ntracks; i++) { 1450 for (i = 0; i <= ntracks; i++) {
1455 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { 1451 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
1456 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) 1452 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
1457 toc->ent[i].track = BCD2BIN(toc->ent[i].track); 1453 toc->ent[i].track = BCD2BIN(toc->ent[i].track);
1458 msf_from_bcd(&toc->ent[i].addr.msf); 1454 msf_from_bcd(&toc->ent[i].addr.msf);
1459 } 1455 }
@@ -1476,7 +1472,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1476 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ 1472 toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
1477 } 1473 }
1478 1474
1479 if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) { 1475 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
1480 /* re-read multisession information using MSF format */ 1476 /* re-read multisession information using MSF format */
1481 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, 1477 stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
1482 sizeof(ms_tmp), sense); 1478 sizeof(ms_tmp), sense);
@@ -1500,7 +1496,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1500 } 1496 }
1501 1497
1502 /* Remember that we've read this stuff. */ 1498 /* Remember that we've read this stuff. */
1503 info->cd_flags |= IDE_CD_FLAG_TOC_VALID; 1499 drive->atapi_flags |= IDE_AFLAG_TOC_VALID;
1504 1500
1505 return 0; 1501 return 0;
1506} 1502}
@@ -1512,7 +1508,7 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
1512 struct packet_command cgc; 1508 struct packet_command cgc;
1513 int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE; 1509 int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE;
1514 1510
1515 if ((info->cd_flags & IDE_CD_FLAG_FULL_CAPS_PAGE) == 0) 1511 if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0)
1516 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; 1512 size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
1517 1513
1518 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); 1514 init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
@@ -1530,15 +1526,12 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
1530 struct cdrom_info *cd = drive->driver_data; 1526 struct cdrom_info *cd = drive->driver_data;
1531 u16 curspeed, maxspeed; 1527 u16 curspeed, maxspeed;
1532 1528
1533 curspeed = *(u16 *)&buf[8 + 14]; 1529 if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) {
1534 maxspeed = *(u16 *)&buf[8 + 8]; 1530 curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]);
1535 1531 maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]);
1536 if (cd->cd_flags & IDE_CD_FLAG_LE_SPEED_FIELDS) {
1537 curspeed = le16_to_cpu(curspeed);
1538 maxspeed = le16_to_cpu(maxspeed);
1539 } else { 1532 } else {
1540 curspeed = be16_to_cpu(curspeed); 1533 curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]);
1541 maxspeed = be16_to_cpu(maxspeed); 1534 maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]);
1542 } 1535 }
1543 1536
1544 cd->current_speed = (curspeed + (176/2)) / 176; 1537 cd->current_speed = (curspeed + (176/2)) / 176;
@@ -1579,7 +1572,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots)
1579 devinfo->handle = drive; 1572 devinfo->handle = drive;
1580 strcpy(devinfo->name, drive->name); 1573 strcpy(devinfo->name, drive->name);
1581 1574
1582 if (info->cd_flags & IDE_CD_FLAG_NO_SPEED_SELECT) 1575 if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT)
1583 devinfo->mask |= CDC_SELECT_SPEED; 1576 devinfo->mask |= CDC_SELECT_SPEED;
1584 1577
1585 devinfo->disk = info->disk; 1578 devinfo->disk = info->disk;
@@ -1605,8 +1598,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1605 return nslots; 1598 return nslots;
1606 } 1599 }
1607 1600
1608 if (cd->cd_flags & IDE_CD_FLAG_PRE_ATAPI12) { 1601 if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) {
1609 cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT; 1602 drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
1610 cdi->mask &= ~CDC_PLAY_AUDIO; 1603 cdi->mask &= ~CDC_PLAY_AUDIO;
1611 return nslots; 1604 return nslots;
1612 } 1605 }
@@ -1624,9 +1617,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1624 return 0; 1617 return 0;
1625 1618
1626 if ((buf[8 + 6] & 0x01) == 0) 1619 if ((buf[8 + 6] & 0x01) == 0)
1627 cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK; 1620 drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
1628 if (buf[8 + 6] & 0x08) 1621 if (buf[8 + 6] & 0x08)
1629 cd->cd_flags &= ~IDE_CD_FLAG_NO_EJECT; 1622 drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT;
1630 if (buf[8 + 3] & 0x01) 1623 if (buf[8 + 3] & 0x01)
1631 cdi->mask &= ~CDC_CD_R; 1624 cdi->mask &= ~CDC_CD_R;
1632 if (buf[8 + 3] & 0x02) 1625 if (buf[8 + 3] & 0x02)
@@ -1637,7 +1630,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1637 cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM); 1630 cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM);
1638 if (buf[8 + 3] & 0x10) 1631 if (buf[8 + 3] & 0x10)
1639 cdi->mask &= ~CDC_DVD_R; 1632 cdi->mask &= ~CDC_DVD_R;
1640 if ((buf[8 + 4] & 0x01) || (cd->cd_flags & IDE_CD_FLAG_PLAY_AUDIO_OK)) 1633 if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK))
1641 cdi->mask &= ~CDC_PLAY_AUDIO; 1634 cdi->mask &= ~CDC_PLAY_AUDIO;
1642 1635
1643 mechtype = buf[8 + 6] >> 5; 1636 mechtype = buf[8 + 6] >> 5;
@@ -1679,7 +1672,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
1679 else 1672 else
1680 printk(KERN_CONT " drive"); 1673 printk(KERN_CONT " drive");
1681 1674
1682 printk(KERN_CONT ", %dkB Cache\n", be16_to_cpu(*(u16 *)&buf[8 + 12])); 1675 printk(KERN_CONT ", %dkB Cache\n", be16_to_cpup((__be16 *)&buf[8 + 12]));
1683 1676
1684 return nslots; 1677 return nslots;
1685} 1678}
@@ -1802,43 +1795,43 @@ static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
1802 1795
1803static const struct cd_list_entry ide_cd_quirks_list[] = { 1796static const struct cd_list_entry ide_cd_quirks_list[] = {
1804 /* Limit transfer size per interrupt. */ 1797 /* Limit transfer size per interrupt. */
1805 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, 1798 { "SAMSUNG CD-ROM SCR-2430", NULL, IDE_AFLAG_LIMIT_NFRAMES },
1806 { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_CD_FLAG_LIMIT_NFRAMES }, 1799 { "SAMSUNG CD-ROM SCR-2432", NULL, IDE_AFLAG_LIMIT_NFRAMES },
1807 /* SCR-3231 doesn't support the SET_CD_SPEED command. */ 1800 /* SCR-3231 doesn't support the SET_CD_SPEED command. */
1808 { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_CD_FLAG_NO_SPEED_SELECT }, 1801 { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT },
1809 /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */ 1802 /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */
1810 { "NEC CD-ROM DRIVE:260", "1.01", IDE_CD_FLAG_TOCADDR_AS_BCD | 1803 { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD |
1811 IDE_CD_FLAG_PRE_ATAPI12, }, 1804 IDE_AFLAG_PRE_ATAPI12, },
1812 /* Vertos 300, some versions of this drive like to talk BCD. */ 1805 /* Vertos 300, some versions of this drive like to talk BCD. */
1813 { "V003S0DS", NULL, IDE_CD_FLAG_VERTOS_300_SSD, }, 1806 { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, },
1814 /* Vertos 600 ESD. */ 1807 /* Vertos 600 ESD. */
1815 { "V006E0DS", NULL, IDE_CD_FLAG_VERTOS_600_ESD, }, 1808 { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, },
1816 /* 1809 /*
1817 * Sanyo 3 CD changer uses a non-standard command for CD changing 1810 * Sanyo 3 CD changer uses a non-standard command for CD changing
1818 * (by default standard ATAPI support for CD changers is used). 1811 * (by default standard ATAPI support for CD changers is used).
1819 */ 1812 */
1820 { "CD-ROM CDR-C3 G", NULL, IDE_CD_FLAG_SANYO_3CD }, 1813 { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD },
1821 { "CD-ROM CDR-C3G", NULL, IDE_CD_FLAG_SANYO_3CD }, 1814 { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD },
1822 { "CD-ROM CDR_C36", NULL, IDE_CD_FLAG_SANYO_3CD }, 1815 { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD },
1823 /* Stingray 8X CD-ROM. */ 1816 /* Stingray 8X CD-ROM. */
1824 { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_CD_FLAG_PRE_ATAPI12}, 1817 { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 },
1825 /* 1818 /*
1826 * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length 1819 * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length
1827 * mode sense page capabilities size, but older drives break. 1820 * mode sense page capabilities size, but older drives break.
1828 */ 1821 */
1829 { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE }, 1822 { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
1830 { "WPI CDS-32X", NULL, IDE_CD_FLAG_FULL_CAPS_PAGE }, 1823 { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE },
1831 /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */ 1824 /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */
1832 { "", "241N", IDE_CD_FLAG_LE_SPEED_FIELDS }, 1825 { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS },
1833 /* 1826 /*
1834 * Some drives used by Apple don't advertise audio play 1827 * Some drives used by Apple don't advertise audio play
1835 * but they do support reading TOC & audio data. 1828
1836 */ 1829 */
1837 { "MATSHITADVD-ROM SR-8187", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1830 { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1838 { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1831 { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1839 { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1832 { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1840 { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1833 { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1841 { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1834 { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK },
1842 { NULL, NULL, 0 } 1835 { NULL, NULL, 0 }
1843}; 1836};
1844 1837
@@ -1873,20 +1866,20 @@ static int ide_cdrom_setup(ide_drive_t *drive)
1873 1866
1874 drive->special.all = 0; 1867 drive->special.all = 0;
1875 1868
1876 cd->cd_flags = IDE_CD_FLAG_MEDIA_CHANGED | IDE_CD_FLAG_NO_EJECT | 1869 drive->atapi_flags = IDE_AFLAG_MEDIA_CHANGED | IDE_AFLAG_NO_EJECT |
1877 ide_cd_flags(id); 1870 ide_cd_flags(id);
1878 1871
1879 if ((id->config & 0x0060) == 0x20) 1872 if ((id->config & 0x0060) == 0x20)
1880 cd->cd_flags |= IDE_CD_FLAG_DRQ_INTERRUPT; 1873 drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
1881 1874
1882 if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_300_SSD) && 1875 if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) &&
1883 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') 1876 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
1884 cd->cd_flags |= (IDE_CD_FLAG_TOCTRACKS_AS_BCD | 1877 drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD |
1885 IDE_CD_FLAG_TOCADDR_AS_BCD); 1878 IDE_AFLAG_TOCADDR_AS_BCD);
1886 else if ((cd->cd_flags & IDE_CD_FLAG_VERTOS_600_ESD) && 1879 else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) &&
1887 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2') 1880 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
1888 cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD; 1881 drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD;
1889 else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD) 1882 else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD)
1890 /* 3 => use CD in slot 0 */ 1883 /* 3 => use CD in slot 0 */
1891 cdi->sanyo_slot = 3; 1884 cdi->sanyo_slot = 3;
1892 1885
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index fe0ea36e4124..61a4599b77db 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -27,42 +27,6 @@
27#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20) 27#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20)
28#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4 28#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4
29 29
30enum {
31 /* Device sends an interrupt when ready for a packet command. */
32 IDE_CD_FLAG_DRQ_INTERRUPT = (1 << 0),
33 /* Drive cannot lock the door. */
34 IDE_CD_FLAG_NO_DOORLOCK = (1 << 1),
35 /* Drive cannot eject the disc. */
36 IDE_CD_FLAG_NO_EJECT = (1 << 2),
37 /* Drive is a pre ATAPI 1.2 drive. */
38 IDE_CD_FLAG_PRE_ATAPI12 = (1 << 3),
39 /* TOC addresses are in BCD. */
40 IDE_CD_FLAG_TOCADDR_AS_BCD = (1 << 4),
41 /* TOC track numbers are in BCD. */
42 IDE_CD_FLAG_TOCTRACKS_AS_BCD = (1 << 5),
43 /*
44 * Drive does not provide data in multiples of SECTOR_SIZE
45 * when more than one interrupt is needed.
46 */
47 IDE_CD_FLAG_LIMIT_NFRAMES = (1 << 6),
48 /* Seeking in progress. */
49 IDE_CD_FLAG_SEEKING = (1 << 7),
50 /* Driver has noticed a media change. */
51 IDE_CD_FLAG_MEDIA_CHANGED = (1 << 8),
52 /* Saved TOC information is current. */
53 IDE_CD_FLAG_TOC_VALID = (1 << 9),
54 /* We think that the drive door is locked. */
55 IDE_CD_FLAG_DOOR_LOCKED = (1 << 10),
56 /* SET_CD_SPEED command is unsupported. */
57 IDE_CD_FLAG_NO_SPEED_SELECT = (1 << 11),
58 IDE_CD_FLAG_VERTOS_300_SSD = (1 << 12),
59 IDE_CD_FLAG_VERTOS_600_ESD = (1 << 13),
60 IDE_CD_FLAG_SANYO_3CD = (1 << 14),
61 IDE_CD_FLAG_FULL_CAPS_PAGE = (1 << 15),
62 IDE_CD_FLAG_PLAY_AUDIO_OK = (1 << 16),
63 IDE_CD_FLAG_LE_SPEED_FIELDS = (1 << 17),
64};
65
66/* Structure of a MSF cdrom address. */ 30/* Structure of a MSF cdrom address. */
67struct atapi_msf { 31struct atapi_msf {
68 byte reserved; 32 byte reserved;
@@ -128,8 +92,6 @@ struct cdrom_info {
128 unsigned long last_block; 92 unsigned long last_block;
129 unsigned long start_seek; 93 unsigned long start_seek;
130 94
131 unsigned int cd_flags;
132
133 u8 max_speed; /* Max speed of the drive. */ 95 u8 max_speed; /* Max speed of the drive. */
134 u8 current_speed; /* Current speed of the drive. */ 96 u8 current_speed; /* Current speed of the drive. */
135 97
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 24d002addf73..74231b41f611 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -27,10 +27,9 @@ int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
27void ide_cdrom_release_real(struct cdrom_device_info *cdi) 27void ide_cdrom_release_real(struct cdrom_device_info *cdi)
28{ 28{
29 ide_drive_t *drive = cdi->handle; 29 ide_drive_t *drive = cdi->handle;
30 struct cdrom_info *cd = drive->driver_data;
31 30
32 if (!cdi->use_count) 31 if (!cdi->use_count)
33 cd->cd_flags &= ~IDE_CD_FLAG_TOC_VALID; 32 drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
34} 33}
35 34
36/* 35/*
@@ -83,13 +82,12 @@ int ide_cdrom_check_media_change_real(struct cdrom_device_info *cdi,
83 int slot_nr) 82 int slot_nr)
84{ 83{
85 ide_drive_t *drive = cdi->handle; 84 ide_drive_t *drive = cdi->handle;
86 struct cdrom_info *cd = drive->driver_data;
87 int retval; 85 int retval;
88 86
89 if (slot_nr == CDSL_CURRENT) { 87 if (slot_nr == CDSL_CURRENT) {
90 (void) cdrom_check_status(drive, NULL); 88 (void) cdrom_check_status(drive, NULL);
91 retval = (cd->cd_flags & IDE_CD_FLAG_MEDIA_CHANGED) ? 1 : 0; 89 retval = (drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED) ? 1 : 0;
92 cd->cd_flags &= ~IDE_CD_FLAG_MEDIA_CHANGED; 90 drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
93 return retval; 91 return retval;
94 } else { 92 } else {
95 return -EINVAL; 93 return -EINVAL;
@@ -107,11 +105,11 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
107 char loej = 0x02; 105 char loej = 0x02;
108 unsigned char cmd[BLK_MAX_CDB]; 106 unsigned char cmd[BLK_MAX_CDB];
109 107
110 if ((cd->cd_flags & IDE_CD_FLAG_NO_EJECT) && !ejectflag) 108 if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
111 return -EDRIVE_CANT_DO_THIS; 109 return -EDRIVE_CANT_DO_THIS;
112 110
113 /* reload fails on some drives, if the tray is locked */ 111 /* reload fails on some drives, if the tray is locked */
114 if ((cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) && ejectflag) 112 if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
115 return 0; 113 return 0;
116 114
117 /* only tell drive to close tray if open, if it can do that */ 115 /* only tell drive to close tray if open, if it can do that */
@@ -123,7 +121,7 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag,
123 cmd[0] = GPCMD_START_STOP_UNIT; 121 cmd[0] = GPCMD_START_STOP_UNIT;
124 cmd[4] = loej | (ejectflag != 0); 122 cmd[4] = loej | (ejectflag != 0);
125 123
126 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, sense, 0, 0); 124 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
127} 125}
128 126
129/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */ 127/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
@@ -131,7 +129,6 @@ static
131int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, 129int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
132 struct request_sense *sense) 130 struct request_sense *sense)
133{ 131{
134 struct cdrom_info *cd = drive->driver_data;
135 struct request_sense my_sense; 132 struct request_sense my_sense;
136 int stat; 133 int stat;
137 134
@@ -139,7 +136,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
139 sense = &my_sense; 136 sense = &my_sense;
140 137
141 /* If the drive cannot lock the door, just pretend. */ 138 /* If the drive cannot lock the door, just pretend. */
142 if (cd->cd_flags & IDE_CD_FLAG_NO_DOORLOCK) { 139 if (drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK) {
143 stat = 0; 140 stat = 0;
144 } else { 141 } else {
145 unsigned char cmd[BLK_MAX_CDB]; 142 unsigned char cmd[BLK_MAX_CDB];
@@ -149,7 +146,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
149 cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; 146 cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
150 cmd[4] = lockflag ? 1 : 0; 147 cmd[4] = lockflag ? 1 : 0;
151 148
152 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, 149 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
153 sense, 0, 0); 150 sense, 0, 0);
154 } 151 }
155 152
@@ -160,7 +157,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
160 (sense->asc == 0x24 || sense->asc == 0x20)) { 157 (sense->asc == 0x24 || sense->asc == 0x20)) {
161 printk(KERN_ERR "%s: door locking not supported\n", 158 printk(KERN_ERR "%s: door locking not supported\n",
162 drive->name); 159 drive->name);
163 cd->cd_flags |= IDE_CD_FLAG_NO_DOORLOCK; 160 drive->atapi_flags |= IDE_AFLAG_NO_DOORLOCK;
164 stat = 0; 161 stat = 0;
165 } 162 }
166 163
@@ -170,9 +167,9 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
170 167
171 if (stat == 0) { 168 if (stat == 0) {
172 if (lockflag) 169 if (lockflag)
173 cd->cd_flags |= IDE_CD_FLAG_DOOR_LOCKED; 170 drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
174 else 171 else
175 cd->cd_flags &= ~IDE_CD_FLAG_DOOR_LOCKED; 172 drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
176 } 173 }
177 174
178 return stat; 175 return stat;
@@ -231,7 +228,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
231 cmd[5] = speed & 0xff; 228 cmd[5] = speed & 0xff;
232 } 229 }
233 230
234 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0); 231 stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
235 232
236 if (!ide_cdrom_get_capabilities(drive, buf)) { 233 if (!ide_cdrom_get_capabilities(drive, buf)) {
237 ide_cdrom_update_speed(drive, buf); 234 ide_cdrom_update_speed(drive, buf);
@@ -250,7 +247,7 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
250 struct request_sense sense; 247 struct request_sense sense;
251 int ret; 248 int ret;
252 249
253 if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0 || !info->toc) { 250 if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
254 ret = ide_cd_read_toc(drive, &sense); 251 ret = ide_cd_read_toc(drive, &sense);
255 if (ret) 252 if (ret)
256 return ret; 253 return ret;
@@ -308,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
308 * A reset will unlock the door. If it was previously locked, 305 * A reset will unlock the door. If it was previously locked,
309 * lock it again. 306 * lock it again.
310 */ 307 */
311 if (cd->cd_flags & IDE_CD_FLAG_DOOR_LOCKED) 308 if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
312 (void)ide_cd_lockdoor(drive, 1, &sense); 309 (void)ide_cd_lockdoor(drive, 1, &sense);
313 310
314 return ret; 311 return ret;
@@ -324,7 +321,7 @@ static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
324 /* 321 /*
325 * don't serve cached data, if the toc isn't valid 322 * don't serve cached data, if the toc isn't valid
326 */ 323 */
327 if ((info->cd_flags & IDE_CD_FLAG_TOC_VALID) == 0) 324 if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
328 return -EINVAL; 325 return -EINVAL;
329 326
330 /* Check validity of requested track number. */ 327 /* Check validity of requested track number. */
@@ -374,7 +371,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
374 lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]); 371 lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
375 lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]); 372 lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
376 373
377 return ide_cd_queue_pc(drive, cmd, 0, NULL, 0, &sense, 0, 0); 374 return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
378} 375}
379 376
380static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg) 377static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
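In ide-cd_ioctl.c the door-lock state likewise moves into drive->atapi_flags, and the ide_cd_queue_pc() calls now pass NULL where they used to pass 0, which suggests that argument has become a pointer. Below is a sketch of the converted lock path with the error handling trimmed; the wrapper name and the memset are assumptions, while the command bytes, the flag bits and the queue_pc call mirror the hunks above.

    static int lockdoor_sketch(ide_drive_t *drive, int lockflag,
                               struct request_sense *sense)
    {
        int stat = 0;

        /* if the drive cannot lock the door, just pretend it worked */
        if ((drive->atapi_flags & IDE_AFLAG_NO_DOORLOCK) == 0) {
            unsigned char cmd[BLK_MAX_CDB];

            memset(cmd, 0, BLK_MAX_CDB);
            cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
            cmd[4] = lockflag ? 1 : 0;

            stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
                                   sense, 0, 0);
        }

        if (stat == 0) {
            if (lockflag)
                drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
            else
                drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
        }

        return stat;
    }

The full ide_cd_lockdoor() above additionally marks drives that reject the command with IDE_AFLAG_NO_DOORLOCK, so later calls take the pretend-success path.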
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3a2e80237c10..df5fe5756871 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -158,7 +158,7 @@ static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
158 write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0; 158 write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
159 159
160 if (dma) 160 if (dma)
161 index = drive->vdma ? 4 : 8; 161 index = 8;
162 else 162 else
163 index = drive->mult_count ? 0 : 4; 163 index = drive->mult_count ? 0 : 4;
164 164
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 7ee44f86bc54..be99d463dcc7 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -100,10 +100,11 @@ static const struct drive_list_entry drive_blacklist [] = {
100 100
101ide_startstop_t ide_dma_intr (ide_drive_t *drive) 101ide_startstop_t ide_dma_intr (ide_drive_t *drive)
102{ 102{
103 ide_hwif_t *hwif = drive->hwif;
103 u8 stat = 0, dma_stat = 0; 104 u8 stat = 0, dma_stat = 0;
104 105
105 dma_stat = drive->hwif->dma_ops->dma_end(drive); 106 dma_stat = hwif->dma_ops->dma_end(drive);
106 stat = ide_read_status(drive); 107 stat = hwif->tp_ops->read_status(hwif);
107 108
108 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { 109 if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
109 if (!dma_stat) { 110 if (!dma_stat) {
@@ -334,7 +335,7 @@ static int config_drive_for_dma (ide_drive_t *drive)
334static int dma_timer_expiry (ide_drive_t *drive) 335static int dma_timer_expiry (ide_drive_t *drive)
335{ 336{
336 ide_hwif_t *hwif = HWIF(drive); 337 ide_hwif_t *hwif = HWIF(drive);
337 u8 dma_stat = hwif->INB(hwif->dma_status); 338 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
338 339
339 printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n", 340 printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
340 drive->name, dma_stat); 341 drive->name, dma_stat);
@@ -369,14 +370,18 @@ void ide_dma_host_set(ide_drive_t *drive, int on)
369{ 370{
370 ide_hwif_t *hwif = HWIF(drive); 371 ide_hwif_t *hwif = HWIF(drive);
371 u8 unit = (drive->select.b.unit & 0x01); 372 u8 unit = (drive->select.b.unit & 0x01);
372 u8 dma_stat = hwif->INB(hwif->dma_status); 373 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
373 374
374 if (on) 375 if (on)
375 dma_stat |= (1 << (5 + unit)); 376 dma_stat |= (1 << (5 + unit));
376 else 377 else
377 dma_stat &= ~(1 << (5 + unit)); 378 dma_stat &= ~(1 << (5 + unit));
378 379
379 hwif->OUTB(dma_stat, hwif->dma_status); 380 if (hwif->host_flags & IDE_HFLAG_MMIO)
381 writeb(dma_stat,
382 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
383 else
384 outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
380} 385}
381 386
382EXPORT_SYMBOL_GPL(ide_dma_host_set); 387EXPORT_SYMBOL_GPL(ide_dma_host_set);
@@ -449,6 +454,7 @@ int ide_dma_setup(ide_drive_t *drive)
449 ide_hwif_t *hwif = drive->hwif; 454 ide_hwif_t *hwif = drive->hwif;
450 struct request *rq = HWGROUP(drive)->rq; 455 struct request *rq = HWGROUP(drive)->rq;
451 unsigned int reading; 456 unsigned int reading;
457 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
452 u8 dma_stat; 458 u8 dma_stat;
453 459
454 if (rq_data_dir(rq)) 460 if (rq_data_dir(rq))
@@ -470,13 +476,21 @@ int ide_dma_setup(ide_drive_t *drive)
470 outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS); 476 outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
471 477
472 /* specify r/w */ 478 /* specify r/w */
473 hwif->OUTB(reading, hwif->dma_command); 479 if (mmio)
480 writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
481 else
482 outb(reading, hwif->dma_base + ATA_DMA_CMD);
474 483
475 /* read dma_status for INTR & ERROR flags */ 484 /* read DMA status for INTR & ERROR flags */
476 dma_stat = hwif->INB(hwif->dma_status); 485 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
477 486
478 /* clear INTR & ERROR flags */ 487 /* clear INTR & ERROR flags */
479 hwif->OUTB(dma_stat|6, hwif->dma_status); 488 if (mmio)
489 writeb(dma_stat | 6,
490 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
491 else
492 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
493
480 drive->waiting_for_dma = 1; 494 drive->waiting_for_dma = 1;
481 return 0; 495 return 0;
482} 496}
@@ -492,16 +506,24 @@ EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
492 506
493void ide_dma_start(ide_drive_t *drive) 507void ide_dma_start(ide_drive_t *drive)
494{ 508{
495 ide_hwif_t *hwif = HWIF(drive); 509 ide_hwif_t *hwif = drive->hwif;
496 u8 dma_cmd = hwif->INB(hwif->dma_command); 510 u8 dma_cmd;
497 511
498 /* Note that this is done *after* the cmd has 512 /* Note that this is done *after* the cmd has
499 * been issued to the drive, as per the BM-IDE spec. 513 * been issued to the drive, as per the BM-IDE spec.
500 * The Promise Ultra33 doesn't work correctly when 514 * The Promise Ultra33 doesn't work correctly when
501 * we do this part before issuing the drive cmd. 515 * we do this part before issuing the drive cmd.
502 */ 516 */
503 /* start DMA */ 517 if (hwif->host_flags & IDE_HFLAG_MMIO) {
504 hwif->OUTB(dma_cmd|1, hwif->dma_command); 518 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
519 /* start DMA */
520 writeb(dma_cmd | 1,
521 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
522 } else {
523 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
524 outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
525 }
526
505 hwif->dma = 1; 527 hwif->dma = 1;
506 wmb(); 528 wmb();
507} 529}
@@ -511,18 +533,33 @@ EXPORT_SYMBOL_GPL(ide_dma_start);
511/* returns 1 on error, 0 otherwise */ 533/* returns 1 on error, 0 otherwise */
512int __ide_dma_end (ide_drive_t *drive) 534int __ide_dma_end (ide_drive_t *drive)
513{ 535{
514 ide_hwif_t *hwif = HWIF(drive); 536 ide_hwif_t *hwif = drive->hwif;
537 u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
515 u8 dma_stat = 0, dma_cmd = 0; 538 u8 dma_stat = 0, dma_cmd = 0;
516 539
517 drive->waiting_for_dma = 0; 540 drive->waiting_for_dma = 0;
518 /* get dma_command mode */ 541
519 dma_cmd = hwif->INB(hwif->dma_command); 542 if (mmio) {
520 /* stop DMA */ 543 /* get DMA command mode */
521 hwif->OUTB(dma_cmd&~1, hwif->dma_command); 544 dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
545 /* stop DMA */
546 writeb(dma_cmd & ~1,
547 (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
548 } else {
549 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
550 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
551 }
552
522 /* get DMA status */ 553 /* get DMA status */
523 dma_stat = hwif->INB(hwif->dma_status); 554 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
524 /* clear the INTR & ERROR bits */ 555
525 hwif->OUTB(dma_stat|6, hwif->dma_status); 556 if (mmio)
557 /* clear the INTR & ERROR bits */
558 writeb(dma_stat | 6,
559 (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
560 else
561 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
562
526 /* purge DMA mappings */ 563 /* purge DMA mappings */
527 ide_destroy_dmatable(drive); 564 ide_destroy_dmatable(drive);
528 /* verify good DMA status */ 565 /* verify good DMA status */
@@ -537,7 +574,7 @@ EXPORT_SYMBOL(__ide_dma_end);
537int ide_dma_test_irq(ide_drive_t *drive) 574int ide_dma_test_irq(ide_drive_t *drive)
538{ 575{
539 ide_hwif_t *hwif = HWIF(drive); 576 ide_hwif_t *hwif = HWIF(drive);
540 u8 dma_stat = hwif->INB(hwif->dma_status); 577 u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
541 578
542 /* return 1 if INTR asserted */ 579 /* return 1 if INTR asserted */
543 if ((dma_stat & 4) == 4) 580 if ((dma_stat & 4) == 4)
@@ -719,9 +756,8 @@ static int ide_tune_dma(ide_drive_t *drive)
719static int ide_dma_check(ide_drive_t *drive) 756static int ide_dma_check(ide_drive_t *drive)
720{ 757{
721 ide_hwif_t *hwif = drive->hwif; 758 ide_hwif_t *hwif = drive->hwif;
722 int vdma = (hwif->host_flags & IDE_HFLAG_VDMA)? 1 : 0;
723 759
724 if (!vdma && ide_tune_dma(drive)) 760 if (ide_tune_dma(drive))
725 return 0; 761 return 0;
726 762
727 /* TODO: always do PIO fallback */ 763 /* TODO: always do PIO fallback */
@@ -730,7 +766,7 @@ static int ide_dma_check(ide_drive_t *drive)
730 766
731 ide_set_max_pio(drive); 767 ide_set_max_pio(drive);
732 768
733 return vdma ? 0 : -1; 769 return -1;
734} 770}
735 771
736int ide_id_dma_bug(ide_drive_t *drive) 772int ide_id_dma_bug(ide_drive_t *drive)
@@ -842,7 +878,7 @@ int ide_allocate_dma_engine(ide_hwif_t *hwif)
842} 878}
843EXPORT_SYMBOL_GPL(ide_allocate_dma_engine); 879EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
844 880
845static const struct ide_dma_ops sff_dma_ops = { 881const struct ide_dma_ops sff_dma_ops = {
846 .dma_host_set = ide_dma_host_set, 882 .dma_host_set = ide_dma_host_set,
847 .dma_setup = ide_dma_setup, 883 .dma_setup = ide_dma_setup,
848 .dma_exec_cmd = ide_dma_exec_cmd, 884 .dma_exec_cmd = ide_dma_exec_cmd,
@@ -852,18 +888,5 @@ static const struct ide_dma_ops sff_dma_ops = {
852 .dma_timeout = ide_dma_timeout, 888 .dma_timeout = ide_dma_timeout,
853 .dma_lost_irq = ide_dma_lost_irq, 889 .dma_lost_irq = ide_dma_lost_irq,
854}; 890};
855 891EXPORT_SYMBOL_GPL(sff_dma_ops);
856void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
857{
858 hwif->dma_base = base;
859
860 if (!hwif->dma_command)
861 hwif->dma_command = hwif->dma_base + 0;
862 if (!hwif->dma_status)
863 hwif->dma_status = hwif->dma_base + 2;
864
865 hwif->dma_ops = &sff_dma_ops;
866}
867
868EXPORT_SYMBOL_GPL(ide_setup_dma);
869#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 892#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 011d72011cc4..3d8e6dd0f41e 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -125,26 +125,10 @@ typedef struct ide_floppy_obj {
125 int wp; 125 int wp;
126 /* Supports format progress report */ 126 /* Supports format progress report */
127 int srfp; 127 int srfp;
128 /* Status/Action flags */
129 unsigned long flags;
130} idefloppy_floppy_t; 128} idefloppy_floppy_t;
131 129
132#define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */ 130#define IDEFLOPPY_TICKS_DELAY HZ/20 /* default delay for ZIP 100 (50ms) */
133 131
134/* Floppy flag bits values. */
135enum {
136 /* DRQ interrupt device */
137 IDEFLOPPY_FLAG_DRQ_INTERRUPT = (1 << 0),
138 /* Media may have changed */
139 IDEFLOPPY_FLAG_MEDIA_CHANGED = (1 << 1),
140 /* Format in progress */
141 IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS = (1 << 2),
142 /* Avoid commands not supported in Clik drive */
143 IDEFLOPPY_FLAG_CLIK_DRIVE = (1 << 3),
144 /* Requires BH algorithm for packets */
145 IDEFLOPPY_FLAG_ZIP_DRIVE = (1 << 4),
146};
147
148/* Defines for the MODE SENSE command */ 132/* Defines for the MODE SENSE command */
149#define MODE_SENSE_CURRENT 0x00 133#define MODE_SENSE_CURRENT 0x00
150#define MODE_SENSE_CHANGEABLE 0x01 134#define MODE_SENSE_CHANGEABLE 0x01
@@ -247,9 +231,9 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
247 231
248 data = bvec_kmap_irq(bvec, &flags); 232 data = bvec_kmap_irq(bvec, &flags);
249 if (direction) 233 if (direction)
250 hwif->output_data(drive, NULL, data, count); 234 hwif->tp_ops->output_data(drive, NULL, data, count);
251 else 235 else
252 hwif->input_data(drive, NULL, data, count); 236 hwif->tp_ops->input_data(drive, NULL, data, count);
253 bvec_kunmap_irq(data, &flags); 237 bvec_kunmap_irq(data, &flags);
254 238
255 bcount -= count; 239 bcount -= count;
@@ -291,6 +275,7 @@ static void idefloppy_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
291 rq->cmd_type = REQ_TYPE_SPECIAL; 275 rq->cmd_type = REQ_TYPE_SPECIAL;
292 rq->cmd_flags |= REQ_PREEMPT; 276 rq->cmd_flags |= REQ_PREEMPT;
293 rq->rq_disk = floppy->disk; 277 rq->rq_disk = floppy->disk;
278 memcpy(rq->cmd, pc->c, 12);
294 ide_do_drive_cmd(drive, rq); 279 ide_do_drive_cmd(drive, rq);
295} 280}
296 281
@@ -354,7 +339,6 @@ static void idefloppy_init_pc(struct ide_atapi_pc *pc)
354 memset(pc, 0, sizeof(*pc)); 339 memset(pc, 0, sizeof(*pc));
355 pc->buf = pc->pc_buf; 340 pc->buf = pc->pc_buf;
356 pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE; 341 pc->buf_size = IDEFLOPPY_PC_BUFFER_SIZE;
357 pc->callback = ide_floppy_callback;
358} 342}
359 343
360static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc) 344static void idefloppy_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -402,7 +386,7 @@ static int idefloppy_transfer_pc(ide_drive_t *drive)
402 idefloppy_floppy_t *floppy = drive->driver_data; 386 idefloppy_floppy_t *floppy = drive->driver_data;
403 387
404 /* Send the actual packet */ 388 /* Send the actual packet */
405 drive->hwif->output_data(drive, NULL, floppy->pc->c, 12); 389 drive->hwif->tp_ops->output_data(drive, NULL, floppy->pc->c, 12);
406 390
407 /* Timeout for the packet command */ 391 /* Timeout for the packet command */
408 return IDEFLOPPY_WAIT_CMD; 392 return IDEFLOPPY_WAIT_CMD;
@@ -429,7 +413,7 @@ static ide_startstop_t idefloppy_start_pc_transfer(ide_drive_t *drive)
429 * 40 and 50msec work well. idefloppy_pc_intr will not be actually 413 * 40 and 50msec work well. idefloppy_pc_intr will not be actually
430 * used until after the packet is moved in about 50 msec. 414 * used until after the packet is moved in about 50 msec.
431 */ 415 */
432 if (pc->flags & PC_FLAG_ZIP_DRIVE) { 416 if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) {
433 timeout = floppy->ticks; 417 timeout = floppy->ticks;
434 expiry = &idefloppy_transfer_pc; 418 expiry = &idefloppy_transfer_pc;
435 } else { 419 } else {
@@ -474,7 +458,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
474 pc->error = IDEFLOPPY_ERROR_GENERAL; 458 pc->error = IDEFLOPPY_ERROR_GENERAL;
475 459
476 floppy->failed_pc = NULL; 460 floppy->failed_pc = NULL;
477 pc->callback(drive); 461 drive->pc_callback(drive);
478 return ide_stopped; 462 return ide_stopped;
479 } 463 }
480 464
@@ -574,6 +558,8 @@ static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy,
574 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); 558 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
575 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]); 559 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
576 560
561 memcpy(rq->cmd, pc->c, 12);
562
577 pc->rq = rq; 563 pc->rq = rq;
578 pc->b_count = cmd == READ ? 0 : rq->bio->bi_size; 564 pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
579 if (rq->cmd_flags & REQ_RW) 565 if (rq->cmd_flags & REQ_RW)
@@ -647,12 +633,6 @@ static ide_startstop_t idefloppy_do_request(ide_drive_t *drive,
647 return ide_stopped; 633 return ide_stopped;
648 } 634 }
649 635
650 if (floppy->flags & IDEFLOPPY_FLAG_DRQ_INTERRUPT)
651 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
652
653 if (floppy->flags & IDEFLOPPY_FLAG_ZIP_DRIVE)
654 pc->flags |= PC_FLAG_ZIP_DRIVE;
655
656 pc->rq = rq; 636 pc->rq = rq;
657 637
658 return idefloppy_issue_pc(drive, pc); 638 return idefloppy_issue_pc(drive, pc);
@@ -671,6 +651,7 @@ static int idefloppy_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
671 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 651 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
672 rq->buffer = (char *) pc; 652 rq->buffer = (char *) pc;
673 rq->cmd_type = REQ_TYPE_SPECIAL; 653 rq->cmd_type = REQ_TYPE_SPECIAL;
654 memcpy(rq->cmd, pc->c, 12);
674 error = blk_execute_rq(drive->queue, floppy->disk, rq, 0); 655 error = blk_execute_rq(drive->queue, floppy->disk, rq, 0);
675 blk_put_request(rq); 656 blk_put_request(rq);
676 657
@@ -795,7 +776,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
795 switch (pc.buf[desc_start + 4] & 0x03) { 776 switch (pc.buf[desc_start + 4] & 0x03) {
796 /* Clik! drive returns this instead of CAPACITY_CURRENT */ 777 /* Clik! drive returns this instead of CAPACITY_CURRENT */
797 case CAPACITY_UNFORMATTED: 778 case CAPACITY_UNFORMATTED:
798 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) 779 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
799 /* 780 /*
800 * If it is not a clik drive, break out 781 * If it is not a clik drive, break out
801 * (maintains previous driver behaviour) 782 * (maintains previous driver behaviour)
@@ -841,7 +822,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
841 } 822 }
842 823
843 /* Clik! disk does not support get_flexible_disk_page */ 824 /* Clik! disk does not support get_flexible_disk_page */
844 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) 825 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE))
845 (void) ide_floppy_get_flexible_disk_page(drive); 826 (void) ide_floppy_get_flexible_disk_page(drive);
846 827
847 set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor); 828 set_capacity(floppy->disk, floppy->blocks * floppy->bs_factor);
@@ -949,11 +930,12 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
949 930
950 /* Else assume format_unit has finished, and we're at 0x10000 */ 931 /* Else assume format_unit has finished, and we're at 0x10000 */
951 } else { 932 } else {
933 ide_hwif_t *hwif = drive->hwif;
952 unsigned long flags; 934 unsigned long flags;
953 u8 stat; 935 u8 stat;
954 936
955 local_irq_save(flags); 937 local_irq_save(flags);
956 stat = ide_read_status(drive); 938 stat = hwif->tp_ops->read_status(hwif);
957 local_irq_restore(flags); 939 local_irq_restore(flags);
958 940
959 progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000; 941 progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
@@ -1039,9 +1021,10 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1039 1021
1040 *((u16 *) &gcw) = drive->id->config; 1022 *((u16 *) &gcw) = drive->id->config;
1041 floppy->pc = floppy->pc_stack; 1023 floppy->pc = floppy->pc_stack;
1024 drive->pc_callback = ide_floppy_callback;
1042 1025
1043 if (((gcw[0] & 0x60) >> 5) == 1) 1026 if (((gcw[0] & 0x60) >> 5) == 1)
1044 floppy->flags |= IDEFLOPPY_FLAG_DRQ_INTERRUPT; 1027 drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
1045 /* 1028 /*
1046 * We used to check revisions here. At this point however I'm giving up. 1029 * We used to check revisions here. At this point however I'm giving up.
1047 * Just assume they are all broken, it's easier. 1030
@@ -1052,7 +1035,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1052 * we'll leave the limitation below for the 2.2.x tree. 1035 * we'll leave the limitation below for the 2.2.x tree.
1053 */ 1036 */
1054 if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) { 1037 if (!strncmp(drive->id->model, "IOMEGA ZIP 100 ATAPI", 20)) {
1055 floppy->flags |= IDEFLOPPY_FLAG_ZIP_DRIVE; 1038 drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
1056 /* This value will be visible in the /proc/ide/hdx/settings */ 1039 /* This value will be visible in the /proc/ide/hdx/settings */
1057 floppy->ticks = IDEFLOPPY_TICKS_DELAY; 1040 floppy->ticks = IDEFLOPPY_TICKS_DELAY;
1058 blk_queue_max_sectors(drive->queue, 64); 1041 blk_queue_max_sectors(drive->queue, 64);
@@ -1064,7 +1047,7 @@ static void idefloppy_setup(ide_drive_t *drive, idefloppy_floppy_t *floppy)
1064 */ 1047 */
1065 if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) { 1048 if (strncmp(drive->id->model, "IOMEGA Clik!", 11) == 0) {
1066 blk_queue_max_sectors(drive->queue, 64); 1049 blk_queue_max_sectors(drive->queue, 64);
1067 floppy->flags |= IDEFLOPPY_FLAG_CLIK_DRIVE; 1050 drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
1068 } 1051 }
1069 1052
1070 (void) ide_floppy_get_capacity(drive); 1053 (void) ide_floppy_get_capacity(drive);
@@ -1153,7 +1136,7 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
1153 floppy->openers++; 1136 floppy->openers++;
1154 1137
1155 if (floppy->openers == 1) { 1138 if (floppy->openers == 1) {
1156 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1139 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1157 /* Just in case */ 1140 /* Just in case */
1158 1141
1159 idefloppy_init_pc(&pc); 1142 idefloppy_init_pc(&pc);
@@ -1180,14 +1163,14 @@ static int idefloppy_open(struct inode *inode, struct file *filp)
1180 ret = -EROFS; 1163 ret = -EROFS;
1181 goto out_put_floppy; 1164 goto out_put_floppy;
1182 } 1165 }
1183 floppy->flags |= IDEFLOPPY_FLAG_MEDIA_CHANGED; 1166 drive->atapi_flags |= IDE_AFLAG_MEDIA_CHANGED;
1184 /* IOMEGA Clik! drives do not support lock/unlock commands */ 1167 /* IOMEGA Clik! drives do not support lock/unlock commands */
1185 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1168 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1186 idefloppy_create_prevent_cmd(&pc, 1); 1169 idefloppy_create_prevent_cmd(&pc, 1);
1187 (void) idefloppy_queue_pc_tail(drive, &pc); 1170 (void) idefloppy_queue_pc_tail(drive, &pc);
1188 } 1171 }
1189 check_disk_change(inode->i_bdev); 1172 check_disk_change(inode->i_bdev);
1190 } else if (floppy->flags & IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS) { 1173 } else if (drive->atapi_flags & IDE_AFLAG_FORMAT_IN_PROGRESS) {
1191 ret = -EBUSY; 1174 ret = -EBUSY;
1192 goto out_put_floppy; 1175 goto out_put_floppy;
1193 } 1176 }
@@ -1210,12 +1193,12 @@ static int idefloppy_release(struct inode *inode, struct file *filp)
1210 1193
1211 if (floppy->openers == 1) { 1194 if (floppy->openers == 1) {
1212 /* IOMEGA Clik! drives do not support lock/unlock commands */ 1195 /* IOMEGA Clik! drives do not support lock/unlock commands */
1213 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1196 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1214 idefloppy_create_prevent_cmd(&pc, 0); 1197 idefloppy_create_prevent_cmd(&pc, 0);
1215 (void) idefloppy_queue_pc_tail(drive, &pc); 1198 (void) idefloppy_queue_pc_tail(drive, &pc);
1216 } 1199 }
1217 1200
1218 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1201 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1219 } 1202 }
1220 1203
1221 floppy->openers--; 1204 floppy->openers--;
@@ -1236,15 +1219,17 @@ static int idefloppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1236 return 0; 1219 return 0;
1237} 1220}
1238 1221
1239static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy, 1222static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc,
1240 struct ide_atapi_pc *pc, unsigned long arg, unsigned int cmd) 1223 unsigned long arg, unsigned int cmd)
1241{ 1224{
1225 idefloppy_floppy_t *floppy = drive->driver_data;
1226
1242 if (floppy->openers > 1) 1227 if (floppy->openers > 1)
1243 return -EBUSY; 1228 return -EBUSY;
1244 1229
1245 /* The IOMEGA Clik! Drive doesn't support this command - 1230 /* The IOMEGA Clik! Drive doesn't support this command -
1246 * no room for an eject mechanism */ 1231 * no room for an eject mechanism */
1247 if (!(floppy->flags & IDEFLOPPY_FLAG_CLIK_DRIVE)) { 1232 if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) {
1248 int prevent = arg ? 1 : 0; 1233 int prevent = arg ? 1 : 0;
1249 1234
1250 if (cmd == CDROMEJECT) 1235 if (cmd == CDROMEJECT)
@@ -1265,16 +1250,17 @@ static int ide_floppy_lockdoor(idefloppy_floppy_t *floppy,
1265static int ide_floppy_format_unit(idefloppy_floppy_t *floppy, 1250static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
1266 int __user *arg) 1251 int __user *arg)
1267{ 1252{
1268 int blocks, length, flags, err = 0;
1269 struct ide_atapi_pc pc; 1253 struct ide_atapi_pc pc;
1254 ide_drive_t *drive = floppy->drive;
1255 int blocks, length, flags, err = 0;
1270 1256
1271 if (floppy->openers > 1) { 1257 if (floppy->openers > 1) {
1272 /* Don't format if someone is using the disk */ 1258 /* Don't format if someone is using the disk */
1273 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1259 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1274 return -EBUSY; 1260 return -EBUSY;
1275 } 1261 }
1276 1262
1277 floppy->flags |= IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1263 drive->atapi_flags |= IDE_AFLAG_FORMAT_IN_PROGRESS;
1278 1264
1279 /* 1265 /*
1280 * Send ATAPI_FORMAT_UNIT to the drive. 1266 * Send ATAPI_FORMAT_UNIT to the drive.
@@ -1298,15 +1284,15 @@ static int ide_floppy_format_unit(idefloppy_floppy_t *floppy,
1298 goto out; 1284 goto out;
1299 } 1285 }
1300 1286
1301 (void) idefloppy_get_sfrp_bit(floppy->drive); 1287 (void) idefloppy_get_sfrp_bit(drive);
1302 idefloppy_create_format_unit_cmd(&pc, blocks, length, flags); 1288 idefloppy_create_format_unit_cmd(&pc, blocks, length, flags);
1303 1289
1304 if (idefloppy_queue_pc_tail(floppy->drive, &pc)) 1290 if (idefloppy_queue_pc_tail(drive, &pc))
1305 err = -EIO; 1291 err = -EIO;
1306 1292
1307out: 1293out:
1308 if (err) 1294 if (err)
1309 floppy->flags &= ~IDEFLOPPY_FLAG_FORMAT_IN_PROGRESS; 1295 drive->atapi_flags &= ~IDE_AFLAG_FORMAT_IN_PROGRESS;
1310 return err; 1296 return err;
1311} 1297}
1312 1298
@@ -1325,7 +1311,7 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file,
1325 case CDROMEJECT: 1311 case CDROMEJECT:
1326 /* fall through */ 1312 /* fall through */
1327 case CDROM_LOCKDOOR: 1313 case CDROM_LOCKDOOR:
1328 return ide_floppy_lockdoor(floppy, &pc, arg, cmd); 1314 return ide_floppy_lockdoor(drive, &pc, arg, cmd);
1329 case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED: 1315 case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED:
1330 return 0; 1316 return 0;
1331 case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: 1317 case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY:
@@ -1366,8 +1352,8 @@ static int idefloppy_media_changed(struct gendisk *disk)
1366 drive->attach = 0; 1352 drive->attach = 0;
1367 return 0; 1353 return 0;
1368 } 1354 }
1369 ret = !!(floppy->flags & IDEFLOPPY_FLAG_MEDIA_CHANGED); 1355 ret = !!(drive->atapi_flags & IDE_AFLAG_MEDIA_CHANGED);
1370 floppy->flags &= ~IDEFLOPPY_FLAG_MEDIA_CHANGED; 1356 drive->atapi_flags &= ~IDE_AFLAG_MEDIA_CHANGED;
1371 return ret; 1357 return ret;
1372} 1358}
1373 1359
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 2d92214096ab..31d98fec775f 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -28,29 +28,21 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
28 28
29static ssize_t store_add(struct class *cls, const char *buf, size_t n) 29static ssize_t store_add(struct class *cls, const char *buf, size_t n)
30{ 30{
31 ide_hwif_t *hwif;
32 unsigned int base, ctl; 31 unsigned int base, ctl;
33 int irq; 32 int irq, rc;
34 hw_regs_t hw; 33 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
36 34
37 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3) 35 if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
38 return -EINVAL; 36 return -EINVAL;
39 37
40 hwif = ide_find_port();
41 if (hwif == NULL)
42 return -ENOENT;
43
44 memset(&hw, 0, sizeof(hw)); 38 memset(&hw, 0, sizeof(hw));
45 ide_std_init_ports(&hw, base, ctl); 39 ide_std_init_ports(&hw, base, ctl);
46 hw.irq = irq; 40 hw.irq = irq;
47 hw.chipset = ide_generic; 41 hw.chipset = ide_generic;
48 42
49 ide_init_port_hw(hwif, &hw); 43 rc = ide_host_add(NULL, hws, NULL);
50 44 if (rc)
51 idx[0] = hwif->index; 45 return rc;
52
53 ide_device_add(idx, NULL);
54 46
55 return n; 47 return n;
56}; 48};
@@ -90,18 +82,18 @@ static int __init ide_generic_sysfs_init(void)
90 82
91static int __init ide_generic_init(void) 83static int __init ide_generic_init(void)
92{ 84{
93 u8 idx[MAX_HWIFS]; 85 hw_regs_t hw[MAX_HWIFS], *hws[MAX_HWIFS];
94 int i; 86 struct ide_host *host;
87 unsigned long io_addr;
88 int i, rc;
95 89
96 printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module " 90 printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module "
97 "parameter for probing all legacy ISA IDE ports\n"); 91 "parameter for probing all legacy ISA IDE ports\n");
98 92
99 for (i = 0; i < MAX_HWIFS; i++) { 93 for (i = 0; i < MAX_HWIFS; i++) {
100 ide_hwif_t *hwif; 94 io_addr = ide_default_io_base(i);
101 unsigned long io_addr = ide_default_io_base(i);
102 hw_regs_t hw;
103 95
104 idx[i] = 0xff; 96 hws[i] = NULL;
105 97
106 if ((probe_mask & (1 << i)) && io_addr) { 98 if ((probe_mask & (1 << i)) && io_addr) {
107 if (!request_region(io_addr, 8, DRV_NAME)) { 99 if (!request_region(io_addr, 8, DRV_NAME)) {
@@ -119,33 +111,42 @@ static int __init ide_generic_init(void)
119 continue; 111 continue;
120 } 112 }
121 113
122 /* 114 memset(&hw[i], 0, sizeof(hw[i]));
123 * Skip probing if the corresponding 115 ide_std_init_ports(&hw[i], io_addr, io_addr + 0x206);
124 * slot is already occupied. 116 hw[i].irq = ide_default_irq(io_addr);
125 */ 117 hw[i].chipset = ide_generic;
126 hwif = ide_find_port();
127 if (hwif == NULL || hwif->index != i) {
128 idx[i] = 0xff;
129 continue;
130 }
131
132 memset(&hw, 0, sizeof(hw));
133 ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
134 hw.irq = ide_default_irq(io_addr);
135 hw.chipset = ide_generic;
136 ide_init_port_hw(hwif, &hw);
137 118
138 idx[i] = i; 119 hws[i] = &hw[i];
139 } 120 }
140 } 121 }
141 122
142 ide_device_add_all(idx, NULL); 123 host = ide_host_alloc_all(NULL, hws);
124 if (host == NULL) {
125 rc = -ENOMEM;
126 goto err;
127 }
128
129 rc = ide_host_register(host, NULL, hws);
130 if (rc)
131 goto err_free;
143 132
144 if (ide_generic_sysfs_init()) 133 if (ide_generic_sysfs_init())
145 printk(KERN_ERR DRV_NAME ": failed to create ide_generic " 134 printk(KERN_ERR DRV_NAME ": failed to create ide_generic "
146 "class\n"); 135 "class\n");
147 136
148 return 0; 137 return 0;
138err_free:
139 ide_host_free(host);
140err:
141 for (i = 0; i < MAX_HWIFS; i++) {
142 if (hws[i] == NULL)
143 continue;
144
145 io_addr = hws[i]->io_ports.data_addr;
146 release_region(io_addr + 0x206, 1);
147 release_region(io_addr, 8);
148 }
149 return rc;
149} 150}
150 151
151module_init(ide_generic_init); 152module_init(ide_generic_init);
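
For reference, a hedged sketch of the registration flow this file is converted to (it mirrors the new store_add() above): fill a hw_regs_t, pass an array of up to four port pointers to ide_host_add(), and let it allocate the host and probe the port. The I/O base, control port and IRQ below are the classic ide0 defaults, used purely as example values; this is not code from the patch.

#include <linux/ide.h>

static int __init example_legacy_ide_init(void)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* data/ctl ports */
	hw.irq = 14;
	hw.chipset = ide_generic;

	/* NULL port_info, and no need to keep the struct ide_host pointer */
	return ide_host_add(NULL, hws, NULL);
}
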
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 661b75a89d4d..a896a283f27f 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -330,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
330 tf->error = err; 330 tf->error = err;
331 tf->status = stat; 331 tf->status = stat;
332 332
333 drive->hwif->tf_read(drive, task); 333 drive->hwif->tp_ops->tf_read(drive, task);
334 334
335 if (task->tf_flags & IDE_TFLAG_DYN) 335 if (task->tf_flags & IDE_TFLAG_DYN)
336 kfree(task); 336 kfree(task);
@@ -381,8 +381,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
381 if (err == ABRT_ERR) { 381 if (err == ABRT_ERR) {
382 if (drive->select.b.lba && 382 if (drive->select.b.lba &&
383 /* some newer drives don't support WIN_SPECIFY */ 383 /* some newer drives don't support WIN_SPECIFY */
384 hwif->INB(hwif->io_ports.command_addr) == 384 hwif->tp_ops->read_status(hwif) == WIN_SPECIFY)
385 WIN_SPECIFY)
386 return ide_stopped; 385 return ide_stopped;
387 } else if ((err & BAD_CRC) == BAD_CRC) { 386 } else if ((err & BAD_CRC) == BAD_CRC) {
388 /* UDMA crc error, just retry the operation */ 387 /* UDMA crc error, just retry the operation */
@@ -408,7 +407,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
408 return ide_stopped; 407 return ide_stopped;
409 } 408 }
410 409
411 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 410 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
412 rq->errors |= ERROR_RESET; 411 rq->errors |= ERROR_RESET;
413 412
414 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 413 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
@@ -435,10 +434,9 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
435 /* add decoding error stuff */ 434 /* add decoding error stuff */
436 } 435 }
437 436
438 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 437 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
439 /* force an abort */ 438 /* force an abort */
440 hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, 439 hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
441 hwif->io_ports.command_addr);
442 440
443 if (rq->errors >= ERROR_MAX) { 441 if (rq->errors >= ERROR_MAX) {
444 ide_kill_rq(drive, rq); 442 ide_kill_rq(drive, rq);
@@ -712,7 +710,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
712#ifdef DEBUG 710#ifdef DEBUG
713 printk("%s: DRIVE_CMD (null)\n", drive->name); 711 printk("%s: DRIVE_CMD (null)\n", drive->name);
714#endif 712#endif
715 ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive)); 713 ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
714 ide_read_error(drive));
716 715
717 return ide_stopped; 716 return ide_stopped;
718} 717}
@@ -747,16 +746,17 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
747 * the bus may be broken enough to walk on our toes at this 746 * the bus may be broken enough to walk on our toes at this
748 * point. 747 * point.
749 */ 748 */
749 ide_hwif_t *hwif = drive->hwif;
750 int rc; 750 int rc;
751#ifdef DEBUG_PM 751#ifdef DEBUG_PM
752 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); 752 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
753#endif 753#endif
754 rc = ide_wait_not_busy(HWIF(drive), 35000); 754 rc = ide_wait_not_busy(hwif, 35000);
755 if (rc) 755 if (rc)
756 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); 756 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
757 SELECT_DRIVE(drive); 757 SELECT_DRIVE(drive);
758 ide_set_irq(drive, 1); 758 hwif->tp_ops->set_irq(hwif, 1);
759 rc = ide_wait_not_busy(HWIF(drive), 100000); 759 rc = ide_wait_not_busy(hwif, 100000);
760 if (rc) 760 if (rc)
761 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); 761 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
762 } 762 }
@@ -1042,7 +1042,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1042 * quirk_list may not like intr setups/cleanups 1042 * quirk_list may not like intr setups/cleanups
1043 */ 1043 */
1044 if (drive->quirk_list != 1) 1044 if (drive->quirk_list != 1)
1045 ide_set_irq(drive, 0); 1045 hwif->tp_ops->set_irq(hwif, 0);
1046 } 1046 }
1047 hwgroup->hwif = hwif; 1047 hwgroup->hwif = hwif;
1048 hwgroup->drive = drive; 1048 hwgroup->drive = drive;
@@ -1142,7 +1142,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
1142 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 1142 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
1143 (void)hwif->dma_ops->dma_end(drive); 1143 (void)hwif->dma_ops->dma_end(drive);
1144 ret = ide_error(drive, "dma timeout error", 1144 ret = ide_error(drive, "dma timeout error",
1145 ide_read_status(drive)); 1145 hwif->tp_ops->read_status(hwif));
1146 } else { 1146 } else {
1147 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1147 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
1148 hwif->dma_ops->dma_timeout(drive); 1148 hwif->dma_ops->dma_timeout(drive);
@@ -1267,7 +1267,7 @@ void ide_timer_expiry (unsigned long data)
1267 } else 1267 } else
1268 startstop = 1268 startstop =
1269 ide_error(drive, "irq timeout", 1269 ide_error(drive, "irq timeout",
1270 ide_read_status(drive)); 1270 hwif->tp_ops->read_status(hwif));
1271 } 1271 }
1272 drive->service_time = jiffies - drive->service_start; 1272 drive->service_time = jiffies - drive->service_start;
1273 spin_lock_irq(&ide_lock); 1273 spin_lock_irq(&ide_lock);
@@ -1323,7 +1323,8 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
1323 */ 1323 */
1324 do { 1324 do {
1325 if (hwif->irq == irq) { 1325 if (hwif->irq == irq) {
1326 stat = hwif->INB(hwif->io_ports.status_addr); 1326 stat = hwif->tp_ops->read_status(hwif);
1327
1327 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { 1328 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
1328 /* Try to not flood the console with msgs */ 1329 /* Try to not flood the console with msgs */
1329 static unsigned long last_msgtime, count; 1330 static unsigned long last_msgtime, count;
@@ -1413,7 +1414,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
1413 * Whack the status register, just in case 1414 * Whack the status register, just in case
1414 * we have a leftover pending IRQ. 1415 * we have a leftover pending IRQ.
1415 */ 1416 */
1416 (void) hwif->INB(hwif->io_ports.status_addr); 1417 (void)hwif->tp_ops->read_status(hwif);
1417#endif /* CONFIG_BLK_DEV_IDEPCI */ 1418#endif /* CONFIG_BLK_DEV_IDEPCI */
1418 } 1419 }
1419 spin_unlock_irqrestore(&ide_lock, flags); 1420 spin_unlock_irqrestore(&ide_lock, flags);
@@ -1519,6 +1520,7 @@ EXPORT_SYMBOL(ide_do_drive_cmd);
1519 1520
1520void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) 1521void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1521{ 1522{
1523 ide_hwif_t *hwif = drive->hwif;
1522 ide_task_t task; 1524 ide_task_t task;
1523 1525
1524 memset(&task, 0, sizeof(task)); 1526 memset(&task, 0, sizeof(task));
@@ -1529,9 +1531,9 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1529 task.tf.lbah = (bcount >> 8) & 0xff; 1531 task.tf.lbah = (bcount >> 8) & 0xff;
1530 1532
1531 ide_tf_dump(drive->name, &task.tf); 1533 ide_tf_dump(drive->name, &task.tf);
1532 ide_set_irq(drive, 1); 1534 hwif->tp_ops->set_irq(hwif, 1);
1533 SELECT_MASK(drive, 0); 1535 SELECT_MASK(drive, 0);
1534 drive->hwif->tf_load(drive, &task); 1536 hwif->tp_ops->tf_load(drive, &task);
1535} 1537}
1536 1538
1537EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); 1539EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
@@ -1543,9 +1545,9 @@ void ide_pad_transfer(ide_drive_t *drive, int write, int len)
1543 1545
1544 while (len > 0) { 1546 while (len > 0) {
1545 if (write) 1547 if (write)
1546 hwif->output_data(drive, NULL, buf, min(4, len)); 1548 hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
1547 else 1549 else
1548 hwif->input_data(drive, NULL, buf, min(4, len)); 1550 hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
1549 len -= 4; 1551 len -= 4;
1550 } 1552 }
1551} 1553}
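
The ide-io.c conversion above follows one pattern throughout: status reads and command writes go through the per-port tp_ops table rather than the old hwif->INB()/OUTBSYNC() hooks and the drive-based ide_read_status(). A short illustrative sketch under that assumption; the helper is hypothetical, the methods and status bits are the ones used in the hunks above.

#include <linux/ide.h>

static int example_abort_if_busy(ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (stat & (BUSY_STAT | DRQ_STAT)) {
		/* force an abort, as ide_atapi_error() does above */
		hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
		return 1;
	}

	return 0;
}
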
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 44aaec256a30..07da5fb9eaff 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -42,18 +42,6 @@ static void ide_outb (u8 val, unsigned long port)
42 outb(val, port); 42 outb(val, port);
43} 43}
44 44
45static void ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
46{
47 outb(addr, port);
48}
49
50void default_hwif_iops (ide_hwif_t *hwif)
51{
52 hwif->OUTB = ide_outb;
53 hwif->OUTBSYNC = ide_outbsync;
54 hwif->INB = ide_inb;
55}
56
57/* 45/*
58 * MMIO operations, typically used for SATA controllers 46 * MMIO operations, typically used for SATA controllers
59 */ 47 */
@@ -68,31 +56,19 @@ static void ide_mm_outb (u8 value, unsigned long port)
68 writeb(value, (void __iomem *) port); 56 writeb(value, (void __iomem *) port);
69} 57}
70 58
71static void ide_mm_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
72{
73 writeb(value, (void __iomem *) port);
74}
75
76void default_hwif_mmiops (ide_hwif_t *hwif)
77{
78 hwif->OUTB = ide_mm_outb;
79 /* Most systems will need to override OUTBSYNC, alas however
80 this one is controller specific! */
81 hwif->OUTBSYNC = ide_mm_outbsync;
82 hwif->INB = ide_mm_inb;
83}
84
85EXPORT_SYMBOL(default_hwif_mmiops);
86
87void SELECT_DRIVE (ide_drive_t *drive) 59void SELECT_DRIVE (ide_drive_t *drive)
88{ 60{
89 ide_hwif_t *hwif = drive->hwif; 61 ide_hwif_t *hwif = drive->hwif;
90 const struct ide_port_ops *port_ops = hwif->port_ops; 62 const struct ide_port_ops *port_ops = hwif->port_ops;
63 ide_task_t task;
91 64
92 if (port_ops && port_ops->selectproc) 65 if (port_ops && port_ops->selectproc)
93 port_ops->selectproc(drive); 66 port_ops->selectproc(drive);
94 67
95 hwif->OUTB(drive->select.all, hwif->io_ports.device_addr); 68 memset(&task, 0, sizeof(task));
69 task.tf_flags = IDE_TFLAG_OUT_DEVICE;
70
71 drive->hwif->tp_ops->tf_load(drive, &task);
96} 72}
97 73
98void SELECT_MASK(ide_drive_t *drive, int mask) 74void SELECT_MASK(ide_drive_t *drive, int mask)
@@ -103,7 +79,61 @@ void SELECT_MASK(ide_drive_t *drive, int mask)
103 port_ops->maskproc(drive, mask); 79 port_ops->maskproc(drive, mask);
104} 80}
105 81
106static void ide_tf_load(ide_drive_t *drive, ide_task_t *task) 82void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
83{
84 if (hwif->host_flags & IDE_HFLAG_MMIO)
85 writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
86 else
87 outb(cmd, hwif->io_ports.command_addr);
88}
89EXPORT_SYMBOL_GPL(ide_exec_command);
90
91u8 ide_read_status(ide_hwif_t *hwif)
92{
93 if (hwif->host_flags & IDE_HFLAG_MMIO)
94 return readb((void __iomem *)hwif->io_ports.status_addr);
95 else
96 return inb(hwif->io_ports.status_addr);
97}
98EXPORT_SYMBOL_GPL(ide_read_status);
99
100u8 ide_read_altstatus(ide_hwif_t *hwif)
101{
102 if (hwif->host_flags & IDE_HFLAG_MMIO)
103 return readb((void __iomem *)hwif->io_ports.ctl_addr);
104 else
105 return inb(hwif->io_ports.ctl_addr);
106}
107EXPORT_SYMBOL_GPL(ide_read_altstatus);
108
109u8 ide_read_sff_dma_status(ide_hwif_t *hwif)
110{
111 if (hwif->host_flags & IDE_HFLAG_MMIO)
112 return readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
113 else
114 return inb(hwif->dma_base + ATA_DMA_STATUS);
115}
116EXPORT_SYMBOL_GPL(ide_read_sff_dma_status);
117
118void ide_set_irq(ide_hwif_t *hwif, int on)
119{
120 u8 ctl = ATA_DEVCTL_OBS;
121
122 if (on == 4) { /* hack for SRST */
123 ctl |= 4;
124 on &= ~4;
125 }
126
127 ctl |= on ? 0 : 2;
128
129 if (hwif->host_flags & IDE_HFLAG_MMIO)
130 writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
131 else
132 outb(ctl, hwif->io_ports.ctl_addr);
133}
134EXPORT_SYMBOL_GPL(ide_set_irq);
135
136void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
107{ 137{
108 ide_hwif_t *hwif = drive->hwif; 138 ide_hwif_t *hwif = drive->hwif;
109 struct ide_io_ports *io_ports = &hwif->io_ports; 139 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -155,8 +185,9 @@ static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
155 tf_outb((tf->device & HIHI) | drive->select.all, 185 tf_outb((tf->device & HIHI) | drive->select.all,
156 io_ports->device_addr); 186 io_ports->device_addr);
157} 187}
188EXPORT_SYMBOL_GPL(ide_tf_load);
158 189
159static void ide_tf_read(ide_drive_t *drive, ide_task_t *task) 190void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
160{ 191{
161 ide_hwif_t *hwif = drive->hwif; 192 ide_hwif_t *hwif = drive->hwif;
162 struct ide_io_ports *io_ports = &hwif->io_ports; 193 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -188,6 +219,8 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
188 /* be sure we're looking at the low order bits */ 219 /* be sure we're looking at the low order bits */
189 tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 220 tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
190 221
222 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
223 tf->feature = tf_inb(io_ports->feature_addr);
191 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 224 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
192 tf->nsect = tf_inb(io_ports->nsect_addr); 225 tf->nsect = tf_inb(io_ports->nsect_addr);
193 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 226 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -214,6 +247,7 @@ static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
214 tf->hob_lbah = tf_inb(io_ports->lbah_addr); 247 tf->hob_lbah = tf_inb(io_ports->lbah_addr);
215 } 248 }
216} 249}
250EXPORT_SYMBOL_GPL(ide_tf_read);
217 251
218/* 252/*
219 * Some localbus EIDE interfaces require a special access sequence 253 * Some localbus EIDE interfaces require a special access sequence
@@ -236,8 +270,8 @@ static void ata_vlb_sync(unsigned long port)
236 * so if an odd len is specified, be sure that there's at least one 270 * so if an odd len is specified, be sure that there's at least one
237 * extra byte allocated for the buffer. 271 * extra byte allocated for the buffer.
238 */ 272 */
239static void ata_input_data(ide_drive_t *drive, struct request *rq, 273void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
240 void *buf, unsigned int len) 274 unsigned int len)
241{ 275{
242 ide_hwif_t *hwif = drive->hwif; 276 ide_hwif_t *hwif = drive->hwif;
243 struct ide_io_ports *io_ports = &hwif->io_ports; 277 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -277,12 +311,13 @@ static void ata_input_data(ide_drive_t *drive, struct request *rq,
277 insw(data_addr, buf, len / 2); 311 insw(data_addr, buf, len / 2);
278 } 312 }
279} 313}
314EXPORT_SYMBOL_GPL(ide_input_data);
280 315
281/* 316/*
282 * This is used for most PIO data transfers *to* the IDE interface 317 * This is used for most PIO data transfers *to* the IDE interface
283 */ 318 */
284static void ata_output_data(ide_drive_t *drive, struct request *rq, 319void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
285 void *buf, unsigned int len) 320 unsigned int len)
286{ 321{
287 ide_hwif_t *hwif = drive->hwif; 322 ide_hwif_t *hwif = drive->hwif;
288 struct ide_io_ports *io_ports = &hwif->io_ports; 323 struct ide_io_ports *io_ports = &hwif->io_ports;
@@ -320,15 +355,50 @@ static void ata_output_data(ide_drive_t *drive, struct request *rq,
320 outsw(data_addr, buf, len / 2); 355 outsw(data_addr, buf, len / 2);
321 } 356 }
322} 357}
358EXPORT_SYMBOL_GPL(ide_output_data);
359
360u8 ide_read_error(ide_drive_t *drive)
361{
362 ide_task_t task;
363
364 memset(&task, 0, sizeof(task));
365 task.tf_flags = IDE_TFLAG_IN_FEATURE;
366
367 drive->hwif->tp_ops->tf_read(drive, &task);
368
369 return task.tf.error;
370}
371EXPORT_SYMBOL_GPL(ide_read_error);
323 372
324void default_hwif_transport(ide_hwif_t *hwif) 373void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
325{ 374{
326 hwif->tf_load = ide_tf_load; 375 ide_task_t task;
327 hwif->tf_read = ide_tf_read; 376
377 memset(&task, 0, sizeof(task));
378 task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
379 IDE_TFLAG_IN_NSECT;
328 380
329 hwif->input_data = ata_input_data; 381 drive->hwif->tp_ops->tf_read(drive, &task);
330 hwif->output_data = ata_output_data; 382
383 *bcount = (task.tf.lbah << 8) | task.tf.lbam;
384 *ireason = task.tf.nsect & 3;
331} 385}
386EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
387
388const struct ide_tp_ops default_tp_ops = {
389 .exec_command = ide_exec_command,
390 .read_status = ide_read_status,
391 .read_altstatus = ide_read_altstatus,
392 .read_sff_dma_status = ide_read_sff_dma_status,
393
394 .set_irq = ide_set_irq,
395
396 .tf_load = ide_tf_load,
397 .tf_read = ide_tf_read,
398
399 .input_data = ide_input_data,
400 .output_data = ide_output_data,
401};
332 402
333void ide_fix_driveid (struct hd_driveid *id) 403void ide_fix_driveid (struct hd_driveid *id)
334{ 404{
@@ -483,10 +553,10 @@ int drive_is_ready (ide_drive_t *drive)
483 * about possible isa-pnp and pci-pnp issues yet. 553 * about possible isa-pnp and pci-pnp issues yet.
484 */ 554 */
485 if (hwif->io_ports.ctl_addr) 555 if (hwif->io_ports.ctl_addr)
486 stat = ide_read_altstatus(drive); 556 stat = hwif->tp_ops->read_altstatus(hwif);
487 else 557 else
488 /* Note: this may clear a pending IRQ!! */ 558 /* Note: this may clear a pending IRQ!! */
489 stat = ide_read_status(drive); 559 stat = hwif->tp_ops->read_status(hwif);
490 560
491 if (stat & BUSY_STAT) 561 if (stat & BUSY_STAT)
492 /* drive busy: definitely not interrupting */ 562 /* drive busy: definitely not interrupting */
@@ -511,24 +581,26 @@ EXPORT_SYMBOL(drive_is_ready);
511 */ 581 */
512static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat) 582static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
513{ 583{
584 ide_hwif_t *hwif = drive->hwif;
585 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
514 unsigned long flags; 586 unsigned long flags;
515 int i; 587 int i;
516 u8 stat; 588 u8 stat;
517 589
518 udelay(1); /* spec allows drive 400ns to assert "BUSY" */ 590 udelay(1); /* spec allows drive 400ns to assert "BUSY" */
519 stat = ide_read_status(drive); 591 stat = tp_ops->read_status(hwif);
520 592
521 if (stat & BUSY_STAT) { 593 if (stat & BUSY_STAT) {
522 local_irq_set(flags); 594 local_irq_set(flags);
523 timeout += jiffies; 595 timeout += jiffies;
524 while ((stat = ide_read_status(drive)) & BUSY_STAT) { 596 while ((stat = tp_ops->read_status(hwif)) & BUSY_STAT) {
525 if (time_after(jiffies, timeout)) { 597 if (time_after(jiffies, timeout)) {
526 /* 598 /*
527 * One last read after the timeout in case 599 * One last read after the timeout in case
528 * heavy interrupt load made us not make any 600 * heavy interrupt load made us not make any
529 * progress during the timeout.. 601 * progress during the timeout..
530 */ 602 */
531 stat = ide_read_status(drive); 603 stat = tp_ops->read_status(hwif);
532 if (!(stat & BUSY_STAT)) 604 if (!(stat & BUSY_STAT))
533 break; 605 break;
534 606
@@ -548,7 +620,7 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
548 */ 620 */
549 for (i = 0; i < 10; i++) { 621 for (i = 0; i < 10; i++) {
550 udelay(1); 622 udelay(1);
551 stat = ide_read_status(drive); 623 stat = tp_ops->read_status(hwif);
552 624
553 if (OK_STAT(stat, good, bad)) { 625 if (OK_STAT(stat, good, bad)) {
554 *rstat = stat; 626 *rstat = stat;
@@ -674,6 +746,7 @@ no_80w:
674int ide_driveid_update(ide_drive_t *drive) 746int ide_driveid_update(ide_drive_t *drive)
675{ 747{
676 ide_hwif_t *hwif = drive->hwif; 748 ide_hwif_t *hwif = drive->hwif;
749 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
677 struct hd_driveid *id; 750 struct hd_driveid *id;
678 unsigned long timeout, flags; 751 unsigned long timeout, flags;
679 u8 stat; 752 u8 stat;
@@ -684,9 +757,9 @@ int ide_driveid_update(ide_drive_t *drive)
684 */ 757 */
685 758
686 SELECT_MASK(drive, 1); 759 SELECT_MASK(drive, 1);
687 ide_set_irq(drive, 0); 760 tp_ops->set_irq(hwif, 0);
688 msleep(50); 761 msleep(50);
689 hwif->OUTBSYNC(hwif, WIN_IDENTIFY, hwif->io_ports.command_addr); 762 tp_ops->exec_command(hwif, WIN_IDENTIFY);
690 timeout = jiffies + WAIT_WORSTCASE; 763 timeout = jiffies + WAIT_WORSTCASE;
691 do { 764 do {
692 if (time_after(jiffies, timeout)) { 765 if (time_after(jiffies, timeout)) {
@@ -695,11 +768,11 @@ int ide_driveid_update(ide_drive_t *drive)
695 } 768 }
696 769
697 msleep(50); /* give drive a breather */ 770 msleep(50); /* give drive a breather */
698 stat = ide_read_altstatus(drive); 771 stat = tp_ops->read_altstatus(hwif);
699 } while (stat & BUSY_STAT); 772 } while (stat & BUSY_STAT);
700 773
701 msleep(50); /* wait for IRQ and DRQ_STAT */ 774 msleep(50); /* wait for IRQ and DRQ_STAT */
702 stat = ide_read_status(drive); 775 stat = tp_ops->read_status(hwif);
703 776
704 if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) { 777 if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
705 SELECT_MASK(drive, 0); 778 SELECT_MASK(drive, 0);
@@ -713,8 +786,8 @@ int ide_driveid_update(ide_drive_t *drive)
713 local_irq_restore(flags); 786 local_irq_restore(flags);
714 return 0; 787 return 0;
715 } 788 }
716 hwif->input_data(drive, NULL, id, SECTOR_SIZE); 789 tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
717 (void)ide_read_status(drive); /* clear drive IRQ */ 790 (void)tp_ops->read_status(hwif); /* clear drive IRQ */
718 local_irq_enable(); 791 local_irq_enable();
719 local_irq_restore(flags); 792 local_irq_restore(flags);
720 ide_fix_driveid(id); 793 ide_fix_driveid(id);
@@ -735,9 +808,10 @@ int ide_driveid_update(ide_drive_t *drive)
735int ide_config_drive_speed(ide_drive_t *drive, u8 speed) 808int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
736{ 809{
737 ide_hwif_t *hwif = drive->hwif; 810 ide_hwif_t *hwif = drive->hwif;
738 struct ide_io_ports *io_ports = &hwif->io_ports; 811 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
739 int error = 0; 812 int error = 0;
740 u8 stat; 813 u8 stat;
814 ide_task_t task;
741 815
742#ifdef CONFIG_BLK_DEV_IDEDMA 816#ifdef CONFIG_BLK_DEV_IDEDMA
743 if (hwif->dma_ops) /* check if host supports DMA */ 817 if (hwif->dma_ops) /* check if host supports DMA */
@@ -770,12 +844,19 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
770 SELECT_DRIVE(drive); 844 SELECT_DRIVE(drive);
771 SELECT_MASK(drive, 0); 845 SELECT_MASK(drive, 0);
772 udelay(1); 846 udelay(1);
773 ide_set_irq(drive, 0); 847 tp_ops->set_irq(hwif, 0);
774 hwif->OUTB(speed, io_ports->nsect_addr); 848
775 hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr); 849 memset(&task, 0, sizeof(task));
776 hwif->OUTBSYNC(hwif, WIN_SETFEATURES, io_ports->command_addr); 850 task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT;
851 task.tf.feature = SETFEATURES_XFER;
852 task.tf.nsect = speed;
853
854 tp_ops->tf_load(drive, &task);
855
856 tp_ops->exec_command(hwif, WIN_SETFEATURES);
857
777 if (drive->quirk_list == 2) 858 if (drive->quirk_list == 2)
778 ide_set_irq(drive, 1); 859 tp_ops->set_irq(hwif, 1);
779 860
780 error = __ide_wait_stat(drive, drive->ready_stat, 861 error = __ide_wait_stat(drive, drive->ready_stat,
781 BUSY_STAT|DRQ_STAT|ERR_STAT, 862 BUSY_STAT|DRQ_STAT|ERR_STAT,
@@ -796,8 +877,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
796 877
797 skip: 878 skip:
798#ifdef CONFIG_BLK_DEV_IDEDMA 879#ifdef CONFIG_BLK_DEV_IDEDMA
799 if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) && 880 if (speed >= XFER_SW_DMA_0 && drive->using_dma)
800 drive->using_dma)
801 hwif->dma_ops->dma_host_set(drive, 1); 881 hwif->dma_ops->dma_host_set(drive, 1);
802 else if (hwif->dma_ops) /* check if host supports DMA */ 882 else if (hwif->dma_ops) /* check if host supports DMA */
803 ide_dma_off_quietly(drive); 883 ide_dma_off_quietly(drive);
@@ -881,7 +961,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
881 961
882 spin_lock_irqsave(&ide_lock, flags); 962 spin_lock_irqsave(&ide_lock, flags);
883 __ide_set_handler(drive, handler, timeout, expiry); 963 __ide_set_handler(drive, handler, timeout, expiry);
884 hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr); 964 hwif->tp_ops->exec_command(hwif, cmd);
885 /* 965 /*
886 * Drive takes 400nS to respond, we must avoid the IRQ being 966 * Drive takes 400nS to respond, we must avoid the IRQ being
887 * serviced before that. 967 * serviced before that.
@@ -899,7 +979,7 @@ void ide_execute_pkt_cmd(ide_drive_t *drive)
899 unsigned long flags; 979 unsigned long flags;
900 980
901 spin_lock_irqsave(&ide_lock, flags); 981 spin_lock_irqsave(&ide_lock, flags);
902 hwif->OUTBSYNC(hwif, WIN_PACKETCMD, hwif->io_ports.command_addr); 982 hwif->tp_ops->exec_command(hwif, WIN_PACKETCMD);
903 ndelay(400); 983 ndelay(400);
904 spin_unlock_irqrestore(&ide_lock, flags); 984 spin_unlock_irqrestore(&ide_lock, flags);
905} 985}
@@ -924,12 +1004,13 @@ static ide_startstop_t do_reset1 (ide_drive_t *, int);
924 */ 1004 */
925static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) 1005static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
926{ 1006{
927 ide_hwgroup_t *hwgroup = HWGROUP(drive); 1007 ide_hwif_t *hwif = drive->hwif;
1008 ide_hwgroup_t *hwgroup = hwif->hwgroup;
928 u8 stat; 1009 u8 stat;
929 1010
930 SELECT_DRIVE(drive); 1011 SELECT_DRIVE(drive);
931 udelay (10); 1012 udelay (10);
932 stat = ide_read_status(drive); 1013 stat = hwif->tp_ops->read_status(hwif);
933 1014
934 if (OK_STAT(stat, 0, BUSY_STAT)) 1015 if (OK_STAT(stat, 0, BUSY_STAT))
935 printk("%s: ATAPI reset complete\n", drive->name); 1016 printk("%s: ATAPI reset complete\n", drive->name);
@@ -975,7 +1056,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
975 } 1056 }
976 } 1057 }
977 1058
978 tmp = ide_read_status(drive); 1059 tmp = hwif->tp_ops->read_status(hwif);
979 1060
980 if (!OK_STAT(tmp, 0, BUSY_STAT)) { 1061 if (!OK_STAT(tmp, 0, BUSY_STAT)) {
981 if (time_before(jiffies, hwgroup->poll_timeout)) { 1062 if (time_before(jiffies, hwgroup->poll_timeout)) {
@@ -1089,8 +1170,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1089 ide_hwif_t *hwif; 1170 ide_hwif_t *hwif;
1090 ide_hwgroup_t *hwgroup; 1171 ide_hwgroup_t *hwgroup;
1091 struct ide_io_ports *io_ports; 1172 struct ide_io_ports *io_ports;
1173 const struct ide_tp_ops *tp_ops;
1092 const struct ide_port_ops *port_ops; 1174 const struct ide_port_ops *port_ops;
1093 u8 ctl;
1094 1175
1095 spin_lock_irqsave(&ide_lock, flags); 1176 spin_lock_irqsave(&ide_lock, flags);
1096 hwif = HWIF(drive); 1177 hwif = HWIF(drive);
@@ -1098,6 +1179,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1098 1179
1099 io_ports = &hwif->io_ports; 1180 io_ports = &hwif->io_ports;
1100 1181
1182 tp_ops = hwif->tp_ops;
1183
1101 /* We must not reset with running handlers */ 1184 /* We must not reset with running handlers */
1102 BUG_ON(hwgroup->handler != NULL); 1185 BUG_ON(hwgroup->handler != NULL);
1103 1186
@@ -1106,7 +1189,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1106 pre_reset(drive); 1189 pre_reset(drive);
1107 SELECT_DRIVE(drive); 1190 SELECT_DRIVE(drive);
1108 udelay (20); 1191 udelay (20);
1109 hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr); 1192 tp_ops->exec_command(hwif, WIN_SRST);
1110 ndelay(400); 1193 ndelay(400);
1111 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1194 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1112 hwgroup->polling = 1; 1195 hwgroup->polling = 1;
@@ -1135,16 +1218,15 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1135 * immediate interrupt due to the edge transition it produces. 1218 * immediate interrupt due to the edge transition it produces.
1136 * This single interrupt gives us a "fast poll" for drives that 1219 * This single interrupt gives us a "fast poll" for drives that
1137 * recover from reset very quickly, saving us the first 50ms wait time. 1220 * recover from reset very quickly, saving us the first 50ms wait time.
1221 *
1222 * TODO: add ->softreset method and stop abusing ->set_irq
1138 */ 1223 */
1139 /* set SRST and nIEN */ 1224 /* set SRST and nIEN */
1140 hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | 6, io_ports->ctl_addr); 1225 tp_ops->set_irq(hwif, 4);
1141 /* more than enough time */ 1226 /* more than enough time */
1142 udelay(10); 1227 udelay(10);
1143 if (drive->quirk_list == 2) 1228 /* clear SRST, leave nIEN (unless device is on the quirk list) */
1144 ctl = ATA_DEVCTL_OBS; /* clear SRST and nIEN */ 1229 tp_ops->set_irq(hwif, drive->quirk_list == 2);
1145 else
1146 ctl = ATA_DEVCTL_OBS | 2; /* clear SRST, leave nIEN */
1147 hwif->OUTBSYNC(hwif, ctl, io_ports->ctl_addr);
1148 /* more than enough time */ 1230 /* more than enough time */
1149 udelay(10); 1231 udelay(10);
1150 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; 1232 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1189,7 +1271,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1189 * about locking issues (2.5 work ?). 1271 * about locking issues (2.5 work ?).
1190 */ 1272 */
1191 mdelay(1); 1273 mdelay(1);
1192 stat = hwif->INB(hwif->io_ports.status_addr); 1274 stat = hwif->tp_ops->read_status(hwif);
1193 if ((stat & BUSY_STAT) == 0) 1275 if ((stat & BUSY_STAT) == 0)
1194 return 0; 1276 return 0;
1195 /* 1277 /*
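
All of the accessors gathered into default_tp_ops above are now exported, so a host driver that needs one non-standard method can build its own ide_tp_ops from the generic helpers and override only what differs (ide-probe.c installs such a table via d->tp_ops further down). A hedged sketch, not taken from the patch; my_exec_command is hypothetical, everything else is exported in the hunks above.

#include <linux/ide.h>
#include <linux/io.h>

static void my_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	/* controller wants MMIO command writes regardless of host flags */
	writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
}

static const struct ide_tp_ops my_tp_ops = {
	.exec_command		= my_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.read_sff_dma_status	= ide_read_sff_dma_status,
	.set_irq		= ide_set_irq,
	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,
	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};
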
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 13af72f09ec4..97fefabea8b8 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -266,22 +266,11 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
266 266
267 rate = ide_rate_filter(drive, rate); 267 rate = ide_rate_filter(drive, rate);
268 268
269 BUG_ON(rate < XFER_PIO_0);
270
269 if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5) 271 if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
270 return ide_set_pio_mode(drive, rate); 272 return ide_set_pio_mode(drive, rate);
271 273
272 /*
273 * TODO: transfer modes 0x00-0x07 passed from the user-space are
274 * currently handled here which needs fixing (please note that such
275 * case could happen iff the transfer mode has already been set on
276 * the device by ide-proc.c::set_xfer_rate()).
277 */
278 if (rate < XFER_PIO_0) {
279 if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE)
280 return ide_set_dma_mode(drive, rate);
281 else
282 return ide_config_drive_speed(drive, rate);
283 }
284
285 return ide_set_dma_mode(drive, rate); 274 return ide_set_dma_mode(drive, rate);
286} 275}
287 276
@@ -336,7 +325,7 @@ static void ide_dump_sector(ide_drive_t *drive)
336 else 325 else
337 task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; 326 task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
338 327
339 drive->hwif->tf_read(drive, &task); 328 drive->hwif->tp_ops->tf_read(drive, &task);
340 329
341 if (lba48 || (tf->device & ATA_LBA)) 330 if (lba48 || (tf->device & ATA_LBA))
342 printk(", LBAsect=%llu", 331 printk(", LBAsect=%llu",
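
After the ide-lib.c hunk above, ide_set_xfer_rate() only accepts proper transfer-mode values: anything in XFER_PIO_0..XFER_PIO_5 goes to ide_set_pio_mode(), anything higher to ide_set_dma_mode(), and sub-PIO values now trip the added BUG_ON(). A tiny usage sketch under that assumption; the wrapper and the chosen mode are illustrative only.

#include <linux/ide.h>

static int example_request_pio4(ide_drive_t *drive)
{
	/* any value in XFER_PIO_0..XFER_PIO_5 is dispatched to ide_set_pio_mode() */
	return ide_set_xfer_rate(drive, XFER_PIO_4);
}
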
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 03f2ef5470a3..bac9b392b689 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -29,9 +29,10 @@ static struct pnp_device_id idepnp_devices[] = {
29 29
30static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) 30static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
31{ 31{
32 hw_regs_t hw; 32 struct ide_host *host;
33 ide_hwif_t *hwif;
34 unsigned long base, ctl; 33 unsigned long base, ctl;
34 int rc;
35 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
35 36
36 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); 37 printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
37 38
@@ -59,31 +60,25 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
59 hw.irq = pnp_irq(dev, 0); 60 hw.irq = pnp_irq(dev, 0);
60 hw.chipset = ide_generic; 61 hw.chipset = ide_generic;
61 62
62 hwif = ide_find_port(); 63 rc = ide_host_add(NULL, hws, &host);
63 if (hwif) { 64 if (rc)
64 u8 index = hwif->index; 65 goto out;
65 u8 idx[4] = { index, 0xff, 0xff, 0xff };
66 66
67 ide_init_port_hw(hwif, &hw); 67 pnp_set_drvdata(dev, host);
68
69 pnp_set_drvdata(dev, hwif);
70
71 ide_device_add(idx, NULL);
72
73 return 0;
74 }
75 68
69 return 0;
70out:
76 release_region(ctl, 1); 71 release_region(ctl, 1);
77 release_region(base, 8); 72 release_region(base, 8);
78 73
79 return -1; 74 return rc;
80} 75}
81 76
82static void idepnp_remove(struct pnp_dev *dev) 77static void idepnp_remove(struct pnp_dev *dev)
83{ 78{
84 ide_hwif_t *hwif = pnp_get_drvdata(dev); 79 struct ide_host *host = pnp_get_drvdata(dev);
85 80
86 ide_unregister(hwif); 81 ide_host_remove(host);
87 82
88 release_region(pnp_port_start(dev, 1), 1); 83 release_region(pnp_port_start(dev, 1), 1);
89 release_region(pnp_port_start(dev, 0), 8); 84 release_region(pnp_port_start(dev, 0), 8);
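
The PnP driver above now keeps the struct ide_host returned by ide_host_add() as driver data and tears it down with ide_host_remove(). A sketch of the remove side of that pairing, not part of the patch; resource handling is elided and the function name is hypothetical.

#include <linux/ide.h>
#include <linux/pnp.h>

static void example_pnp_remove(struct pnp_dev *dev)
{
	struct ide_host *host = pnp_get_drvdata(dev);

	ide_host_remove(host);
	/* release any I/O regions claimed at probe time here */
}
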
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 235ebdb29b28..4aa76c453755 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -39,8 +39,6 @@
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42static ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */
43
44/** 42/**
45 * generic_id - add a generic drive id 43 * generic_id - add a generic drive id
46 * @drive: drive to make an ID block for 44 * @drive: drive to make an ID block for
@@ -126,7 +124,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
126 124
127 id = drive->id; 125 id = drive->id;
128 /* read 512 bytes of id info */ 126 /* read 512 bytes of id info */
129 hwif->input_data(drive, NULL, id, SECTOR_SIZE); 127 hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
130 128
131 drive->id_read = 1; 129 drive->id_read = 1;
132 local_irq_enable(); 130 local_irq_enable();
@@ -267,6 +265,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
267{ 265{
268 ide_hwif_t *hwif = HWIF(drive); 266 ide_hwif_t *hwif = HWIF(drive);
269 struct ide_io_ports *io_ports = &hwif->io_ports; 267 struct ide_io_ports *io_ports = &hwif->io_ports;
268 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
270 int use_altstatus = 0, rc; 269 int use_altstatus = 0, rc;
271 unsigned long timeout; 270 unsigned long timeout;
272 u8 s = 0, a = 0; 271 u8 s = 0, a = 0;
@@ -275,8 +274,8 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
275 msleep(50); 274 msleep(50);
276 275
277 if (io_ports->ctl_addr) { 276 if (io_ports->ctl_addr) {
278 a = ide_read_altstatus(drive); 277 a = tp_ops->read_altstatus(hwif);
279 s = ide_read_status(drive); 278 s = tp_ops->read_status(hwif);
280 if ((a ^ s) & ~INDEX_STAT) 279 if ((a ^ s) & ~INDEX_STAT)
281 /* ancient Seagate drives, broken interfaces */ 280 /* ancient Seagate drives, broken interfaces */
282 printk(KERN_INFO "%s: probing with STATUS(0x%02x) " 281 printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
@@ -290,12 +289,18 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
290 /* set features register for atapi 289 /* set features register for atapi
291 * identify command to be sure of reply 290 * identify command to be sure of reply
292 */ 291 */
293 if ((cmd == WIN_PIDENTIFY)) 292 if (cmd == WIN_PIDENTIFY) {
294 /* disable dma & overlap */ 293 ide_task_t task;
295 hwif->OUTB(0, io_ports->feature_addr); 294
295 memset(&task, 0, sizeof(task));
296 /* disable DMA & overlap */
297 task.tf_flags = IDE_TFLAG_OUT_FEATURE;
298
299 tp_ops->tf_load(drive, &task);
300 }
296 301
297 /* ask drive for ID */ 302 /* ask drive for ID */
298 hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr); 303 tp_ops->exec_command(hwif, cmd);
299 304
300 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; 305 timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
301 timeout += jiffies; 306 timeout += jiffies;
@@ -306,13 +311,13 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
306 } 311 }
307 /* give drive a breather */ 312 /* give drive a breather */
308 msleep(50); 313 msleep(50);
309 s = use_altstatus ? ide_read_altstatus(drive) 314 s = use_altstatus ? tp_ops->read_altstatus(hwif)
310 : ide_read_status(drive); 315 : tp_ops->read_status(hwif);
311 } while (s & BUSY_STAT); 316 } while (s & BUSY_STAT);
312 317
313 /* wait for IRQ and DRQ_STAT */ 318 /* wait for IRQ and DRQ_STAT */
314 msleep(50); 319 msleep(50);
315 s = ide_read_status(drive); 320 s = tp_ops->read_status(hwif);
316 321
317 if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) { 322 if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) {
318 unsigned long flags; 323 unsigned long flags;
@@ -324,7 +329,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
324 /* drive responded with ID */ 329 /* drive responded with ID */
325 rc = 0; 330 rc = 0;
326 /* clear drive IRQ */ 331 /* clear drive IRQ */
327 (void)ide_read_status(drive); 332 (void)tp_ops->read_status(hwif);
328 local_irq_restore(flags); 333 local_irq_restore(flags);
329 } else { 334 } else {
330 /* drive refused ID */ 335 /* drive refused ID */
@@ -346,6 +351,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
346static int try_to_identify (ide_drive_t *drive, u8 cmd) 351static int try_to_identify (ide_drive_t *drive, u8 cmd)
347{ 352{
348 ide_hwif_t *hwif = HWIF(drive); 353 ide_hwif_t *hwif = HWIF(drive);
354 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
349 int retval; 355 int retval;
350 int autoprobe = 0; 356 int autoprobe = 0;
351 unsigned long cookie = 0; 357 unsigned long cookie = 0;
@@ -361,7 +367,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
361 autoprobe = 1; 367 autoprobe = 1;
362 cookie = probe_irq_on(); 368 cookie = probe_irq_on();
363 } 369 }
364 ide_set_irq(drive, autoprobe); 370 tp_ops->set_irq(hwif, autoprobe);
365 } 371 }
366 372
367 retval = actual_try_to_identify(drive, cmd); 373 retval = actual_try_to_identify(drive, cmd);
@@ -369,9 +375,9 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
369 if (autoprobe) { 375 if (autoprobe) {
370 int irq; 376 int irq;
371 377
372 ide_set_irq(drive, 0); 378 tp_ops->set_irq(hwif, 0);
373 /* clear drive IRQ */ 379 /* clear drive IRQ */
374 (void)ide_read_status(drive); 380 (void)tp_ops->read_status(hwif);
375 udelay(5); 381 udelay(5);
376 irq = probe_irq_off(cookie); 382 irq = probe_irq_off(cookie);
377 if (!hwif->irq) { 383 if (!hwif->irq) {
@@ -396,7 +402,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
396 402
397 do { 403 do {
398 msleep(50); 404 msleep(50);
399 stat = hwif->INB(hwif->io_ports.status_addr); 405 stat = hwif->tp_ops->read_status(hwif);
400 if ((stat & BUSY_STAT) == 0) 406 if ((stat & BUSY_STAT) == 0)
401 return 0; 407 return 0;
402 } while (time_before(jiffies, timeout)); 408 } while (time_before(jiffies, timeout));
@@ -404,6 +410,18 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
404 return 1; 410 return 1;
405} 411}
406 412
413static u8 ide_read_device(ide_drive_t *drive)
414{
415 ide_task_t task;
416
417 memset(&task, 0, sizeof(task));
418 task.tf_flags = IDE_TFLAG_IN_DEVICE;
419
420 drive->hwif->tp_ops->tf_read(drive, &task);
421
422 return task.tf.device;
423}
424
407/** 425/**
408 * do_probe - probe an IDE device 426 * do_probe - probe an IDE device
409 * @drive: drive to probe 427 * @drive: drive to probe
@@ -428,7 +446,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
428static int do_probe (ide_drive_t *drive, u8 cmd) 446static int do_probe (ide_drive_t *drive, u8 cmd)
429{ 447{
430 ide_hwif_t *hwif = HWIF(drive); 448 ide_hwif_t *hwif = HWIF(drive);
431 struct ide_io_ports *io_ports = &hwif->io_ports; 449 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
432 int rc; 450 int rc;
433 u8 stat; 451 u8 stat;
434 452
@@ -449,8 +467,8 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
449 msleep(50); 467 msleep(50);
450 SELECT_DRIVE(drive); 468 SELECT_DRIVE(drive);
451 msleep(50); 469 msleep(50);
452 if (hwif->INB(io_ports->device_addr) != drive->select.all && 470
453 !drive->present) { 471 if (ide_read_device(drive) != drive->select.all && !drive->present) {
454 if (drive->select.b.unit != 0) { 472 if (drive->select.b.unit != 0) {
455 /* exit with drive0 selected */ 473 /* exit with drive0 selected */
456 SELECT_DRIVE(&hwif->drives[0]); 474 SELECT_DRIVE(&hwif->drives[0]);
@@ -461,7 +479,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
461 return 3; 479 return 3;
462 } 480 }
463 481
464 stat = ide_read_status(drive); 482 stat = tp_ops->read_status(hwif);
465 483
466 if (OK_STAT(stat, READY_STAT, BUSY_STAT) || 484 if (OK_STAT(stat, READY_STAT, BUSY_STAT) ||
467 drive->present || cmd == WIN_PIDENTIFY) { 485 drive->present || cmd == WIN_PIDENTIFY) {
@@ -471,7 +489,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
471 rc = try_to_identify(drive,cmd); 489 rc = try_to_identify(drive,cmd);
472 } 490 }
473 491
474 stat = ide_read_status(drive); 492 stat = tp_ops->read_status(hwif);
475 493
476 if (stat == (BUSY_STAT | READY_STAT)) 494 if (stat == (BUSY_STAT | READY_STAT))
477 return 4; 495 return 4;
@@ -482,13 +500,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
482 msleep(50); 500 msleep(50);
483 SELECT_DRIVE(drive); 501 SELECT_DRIVE(drive);
484 msleep(50); 502 msleep(50);
485 hwif->OUTBSYNC(hwif, WIN_SRST, io_ports->command_addr); 503 tp_ops->exec_command(hwif, WIN_SRST);
486 (void)ide_busy_sleep(hwif); 504 (void)ide_busy_sleep(hwif);
487 rc = try_to_identify(drive, cmd); 505 rc = try_to_identify(drive, cmd);
488 } 506 }
489 507
490 /* ensure drive IRQ is clear */ 508 /* ensure drive IRQ is clear */
491 stat = ide_read_status(drive); 509 stat = tp_ops->read_status(hwif);
492 510
493 if (rc == 1) 511 if (rc == 1)
494 printk(KERN_ERR "%s: no response (status = 0x%02x)\n", 512 printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
@@ -502,7 +520,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
502 SELECT_DRIVE(&hwif->drives[0]); 520 SELECT_DRIVE(&hwif->drives[0]);
503 msleep(50); 521 msleep(50);
504 /* ensure drive irq is clear */ 522 /* ensure drive irq is clear */
505 (void)ide_read_status(drive); 523 (void)tp_ops->read_status(hwif);
506 } 524 }
507 return rc; 525 return rc;
508} 526}
@@ -513,12 +531,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
513static void enable_nest (ide_drive_t *drive) 531static void enable_nest (ide_drive_t *drive)
514{ 532{
515 ide_hwif_t *hwif = HWIF(drive); 533 ide_hwif_t *hwif = HWIF(drive);
534 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
516 u8 stat; 535 u8 stat;
517 536
518 printk("%s: enabling %s -- ", hwif->name, drive->id->model); 537 printk("%s: enabling %s -- ", hwif->name, drive->id->model);
519 SELECT_DRIVE(drive); 538 SELECT_DRIVE(drive);
520 msleep(50); 539 msleep(50);
521 hwif->OUTBSYNC(hwif, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr); 540 tp_ops->exec_command(hwif, EXABYTE_ENABLE_NEST);
522 541
523 if (ide_busy_sleep(hwif)) { 542 if (ide_busy_sleep(hwif)) {
524 printk(KERN_CONT "failed (timeout)\n"); 543 printk(KERN_CONT "failed (timeout)\n");
@@ -527,7 +546,7 @@ static void enable_nest (ide_drive_t *drive)
527 546
528 msleep(50); 547 msleep(50);
529 548
530 stat = ide_read_status(drive); 549 stat = tp_ops->read_status(hwif);
531 550
532 if (!OK_STAT(stat, 0, BAD_STAT)) 551 if (!OK_STAT(stat, 0, BAD_STAT))
533 printk(KERN_CONT "failed (status = 0x%02x)\n", stat); 552 printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
@@ -619,7 +638,7 @@ static inline u8 probe_for_drive (ide_drive_t *drive)
619 return drive->present; 638 return drive->present;
620} 639}
621 640
622static void hwif_release_dev (struct device *dev) 641static void hwif_release_dev(struct device *dev)
623{ 642{
624 ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev); 643 ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
625 644
@@ -709,7 +728,7 @@ static int ide_port_wait_ready(ide_hwif_t *hwif)
709 /* Ignore disks that we will not probe for later. */ 728 /* Ignore disks that we will not probe for later. */
710 if (!drive->noprobe || drive->present) { 729 if (!drive->noprobe || drive->present) {
711 SELECT_DRIVE(drive); 730 SELECT_DRIVE(drive);
712 ide_set_irq(drive, 1); 731 hwif->tp_ops->set_irq(hwif, 1);
713 mdelay(2); 732 mdelay(2);
714 rc = ide_wait_not_busy(hwif, 35000); 733 rc = ide_wait_not_busy(hwif, 35000);
715 if (rc) 734 if (rc)
@@ -971,6 +990,45 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
971 mutex_unlock(&ide_cfg_mtx); 990 mutex_unlock(&ide_cfg_mtx);
972} 991}
973 992
993static ide_hwif_t *ide_ports[MAX_HWIFS];
994
995void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
996{
997 ide_hwgroup_t *hwgroup = hwif->hwgroup;
998
999 ide_ports[hwif->index] = NULL;
1000
1001 spin_lock_irq(&ide_lock);
1002 /*
1003 * Remove us from the hwgroup, and free
1004 * the hwgroup if we were the only member
1005 */
1006 if (hwif->next == hwif) {
1007 BUG_ON(hwgroup->hwif != hwif);
1008 kfree(hwgroup);
1009 } else {
1010 /* There is another interface in hwgroup.
1011 * Unlink us, and set hwgroup->drive and ->hwif to
1012 * something sane.
1013 */
1014 ide_hwif_t *g = hwgroup->hwif;
1015
1016 while (g->next != hwif)
1017 g = g->next;
1018 g->next = hwif->next;
1019 if (hwgroup->hwif == hwif) {
 1020 /* Choose a random hwif for hwgroup->hwif.
1021 * It's guaranteed that there are no drives
1022 * left in the hwgroup.
1023 */
1024 BUG_ON(hwgroup->drive != NULL);
1025 hwgroup->hwif = g;
1026 }
1027 BUG_ON(hwgroup->hwif == hwif);
1028 }
1029 spin_unlock_irq(&ide_lock);
1030}
1031
974/* 1032/*
975 * This routine sets up the irq for an ide interface, and creates a new 1033 * This routine sets up the irq for an ide interface, and creates a new
976 * hwgroup for the irq/hwif if none was previously assigned. 1034 * hwgroup for the irq/hwif if none was previously assigned.
@@ -998,8 +1056,9 @@ static int init_irq (ide_hwif_t *hwif)
998 * Group up with any other hwifs that share our irq(s). 1056 * Group up with any other hwifs that share our irq(s).
999 */ 1057 */
1000 for (index = 0; index < MAX_HWIFS; index++) { 1058 for (index = 0; index < MAX_HWIFS; index++) {
1001 ide_hwif_t *h = &ide_hwifs[index]; 1059 ide_hwif_t *h = ide_ports[index];
1002 if (h->hwgroup) { /* scan only initialized hwif's */ 1060
1061 if (h && h->hwgroup) { /* scan only initialized ports */
1003 if (hwif->irq == h->irq) { 1062 if (hwif->irq == h->irq) {
1004 hwif->sharing_irq = h->sharing_irq = 1; 1063 hwif->sharing_irq = h->sharing_irq = 1;
1005 if (hwif->chipset != ide_pci || 1064 if (hwif->chipset != ide_pci ||
@@ -1053,6 +1112,8 @@ static int init_irq (ide_hwif_t *hwif)
1053 hwgroup->timer.data = (unsigned long) hwgroup; 1112 hwgroup->timer.data = (unsigned long) hwgroup;
1054 } 1113 }
1055 1114
1115 ide_ports[hwif->index] = hwif;
1116
1056 /* 1117 /*
1057 * Allocate the irq, if not already obtained for another hwif 1118 * Allocate the irq, if not already obtained for another hwif
1058 */ 1119 */
@@ -1066,8 +1127,7 @@ static int init_irq (ide_hwif_t *hwif)
1066 sa = IRQF_SHARED; 1127 sa = IRQF_SHARED;
1067 1128
1068 if (io_ports->ctl_addr) 1129 if (io_ports->ctl_addr)
1069 /* clear nIEN */ 1130 hwif->tp_ops->set_irq(hwif, 1);
1070 hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS, io_ports->ctl_addr);
1071 1131
1072 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) 1132 if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
1073 goto out_unlink; 1133 goto out_unlink;
@@ -1345,6 +1405,9 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1345 hwif->host_flags |= d->host_flags; 1405 hwif->host_flags |= d->host_flags;
1346 hwif->pio_mask = d->pio_mask; 1406 hwif->pio_mask = d->pio_mask;
1347 1407
1408 if (d->tp_ops)
1409 hwif->tp_ops = d->tp_ops;
1410
1348 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */ 1411 /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
1349 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0) 1412 if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
1350 hwif->port_ops = d->port_ops; 1413 hwif->port_ops = d->port_ops;
@@ -1363,6 +1426,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1363 1426
1364 if (rc < 0) { 1427 if (rc < 0) {
1365 printk(KERN_INFO "%s: DMA disabled\n", hwif->name); 1428 printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
1429 hwif->dma_base = 0;
1366 hwif->swdma_mask = 0; 1430 hwif->swdma_mask = 0;
1367 hwif->mwdma_mask = 0; 1431 hwif->mwdma_mask = 0;
1368 hwif->ultra_mask = 0; 1432 hwif->ultra_mask = 0;
@@ -1446,18 +1510,20 @@ static int ide_sysfs_register_port(ide_hwif_t *hwif)
1446 return rc; 1510 return rc;
1447} 1511}
1448 1512
1513static unsigned int ide_indexes;
1514
1449/** 1515/**
1450 * ide_find_port_slot - find free ide_hwifs[] slot 1516 * ide_find_port_slot - find free port slot
1451 * @d: IDE port info 1517 * @d: IDE port info
1452 * 1518 *
1453 * Return the new hwif. If we are out of free slots return NULL. 1519 * Return the new port slot index or -ENOENT if we are out of free slots.
1454 */ 1520 */
1455 1521
1456ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d) 1522static int ide_find_port_slot(const struct ide_port_info *d)
1457{ 1523{
1458 ide_hwif_t *hwif; 1524 int idx = -ENOENT;
1459 int i;
1460 u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1; 1525 u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
 1526 u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
1461 1527
1462 /* 1528 /*
1463 * Claim an unassigned slot. 1529 * Claim an unassigned slot.
@@ -1469,51 +1535,106 @@ ide_hwif_t *ide_find_port_slot(const struct ide_port_info *d)
1469 * Unless there is a bootable card that does not use the standard 1535 * Unless there is a bootable card that does not use the standard
1470 * ports 0x1f0/0x170 (the ide0/ide1 defaults). 1536 * ports 0x1f0/0x170 (the ide0/ide1 defaults).
1471 */ 1537 */
1472 if (bootable) { 1538 mutex_lock(&ide_cfg_mtx);
1473 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0; 1539 if (MAX_HWIFS == 1) {
1474 1540 if (ide_indexes == 0 && i == 0)
1475 for (; i < MAX_HWIFS; i++) { 1541 idx = 1;
1476 hwif = &ide_hwifs[i];
1477 if (hwif->chipset == ide_unknown)
1478 goto out_found;
1479 }
1480 } else { 1542 } else {
1481 for (i = 2; i < MAX_HWIFS; i++) { 1543 if (bootable) {
1482 hwif = &ide_hwifs[i]; 1544 if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
1483 if (hwif->chipset == ide_unknown) 1545 idx = ffz(ide_indexes | i);
1484 goto out_found; 1546 } else {
1547 if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
1548 idx = ffz(ide_indexes | 3);
1549 else if ((ide_indexes & 3) != 3)
1550 idx = ffz(ide_indexes);
1485 } 1551 }
1486 for (i = 0; i < 2 && i < MAX_HWIFS; i++) { 1552 }
1487 hwif = &ide_hwifs[i]; 1553 if (idx >= 0)
1488 if (hwif->chipset == ide_unknown) 1554 ide_indexes |= (1 << idx);
1489 goto out_found; 1555 mutex_unlock(&ide_cfg_mtx);
1556
1557 return idx;
1558}
1559
1560static void ide_free_port_slot(int idx)
1561{
1562 mutex_lock(&ide_cfg_mtx);
1563 ide_indexes &= ~(1 << idx);
1564 mutex_unlock(&ide_cfg_mtx);
1565}
1566
1567struct ide_host *ide_host_alloc_all(const struct ide_port_info *d,
1568 hw_regs_t **hws)
1569{
1570 struct ide_host *host;
1571 int i;
1572
1573 host = kzalloc(sizeof(*host), GFP_KERNEL);
1574 if (host == NULL)
1575 return NULL;
1576
1577 for (i = 0; i < MAX_HWIFS; i++) {
1578 ide_hwif_t *hwif;
1579 int idx;
1580
1581 if (hws[i] == NULL)
1582 continue;
1583
1584 hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
1585 if (hwif == NULL)
1586 continue;
1587
1588 idx = ide_find_port_slot(d);
1589 if (idx < 0) {
1590 printk(KERN_ERR "%s: no free slot for interface\n",
1591 d ? d->name : "ide");
1592 kfree(hwif);
1593 continue;
1490 } 1594 }
1595
1596 ide_init_port_data(hwif, idx);
1597
1598 host->ports[i] = hwif;
1599 host->n_ports++;
1491 } 1600 }
1492 1601
1493 printk(KERN_ERR "%s: no free slot for interface\n", 1602 if (host->n_ports == 0) {
1494 d ? d->name : "ide"); 1603 kfree(host);
1604 return NULL;
1605 }
1495 1606
1496 return NULL; 1607 return host;
1608}
1609EXPORT_SYMBOL_GPL(ide_host_alloc_all);
1610
1611struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
1612{
1613 hw_regs_t *hws_all[MAX_HWIFS];
1614 int i;
1497 1615
1498out_found: 1616 for (i = 0; i < MAX_HWIFS; i++)
1499 ide_init_port_data(hwif, i); 1617 hws_all[i] = (i < 4) ? hws[i] : NULL;
1500 return hwif; 1618
1619 return ide_host_alloc_all(d, hws_all);
1501} 1620}
1502EXPORT_SYMBOL_GPL(ide_find_port_slot); 1621EXPORT_SYMBOL_GPL(ide_host_alloc);
1503 1622
1504int ide_device_add_all(u8 *idx, const struct ide_port_info *d) 1623int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1624 hw_regs_t **hws)
1505{ 1625{
1506 ide_hwif_t *hwif, *mate = NULL; 1626 ide_hwif_t *hwif, *mate = NULL;
1507 int i, rc = 0; 1627 int i, j = 0;
1508 1628
1509 for (i = 0; i < MAX_HWIFS; i++) { 1629 for (i = 0; i < MAX_HWIFS; i++) {
1510 if (idx[i] == 0xff) { 1630 hwif = host->ports[i];
1631
1632 if (hwif == NULL) {
1511 mate = NULL; 1633 mate = NULL;
1512 continue; 1634 continue;
1513 } 1635 }
1514 1636
1515 hwif = &ide_hwifs[idx[i]]; 1637 ide_init_port_hw(hwif, hws[i]);
1516
1517 ide_port_apply_params(hwif); 1638 ide_port_apply_params(hwif);
1518 1639
1519 if (d == NULL) { 1640 if (d == NULL) {
@@ -1534,10 +1655,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1534 } 1655 }
1535 1656
1536 for (i = 0; i < MAX_HWIFS; i++) { 1657 for (i = 0; i < MAX_HWIFS; i++) {
1537 if (idx[i] == 0xff) 1658 hwif = host->ports[i];
1538 continue;
1539 1659
1540 hwif = &ide_hwifs[idx[i]]; 1660 if (hwif == NULL)
1661 continue;
1541 1662
1542 if (ide_probe_port(hwif) == 0) 1663 if (ide_probe_port(hwif) == 0)
1543 hwif->present = 1; 1664 hwif->present = 1;
@@ -1551,19 +1672,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1551 } 1672 }
1552 1673
1553 for (i = 0; i < MAX_HWIFS; i++) { 1674 for (i = 0; i < MAX_HWIFS; i++) {
1554 if (idx[i] == 0xff) 1675 hwif = host->ports[i];
1555 continue;
1556 1676
1557 hwif = &ide_hwifs[idx[i]]; 1677 if (hwif == NULL)
1678 continue;
1558 1679
1559 if (hwif_init(hwif) == 0) { 1680 if (hwif_init(hwif) == 0) {
1560 printk(KERN_INFO "%s: failed to initialize IDE " 1681 printk(KERN_INFO "%s: failed to initialize IDE "
1561 "interface\n", hwif->name); 1682 "interface\n", hwif->name);
1562 hwif->present = 0; 1683 hwif->present = 0;
1563 rc = -1;
1564 continue; 1684 continue;
1565 } 1685 }
1566 1686
1687 j++;
1688
1567 if (hwif->present) 1689 if (hwif->present)
1568 ide_port_setup_devices(hwif); 1690 ide_port_setup_devices(hwif);
1569 1691
@@ -1574,10 +1696,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1574 } 1696 }
1575 1697
1576 for (i = 0; i < MAX_HWIFS; i++) { 1698 for (i = 0; i < MAX_HWIFS; i++) {
1577 if (idx[i] == 0xff) 1699 hwif = host->ports[i];
1578 continue;
1579 1700
1580 hwif = &ide_hwifs[idx[i]]; 1701 if (hwif == NULL)
1702 continue;
1581 1703
1582 if (hwif->chipset == ide_unknown) 1704 if (hwif->chipset == ide_unknown)
1583 hwif->chipset = ide_generic; 1705 hwif->chipset = ide_generic;
@@ -1587,10 +1709,10 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1587 } 1709 }
1588 1710
1589 for (i = 0; i < MAX_HWIFS; i++) { 1711 for (i = 0; i < MAX_HWIFS; i++) {
1590 if (idx[i] == 0xff) 1712 hwif = host->ports[i];
1591 continue;
1592 1713
1593 hwif = &ide_hwifs[idx[i]]; 1714 if (hwif == NULL)
1715 continue;
1594 1716
1595 ide_sysfs_register_port(hwif); 1717 ide_sysfs_register_port(hwif);
1596 ide_proc_register_port(hwif); 1718 ide_proc_register_port(hwif);
@@ -1599,21 +1721,64 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
1599 ide_proc_port_register_devices(hwif); 1721 ide_proc_port_register_devices(hwif);
1600 } 1722 }
1601 1723
1602 return rc; 1724 return j ? 0 : -1;
1603} 1725}
1604EXPORT_SYMBOL_GPL(ide_device_add_all); 1726EXPORT_SYMBOL_GPL(ide_host_register);
1605 1727
1606int ide_device_add(u8 idx[4], const struct ide_port_info *d) 1728int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws,
1729 struct ide_host **hostp)
1607{ 1730{
1608 u8 idx_all[MAX_HWIFS]; 1731 struct ide_host *host;
1732 int rc;
1733
1734 host = ide_host_alloc(d, hws);
1735 if (host == NULL)
1736 return -ENOMEM;
1737
1738 rc = ide_host_register(host, d, hws);
1739 if (rc) {
1740 ide_host_free(host);
1741 return rc;
1742 }
1743
1744 if (hostp)
1745 *hostp = host;
1746
1747 return 0;
1748}
1749EXPORT_SYMBOL_GPL(ide_host_add);
1750
1751void ide_host_free(struct ide_host *host)
1752{
1753 ide_hwif_t *hwif;
1609 int i; 1754 int i;
1610 1755
1611 for (i = 0; i < MAX_HWIFS; i++) 1756 for (i = 0; i < MAX_HWIFS; i++) {
1612 idx_all[i] = (i < 4) ? idx[i] : 0xff; 1757 hwif = host->ports[i];
1613 1758
1614 return ide_device_add_all(idx_all, d); 1759 if (hwif == NULL)
1760 continue;
1761
1762 ide_free_port_slot(hwif->index);
1763 kfree(hwif);
1764 }
1765
1766 kfree(host);
1615} 1767}
1616EXPORT_SYMBOL_GPL(ide_device_add); 1768EXPORT_SYMBOL_GPL(ide_host_free);
1769
1770void ide_host_remove(struct ide_host *host)
1771{
1772 int i;
1773
1774 for (i = 0; i < MAX_HWIFS; i++) {
1775 if (host->ports[i])
1776 ide_unregister(host->ports[i]);
1777 }
1778
1779 ide_host_free(host);
1780}
1781EXPORT_SYMBOL_GPL(ide_host_remove);
1617 1782
1618void ide_port_scan(ide_hwif_t *hwif) 1783void ide_port_scan(ide_hwif_t *hwif)
1619{ 1784{
@@ -1634,11 +1799,10 @@ void ide_port_scan(ide_hwif_t *hwif)
1634} 1799}
1635EXPORT_SYMBOL_GPL(ide_port_scan); 1800EXPORT_SYMBOL_GPL(ide_port_scan);
1636 1801
1637static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no, 1802static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
1638 const struct ide_port_info *d, 1803 u8 port_no, const struct ide_port_info *d,
1639 unsigned long config) 1804 unsigned long config)
1640{ 1805{
1641 ide_hwif_t *hwif;
1642 unsigned long base, ctl; 1806 unsigned long base, ctl;
1643 int irq; 1807 int irq;
1644 1808
@@ -1668,33 +1832,25 @@ static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
1668 ide_std_init_ports(hw, base, ctl); 1832 ide_std_init_ports(hw, base, ctl);
1669 hw->irq = irq; 1833 hw->irq = irq;
1670 hw->chipset = d->chipset; 1834 hw->chipset = d->chipset;
1835 hw->config = config;
1671 1836
1672 hwif = ide_find_port_slot(d); 1837 hws[port_no] = hw;
1673 if (hwif) {
1674 ide_init_port_hw(hwif, hw);
1675 if (config)
1676 hwif->config_data = config;
1677 idx[port_no] = hwif->index;
1678 }
1679} 1838}
1680 1839
1681int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) 1840int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
1682{ 1841{
1683 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 1842 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
1684 hw_regs_t hw[2];
1685 1843
1686 memset(&hw, 0, sizeof(hw)); 1844 memset(&hw, 0, sizeof(hw));
1687 1845
1688 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0) 1846 if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
1689 ide_legacy_init_one(idx, &hw[0], 0, d, config); 1847 ide_legacy_init_one(hws, &hw[0], 0, d, config);
1690 ide_legacy_init_one(idx, &hw[1], 1, d, config); 1848 ide_legacy_init_one(hws, &hw[1], 1, d, config);
1691 1849
1692 if (idx[0] == 0xff && idx[1] == 0xff && 1850 if (hws[0] == NULL && hws[1] == NULL &&
1693 (d->host_flags & IDE_HFLAG_SINGLE)) 1851 (d->host_flags & IDE_HFLAG_SINGLE))
1694 return -ENOENT; 1852 return -ENOENT;
1695 1853
1696 ide_device_add(idx, d); 1854 return ide_host_add(d, hws, NULL);
1697
1698 return 0;
1699} 1855}
1700EXPORT_SYMBOL_GPL(ide_legacy_device_add); 1856EXPORT_SYMBOL_GPL(ide_legacy_device_add);
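The hunks above replace the old idx[]/ide_device_add() flow with the struct ide_host API (ide_host_alloc(), ide_host_register(), ide_host_add(), ide_host_free(), ide_host_remove()). A minimal probe using the new entry points could look like the sketch below; the example_* names and the legacy 0x1f0/0x3f6/IRQ 14 resources are illustrative assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ide.h>

static const struct ide_port_info example_port_info = {
	.host_flags	= IDE_HFLAG_NO_DMA,
};

static int __init example_ide_init(void)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	struct ide_host *host;
	int rc;

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* taskfile + control blocks */
	hw.irq = 14;

	/* allocate the host, claim port slots, register and probe in one go */
	rc = ide_host_add(&example_port_info, hws, &host);
	if (rc)
		return rc;

	/* keep "host" around if the driver ever needs ide_host_remove() */
	return 0;
}
module_init(example_ide_init);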
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 8af88bf0969b..151c91e933da 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -345,7 +345,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
345 ide_task_t task; 345 ide_task_t task;
346 int err; 346 int err;
347 347
348 if (arg < 0 || arg > 70) 348 if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
349 return -EINVAL; 349 return -EINVAL;
350 350
351 memset(&task, 0, sizeof(task)); 351 memset(&task, 0, sizeof(task));
@@ -357,7 +357,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
357 357
358 err = ide_no_data_taskfile(drive, &task); 358 err = ide_no_data_taskfile(drive, &task);
359 359
360 if (!err && arg) { 360 if (!err) {
361 ide_set_xfer_rate(drive, (u8) arg); 361 ide_set_xfer_rate(drive, (u8) arg);
362 ide_driveid_update(drive); 362 ide_driveid_update(drive);
363 } 363 }
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 353dd11b9283..6962ca4891a1 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -195,23 +195,6 @@ enum {
195#define IDETAPE_BLOCK_DESCRIPTOR 0 195#define IDETAPE_BLOCK_DESCRIPTOR 0
196#define IDETAPE_CAPABILITIES_PAGE 0x2a 196#define IDETAPE_CAPABILITIES_PAGE 0x2a
197 197
198/* Tape flag bits values. */
199enum {
200 IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
201 /* 0 When the tape position is unknown */
202 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
203 /* Device already opened */
204 IDETAPE_FLAG_BUSY = (1 << 2),
205 /* Attempt to auto-detect the current user block size */
206 IDETAPE_FLAG_DETECT_BS = (1 << 3),
207 /* Currently on a filemark */
208 IDETAPE_FLAG_FILEMARK = (1 << 4),
209 /* DRQ interrupt device */
210 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
211 /* 0 = no tape is loaded, so we don't rewind after ejecting */
212 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
213};
214
215/* 198/*
216 * Most of our global data which we need to save even as we leave the driver due 199 * Most of our global data which we need to save even as we leave the driver due
217 * to an interrupt or a timer event is stored in the struct defined below. 200 * to an interrupt or a timer event is stored in the struct defined below.
@@ -312,8 +295,6 @@ typedef struct ide_tape_obj {
312 /* Wasted space in each stage */ 295 /* Wasted space in each stage */
313 int excess_bh_size; 296 int excess_bh_size;
314 297
315 /* Status/Action flags: long for set_bit */
316 unsigned long flags;
317 /* protects the ide-tape queue */ 298 /* protects the ide-tape queue */
318 spinlock_t lock; 299 spinlock_t lock;
319 300
@@ -398,7 +379,7 @@ static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
398 count = min( 379 count = min(
399 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)), 380 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
400 bcount); 381 bcount);
401 drive->hwif->input_data(drive, NULL, bh->b_data + 382 drive->hwif->tp_ops->input_data(drive, NULL, bh->b_data +
402 atomic_read(&bh->b_count), count); 383 atomic_read(&bh->b_count), count);
403 bcount -= count; 384 bcount -= count;
404 atomic_add(count, &bh->b_count); 385 atomic_add(count, &bh->b_count);
@@ -424,7 +405,7 @@ static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
424 return; 405 return;
425 } 406 }
426 count = min((unsigned int)pc->b_count, (unsigned int)bcount); 407 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
427 drive->hwif->output_data(drive, NULL, pc->b_data, count); 408 drive->hwif->tp_ops->output_data(drive, NULL, pc->b_data, count);
428 bcount -= count; 409 bcount -= count;
429 pc->b_data += count; 410 pc->b_data += count;
430 pc->b_count -= count; 411 pc->b_count -= count;
@@ -585,7 +566,6 @@ static void ide_tape_kfree_buffer(idetape_tape_t *tape)
585 bh = bh->b_reqnext; 566 bh = bh->b_reqnext;
586 kfree(prev_bh); 567 kfree(prev_bh);
587 } 568 }
588 kfree(tape->merge_bh);
589} 569}
590 570
591static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) 571static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
@@ -665,7 +645,7 @@ static void ide_tape_callback(ide_drive_t *drive)
665 if (readpos[0] & 0x4) { 645 if (readpos[0] & 0x4) {
666 printk(KERN_INFO "ide-tape: Block location is unknown" 646 printk(KERN_INFO "ide-tape: Block location is unknown"
667 "to the tape\n"); 647 "to the tape\n");
668 clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags); 648 clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
669 uptodate = 0; 649 uptodate = 0;
670 } else { 650 } else {
671 debug_log(DBG_SENSE, "Block Location - %u\n", 651 debug_log(DBG_SENSE, "Block Location - %u\n",
@@ -673,7 +653,7 @@ static void ide_tape_callback(ide_drive_t *drive)
673 653
674 tape->partition = readpos[1]; 654 tape->partition = readpos[1];
675 tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]); 655 tape->first_frame = be32_to_cpu(*(u32 *)&readpos[4]);
676 set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags); 656 set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags);
677 } 657 }
678 } 658 }
679 659
@@ -690,7 +670,6 @@ static void idetape_init_pc(struct ide_atapi_pc *pc)
690 pc->buf_size = IDETAPE_PC_BUFFER_SIZE; 670 pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
691 pc->bh = NULL; 671 pc->bh = NULL;
692 pc->b_data = NULL; 672 pc->b_data = NULL;
693 pc->callback = ide_tape_callback;
694} 673}
695 674
696static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc) 675static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
@@ -705,7 +684,7 @@ static void idetape_init_rq(struct request *rq, u8 cmd)
705{ 684{
706 blk_rq_init(NULL, rq); 685 blk_rq_init(NULL, rq);
707 rq->cmd_type = REQ_TYPE_SPECIAL; 686 rq->cmd_type = REQ_TYPE_SPECIAL;
708 rq->cmd[0] = cmd; 687 rq->cmd[13] = cmd;
709} 688}
710 689
711/* 690/*
@@ -732,6 +711,7 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
732 rq->cmd_flags |= REQ_PREEMPT; 711 rq->cmd_flags |= REQ_PREEMPT;
733 rq->buffer = (char *) pc; 712 rq->buffer = (char *) pc;
734 rq->rq_disk = tape->disk; 713 rq->rq_disk = tape->disk;
714 memcpy(rq->cmd, pc->c, 12);
735 ide_do_drive_cmd(drive, rq); 715 ide_do_drive_cmd(drive, rq);
736} 716}
737 717
@@ -742,7 +722,6 @@ static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
742 */ 722 */
743static void idetape_retry_pc(ide_drive_t *drive) 723static void idetape_retry_pc(ide_drive_t *drive)
744{ 724{
745 idetape_tape_t *tape = drive->driver_data;
746 struct ide_atapi_pc *pc; 725 struct ide_atapi_pc *pc;
747 struct request *rq; 726 struct request *rq;
748 727
@@ -750,7 +729,7 @@ static void idetape_retry_pc(ide_drive_t *drive)
750 pc = idetape_next_pc_storage(drive); 729 pc = idetape_next_pc_storage(drive);
751 rq = idetape_next_rq_storage(drive); 730 rq = idetape_next_rq_storage(drive);
752 idetape_create_request_sense_cmd(pc); 731 idetape_create_request_sense_cmd(pc);
753 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 732 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
754 idetape_queue_pc_head(drive, pc, rq); 733 idetape_queue_pc_head(drive, pc, rq);
755} 734}
756 735
@@ -887,7 +866,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
887 pc->error = IDETAPE_ERROR_GENERAL; 866 pc->error = IDETAPE_ERROR_GENERAL;
888 } 867 }
889 tape->failed_pc = NULL; 868 tape->failed_pc = NULL;
890 pc->callback(drive); 869 drive->pc_callback(drive);
891 return ide_stopped; 870 return ide_stopped;
892 } 871 }
893 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]); 872 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
@@ -927,11 +906,12 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
927 906
928static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) 907static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
929{ 908{
909 ide_hwif_t *hwif = drive->hwif;
930 idetape_tape_t *tape = drive->driver_data; 910 idetape_tape_t *tape = drive->driver_data;
931 struct ide_atapi_pc *pc = tape->pc; 911 struct ide_atapi_pc *pc = tape->pc;
932 u8 stat; 912 u8 stat;
933 913
934 stat = ide_read_status(drive); 914 stat = hwif->tp_ops->read_status(hwif);
935 915
936 if (stat & SEEK_STAT) { 916 if (stat & SEEK_STAT) {
937 if (stat & ERR_STAT) { 917 if (stat & ERR_STAT) {
@@ -948,14 +928,17 @@ static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
948 pc->error = IDETAPE_ERROR_GENERAL; 928 pc->error = IDETAPE_ERROR_GENERAL;
949 tape->failed_pc = NULL; 929 tape->failed_pc = NULL;
950 } 930 }
951 pc->callback(drive); 931 drive->pc_callback(drive);
952 return ide_stopped; 932 return ide_stopped;
953} 933}
954 934
955static void ide_tape_create_rw_cmd(idetape_tape_t *tape, 935static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
956 struct ide_atapi_pc *pc, unsigned int length, 936 struct ide_atapi_pc *pc, struct request *rq,
957 struct idetape_bh *bh, u8 opcode) 937 u8 opcode)
958{ 938{
939 struct idetape_bh *bh = (struct idetape_bh *)rq->special;
940 unsigned int length = rq->current_nr_sectors;
941
959 idetape_init_pc(pc); 942 idetape_init_pc(pc);
960 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); 943 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
961 pc->c[1] = 1; 944 pc->c[1] = 1;
@@ -975,11 +958,14 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
975 pc->b_data = bh->b_data; 958 pc->b_data = bh->b_data;
976 pc->b_count = atomic_read(&bh->b_count); 959 pc->b_count = atomic_read(&bh->b_count);
977 } 960 }
961
962 memcpy(rq->cmd, pc->c, 12);
978} 963}
979 964
980static ide_startstop_t idetape_do_request(ide_drive_t *drive, 965static ide_startstop_t idetape_do_request(ide_drive_t *drive,
981 struct request *rq, sector_t block) 966 struct request *rq, sector_t block)
982{ 967{
968 ide_hwif_t *hwif = drive->hwif;
983 idetape_tape_t *tape = drive->driver_data; 969 idetape_tape_t *tape = drive->driver_data;
984 struct ide_atapi_pc *pc = NULL; 970 struct ide_atapi_pc *pc = NULL;
985 struct request *postponed_rq = tape->postponed_rq; 971 struct request *postponed_rq = tape->postponed_rq;
@@ -1017,17 +1003,17 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1017 * If the tape is still busy, postpone our request and service 1003 * If the tape is still busy, postpone our request and service
1018 * the other device meanwhile. 1004 * the other device meanwhile.
1019 */ 1005 */
1020 stat = ide_read_status(drive); 1006 stat = hwif->tp_ops->read_status(hwif);
1021 1007
1022 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2)) 1008 if (!drive->dsc_overlap && !(rq->cmd[13] & REQ_IDETAPE_PC2))
1023 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 1009 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
1024 1010
1025 if (drive->post_reset == 1) { 1011 if (drive->post_reset == 1) {
1026 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags); 1012 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
1027 drive->post_reset = 0; 1013 drive->post_reset = 0;
1028 } 1014 }
1029 1015
1030 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) && 1016 if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) &&
1031 (stat & SEEK_STAT) == 0) { 1017 (stat & SEEK_STAT) == 0) {
1032 if (postponed_rq == NULL) { 1018 if (postponed_rq == NULL) {
1033 tape->dsc_polling_start = jiffies; 1019 tape->dsc_polling_start = jiffies;
@@ -1036,7 +1022,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1036 } else if (time_after(jiffies, tape->dsc_timeout)) { 1022 } else if (time_after(jiffies, tape->dsc_timeout)) {
1037 printk(KERN_ERR "ide-tape: %s: DSC timeout\n", 1023 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1038 tape->name); 1024 tape->name);
1039 if (rq->cmd[0] & REQ_IDETAPE_PC2) { 1025 if (rq->cmd[13] & REQ_IDETAPE_PC2) {
1040 idetape_media_access_finished(drive); 1026 idetape_media_access_finished(drive);
1041 return ide_stopped; 1027 return ide_stopped;
1042 } else { 1028 } else {
@@ -1049,35 +1035,29 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1049 idetape_postpone_request(drive); 1035 idetape_postpone_request(drive);
1050 return ide_stopped; 1036 return ide_stopped;
1051 } 1037 }
1052 if (rq->cmd[0] & REQ_IDETAPE_READ) { 1038 if (rq->cmd[13] & REQ_IDETAPE_READ) {
1053 pc = idetape_next_pc_storage(drive); 1039 pc = idetape_next_pc_storage(drive);
1054 ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors, 1040 ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
1055 (struct idetape_bh *)rq->special,
1056 READ_6);
1057 goto out; 1041 goto out;
1058 } 1042 }
1059 if (rq->cmd[0] & REQ_IDETAPE_WRITE) { 1043 if (rq->cmd[13] & REQ_IDETAPE_WRITE) {
1060 pc = idetape_next_pc_storage(drive); 1044 pc = idetape_next_pc_storage(drive);
1061 ide_tape_create_rw_cmd(tape, pc, rq->current_nr_sectors, 1045 ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
1062 (struct idetape_bh *)rq->special,
1063 WRITE_6);
1064 goto out; 1046 goto out;
1065 } 1047 }
1066 if (rq->cmd[0] & REQ_IDETAPE_PC1) { 1048 if (rq->cmd[13] & REQ_IDETAPE_PC1) {
1067 pc = (struct ide_atapi_pc *) rq->buffer; 1049 pc = (struct ide_atapi_pc *) rq->buffer;
1068 rq->cmd[0] &= ~(REQ_IDETAPE_PC1); 1050 rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
1069 rq->cmd[0] |= REQ_IDETAPE_PC2; 1051 rq->cmd[13] |= REQ_IDETAPE_PC2;
1070 goto out; 1052 goto out;
1071 } 1053 }
1072 if (rq->cmd[0] & REQ_IDETAPE_PC2) { 1054 if (rq->cmd[13] & REQ_IDETAPE_PC2) {
1073 idetape_media_access_finished(drive); 1055 idetape_media_access_finished(drive);
1074 return ide_stopped; 1056 return ide_stopped;
1075 } 1057 }
1076 BUG(); 1058 BUG();
1077out:
1078 if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags))
1079 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
1080 1059
1060out:
1081 return idetape_issue_pc(drive, pc); 1061 return idetape_issue_pc(drive, pc);
1082} 1062}
1083 1063
@@ -1281,8 +1261,9 @@ static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1281 1261
1282 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 1262 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1283 rq->cmd_type = REQ_TYPE_SPECIAL; 1263 rq->cmd_type = REQ_TYPE_SPECIAL;
1284 rq->cmd[0] = REQ_IDETAPE_PC1; 1264 rq->cmd[13] = REQ_IDETAPE_PC1;
1285 rq->buffer = (char *)pc; 1265 rq->buffer = (char *)pc;
1266 memcpy(rq->cmd, pc->c, 12);
1286 error = blk_execute_rq(drive->queue, tape->disk, rq, 0); 1267 error = blk_execute_rq(drive->queue, tape->disk, rq, 0);
1287 blk_put_request(rq); 1268 blk_put_request(rq);
1288 return error; 1269 return error;
@@ -1304,7 +1285,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1304 int load_attempted = 0; 1285 int load_attempted = 0;
1305 1286
1306 /* Wait for the tape to become ready */ 1287 /* Wait for the tape to become ready */
1307 set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); 1288 set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
1308 timeout += jiffies; 1289 timeout += jiffies;
1309 while (time_before(jiffies, timeout)) { 1290 while (time_before(jiffies, timeout)) {
1310 idetape_create_test_unit_ready_cmd(&pc); 1291 idetape_create_test_unit_ready_cmd(&pc);
@@ -1397,7 +1378,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
1397 if (tape->chrdev_dir != IDETAPE_DIR_READ) 1378 if (tape->chrdev_dir != IDETAPE_DIR_READ)
1398 return; 1379 return;
1399 1380
1400 clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags); 1381 clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
1401 tape->merge_bh_size = 0; 1382 tape->merge_bh_size = 0;
1402 if (tape->merge_bh != NULL) { 1383 if (tape->merge_bh != NULL) {
1403 ide_tape_kfree_buffer(tape); 1384 ide_tape_kfree_buffer(tape);
@@ -1465,7 +1446,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
1465 1446
1466 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 1447 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
1467 rq->cmd_type = REQ_TYPE_SPECIAL; 1448 rq->cmd_type = REQ_TYPE_SPECIAL;
1468 rq->cmd[0] = cmd; 1449 rq->cmd[13] = cmd;
1469 rq->rq_disk = tape->disk; 1450 rq->rq_disk = tape->disk;
1470 rq->special = (void *)bh; 1451 rq->special = (void *)bh;
1471 rq->sector = tape->first_frame; 1452 rq->sector = tape->first_frame;
@@ -1636,7 +1617,7 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
1636 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); 1617 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
1637 1618
1638 /* If we are at a filemark, return a read length of 0 */ 1619 /* If we are at a filemark, return a read length of 0 */
1639 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1620 if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1640 return 0; 1621 return 0;
1641 1622
1642 idetape_init_read(drive); 1623 idetape_init_read(drive);
@@ -1746,7 +1727,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
1746 1727
1747 if (tape->chrdev_dir == IDETAPE_DIR_READ) { 1728 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
1748 tape->merge_bh_size = 0; 1729 tape->merge_bh_size = 0;
1749 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) 1730 if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags))
1750 ++count; 1731 ++count;
1751 ide_tape_discard_merge_buffer(drive, 0); 1732 ide_tape_discard_merge_buffer(drive, 0);
1752 } 1733 }
@@ -1801,7 +1782,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1801 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); 1782 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
1802 1783
1803 if (tape->chrdev_dir != IDETAPE_DIR_READ) { 1784 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1804 if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags)) 1785 if (test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags))
1805 if (count > tape->blk_size && 1786 if (count > tape->blk_size &&
1806 (count % tape->blk_size) == 0) 1787 (count % tape->blk_size) == 0)
1807 tape->user_bs_factor = count / tape->blk_size; 1788 tape->user_bs_factor = count / tape->blk_size;
@@ -1841,7 +1822,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
1841 tape->merge_bh_size = bytes_read-temp; 1822 tape->merge_bh_size = bytes_read-temp;
1842 } 1823 }
1843finish: 1824finish:
1844 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) { 1825 if (!actually_read && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) {
1845 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); 1826 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
1846 1827
1847 idetape_space_over_filemarks(drive, MTFSF, 1); 1828 idetape_space_over_filemarks(drive, MTFSF, 1);
@@ -2027,7 +2008,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2027 !IDETAPE_LU_LOAD_MASK); 2008 !IDETAPE_LU_LOAD_MASK);
2028 retval = idetape_queue_pc_tail(drive, &pc); 2009 retval = idetape_queue_pc_tail(drive, &pc);
2029 if (!retval) 2010 if (!retval)
2030 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); 2011 clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
2031 return retval; 2012 return retval;
2032 case MTNOP: 2013 case MTNOP:
2033 ide_tape_discard_merge_buffer(drive, 0); 2014 ide_tape_discard_merge_buffer(drive, 0);
@@ -2050,9 +2031,9 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2050 mt_count % tape->blk_size) 2031 mt_count % tape->blk_size)
2051 return -EIO; 2032 return -EIO;
2052 tape->user_bs_factor = mt_count / tape->blk_size; 2033 tape->user_bs_factor = mt_count / tape->blk_size;
2053 clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); 2034 clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
2054 } else 2035 } else
2055 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); 2036 set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags);
2056 return 0; 2037 return 0;
2057 case MTSEEK: 2038 case MTSEEK:
2058 ide_tape_discard_merge_buffer(drive, 0); 2039 ide_tape_discard_merge_buffer(drive, 0);
@@ -2202,20 +2183,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2202 2183
2203 filp->private_data = tape; 2184 filp->private_data = tape;
2204 2185
2205 if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) { 2186 if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) {
2206 retval = -EBUSY; 2187 retval = -EBUSY;
2207 goto out_put_tape; 2188 goto out_put_tape;
2208 } 2189 }
2209 2190
2210 retval = idetape_wait_ready(drive, 60 * HZ); 2191 retval = idetape_wait_ready(drive, 60 * HZ);
2211 if (retval) { 2192 if (retval) {
2212 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2193 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2213 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); 2194 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
2214 goto out_put_tape; 2195 goto out_put_tape;
2215 } 2196 }
2216 2197
2217 idetape_read_position(drive); 2198 idetape_read_position(drive);
2218 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags)) 2199 if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags))
2219 (void)idetape_rewind_tape(drive); 2200 (void)idetape_rewind_tape(drive);
2220 2201
2221 /* Read block size and write protect status from drive. */ 2202 /* Read block size and write protect status from drive. */
@@ -2231,7 +2212,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2231 if (tape->write_prot) { 2212 if (tape->write_prot) {
2232 if ((filp->f_flags & O_ACCMODE) == O_WRONLY || 2213 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
2233 (filp->f_flags & O_ACCMODE) == O_RDWR) { 2214 (filp->f_flags & O_ACCMODE) == O_RDWR) {
2234 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2215 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2235 retval = -EROFS; 2216 retval = -EROFS;
2236 goto out_put_tape; 2217 goto out_put_tape;
2237 } 2218 }
@@ -2291,7 +2272,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2291 ide_tape_discard_merge_buffer(drive, 1); 2272 ide_tape_discard_merge_buffer(drive, 1);
2292 } 2273 }
2293 2274
2294 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags)) 2275 if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags))
2295 (void) idetape_rewind_tape(drive); 2276 (void) idetape_rewind_tape(drive);
2296 if (tape->chrdev_dir == IDETAPE_DIR_NONE) { 2277 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2297 if (tape->door_locked == DOOR_LOCKED) { 2278 if (tape->door_locked == DOOR_LOCKED) {
@@ -2301,7 +2282,7 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2301 } 2282 }
2302 } 2283 }
2303 } 2284 }
2304 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags); 2285 clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags);
2305 ide_tape_put(tape); 2286 ide_tape_put(tape);
2306 unlock_kernel(); 2287 unlock_kernel();
2307 return 0; 2288 return 0;
@@ -2464,6 +2445,8 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2464 u8 gcw[2]; 2445 u8 gcw[2];
2465 u16 *ctl = (u16 *)&tape->caps[12]; 2446 u16 *ctl = (u16 *)&tape->caps[12];
2466 2447
2448 drive->pc_callback = ide_tape_callback;
2449
2467 spin_lock_init(&tape->lock); 2450 spin_lock_init(&tape->lock);
2468 drive->dsc_overlap = 1; 2451 drive->dsc_overlap = 1;
2469 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) { 2452 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
@@ -2484,7 +2467,7 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2484 2467
2485 /* Command packet DRQ type */ 2468 /* Command packet DRQ type */
2486 if (((gcw[0] & 0x60) >> 5) == 1) 2469 if (((gcw[0] & 0x60) >> 5) == 1)
2487 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags); 2470 set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
2488 2471
2489 idetape_get_inquiry_results(drive); 2472 idetape_get_inquiry_results(drive);
2490 idetape_get_mode_sense_results(drive); 2473 idetape_get_mode_sense_results(drive);
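In the ide-tape hunks above the driver-private IDETAPE_FLAG_* bits move into the shared drive->atapi_flags word (IDE_AFLAG_*), the request type byte moves from rq->cmd[0] to rq->cmd[13] so rq->cmd[] can carry the 12-byte packet command, and status/data access goes through hwif->tp_ops. A hedged sketch of the flag pattern, using hypothetical example_* helpers:

#include <linux/bitops.h>
#include <linux/ide.h>

/* was: set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags) */
static void example_set_medium_present(ide_drive_t *drive)
{
	set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags);
}

/* was: test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags) */
static int example_on_filemark(ide_drive_t *drive)
{
	return test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags);
}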
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 1fbdb746dc88..aeddbbd69e86 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -64,6 +64,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
64 ide_hwif_t *hwif = HWIF(drive); 64 ide_hwif_t *hwif = HWIF(drive);
65 struct ide_taskfile *tf = &task->tf; 65 struct ide_taskfile *tf = &task->tf;
66 ide_handler_t *handler = NULL; 66 ide_handler_t *handler = NULL;
67 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
67 const struct ide_dma_ops *dma_ops = hwif->dma_ops; 68 const struct ide_dma_ops *dma_ops = hwif->dma_ops;
68 69
69 if (task->data_phase == TASKFILE_MULTI_IN || 70 if (task->data_phase == TASKFILE_MULTI_IN ||
@@ -80,15 +81,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
80 81
81 if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { 82 if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
82 ide_tf_dump(drive->name, tf); 83 ide_tf_dump(drive->name, tf);
83 ide_set_irq(drive, 1); 84 tp_ops->set_irq(hwif, 1);
84 SELECT_MASK(drive, 0); 85 SELECT_MASK(drive, 0);
85 hwif->tf_load(drive, task); 86 tp_ops->tf_load(drive, task);
86 } 87 }
87 88
88 switch (task->data_phase) { 89 switch (task->data_phase) {
89 case TASKFILE_MULTI_OUT: 90 case TASKFILE_MULTI_OUT:
90 case TASKFILE_OUT: 91 case TASKFILE_OUT:
91 hwif->OUTBSYNC(hwif, tf->command, hwif->io_ports.command_addr); 92 tp_ops->exec_command(hwif, tf->command);
92 ndelay(400); /* FIXME */ 93 ndelay(400); /* FIXME */
93 return pre_task_out_intr(drive, task->rq); 94 return pre_task_out_intr(drive, task->rq);
94 case TASKFILE_MULTI_IN: 95 case TASKFILE_MULTI_IN:
@@ -124,7 +125,8 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile);
124 */ 125 */
125static ide_startstop_t set_multmode_intr(ide_drive_t *drive) 126static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
126{ 127{
127 u8 stat = ide_read_status(drive); 128 ide_hwif_t *hwif = drive->hwif;
129 u8 stat = hwif->tp_ops->read_status(hwif);
128 130
129 if (OK_STAT(stat, READY_STAT, BAD_STAT)) 131 if (OK_STAT(stat, READY_STAT, BAD_STAT))
130 drive->mult_count = drive->mult_req; 132 drive->mult_count = drive->mult_req;
@@ -141,11 +143,16 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
141 */ 143 */
142static ide_startstop_t set_geometry_intr(ide_drive_t *drive) 144static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
143{ 145{
146 ide_hwif_t *hwif = drive->hwif;
144 int retries = 5; 147 int retries = 5;
145 u8 stat; 148 u8 stat;
146 149
147 while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--) 150 while (1) {
151 stat = hwif->tp_ops->read_status(hwif);
152 if ((stat & BUSY_STAT) == 0 || retries-- == 0)
153 break;
148 udelay(10); 154 udelay(10);
155 };
149 156
150 if (OK_STAT(stat, READY_STAT, BAD_STAT)) 157 if (OK_STAT(stat, READY_STAT, BAD_STAT))
151 return ide_stopped; 158 return ide_stopped;
@@ -162,7 +169,8 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
162 */ 169 */
163static ide_startstop_t recal_intr(ide_drive_t *drive) 170static ide_startstop_t recal_intr(ide_drive_t *drive)
164{ 171{
165 u8 stat = ide_read_status(drive); 172 ide_hwif_t *hwif = drive->hwif;
173 u8 stat = hwif->tp_ops->read_status(hwif);
166 174
167 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 175 if (!OK_STAT(stat, READY_STAT, BAD_STAT))
168 return ide_error(drive, "recal_intr", stat); 176 return ide_error(drive, "recal_intr", stat);
@@ -174,11 +182,12 @@ static ide_startstop_t recal_intr(ide_drive_t *drive)
174 */ 182 */
175static ide_startstop_t task_no_data_intr(ide_drive_t *drive) 183static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
176{ 184{
177 ide_task_t *args = HWGROUP(drive)->rq->special; 185 ide_hwif_t *hwif = drive->hwif;
186 ide_task_t *args = hwif->hwgroup->rq->special;
178 u8 stat; 187 u8 stat;
179 188
180 local_irq_enable_in_hardirq(); 189 local_irq_enable_in_hardirq();
181 stat = ide_read_status(drive); 190 stat = hwif->tp_ops->read_status(hwif);
182 191
183 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) 192 if (!OK_STAT(stat, READY_STAT, BAD_STAT))
184 return ide_error(drive, "task_no_data_intr", stat); 193 return ide_error(drive, "task_no_data_intr", stat);
@@ -192,6 +201,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
192 201
193static u8 wait_drive_not_busy(ide_drive_t *drive) 202static u8 wait_drive_not_busy(ide_drive_t *drive)
194{ 203{
204 ide_hwif_t *hwif = drive->hwif;
195 int retries; 205 int retries;
196 u8 stat; 206 u8 stat;
197 207
@@ -200,7 +210,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
200 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms. 210 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
201 */ 211 */
202 for (retries = 0; retries < 1000; retries++) { 212 for (retries = 0; retries < 1000; retries++) {
203 stat = ide_read_status(drive); 213 stat = hwif->tp_ops->read_status(hwif);
204 214
205 if (stat & BUSY_STAT) 215 if (stat & BUSY_STAT)
206 udelay(10); 216 udelay(10);
@@ -255,9 +265,9 @@ static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
255 265
256 /* do the actual data transfer */ 266 /* do the actual data transfer */
257 if (write) 267 if (write)
258 hwif->output_data(drive, rq, buf, SECTOR_SIZE); 268 hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
259 else 269 else
260 hwif->input_data(drive, rq, buf, SECTOR_SIZE); 270 hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);
261 271
262 kunmap_atomic(buf, KM_BIO_SRC_IRQ); 272 kunmap_atomic(buf, KM_BIO_SRC_IRQ);
263#ifdef CONFIG_HIGHMEM 273#ifdef CONFIG_HIGHMEM
@@ -383,8 +393,8 @@ static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq
383static ide_startstop_t task_in_intr(ide_drive_t *drive) 393static ide_startstop_t task_in_intr(ide_drive_t *drive)
384{ 394{
385 ide_hwif_t *hwif = drive->hwif; 395 ide_hwif_t *hwif = drive->hwif;
386 struct request *rq = HWGROUP(drive)->rq; 396 struct request *rq = hwif->hwgroup->rq;
387 u8 stat = ide_read_status(drive); 397 u8 stat = hwif->tp_ops->read_status(hwif);
388 398
389 /* Error? */ 399 /* Error? */
390 if (stat & ERR_STAT) 400 if (stat & ERR_STAT)
@@ -418,7 +428,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
418{ 428{
419 ide_hwif_t *hwif = drive->hwif; 429 ide_hwif_t *hwif = drive->hwif;
420 struct request *rq = HWGROUP(drive)->rq; 430 struct request *rq = HWGROUP(drive)->rq;
421 u8 stat = ide_read_status(drive); 431 u8 stat = hwif->tp_ops->read_status(hwif);
422 432
423 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) 433 if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
424 return task_error(drive, rq, __func__, stat); 434 return task_error(drive, rq, __func__, stat);
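ide-taskfile.c now reaches the hardware only through the per-port hwif->tp_ops method table (exec_command, read_status, tf_load, input_data/output_data) instead of the ide_read_status()/OUTBSYNC wrappers. A minimal illustration of the accessor style, with hypothetical example_* names:

#include <linux/delay.h>
#include <linux/ide.h>

static u8 example_read_status(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	/* was: ide_read_status(drive) */
	return hwif->tp_ops->read_status(hwif);
}

static void example_exec_command(ide_drive_t *drive, u8 cmd)
{
	ide_hwif_t *hwif = drive->hwif;

	/* was: hwif->OUTBSYNC(hwif, cmd, hwif->io_ports.command_addr) */
	hwif->tp_ops->exec_command(hwif, cmd);
	ndelay(400);	/* settle delay kept from the original code paths */
}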
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index d4a6b102a772..60f0ca66aa93 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) 2 * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
3 * Copyrifht (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz 3 * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
4 */ 4 */
5 5
6/* 6/*
@@ -101,8 +101,7 @@ void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
101 101
102 init_completion(&hwif->gendev_rel_comp); 102 init_completion(&hwif->gendev_rel_comp);
103 103
104 default_hwif_iops(hwif); 104 hwif->tp_ops = &default_tp_ops;
105 default_hwif_transport(hwif);
106 105
107 ide_port_init_devices_data(hwif); 106 ide_port_init_devices_data(hwif);
108} 107}
@@ -134,41 +133,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
134 } 133 }
135} 134}
136 135
137void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
138{
139 ide_hwgroup_t *hwgroup = hwif->hwgroup;
140
141 spin_lock_irq(&ide_lock);
142 /*
143 * Remove us from the hwgroup, and free
144 * the hwgroup if we were the only member
145 */
146 if (hwif->next == hwif) {
147 BUG_ON(hwgroup->hwif != hwif);
148 kfree(hwgroup);
149 } else {
150 /* There is another interface in hwgroup.
151 * Unlink us, and set hwgroup->drive and ->hwif to
152 * something sane.
153 */
154 ide_hwif_t *g = hwgroup->hwif;
155
156 while (g->next != hwif)
157 g = g->next;
158 g->next = hwif->next;
159 if (hwgroup->hwif == hwif) {
160 /* Chose a random hwif for hwgroup->hwif.
161 * It's guaranteed that there are no drives
162 * left in the hwgroup.
163 */
164 BUG_ON(hwgroup->drive != NULL);
165 hwgroup->hwif = g;
166 }
167 BUG_ON(hwgroup->hwif == hwif);
168 }
169 spin_unlock_irq(&ide_lock);
170}
171
172/* Called with ide_lock held. */ 136/* Called with ide_lock held. */
173static void __ide_port_unregister_devices(ide_hwif_t *hwif) 137static void __ide_port_unregister_devices(ide_hwif_t *hwif)
174{ 138{
@@ -269,16 +233,9 @@ void ide_unregister(ide_hwif_t *hwif)
269 if (hwif->dma_base) 233 if (hwif->dma_base)
270 ide_release_dma_engine(hwif); 234 ide_release_dma_engine(hwif);
271 235
272 spin_lock_irq(&ide_lock);
273 /* restore hwif data to pristine status */
274 ide_init_port_data(hwif, hwif->index);
275 spin_unlock_irq(&ide_lock);
276
277 mutex_unlock(&ide_cfg_mtx); 236 mutex_unlock(&ide_cfg_mtx);
278} 237}
279 238
280EXPORT_SYMBOL(ide_unregister);
281
282void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) 239void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
283{ 240{
284 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); 241 memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
@@ -287,8 +244,8 @@ void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
287 hwif->dev = hw->dev; 244 hwif->dev = hw->dev;
288 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev; 245 hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
289 hwif->ack_intr = hw->ack_intr; 246 hwif->ack_intr = hw->ack_intr;
247 hwif->config_data = hw->config;
290} 248}
291EXPORT_SYMBOL_GPL(ide_init_port_hw);
292 249
293/* 250/*
294 * Locks for IDE setting functionality 251 * Locks for IDE setting functionality
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 0497e7f85b09..7c2afa97f417 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -37,6 +37,8 @@
37#define CATWEASEL_NUM_HWIFS 3 37#define CATWEASEL_NUM_HWIFS 3
38#define XSURF_NUM_HWIFS 2 38#define XSURF_NUM_HWIFS 2
39 39
40#define MAX_NUM_HWIFS 3
41
40 /* 42 /*
41 * Bases of the IDE interfaces (relative to the board address) 43 * Bases of the IDE interfaces (relative to the board address)
42 */ 44 */
@@ -148,18 +150,14 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
148 150
149static int __init buddha_init(void) 151static int __init buddha_init(void)
150{ 152{
151 hw_regs_t hw;
152 ide_hwif_t *hwif;
153 int i;
154
155 struct zorro_dev *z = NULL; 153 struct zorro_dev *z = NULL;
156 u_long buddha_board = 0; 154 u_long buddha_board = 0;
157 BuddhaType type; 155 BuddhaType type;
158 int buddha_num_hwifs; 156 int buddha_num_hwifs, i;
159 157
160 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { 158 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
161 unsigned long board; 159 unsigned long board;
162 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 160 hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
163 161
164 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { 162 if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
165 buddha_num_hwifs = BUDDHA_NUM_HWIFS; 163 buddha_num_hwifs = BUDDHA_NUM_HWIFS;
@@ -221,19 +219,13 @@ fail_base2:
221 ack_intr = xsurf_ack_intr; 219 ack_intr = xsurf_ack_intr;
222 } 220 }
223 221
224 buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr); 222 buddha_setup_ports(&hw[i], base, ctl, irq_port,
223 ack_intr);
225 224
226 hwif = ide_find_port(); 225 hws[i] = &hw[i];
227 if (hwif) {
228 u8 index = hwif->index;
229
230 ide_init_port_hw(hwif, &hw);
231
232 idx[i] = index;
233 }
234 } 226 }
235 227
236 ide_device_add(idx, NULL); 228 ide_host_add(NULL, hws, NULL);
237 } 229 }
238 230
239 return 0; 231 return 0;
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 129a812bb57f..724f95073d80 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -66,6 +66,27 @@ static void falconide_output_data(ide_drive_t *drive, struct request *rq,
66 outsw_swapw(data_addr, buf, (len + 1) / 2); 66 outsw_swapw(data_addr, buf, (len + 1) / 2);
67} 67}
68 68
69/* Atari has a byte-swapped IDE interface */
70static const struct ide_tp_ops falconide_tp_ops = {
71 .exec_command = ide_exec_command,
72 .read_status = ide_read_status,
73 .read_altstatus = ide_read_altstatus,
74 .read_sff_dma_status = ide_read_sff_dma_status,
75
76 .set_irq = ide_set_irq,
77
78 .tf_load = ide_tf_load,
79 .tf_read = ide_tf_read,
80
81 .input_data = falconide_input_data,
82 .output_data = falconide_output_data,
83};
84
85static const struct ide_port_info falconide_port_info = {
86 .tp_ops = &falconide_tp_ops,
87 .host_flags = IDE_HFLAG_NO_DMA,
88};
89
69static void __init falconide_setup_ports(hw_regs_t *hw) 90static void __init falconide_setup_ports(hw_regs_t *hw)
70{ 91{
71 int i; 92 int i;
@@ -91,11 +112,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
91 112
92static int __init falconide_init(void) 113static int __init falconide_init(void)
93{ 114{
94 hw_regs_t hw; 115 struct ide_host *host;
95 ide_hwif_t *hwif; 116 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
117 int rc;
96 118
97 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) 119 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
98 return 0; 120 return -ENODEV;
99 121
100 printk(KERN_INFO "ide: Falcon IDE controller\n"); 122 printk(KERN_INFO "ide: Falcon IDE controller\n");
101 123
@@ -106,23 +128,25 @@ static int __init falconide_init(void)
106 128
107 falconide_setup_ports(&hw); 129 falconide_setup_ports(&hw);
108 130
109 hwif = ide_find_port(); 131 host = ide_host_alloc(&falconide_port_info, hws);
110 if (hwif) { 132 if (host == NULL) {
111 u8 index = hwif->index; 133 rc = -ENOMEM;
112 u8 idx[4] = { index, 0xff, 0xff, 0xff }; 134 goto err;
113 135 }
114 ide_init_port_hw(hwif, &hw);
115 136
116 /* Atari has a byte-swapped IDE interface */ 137 ide_get_lock(NULL, NULL);
117 hwif->input_data = falconide_input_data; 138 rc = ide_host_register(host, &falconide_port_info, hws);
118 hwif->output_data = falconide_output_data; 139 ide_release_lock();
119 140
120 ide_get_lock(NULL, NULL); 141 if (rc)
121 ide_device_add(idx, NULL); 142 goto err_free;
122 ide_release_lock();
123 }
124 143
125 return 0; 144 return 0;
145err_free:
146 ide_host_free(host);
147err:
148 release_mem_region(ATA_HD_BASE, 0x40);
149 return rc;
126} 150}
127 151
128module_init(falconide_init); 152module_init(falconide_init);
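falconide (and q40ide further below) show how a byte-swapped bus overrides only the data-transfer hooks: reuse the exported default accessors and hand the table to the core through struct ide_port_info. A stripped-down sketch with hypothetical example_* names; the transfer bodies are placeholders:

#include <linux/ide.h>

static void example_input_data(ide_drive_t *drive, struct request *rq,
			       void *buf, unsigned int len)
{
	/* byte-swapping insw() variant would go here */
}

static void example_output_data(ide_drive_t *drive, struct request *rq,
				void *buf, unsigned int len)
{
	/* byte-swapping outsw() variant would go here */
}

static const struct ide_tp_ops example_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.read_sff_dma_status	= ide_read_sff_dma_status,

	.set_irq		= ide_set_irq,

	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= example_input_data,
	.output_data		= example_output_data,
};

static const struct ide_port_info example_port_info = {
	.tp_ops		= &example_tp_ops,
	.host_flags	= IDE_HFLAG_NO_DMA,
};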
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index 7e74b20202df..dd5c467d8dd0 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -31,6 +31,8 @@
31#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */ 31#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
32#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */ 32#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
33 33
34#define GAYLE_IDEREG_SIZE 0x2000
35
34 /* 36 /*
35 * Offsets from one of the above bases 37 * Offsets from one of the above bases
36 */ 38 */
@@ -56,13 +58,11 @@
56#define GAYLE_NUM_HWIFS 1 58#define GAYLE_NUM_HWIFS 1
57#define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS 59#define GAYLE_NUM_PROBE_HWIFS GAYLE_NUM_HWIFS
58#define GAYLE_HAS_CONTROL_REG 1 60#define GAYLE_HAS_CONTROL_REG 1
59#define GAYLE_IDEREG_SIZE 0x2000
60#else /* CONFIG_BLK_DEV_IDEDOUBLER */ 61#else /* CONFIG_BLK_DEV_IDEDOUBLER */
61#define GAYLE_NUM_HWIFS 2 62#define GAYLE_NUM_HWIFS 2
62#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \ 63#define GAYLE_NUM_PROBE_HWIFS (ide_doubler ? GAYLE_NUM_HWIFS : \
63 GAYLE_NUM_HWIFS-1) 64 GAYLE_NUM_HWIFS-1)
64#define GAYLE_HAS_CONTROL_REG (!ide_doubler) 65#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
65#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
66 66
67static int ide_doubler; 67static int ide_doubler;
68module_param_named(doubler, ide_doubler, bool, 0); 68module_param_named(doubler, ide_doubler, bool, 0);
@@ -124,8 +124,11 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
124 124
125static int __init gayle_init(void) 125static int __init gayle_init(void)
126{ 126{
127 unsigned long phys_base, res_start, res_n;
128 unsigned long base, ctrlport, irqport;
129 ide_ack_intr_t *ack_intr;
127 int a4000, i; 130 int a4000, i;
128 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 131 hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
129 132
130 if (!MACH_IS_AMIGA) 133 if (!MACH_IS_AMIGA)
131 return -ENODEV; 134 return -ENODEV;
@@ -148,13 +151,6 @@ found:
148#endif 151#endif
149 ""); 152 "");
150 153
151 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
152 unsigned long base, ctrlport, irqport;
153 ide_ack_intr_t *ack_intr;
154 hw_regs_t hw;
155 ide_hwif_t *hwif;
156 unsigned long phys_base, res_start, res_n;
157
158 if (a4000) { 154 if (a4000) {
159 phys_base = GAYLE_BASE_4000; 155 phys_base = GAYLE_BASE_4000;
160 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); 156 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
@@ -168,33 +164,22 @@ found:
168 * FIXME: we now have selectable modes between mmio v/s iomio 164 * FIXME: we now have selectable modes between mmio v/s iomio
169 */ 165 */
170 166
171 phys_base += i*GAYLE_NEXT_PORT;
172
173 res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); 167 res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
174 res_n = GAYLE_IDEREG_SIZE; 168 res_n = GAYLE_IDEREG_SIZE;
175 169
176 if (!request_mem_region(res_start, res_n, "IDE")) 170 if (!request_mem_region(res_start, res_n, "IDE"))
177 continue; 171 return -EBUSY;
178 172
179 base = (unsigned long)ZTWO_VADDR(phys_base); 173 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
174 base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
180 ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; 175 ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
181 176
182 gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr); 177 gayle_setup_ports(&hw[i], base, ctrlport, irqport, ack_intr);
183
184 hwif = ide_find_port();
185 if (hwif) {
186 u8 index = hwif->index;
187 178
188 ide_init_port_hw(hwif, &hw); 179 hws[i] = &hw[i];
189
190 idx[i] = index;
191 } else
192 release_mem_region(res_start, res_n);
193 } 180 }
194 181
195 ide_device_add(idx, NULL); 182 return ide_host_add(NULL, hws, NULL);
196
197 return 0;
198} 183}
199 184
200module_init(gayle_init); 185module_init(gayle_init);
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c
index 89c8ff0a4d08..c76d55de6996 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/legacy/ide-4drives.c
@@ -28,10 +28,8 @@ static const struct ide_port_info ide_4drives_port_info = {
28 28
29static int __init ide_4drives_init(void) 29static int __init ide_4drives_init(void)
30{ 30{
31 ide_hwif_t *hwif, *mate;
32 unsigned long base = 0x1f0, ctl = 0x3f6; 31 unsigned long base = 0x1f0, ctl = 0x3f6;
33 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 32 hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };
34 hw_regs_t hw;
35 33
36 if (probe_4drives == 0) 34 if (probe_4drives == 0)
37 return -ENODEV; 35 return -ENODEV;
@@ -55,21 +53,7 @@ static int __init ide_4drives_init(void)
55 hw.irq = 14; 53 hw.irq = 14;
56 hw.chipset = ide_4drives; 54 hw.chipset = ide_4drives;
57 55
58 hwif = ide_find_port(); 56 return ide_host_add(&ide_4drives_port_info, hws, NULL);
59 if (hwif) {
60 ide_init_port_hw(hwif, &hw);
61 idx[0] = hwif->index;
62 }
63
64 mate = ide_find_port();
65 if (mate) {
66 ide_init_port_hw(mate, &hw);
67 idx[1] = mate->index;
68 }
69
70 ide_device_add(idx, &ide_4drives_port_info);
71
72 return 0;
73} 57}
74 58
75module_init(ide_4drives_init); 59module_init(ide_4drives_init);
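ide-4drives demonstrates two ports sharing one register block: both hws[] slots point at the same hw_regs_t. A self-contained sketch of that setup (example_* names and the exact flags are assumptions; the real port_info is defined earlier in the file):

#include <linux/init.h>
#include <linux/string.h>
#include <linux/ide.h>

static const struct ide_port_info example_4drives_info = {
	.host_flags	= IDE_HFLAG_NO_DMA,
};

static int __init example_4drives_init(void)
{
	/* both slots reference the same register block */
	hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);
	hw.irq = 14;
	hw.chipset = ide_4drives;

	return ide_host_add(&example_4drives_info, hws, NULL);
}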
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 27b1e0b7ecb4..21bfac137844 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -74,7 +74,7 @@ INT_MODULE_PARM(pc_debug, 0);
74 74
75typedef struct ide_info_t { 75typedef struct ide_info_t {
76 struct pcmcia_device *p_dev; 76 struct pcmcia_device *p_dev;
77 ide_hwif_t *hwif; 77 struct ide_host *host;
78 int ndev; 78 int ndev;
79 dev_node_t node; 79 dev_node_t node;
80} ide_info_t; 80} ide_info_t;
@@ -132,7 +132,7 @@ static int ide_probe(struct pcmcia_device *link)
132static void ide_detach(struct pcmcia_device *link) 132static void ide_detach(struct pcmcia_device *link)
133{ 133{
134 ide_info_t *info = link->priv; 134 ide_info_t *info = link->priv;
135 ide_hwif_t *hwif = info->hwif; 135 ide_hwif_t *hwif = info->host->ports[0];
136 unsigned long data_addr, ctl_addr; 136 unsigned long data_addr, ctl_addr;
137 137
138 DEBUG(0, "ide_detach(0x%p)\n", link); 138 DEBUG(0, "ide_detach(0x%p)\n", link);
@@ -157,13 +157,13 @@ static const struct ide_port_info idecs_port_info = {
157 .host_flags = IDE_HFLAG_NO_DMA, 157 .host_flags = IDE_HFLAG_NO_DMA,
158}; 158};
159 159
160static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, 160static struct ide_host *idecs_register(unsigned long io, unsigned long ctl,
161 unsigned long irq, struct pcmcia_device *handle) 161 unsigned long irq, struct pcmcia_device *handle)
162{ 162{
163 struct ide_host *host;
163 ide_hwif_t *hwif; 164 ide_hwif_t *hwif;
164 hw_regs_t hw; 165 int i, rc;
165 int i; 166 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
166 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
167 167
168 if (!request_region(io, 8, DRV_NAME)) { 168 if (!request_region(io, 8, DRV_NAME)) {
169 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", 169 printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
@@ -184,30 +184,24 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
184 hw.chipset = ide_pci; 184 hw.chipset = ide_pci;
185 hw.dev = &handle->dev; 185 hw.dev = &handle->dev;
186 186
187 hwif = ide_find_port(); 187 rc = ide_host_add(&idecs_port_info, hws, &host);
188 if (hwif == NULL) 188 if (rc)
189 goto out_release; 189 goto out_release;
190 190
191 i = hwif->index; 191 hwif = host->ports[0];
192
193 ide_init_port_hw(hwif, &hw);
194
195 idx[0] = i;
196
197 ide_device_add(idx, &idecs_port_info);
198 192
199 if (hwif->present) 193 if (hwif->present)
200 return hwif; 194 return host;
201 195
202 /* retry registration in case device is still spinning up */ 196 /* retry registration in case device is still spinning up */
203 for (i = 0; i < 10; i++) { 197 for (i = 0; i < 10; i++) {
204 msleep(100); 198 msleep(100);
205 ide_port_scan(hwif); 199 ide_port_scan(hwif);
206 if (hwif->present) 200 if (hwif->present)
207 return hwif; 201 return host;
208 } 202 }
209 203
210 return hwif; 204 return host;
211 205
212out_release: 206out_release:
213 release_region(ctl, 1); 207 release_region(ctl, 1);
@@ -239,7 +233,7 @@ static int ide_config(struct pcmcia_device *link)
239 cistpl_cftable_entry_t *cfg; 233 cistpl_cftable_entry_t *cfg;
240 int pass, last_ret = 0, last_fn = 0, is_kme = 0; 234 int pass, last_ret = 0, last_fn = 0, is_kme = 0;
241 unsigned long io_base, ctl_base; 235 unsigned long io_base, ctl_base;
242 ide_hwif_t *hwif; 236 struct ide_host *host;
243 237
244 DEBUG(0, "ide_config(0x%p)\n", link); 238 DEBUG(0, "ide_config(0x%p)\n", link);
245 239
@@ -334,21 +328,21 @@ static int ide_config(struct pcmcia_device *link)
334 if (is_kme) 328 if (is_kme)
335 outb(0x81, ctl_base+1); 329 outb(0x81, ctl_base+1);
336 330
337 hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link); 331 host = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
338 if (hwif == NULL && link->io.NumPorts1 == 0x20) { 332 if (host == NULL && link->io.NumPorts1 == 0x20) {
339 outb(0x02, ctl_base + 0x10); 333 outb(0x02, ctl_base + 0x10);
340 hwif = idecs_register(io_base + 0x10, ctl_base + 0x10, 334 host = idecs_register(io_base + 0x10, ctl_base + 0x10,
341 link->irq.AssignedIRQ, link); 335 link->irq.AssignedIRQ, link);
342 } 336 }
343 337
344 if (hwif == NULL) 338 if (host == NULL)
345 goto failed; 339 goto failed;
346 340
347 info->ndev = 1; 341 info->ndev = 1;
348 sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2); 342 sprintf(info->node.dev_name, "hd%c", 'a' + host->ports[0]->index * 2);
349 info->node.major = hwif->major; 343 info->node.major = host->ports[0]->major;
350 info->node.minor = 0; 344 info->node.minor = 0;
351 info->hwif = hwif; 345 info->host = host;
352 link->dev_node = &info->node; 346 link->dev_node = &info->node;
353 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n", 347 printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
354 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10); 348 info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -379,15 +373,15 @@ failed:
379static void ide_release(struct pcmcia_device *link) 373static void ide_release(struct pcmcia_device *link)
380{ 374{
381 ide_info_t *info = link->priv; 375 ide_info_t *info = link->priv;
382 ide_hwif_t *hwif = info->hwif; 376 struct ide_host *host = info->host;
383 377
384 DEBUG(0, "ide_release(0x%p)\n", link); 378 DEBUG(0, "ide_release(0x%p)\n", link);
385 379
386 if (info->ndev) { 380 if (info->ndev)
387 /* FIXME: if this fails we need to queue the cleanup somehow 381 /* FIXME: if this fails we need to queue the cleanup somehow
388 -- need to investigate the required PCMCIA magic */ 382 -- need to investigate the required PCMCIA magic */
389 ide_unregister(hwif); 383 ide_host_remove(host);
390 } 384
391 info->ndev = 0; 385 info->ndev = 0;
392 386
393 pcmcia_disable_device(link); 387 pcmcia_disable_device(link);
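ide-cs keeps its spin-up retry, now driven off host->ports[0]: if the freshly registered port reports no device, the single port is rescanned a few times to give slow cards time to become ready. A hedged sketch of that loop with a hypothetical helper name:

#include <linux/delay.h>
#include <linux/ide.h>

static ide_hwif_t *example_wait_for_media(struct ide_host *host)
{
	ide_hwif_t *hwif = host->ports[0];
	int i;

	/* allow roughly one second for the card to spin up */
	for (i = 0; i < 10 && hwif->present == 0; i++) {
		msleep(100);
		ide_port_scan(hwif);	/* re-probe just this port */
	}

	return hwif;
}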
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index a249562b34b5..051b4ab0f359 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -52,12 +52,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
52{ 52{
53 struct resource *res_base, *res_alt, *res_irq; 53 struct resource *res_base, *res_alt, *res_irq;
54 void __iomem *base, *alt_base; 54 void __iomem *base, *alt_base;
55 ide_hwif_t *hwif;
56 struct pata_platform_info *pdata; 55 struct pata_platform_info *pdata;
57 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 56 struct ide_host *host;
58 int ret = 0; 57 int ret = 0, mmio = 0;
59 int mmio = 0; 58 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
60 hw_regs_t hw;
61 struct ide_port_info d = platform_ide_port_info; 59 struct ide_port_info d = platform_ide_port_info;
62 60
63 pdata = pdev->dev.platform_data; 61 pdata = pdev->dev.platform_data;
@@ -94,28 +92,18 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
94 res_alt->start, res_alt->end - res_alt->start + 1); 92 res_alt->start, res_alt->end - res_alt->start + 1);
95 } 93 }
96 94
97 hwif = ide_find_port();
98 if (!hwif) {
99 ret = -ENODEV;
100 goto out;
101 }
102
103 memset(&hw, 0, sizeof(hw)); 95 memset(&hw, 0, sizeof(hw));
104 plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); 96 plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
105 hw.dev = &pdev->dev; 97 hw.dev = &pdev->dev;
106 98
107 ide_init_port_hw(hwif, &hw); 99 if (mmio)
108
109 if (mmio) {
110 d.host_flags |= IDE_HFLAG_MMIO; 100 d.host_flags |= IDE_HFLAG_MMIO;
111 default_hwif_mmiops(hwif);
112 }
113 101
114 idx[0] = hwif->index; 102 ret = ide_host_add(&d, hws, &host);
115 103 if (ret)
116 ide_device_add(idx, &d); 104 goto out;
117 105
118 platform_set_drvdata(pdev, hwif); 106 platform_set_drvdata(pdev, host);
119 107
120 return 0; 108 return 0;
121 109
@@ -125,9 +113,9 @@ out:
125 113
126static int __devexit plat_ide_remove(struct platform_device *pdev) 114static int __devexit plat_ide_remove(struct platform_device *pdev)
127{ 115{
128 ide_hwif_t *hwif = pdev->dev.driver_data; 116 struct ide_host *host = pdev->dev.driver_data;
129 117
130 ide_unregister(hwif); 118 ide_host_remove(host);
131 119
132 return 0; 120 return 0;
133} 121}
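
Note: the ide_platform conversion above is the canonical shape of the new interface: build a NULL-terminated hws[] array, call ide_host_add() once, store the returned struct ide_host as driver data, and tear down with ide_host_remove(). A condensed sketch under assumed names (my_ide_probe/my_ide_remove, my_port_info); platform_get_drvdata() is used for brevity where the patch reads pdev->dev.driver_data directly:

#include <linux/ide.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static const struct ide_port_info my_port_info = {
	.host_flags	= IDE_HFLAG_MMIO,
};

static int my_ide_probe(struct platform_device *pdev)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
	struct ide_host *host;
	int ret;

	memset(&hw, 0, sizeof(hw));
	/* fill hw.io_ports and hw.irq here, as plat_ide_setup_ports() does */
	hw.dev = &pdev->dev;

	ret = ide_host_add(&my_port_info, hws, &host);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, host);
	return 0;
}

static int my_ide_remove(struct platform_device *pdev)
{
	struct ide_host *host = platform_get_drvdata(pdev);

	ide_host_remove(host);
	return 0;
}
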
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 0a6195bcfeda..a0bb167980e7 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -91,11 +91,10 @@ static const char *mac_ide_name[] =
91 91
92static int __init macide_init(void) 92static int __init macide_init(void)
93{ 93{
94 ide_hwif_t *hwif;
95 ide_ack_intr_t *ack_intr; 94 ide_ack_intr_t *ack_intr;
96 unsigned long base; 95 unsigned long base;
97 int irq; 96 int irq;
98 hw_regs_t hw; 97 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
99 98
100 if (!MACH_IS_MAC) 99 if (!MACH_IS_MAC)
101 return -ENODEV; 100 return -ENODEV;
@@ -125,17 +124,7 @@ static int __init macide_init(void)
125 124
126 macide_setup_ports(&hw, base, irq, ack_intr); 125 macide_setup_ports(&hw, base, irq, ack_intr);
127 126
128 hwif = ide_find_port(); 127 return ide_host_add(NULL, hws, NULL);
129 if (hwif) {
130 u8 index = hwif->index;
131 u8 idx[4] = { index, 0xff, 0xff, 0xff };
132
133 ide_init_port_hw(hwif, &hw);
134
135 ide_device_add(idx, NULL);
136 }
137
138 return 0;
139} 128}
140 129
141module_init(macide_init); 130module_init(macide_init);
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 9c2b9d078f69..4abd8fc78197 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -96,6 +96,27 @@ static void q40ide_output_data(ide_drive_t *drive, struct request *rq,
96 outsw_swapw(data_addr, buf, (len + 1) / 2); 96 outsw_swapw(data_addr, buf, (len + 1) / 2);
97} 97}
98 98
99/* Q40 has a byte-swapped IDE interface */
100static const struct ide_tp_ops q40ide_tp_ops = {
101 .exec_command = ide_exec_command,
102 .read_status = ide_read_status,
103 .read_altstatus = ide_read_altstatus,
104 .read_sff_dma_status = ide_read_sff_dma_status,
105
106 .set_irq = ide_set_irq,
107
108 .tf_load = ide_tf_load,
109 .tf_read = ide_tf_read,
110
111 .input_data = q40ide_input_data,
112 .output_data = q40ide_output_data,
113};
114
115static const struct ide_port_info q40ide_port_info = {
116 .tp_ops = &q40ide_tp_ops,
117 .host_flags = IDE_HFLAG_NO_DMA,
118};
119
99/* 120/*
100 * the static array is needed to have the name reported in /proc/ioports, 121 * the static array is needed to have the name reported in /proc/ioports,
101 * hwif->name unfortunately isn't available yet 122 * hwif->name unfortunately isn't available yet
@@ -111,9 +132,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
111static int __init q40ide_init(void) 132static int __init q40ide_init(void)
112{ 133{
113 int i; 134 int i;
114 ide_hwif_t *hwif; 135 hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL };
115 const char *name;
116 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
117 136
118 if (!MACH_IS_Q40) 137 if (!MACH_IS_Q40)
119 return -ENODEV; 138 return -ENODEV;
@@ -121,9 +140,8 @@ static int __init q40ide_init(void)
121 printk(KERN_INFO "ide: Q40 IDE controller\n"); 140 printk(KERN_INFO "ide: Q40 IDE controller\n");
122 141
123 for (i = 0; i < Q40IDE_NUM_HWIFS; i++) { 142 for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
124 hw_regs_t hw; 143 const char *name = q40_ide_names[i];
125 144
126 name = q40_ide_names[i];
127 if (!request_region(pcide_bases[i], 8, name)) { 145 if (!request_region(pcide_bases[i], 8, name)) {
128 printk("could not reserve ports %lx-%lx for %s\n", 146 printk("could not reserve ports %lx-%lx for %s\n",
129 pcide_bases[i],pcide_bases[i]+8,name); 147 pcide_bases[i],pcide_bases[i]+8,name);
@@ -135,26 +153,13 @@ static int __init q40ide_init(void)
135 release_region(pcide_bases[i], 8); 153 release_region(pcide_bases[i], 8);
136 continue; 154 continue;
137 } 155 }
138 q40_ide_setup_ports(&hw, pcide_bases[i], 156 q40_ide_setup_ports(&hw[i], pcide_bases[i], NULL,
139 NULL,
140// m68kide_iops,
141 q40ide_default_irq(pcide_bases[i])); 157 q40ide_default_irq(pcide_bases[i]));
142 158
143 hwif = ide_find_port(); 159 hws[i] = &hw[i];
144 if (hwif) {
145 ide_init_port_hw(hwif, &hw);
146
147 /* Q40 has a byte-swapped IDE interface */
148 hwif->input_data = q40ide_input_data;
149 hwif->output_data = q40ide_output_data;
150
151 idx[i] = hwif->index;
152 }
153 } 160 }
154 161
155 ide_device_add(idx, NULL); 162 return ide_host_add(&q40ide_port_info, hws, NULL);
156
157 return 0;
158} 163}
159 164
160module_init(q40ide_init); 165module_init(q40ide_init);
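
Note: instead of patching hwif->input_data/output_data after ide_find_port(), the byte-swapped helpers are now published once through a const ide_tp_ops table referenced from the port_info. A sketch with placeholder transfer routines (my_input_data/my_output_data stand in for the driver's swapping helpers; everything else keeps the default ide_* hooks exactly as the hunk does):

#include <linux/ide.h>

static void my_input_data(ide_drive_t *drive, struct request *rq,
			  void *buf, unsigned int len)
{
	/* driver-specific (e.g. byte-swapped) PIO input */
}

static void my_output_data(ide_drive_t *drive, struct request *rq,
			   void *buf, unsigned int len)
{
	/* driver-specific (e.g. byte-swapped) PIO output */
}

static const struct ide_tp_ops my_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.read_sff_dma_status	= ide_read_sff_dma_status,

	.set_irq		= ide_set_irq,

	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= my_input_data,
	.output_data		= my_output_data,
};

static const struct ide_port_info my_port_info = {
	.tp_ops		= &my_tp_ops,
	.host_flags	= IDE_HFLAG_NO_DMA,
};
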
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 48d57cae63c6..11b7f61aae40 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -519,6 +519,23 @@ static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
519 *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT); 519 *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
520} 520}
521 521
522#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
523static const struct ide_tp_ops au1xxx_tp_ops = {
524 .exec_command = ide_exec_command,
525 .read_status = ide_read_status,
526 .read_altstatus = ide_read_altstatus,
527 .read_sff_dma_status = ide_read_sff_dma_status,
528
529 .set_irq = ide_set_irq,
530
531 .tf_load = ide_tf_load,
532 .tf_read = ide_tf_read,
533
534 .input_data = au1xxx_input_data,
535 .output_data = au1xxx_output_data,
536};
537#endif
538
522static const struct ide_port_ops au1xxx_port_ops = { 539static const struct ide_port_ops au1xxx_port_ops = {
523 .set_pio_mode = au1xxx_set_pio_mode, 540 .set_pio_mode = au1xxx_set_pio_mode,
524 .set_dma_mode = auide_set_dma_mode, 541 .set_dma_mode = auide_set_dma_mode,
@@ -526,6 +543,9 @@ static const struct ide_port_ops au1xxx_port_ops = {
526 543
527static const struct ide_port_info au1xxx_port_info = { 544static const struct ide_port_info au1xxx_port_info = {
528 .init_dma = auide_ddma_init, 545 .init_dma = auide_ddma_init,
546#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
547 .tp_ops = &au1xxx_tp_ops,
548#endif
529 .port_ops = &au1xxx_port_ops, 549 .port_ops = &au1xxx_port_ops,
530#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 550#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
531 .dma_ops = &au1xxx_dma_ops, 551 .dma_ops = &au1xxx_dma_ops,
@@ -543,11 +563,10 @@ static int au_ide_probe(struct device *dev)
543{ 563{
544 struct platform_device *pdev = to_platform_device(dev); 564 struct platform_device *pdev = to_platform_device(dev);
545 _auide_hwif *ahwif = &auide_hwif; 565 _auide_hwif *ahwif = &auide_hwif;
546 ide_hwif_t *hwif;
547 struct resource *res; 566 struct resource *res;
567 struct ide_host *host;
548 int ret = 0; 568 int ret = 0;
549 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 569 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
550 hw_regs_t hw;
551 570
552#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) 571#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
553 char *mode = "MWDMA2"; 572 char *mode = "MWDMA2";
@@ -584,36 +603,19 @@ static int au_ide_probe(struct device *dev)
584 goto out; 603 goto out;
585 } 604 }
586 605
587 hwif = ide_find_port();
588 if (hwif == NULL) {
589 ret = -ENOENT;
590 goto out;
591 }
592
593 memset(&hw, 0, sizeof(hw)); 606 memset(&hw, 0, sizeof(hw));
594 auide_setup_ports(&hw, ahwif); 607 auide_setup_ports(&hw, ahwif);
595 hw.irq = ahwif->irq; 608 hw.irq = ahwif->irq;
596 hw.dev = dev; 609 hw.dev = dev;
597 hw.chipset = ide_au1xxx; 610 hw.chipset = ide_au1xxx;
598 611
599 ide_init_port_hw(hwif, &hw); 612 ret = ide_host_add(&au1xxx_port_info, hws, &host);
600 613 if (ret)
601 /* If the user has selected DDMA assisted copies, 614 goto out;
602 then set up a few local I/O function entry points
603 */
604
605#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
606 hwif->input_data = au1xxx_input_data;
607 hwif->output_data = au1xxx_output_data;
608#endif
609
610 auide_hwif.hwif = hwif;
611
612 idx[0] = hwif->index;
613 615
614 ide_device_add(idx, &au1xxx_port_info); 616 auide_hwif.hwif = host->ports[0];
615 617
616 dev_set_drvdata(dev, hwif); 618 dev_set_drvdata(dev, host);
617 619
618 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); 620 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
619 621
@@ -625,10 +627,10 @@ static int au_ide_remove(struct device *dev)
625{ 627{
626 struct platform_device *pdev = to_platform_device(dev); 628 struct platform_device *pdev = to_platform_device(dev);
627 struct resource *res; 629 struct resource *res;
628 ide_hwif_t *hwif = dev_get_drvdata(dev); 630 struct ide_host *host = dev_get_drvdata(dev);
629 _auide_hwif *ahwif = &auide_hwif; 631 _auide_hwif *ahwif = &auide_hwif;
630 632
631 ide_unregister(hwif); 633 ide_host_remove(host);
632 634
633 iounmap((void *)ahwif->regbase); 635 iounmap((void *)ahwif->regbase);
634 636
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 9f1212cc4aed..badf79fc9e3a 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -72,12 +72,11 @@ static const struct ide_port_info swarm_port_info = {
72 */ 72 */
73static int __devinit swarm_ide_probe(struct device *dev) 73static int __devinit swarm_ide_probe(struct device *dev)
74{ 74{
75 ide_hwif_t *hwif;
76 u8 __iomem *base; 75 u8 __iomem *base;
76 struct ide_host *host;
77 phys_t offset, size; 77 phys_t offset, size;
78 hw_regs_t hw; 78 int i, rc;
79 int i; 79 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
80 u8 idx[] = { 0xff, 0xff, 0xff, 0xff };
81 80
82 if (!SIBYTE_HAVE_IDE) 81 if (!SIBYTE_HAVE_IDE)
83 return -ENODEV; 82 return -ENODEV;
@@ -116,26 +115,17 @@ static int __devinit swarm_ide_probe(struct device *dev)
116 hw.irq = K_INT_GB_IDE; 115 hw.irq = K_INT_GB_IDE;
117 hw.chipset = ide_generic; 116 hw.chipset = ide_generic;
118 117
119 hwif = ide_find_port_slot(&swarm_port_info); 118 rc = ide_host_add(&swarm_port_info, hws, &host);
120 if (hwif == NULL) 119 if (rc)
121 goto err; 120 goto err;
122 121
123 ide_init_port_hw(hwif, &hw); 122 dev_set_drvdata(dev, host);
124
125 /* Setup MMIO ops. */
126 default_hwif_mmiops(hwif);
127
128 idx[0] = hwif->index;
129
130 ide_device_add(idx, &swarm_port_info);
131
132 dev_set_drvdata(dev, hwif);
133 123
134 return 0; 124 return 0;
135err: 125err:
136 release_resource(&swarm_ide_resource); 126 release_resource(&swarm_ide_resource);
137 iounmap(base); 127 iounmap(base);
138 return -ENOMEM; 128 return rc;
139} 129}
140 130
141static struct device_driver swarm_ide_driver = { 131static struct device_driver swarm_ide_driver = {
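
Note: the explicit default_hwif_mmiops() call disappears here as well; MMIO accessors are now requested declaratively through IDE_HFLAG_MMIO in the port_info (shown explicitly in the ide_platform hunk earlier; for swarm the flag is assumed to already sit in swarm_port_info, which this hunk does not show). Minimal sketch:

#include <linux/ide.h>

static const struct ide_port_info my_mmio_port_info = {
	/* replaces a default_hwif_mmiops(hwif) call at probe time */
	.host_flags	= IDE_HFLAG_MMIO,
};
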
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index ae7a4329a581..fbc43e121e6b 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -195,7 +195,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
195 .host_flags = IDE_HFLAG_SERIALIZE | 195 .host_flags = IDE_HFLAG_SERIALIZE |
196 IDE_HFLAG_NO_ATAPI_DMA | 196 IDE_HFLAG_NO_ATAPI_DMA |
197 IDE_HFLAG_NO_DSC | 197 IDE_HFLAG_NO_DSC |
198 IDE_HFLAG_ABUSE_SET_DMA_MODE |
199 IDE_HFLAG_OFF_BOARD, 198 IDE_HFLAG_OFF_BOARD,
200 .pio_mask = ATA_PIO4, 199 .pio_mask = ATA_PIO4,
201 .mwdma_mask = ATA_MWDMA2, 200 .mwdma_mask = ATA_MWDMA2,
@@ -205,7 +204,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
205 .init_chipset = init_chipset_aec62xx, 204 .init_chipset = init_chipset_aec62xx,
206 .port_ops = &atp86x_port_ops, 205 .port_ops = &atp86x_port_ops,
207 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA | 206 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
208 IDE_HFLAG_ABUSE_SET_DMA_MODE |
209 IDE_HFLAG_OFF_BOARD, 207 IDE_HFLAG_OFF_BOARD,
210 .pio_mask = ATA_PIO4, 208 .pio_mask = ATA_PIO4,
211 .mwdma_mask = ATA_MWDMA2, 209 .mwdma_mask = ATA_MWDMA2,
@@ -216,7 +214,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
216 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 214 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
217 .port_ops = &atp86x_port_ops, 215 .port_ops = &atp86x_port_ops,
218 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 216 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
219 IDE_HFLAG_ABUSE_SET_DMA_MODE |
220 IDE_HFLAG_NON_BOOTABLE, 217 IDE_HFLAG_NON_BOOTABLE,
221 .pio_mask = ATA_PIO4, 218 .pio_mask = ATA_PIO4,
222 .mwdma_mask = ATA_MWDMA2, 219 .mwdma_mask = ATA_MWDMA2,
@@ -226,7 +223,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
226 .init_chipset = init_chipset_aec62xx, 223 .init_chipset = init_chipset_aec62xx,
227 .port_ops = &atp86x_port_ops, 224 .port_ops = &atp86x_port_ops,
228 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 225 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
229 IDE_HFLAG_ABUSE_SET_DMA_MODE |
230 IDE_HFLAG_OFF_BOARD, 226 IDE_HFLAG_OFF_BOARD,
231 .pio_mask = ATA_PIO4, 227 .pio_mask = ATA_PIO4,
232 .mwdma_mask = ATA_MWDMA2, 228 .mwdma_mask = ATA_MWDMA2,
@@ -237,7 +233,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
237 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 233 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
238 .port_ops = &atp86x_port_ops, 234 .port_ops = &atp86x_port_ops,
239 .host_flags = IDE_HFLAG_NO_ATAPI_DMA | 235 .host_flags = IDE_HFLAG_NO_ATAPI_DMA |
240 IDE_HFLAG_ABUSE_SET_DMA_MODE |
241 IDE_HFLAG_OFF_BOARD, 236 IDE_HFLAG_OFF_BOARD,
242 .pio_mask = ATA_PIO4, 237 .pio_mask = ATA_PIO4,
243 .mwdma_mask = ATA_MWDMA2, 238 .mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 80d19c0eb780..5ef7817ac64f 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -471,7 +471,15 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
471 struct pci_dev *dev = to_pci_dev(hwif->dev); 471 struct pci_dev *dev = to_pci_dev(hwif->dev);
472 unsigned long base = ide_pci_dma_base(hwif, d); 472 unsigned long base = ide_pci_dma_base(hwif, d);
473 473
474 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 474 if (base == 0)
475 return -1;
476
477 hwif->dma_base = base;
478
479 if (ide_pci_check_simplex(hwif, d) < 0)
480 return -1;
481
482 if (ide_pci_set_master(dev, d->name) < 0)
475 return -1; 483 return -1;
476 484
477 if (!hwif->channel) 485 if (!hwif->channel)
@@ -483,7 +491,7 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
483 if (ide_allocate_dma_engine(hwif)) 491 if (ide_allocate_dma_engine(hwif))
484 return -1; 492 return -1;
485 493
486 ide_setup_dma(hwif, base); 494 hwif->dma_ops = &sff_dma_ops;
487 495
488 return 0; 496 return 0;
489} 497}
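
Note: the init_dma hunk above spells out the new per-step sequence: fetch the BMDMA base, store it in hwif->dma_base, run the simplex check, enable bus mastering, allocate the DMA engine, and finally point hwif->dma_ops at sff_dma_ops instead of calling ide_setup_dma(). A condensed sketch with a hypothetical driver name (my_init_dma); the helper calls are the ones visible in the hunk:

#include <linux/ide.h>
#include <linux/pci.h>

static int my_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long base = ide_pci_dma_base(hwif, d);

	if (base == 0)
		return -1;

	hwif->dma_base = base;

	if (ide_pci_check_simplex(hwif, d) < 0)
		return -1;

	if (ide_pci_set_master(dev, d->name) < 0)
		return -1;

	/* chipset-specific setup (e.g. the ALi channel tweaks) goes here */

	if (ide_allocate_dma_engine(hwif))
		return -1;

	hwif->dma_ops = &sff_dma_ops;	/* replaces ide_setup_dma(hwif, base) */

	return 0;
}
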
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 0bfcdd0e77b3..ef7d971031ee 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -218,7 +218,6 @@ static const struct ide_port_ops amd_port_ops = {
218 218
219#define IDE_HFLAGS_AMD \ 219#define IDE_HFLAGS_AMD \
220 (IDE_HFLAG_PIO_NO_BLACKLIST | \ 220 (IDE_HFLAG_PIO_NO_BLACKLIST | \
221 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
222 IDE_HFLAG_POST_SET_MODE | \ 221 IDE_HFLAG_POST_SET_MODE | \
223 IDE_HFLAG_IO_32BIT | \ 222 IDE_HFLAG_IO_32BIT | \
224 IDE_HFLAG_UNMASK_IRQS) 223 IDE_HFLAG_UNMASK_IRQS)
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index 1ad1e23e3105..e6c62006ca1a 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -181,11 +181,6 @@ static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */
181static DEFINE_SPINLOCK(cmd640_lock); 181static DEFINE_SPINLOCK(cmd640_lock);
182 182
183/* 183/*
184 * These are initialized to point at the devices we control
185 */
186static ide_hwif_t *cmd_hwif0, *cmd_hwif1;
187
188/*
189 * Interface to access cmd640x registers 184 * Interface to access cmd640x registers
190 */ 185 */
191static unsigned int cmd640_key; 186static unsigned int cmd640_key;
@@ -717,8 +712,7 @@ static int __init cmd640x_init(void)
717 int second_port_cmd640 = 0, rc; 712 int second_port_cmd640 = 0, rc;
718 const char *bus_type, *port2; 713 const char *bus_type, *port2;
719 u8 b, cfr; 714 u8 b, cfr;
720 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 715 hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
721 hw_regs_t hw[2];
722 716
723 if (cmd640_vlb && probe_for_cmd640_vlb()) { 717 if (cmd640_vlb && probe_for_cmd640_vlb()) {
724 bus_type = "VLB"; 718 bus_type = "VLB";
@@ -781,15 +775,10 @@ static int __init cmd640x_init(void)
781 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" 775 printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x"
782 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); 776 "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr);
783 777
784 cmd_hwif0 = ide_find_port();
785
786 /* 778 /*
787 * Initialize data for primary port 779 * Initialize data for primary port
788 */ 780 */
789 if (cmd_hwif0) { 781 hws[0] = &hw[0];
790 ide_init_port_hw(cmd_hwif0, &hw[0]);
791 idx[0] = cmd_hwif0->index;
792 }
793 782
794 /* 783 /*
795 * Ensure compatibility by always using the slowest timings 784 * Ensure compatibility by always using the slowest timings
@@ -829,13 +818,9 @@ static int __init cmd640x_init(void)
829 /* 818 /*
830 * Initialize data for secondary cmd640 port, if enabled 819 * Initialize data for secondary cmd640 port, if enabled
831 */ 820 */
832 if (second_port_cmd640) { 821 if (second_port_cmd640)
833 cmd_hwif1 = ide_find_port(); 822 hws[1] = &hw[1];
834 if (cmd_hwif1) { 823
835 ide_init_port_hw(cmd_hwif1, &hw[1]);
836 idx[1] = cmd_hwif1->index;
837 }
838 }
839 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", 824 printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
840 second_port_cmd640 ? "" : "not ", port2); 825 second_port_cmd640 ? "" : "not ", port2);
841 826
@@ -843,9 +828,7 @@ static int __init cmd640x_init(void)
843 cmd640_dump_regs(); 828 cmd640_dump_regs();
844#endif 829#endif
845 830
846 ide_device_add(idx, &cmd640_port_info); 831 return ide_host_add(&cmd640_port_info, hws, NULL);
847
848 return 1;
849} 832}
850 833
851module_param_named(probe_vlb, cmd640_vlb, bool, 0); 834module_param_named(probe_vlb, cmd640_vlb, bool, 0);
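
Note: cmd640x_init() no longer grabs hwifs one by one; it fills hws[0] (and hws[1] when the second port is enabled) and hands the array to a single ide_host_add(), whose 0-on-success return value also replaces the old "return 1". Sketch with a placeholder flag (second_enabled):

#include <linux/ide.h>

static int my_two_port_init(hw_regs_t *hw0, hw_regs_t *hw1,
			    int second_enabled,
			    const struct ide_port_info *d)
{
	hw_regs_t *hws[] = { NULL, NULL, NULL, NULL };

	hws[0] = hw0;
	if (second_enabled)
		hws[1] = hw1;

	/* unused slots stay NULL; returns 0 on success */
	return ide_host_add(d, hws, NULL);
}
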
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index cfa784bacf48..ce58bfcdb3c6 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -262,7 +262,7 @@ static int cmd648_dma_test_irq(ide_drive_t *drive)
262 unsigned long base = hwif->dma_base - (hwif->channel * 8); 262 unsigned long base = hwif->dma_base - (hwif->channel * 8);
263 u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 : 263 u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 :
264 MRDMODE_INTR_CH0; 264 MRDMODE_INTR_CH0;
265 u8 dma_stat = inb(hwif->dma_status); 265 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
266 u8 mrdmode = inb(base + 1); 266 u8 mrdmode = inb(base + 1);
267 267
268#ifdef DEBUG 268#ifdef DEBUG
@@ -286,7 +286,7 @@ static int cmd64x_dma_test_irq(ide_drive_t *drive)
286 int irq_reg = hwif->channel ? ARTTIM23 : CFR; 286 int irq_reg = hwif->channel ? ARTTIM23 : CFR;
287 u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 : 287 u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 :
288 CFR_INTR_CH0; 288 CFR_INTR_CH0;
289 u8 dma_stat = inb(hwif->dma_status); 289 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
290 u8 irq_stat = 0; 290 u8 irq_stat = 0;
291 291
292 (void) pci_read_config_byte(dev, irq_reg, &irq_stat); 292 (void) pci_read_config_byte(dev, irq_reg, &irq_stat);
@@ -317,13 +317,13 @@ static int cmd646_1_dma_end(ide_drive_t *drive)
317 317
318 drive->waiting_for_dma = 0; 318 drive->waiting_for_dma = 0;
319 /* get DMA status */ 319 /* get DMA status */
320 dma_stat = inb(hwif->dma_status); 320 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
321 /* read DMA command state */ 321 /* read DMA command state */
322 dma_cmd = inb(hwif->dma_command); 322 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
323 /* stop DMA */ 323 /* stop DMA */
324 outb(dma_cmd & ~1, hwif->dma_command); 324 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
325 /* clear the INTR & ERROR bits */ 325 /* clear the INTR & ERROR bits */
326 outb(dma_stat | 6, hwif->dma_status); 326 outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
327 /* and free any DMA resources */ 327 /* and free any DMA resources */
328 ide_destroy_dmatable(drive); 328 ide_destroy_dmatable(drive);
329 /* verify good DMA status */ 329 /* verify good DMA status */
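
Note: with the cached hwif->dma_command/dma_status fields gone, the bus-master registers are addressed as fixed offsets from hwif->dma_base (ATA_DMA_CMD, ATA_DMA_STATUS), as every hunk in this file shows. A minimal sketch of the stop-and-clear step from cmd646_1_dma_end() in that style (my_stop_dma is an illustrative name):

#include <linux/ide.h>
#include <linux/io.h>

static void my_stop_dma(ide_hwif_t *hwif)
{
	u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
	u8 dma_cmd  = inb(hwif->dma_base + ATA_DMA_CMD);

	/* stop DMA */
	outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
	/* clear the INTR & ERROR bits (they are write-one-to-clear) */
	outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
}
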
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 992b1cf8db69..b03d8ae947e6 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -62,8 +62,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
62 struct pci_dev *pdev = to_pci_dev(hwif->dev); 62 struct pci_dev *pdev = to_pci_dev(hwif->dev);
63 int controller = drive->dn > 1 ? 1 : 0; 63 int controller = drive->dn > 1 ? 1 : 0;
64 64
65 /* FIXME: if DMA = 1 do we need to set the DMA bit here ? */
66
67 /* 8bit CAT/CRT - 8bit command timing for channel */ 65 /* 8bit CAT/CRT - 8bit command timing for channel */
68 pci_write_config_byte(pdev, 0x62 + controller, 66 pci_write_config_byte(pdev, 0x62 + controller,
69 (cs5520_pio_clocks[pio].recovery << 4) | 67 (cs5520_pio_clocks[pio].recovery << 4) |
@@ -89,46 +87,17 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
89 cs5520_set_pio_mode(drive, 0); 87 cs5520_set_pio_mode(drive, 0);
90} 88}
91 89
92/*
93 * We wrap the DMA activate to set the vdma flag. This is needed
94 * so that the IDE DMA layer issues PIO not DMA commands over the
95 * DMA channel
96 *
97 * ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA
98 */
99
100static void cs5520_dma_host_set(ide_drive_t *drive, int on)
101{
102 drive->vdma = on;
103 ide_dma_host_set(drive, on);
104}
105
106static const struct ide_port_ops cs5520_port_ops = { 90static const struct ide_port_ops cs5520_port_ops = {
107 .set_pio_mode = cs5520_set_pio_mode, 91 .set_pio_mode = cs5520_set_pio_mode,
108 .set_dma_mode = cs5520_set_dma_mode, 92 .set_dma_mode = cs5520_set_dma_mode,
109}; 93};
110 94
111static const struct ide_dma_ops cs5520_dma_ops = {
112 .dma_host_set = cs5520_dma_host_set,
113 .dma_setup = ide_dma_setup,
114 .dma_exec_cmd = ide_dma_exec_cmd,
115 .dma_start = ide_dma_start,
116 .dma_end = __ide_dma_end,
117 .dma_test_irq = ide_dma_test_irq,
118 .dma_lost_irq = ide_dma_lost_irq,
119 .dma_timeout = ide_dma_timeout,
120};
121
122/* FIXME: VDMA is disabled because it caused system hangs */
123#define DECLARE_CS_DEV(name_str) \ 95#define DECLARE_CS_DEV(name_str) \
124 { \ 96 { \
125 .name = name_str, \ 97 .name = name_str, \
126 .port_ops = &cs5520_port_ops, \ 98 .port_ops = &cs5520_port_ops, \
127 .dma_ops = &cs5520_dma_ops, \
128 .host_flags = IDE_HFLAG_ISA_PORTS | \ 99 .host_flags = IDE_HFLAG_ISA_PORTS | \
129 IDE_HFLAG_CS5520 | \ 100 IDE_HFLAG_CS5520, \
130 IDE_HFLAG_NO_ATAPI_DMA | \
131 IDE_HFLAG_ABUSE_SET_DMA_MODE, \
132 .pio_mask = ATA_PIO4, \ 101 .pio_mask = ATA_PIO4, \
133 } 102 }
134 103
@@ -146,7 +115,7 @@ static const struct ide_port_info cyrix_chipsets[] __devinitdata = {
146static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) 115static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
147{ 116{
148 const struct ide_port_info *d = &cyrix_chipsets[id->driver_data]; 117 const struct ide_port_info *d = &cyrix_chipsets[id->driver_data];
149 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 118 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
150 119
151 ide_setup_pci_noise(dev, d); 120 ide_setup_pci_noise(dev, d);
152 121
@@ -168,11 +137,9 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
168 * do all the device setup for us 137 * do all the device setup for us
169 */ 138 */
170 139
171 ide_pci_setup_ports(dev, d, 14, &idx[0]); 140 ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);
172
173 ide_device_add(idx, d);
174 141
175 return 0; 142 return ide_host_add(d, hws, NULL);
176} 143}
177 144
178static const struct pci_device_id cs5520_pci_tbl[] = { 145static const struct pci_device_id cs5520_pci_tbl[] = {
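
Note: for multi-port PCI bring-up without ide_setup_pci_device(), the driver now lets ide_pci_setup_ports() fill the hw[]/hws[] arrays and then makes one ide_host_add() call. Sketch (my_init_one is an illustrative name; 14 is the IRQ this particular driver passes):

#include <linux/ide.h>
#include <linux/pci.h>

static int my_init_one(struct pci_dev *dev, const struct ide_port_info *d)
{
	hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };

	ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]);

	return ide_host_add(d, hws, NULL);
}
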
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index dc97c48623f3..5404fe4f701d 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -171,8 +171,7 @@ static const struct ide_port_ops cs5535_port_ops = {
171static const struct ide_port_info cs5535_chipset __devinitdata = { 171static const struct ide_port_info cs5535_chipset __devinitdata = {
172 .name = "CS5535", 172 .name = "CS5535",
173 .port_ops = &cs5535_port_ops, 173 .port_ops = &cs5535_port_ops,
174 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE | 174 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
175 IDE_HFLAG_ABUSE_SET_DMA_MODE,
176 .pio_mask = ATA_PIO4, 175 .pio_mask = ATA_PIO4,
177 .mwdma_mask = ATA_MWDMA2, 176 .mwdma_mask = ATA_MWDMA2,
178 .udma_mask = ATA_UDMA4, 177 .udma_mask = ATA_UDMA4,
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index 0106e2a2df77..f84bfb4f600f 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -56,11 +56,10 @@ static const struct ide_port_info delkin_cb_port_info = {
56static int __devinit 56static int __devinit
57delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) 57delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
58{ 58{
59 struct ide_host *host;
59 unsigned long base; 60 unsigned long base;
60 hw_regs_t hw;
61 ide_hwif_t *hwif = NULL;
62 int i, rc; 61 int i, rc;
63 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 62 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
64 63
65 rc = pci_enable_device(dev); 64 rc = pci_enable_device(dev);
66 if (rc) { 65 if (rc) {
@@ -87,34 +86,26 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
87 hw.dev = &dev->dev; 86 hw.dev = &dev->dev;
88 hw.chipset = ide_pci; /* this enables IRQ sharing */ 87 hw.chipset = ide_pci; /* this enables IRQ sharing */
89 88
90 hwif = ide_find_port(); 89 rc = ide_host_add(&delkin_cb_port_info, hws, &host);
91 if (hwif == NULL) 90 if (rc)
92 goto out_disable; 91 goto out_disable;
93 92
94 i = hwif->index; 93 pci_set_drvdata(dev, host);
95
96 ide_init_port_hw(hwif, &hw);
97
98 idx[0] = i;
99
100 ide_device_add(idx, &delkin_cb_port_info);
101
102 pci_set_drvdata(dev, hwif);
103 94
104 return 0; 95 return 0;
105 96
106out_disable: 97out_disable:
107 pci_release_regions(dev); 98 pci_release_regions(dev);
108 pci_disable_device(dev); 99 pci_disable_device(dev);
109 return -ENODEV; 100 return rc;
110} 101}
111 102
112static void 103static void
113delkin_cb_remove (struct pci_dev *dev) 104delkin_cb_remove (struct pci_dev *dev)
114{ 105{
115 ide_hwif_t *hwif = pci_get_drvdata(dev); 106 struct ide_host *host = pci_get_drvdata(dev);
116 107
117 ide_unregister(hwif); 108 ide_host_remove(host);
118 109
119 pci_release_regions(dev); 110 pci_release_regions(dev);
120 pci_disable_device(dev); 111 pci_disable_device(dev);
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 84c36c117194..9e1d1c4741da 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -123,7 +123,6 @@ static const struct ide_port_ops hpt34x_port_ops = {
123#define IDE_HFLAGS_HPT34X \ 123#define IDE_HFLAGS_HPT34X \
124 (IDE_HFLAG_NO_ATAPI_DMA | \ 124 (IDE_HFLAG_NO_ATAPI_DMA | \
125 IDE_HFLAG_NO_DSC | \ 125 IDE_HFLAG_NO_DSC | \
126 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
127 IDE_HFLAG_NO_AUTODMA) 126 IDE_HFLAG_NO_AUTODMA)
128 127
129static const struct ide_port_info hpt34x_chipsets[] __devinitdata = { 128static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 397c6cbe953c..1f1135ce7cd6 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -801,9 +801,9 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
801 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff); 801 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
802 802
803 /* get DMA command mode */ 803 /* get DMA command mode */
804 dma_cmd = inb(hwif->dma_command); 804 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
805 /* stop DMA */ 805 /* stop DMA */
806 outb(dma_cmd & ~0x1, hwif->dma_command); 806 outb(dma_cmd & ~0x1, hwif->dma_base + ATA_DMA_CMD);
807 hpt370_clear_engine(drive); 807 hpt370_clear_engine(drive);
808} 808}
809 809
@@ -818,12 +818,12 @@ static void hpt370_dma_start(ide_drive_t *drive)
818static int hpt370_dma_end(ide_drive_t *drive) 818static int hpt370_dma_end(ide_drive_t *drive)
819{ 819{
820 ide_hwif_t *hwif = HWIF(drive); 820 ide_hwif_t *hwif = HWIF(drive);
821 u8 dma_stat = inb(hwif->dma_status); 821 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
822 822
823 if (dma_stat & 0x01) { 823 if (dma_stat & 0x01) {
824 /* wait a little */ 824 /* wait a little */
825 udelay(20); 825 udelay(20);
826 dma_stat = inb(hwif->dma_status); 826 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
827 if (dma_stat & 0x01) 827 if (dma_stat & 0x01)
828 hpt370_irq_timeout(drive); 828 hpt370_irq_timeout(drive);
829 } 829 }
@@ -850,7 +850,7 @@ static int hpt374_dma_test_irq(ide_drive_t *drive)
850 return 0; 850 return 0;
851 } 851 }
852 852
853 dma_stat = inb(hwif->dma_status); 853 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
854 /* return 1 if INTR asserted */ 854 /* return 1 if INTR asserted */
855 if (dma_stat & 4) 855 if (dma_stat & 4)
856 return 1; 856 return 1;
@@ -1320,7 +1320,15 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
1320 unsigned long flags, base = ide_pci_dma_base(hwif, d); 1320 unsigned long flags, base = ide_pci_dma_base(hwif, d);
1321 u8 dma_old, dma_new, masterdma = 0, slavedma = 0; 1321 u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
1322 1322
1323 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 1323 if (base == 0)
1324 return -1;
1325
1326 hwif->dma_base = base;
1327
1328 if (ide_pci_check_simplex(hwif, d) < 0)
1329 return -1;
1330
1331 if (ide_pci_set_master(dev, d->name) < 0)
1324 return -1; 1332 return -1;
1325 1333
1326 dma_old = inb(base + 2); 1334 dma_old = inb(base + 2);
@@ -1346,7 +1354,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
1346 if (ide_allocate_dma_engine(hwif)) 1354 if (ide_allocate_dma_engine(hwif))
1347 return -1; 1355 return -1;
1348 1356
1349 ide_setup_dma(hwif, base); 1357 hwif->dma_ops = &sff_dma_ops;
1350 1358
1351 return 0; 1359 return 0;
1352} 1360}
@@ -1401,7 +1409,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
1401 1409
1402#define IDE_HFLAGS_HPT3XX \ 1410#define IDE_HFLAGS_HPT3XX \
1403 (IDE_HFLAG_NO_ATAPI_DMA | \ 1411 (IDE_HFLAG_NO_ATAPI_DMA | \
1404 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
1405 IDE_HFLAG_OFF_BOARD) 1412 IDE_HFLAG_OFF_BOARD)
1406 1413
1407static const struct ide_port_ops hpt3xx_port_ops = { 1414static const struct ide_port_ops hpt3xx_port_ops = {
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index 45ba71a7182f..5cd2b32ff0ef 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -28,10 +28,6 @@
28 */ 28 */
29#include <asm/superio.h> 29#include <asm/superio.h>
30 30
31static unsigned long superio_ide_status[2];
32static unsigned long superio_ide_select[2];
33static unsigned long superio_ide_dma_status[2];
34
35#define SUPERIO_IDE_MAX_RETRIES 25 31#define SUPERIO_IDE_MAX_RETRIES 25
36 32
37/* Because of a defect in Super I/O, all reads of the PCI DMA status 33/* Because of a defect in Super I/O, all reads of the PCI DMA status
@@ -40,27 +36,28 @@ static unsigned long superio_ide_dma_status[2];
40 */ 36 */
41static u8 superio_ide_inb (unsigned long port) 37static u8 superio_ide_inb (unsigned long port)
42{ 38{
43 if (port == superio_ide_status[0] || 39 u8 tmp;
44 port == superio_ide_status[1] || 40 int retries = SUPERIO_IDE_MAX_RETRIES;
45 port == superio_ide_select[0] ||
46 port == superio_ide_select[1] ||
47 port == superio_ide_dma_status[0] ||
48 port == superio_ide_dma_status[1]) {
49 u8 tmp;
50 int retries = SUPERIO_IDE_MAX_RETRIES;
51 41
52 /* printk(" [ reading port 0x%x with retry ] ", port); */ 42 /* printk(" [ reading port 0x%x with retry ] ", port); */
53 43
54 do { 44 do {
55 tmp = inb(port); 45 tmp = inb(port);
56 if (tmp == 0) 46 if (tmp == 0)
57 udelay(50); 47 udelay(50);
58 } while (tmp == 0 && retries-- > 0); 48 } while (tmp == 0 && retries-- > 0);
59 49
60 return tmp; 50 return tmp;
61 } 51}
62 52
63 return inb(port); 53static u8 superio_read_status(ide_hwif_t *hwif)
54{
55 return superio_ide_inb(hwif->io_ports.status_addr);
56}
57
58static u8 superio_read_sff_dma_status(ide_hwif_t *hwif)
59{
60 return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS);
64} 61}
65 62
66static void superio_tf_read(ide_drive_t *drive, ide_task_t *task) 63static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
@@ -78,6 +75,8 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
78 /* be sure we're looking at the low order bits */ 75 /* be sure we're looking at the low order bits */
79 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 76 outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
80 77
78 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
79 tf->feature = inb(io_ports->feature_addr);
81 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 80 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
82 tf->nsect = inb(io_ports->nsect_addr); 81 tf->nsect = inb(io_ports->nsect_addr);
83 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 82 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -105,36 +104,32 @@ static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
105 } 104 }
106} 105}
107 106
108static void __devinit superio_ide_init_iops (struct hwif_s *hwif) 107static const struct ide_tp_ops superio_tp_ops = {
109{ 108 .exec_command = ide_exec_command,
110 struct pci_dev *pdev = to_pci_dev(hwif->dev); 109 .read_status = superio_read_status,
111 u32 base, dmabase; 110 .read_altstatus = ide_read_altstatus,
112 u8 port = hwif->channel, tmp; 111 .read_sff_dma_status = superio_read_sff_dma_status,
113 112
114 base = pci_resource_start(pdev, port * 2) & ~3; 113 .set_irq = ide_set_irq,
115 dmabase = pci_resource_start(pdev, 4) & ~3;
116
117 superio_ide_status[port] = base + 7;
118 superio_ide_select[port] = base + 6;
119 superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
120
121 /* Clear error/interrupt, enable dma */
122 tmp = superio_ide_inb(superio_ide_dma_status[port]);
123 outb(tmp | 0x66, superio_ide_dma_status[port]);
124 114
125 hwif->tf_read = superio_tf_read; 115 .tf_load = ide_tf_load,
116 .tf_read = superio_tf_read,
126 117
127 /* We need to override inb to workaround a SuperIO errata */ 118 .input_data = ide_input_data,
128 hwif->INB = superio_ide_inb; 119 .output_data = ide_output_data,
129} 120};
130 121
131static void __devinit init_iops_ns87415(ide_hwif_t *hwif) 122static void __devinit superio_init_iops(struct hwif_s *hwif)
132{ 123{
133 struct pci_dev *dev = to_pci_dev(hwif->dev); 124 struct pci_dev *pdev = to_pci_dev(hwif->dev);
125 u32 dma_stat;
126 u8 port = hwif->channel, tmp;
134 127
135 if (PCI_SLOT(dev->devfn) == 0xE) 128 dma_stat = (pci_resource_start(pdev, 4) & ~3) + (!port ? 2 : 0xa);
136 /* Built-in - assume it's under superio. */ 129
137 superio_ide_init_iops(hwif); 130 /* Clear error/interrupt, enable dma */
131 tmp = superio_ide_inb(dma_stat);
132 outb(tmp | 0x66, dma_stat);
138} 133}
139#endif 134#endif
140 135
@@ -200,14 +195,14 @@ static int ns87415_dma_end(ide_drive_t *drive)
200 u8 dma_stat = 0, dma_cmd = 0; 195 u8 dma_stat = 0, dma_cmd = 0;
201 196
202 drive->waiting_for_dma = 0; 197 drive->waiting_for_dma = 0;
203 dma_stat = hwif->INB(hwif->dma_status); 198 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
204 /* get dma command mode */ 199 /* get DMA command mode */
205 dma_cmd = hwif->INB(hwif->dma_command); 200 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
206 /* stop DMA */ 201 /* stop DMA */
207 outb(dma_cmd & ~1, hwif->dma_command); 202 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
208 /* from ERRATA: clear the INTR & ERROR bits */ 203 /* from ERRATA: clear the INTR & ERROR bits */
209 dma_cmd = hwif->INB(hwif->dma_command); 204 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
210 outb(dma_cmd | 6, hwif->dma_command); 205 outb(dma_cmd | 6, hwif->dma_base + ATA_DMA_CMD);
211 /* and free any DMA resources */ 206 /* and free any DMA resources */
212 ide_destroy_dmatable(drive); 207 ide_destroy_dmatable(drive);
213 /* verify good DMA status */ 208 /* verify good DMA status */
@@ -276,7 +271,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
276 outb(8, hwif->io_ports.ctl_addr); 271 outb(8, hwif->io_ports.ctl_addr);
277 do { 272 do {
278 udelay(50); 273 udelay(50);
279 stat = hwif->INB(hwif->io_ports.status_addr); 274 stat = hwif->tp_ops->read_status(hwif);
280 if (stat == 0xff) 275 if (stat == 0xff)
281 break; 276 break;
282 } while ((stat & BUSY_STAT) && --timeout); 277 } while ((stat & BUSY_STAT) && --timeout);
@@ -291,7 +286,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
291 if (!hwif->dma_base) 286 if (!hwif->dma_base)
292 return; 287 return;
293 288
294 outb(0x60, hwif->dma_status); 289 outb(0x60, hwif->dma_base + ATA_DMA_STATUS);
295} 290}
296 291
297static const struct ide_port_ops ns87415_port_ops = { 292static const struct ide_port_ops ns87415_port_ops = {
@@ -311,9 +306,6 @@ static const struct ide_dma_ops ns87415_dma_ops = {
311 306
312static const struct ide_port_info ns87415_chipset __devinitdata = { 307static const struct ide_port_info ns87415_chipset __devinitdata = {
313 .name = "NS87415", 308 .name = "NS87415",
314#ifdef CONFIG_SUPERIO
315 .init_iops = init_iops_ns87415,
316#endif
317 .init_hwif = init_hwif_ns87415, 309 .init_hwif = init_hwif_ns87415,
318 .port_ops = &ns87415_port_ops, 310 .port_ops = &ns87415_port_ops,
319 .dma_ops = &ns87415_dma_ops, 311 .dma_ops = &ns87415_dma_ops,
@@ -323,7 +315,16 @@ static const struct ide_port_info ns87415_chipset __devinitdata = {
323 315
324static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id) 316static int __devinit ns87415_init_one(struct pci_dev *dev, const struct pci_device_id *id)
325{ 317{
326 return ide_setup_pci_device(dev, &ns87415_chipset); 318 struct ide_port_info d = ns87415_chipset;
319
320#ifdef CONFIG_SUPERIO
321 if (PCI_SLOT(dev->devfn) == 0xE) {
322 /* Built-in - assume it's under superio. */
323 d.init_iops = superio_init_iops;
324 d.tp_ops = &superio_tp_ops;
325 }
326#endif
327 return ide_setup_pci_device(dev, &d);
327} 328}
328 329
329static const struct pci_device_id ns87415_pci_tbl[] = { 330static const struct pci_device_id ns87415_pci_tbl[] = {
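
Note: rather than compiling an init_iops hook into the static chipset table under CONFIG_SUPERIO, ns87415_init_one() now copies the port_info to the stack and patches tp_ops/init_iops only for the built-in SuperIO device. Sketch of that copy-and-patch idea; my_chipset, my_quirk_init_iops and my_quirk_tp_ops (a table like the one sketched after the q40ide hunk) are placeholders:

#include <linux/ide.h>
#include <linux/pci.h>

static const struct ide_tp_ops my_quirk_tp_ops;		/* quirky accessors, elided */
static void my_quirk_init_iops(ide_hwif_t *hwif) { }
static const struct ide_port_info my_chipset = { .name = "MYCHIP" };

static int my_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ide_port_info d = my_chipset;	/* per-device copy of the table */

	if (PCI_SLOT(dev->devfn) == 0xE) {
		d.init_iops = my_quirk_init_iops;
		d.tp_ops = &my_quirk_tp_ops;
	}

	return ide_setup_pci_device(dev, &d);
}
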
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index fca89eda5c02..e54dc653b8c4 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -206,7 +206,7 @@ static int pdc202xx_dma_test_irq(ide_drive_t *drive)
206{ 206{
207 ide_hwif_t *hwif = HWIF(drive); 207 ide_hwif_t *hwif = HWIF(drive);
208 unsigned long high_16 = hwif->extra_base - 16; 208 unsigned long high_16 = hwif->extra_base - 16;
209 u8 dma_stat = inb(hwif->dma_status); 209 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
210 u8 sc1d = inb(high_16 + 0x001d); 210 u8 sc1d = inb(high_16 + 0x001d);
211 211
212 if (hwif->channel) { 212 if (hwif->channel) {
@@ -312,7 +312,6 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
312 312
313#define IDE_HFLAGS_PDC202XX \ 313#define IDE_HFLAGS_PDC202XX \
314 (IDE_HFLAG_ERROR_STOPS_FIFO | \ 314 (IDE_HFLAG_ERROR_STOPS_FIFO | \
315 IDE_HFLAG_ABUSE_SET_DMA_MODE | \
316 IDE_HFLAG_OFF_BOARD) 315 IDE_HFLAG_OFF_BOARD)
317 316
318static const struct ide_port_ops pdc20246_port_ops = { 317static const struct ide_port_ops pdc20246_port_ops = {
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index f04738d14a6f..0ce41b4dddaf 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -227,9 +227,9 @@ static void piix_dma_clear_irq(ide_drive_t *drive)
227 u8 dma_stat; 227 u8 dma_stat;
228 228
229 /* clear the INTR & ERROR bits */ 229 /* clear the INTR & ERROR bits */
230 dma_stat = inb(hwif->dma_status); 230 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
231 /* Should we force the bit as well ? */ 231 /* Should we force the bit as well ? */
232 outb(dma_stat, hwif->dma_status); 232 outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
233} 233}
234 234
235struct ich_laptop { 235struct ich_laptop {
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 789c66dfbde5..94a7ab864236 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -65,7 +65,7 @@
65 65
66static struct scc_ports { 66static struct scc_ports {
67 unsigned long ctl, dma; 67 unsigned long ctl, dma;
68 ide_hwif_t *hwif; /* for removing port from system */ 68 struct ide_host *host; /* for removing port from system */
69} scc_ports[MAX_HWIFS]; 69} scc_ports[MAX_HWIFS];
70 70
71/* PIO transfer mode table */ 71/* PIO transfer mode table */
@@ -126,6 +126,46 @@ static u8 scc_ide_inb(unsigned long port)
126 return (u8)data; 126 return (u8)data;
127} 127}
128 128
129static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
130{
131 out_be32((void *)hwif->io_ports.command_addr, cmd);
132 eieio();
133 in_be32((void *)(hwif->dma_base + 0x01c));
134 eieio();
135}
136
137static u8 scc_read_status(ide_hwif_t *hwif)
138{
139 return (u8)in_be32((void *)hwif->io_ports.status_addr);
140}
141
142static u8 scc_read_altstatus(ide_hwif_t *hwif)
143{
144 return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
145}
146
147static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
148{
149 return (u8)in_be32((void *)(hwif->dma_base + 4));
150}
151
152static void scc_set_irq(ide_hwif_t *hwif, int on)
153{
154 u8 ctl = ATA_DEVCTL_OBS;
155
156 if (on == 4) { /* hack for SRST */
157 ctl |= 4;
158 on &= ~4;
159 }
160
161 ctl |= on ? 0 : 2;
162
163 out_be32((void *)hwif->io_ports.ctl_addr, ctl);
164 eieio();
165 in_be32((void *)(hwif->dma_base + 0x01c));
166 eieio();
167}
168
129static void scc_ide_insw(unsigned long port, void *addr, u32 count) 169static void scc_ide_insw(unsigned long port, void *addr, u32 count)
130{ 170{
131 u16 *ptr = (u16 *)addr; 171 u16 *ptr = (u16 *)addr;
@@ -148,14 +188,6 @@ static void scc_ide_outb(u8 addr, unsigned long port)
148 out_be32((void*)port, addr); 188 out_be32((void*)port, addr);
149} 189}
150 190
151static void scc_ide_outbsync(ide_hwif_t *hwif, u8 addr, unsigned long port)
152{
153 out_be32((void*)port, addr);
154 eieio();
155 in_be32((void*)(hwif->dma_base + 0x01c));
156 eieio();
157}
158
159static void 191static void
160scc_ide_outsw(unsigned long port, void *addr, u32 count) 192scc_ide_outsw(unsigned long port, void *addr, u32 count)
161{ 193{
@@ -261,14 +293,14 @@ static void scc_dma_host_set(ide_drive_t *drive, int on)
261{ 293{
262 ide_hwif_t *hwif = drive->hwif; 294 ide_hwif_t *hwif = drive->hwif;
263 u8 unit = (drive->select.b.unit & 0x01); 295 u8 unit = (drive->select.b.unit & 0x01);
264 u8 dma_stat = scc_ide_inb(hwif->dma_status); 296 u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);
265 297
266 if (on) 298 if (on)
267 dma_stat |= (1 << (5 + unit)); 299 dma_stat |= (1 << (5 + unit));
268 else 300 else
269 dma_stat &= ~(1 << (5 + unit)); 301 dma_stat &= ~(1 << (5 + unit));
270 302
271 scc_ide_outb(dma_stat, hwif->dma_status); 303 scc_ide_outb(dma_stat, hwif->dma_base + 4);
272} 304}
273 305
274/** 306/**
@@ -304,13 +336,13 @@ static int scc_dma_setup(ide_drive_t *drive)
304 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma); 336 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
305 337
306 /* specify r/w */ 338 /* specify r/w */
307 out_be32((void __iomem *)hwif->dma_command, reading); 339 out_be32((void __iomem *)hwif->dma_base, reading);
308 340
309 /* read dma_status for INTR & ERROR flags */ 341 /* read DMA status for INTR & ERROR flags */
310 dma_stat = in_be32((void __iomem *)hwif->dma_status); 342 dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));
311 343
312 /* clear INTR & ERROR flags */ 344 /* clear INTR & ERROR flags */
313 out_be32((void __iomem *)hwif->dma_status, dma_stat|6); 345 out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
314 drive->waiting_for_dma = 1; 346 drive->waiting_for_dma = 1;
315 return 0; 347 return 0;
316} 348}
@@ -318,10 +350,10 @@ static int scc_dma_setup(ide_drive_t *drive)
318static void scc_dma_start(ide_drive_t *drive) 350static void scc_dma_start(ide_drive_t *drive)
319{ 351{
320 ide_hwif_t *hwif = drive->hwif; 352 ide_hwif_t *hwif = drive->hwif;
321 u8 dma_cmd = scc_ide_inb(hwif->dma_command); 353 u8 dma_cmd = scc_ide_inb(hwif->dma_base);
322 354
323 /* start DMA */ 355 /* start DMA */
324 scc_ide_outb(dma_cmd | 1, hwif->dma_command); 356 scc_ide_outb(dma_cmd | 1, hwif->dma_base);
325 hwif->dma = 1; 357 hwif->dma = 1;
326 wmb(); 358 wmb();
327} 359}
@@ -333,13 +365,13 @@ static int __scc_dma_end(ide_drive_t *drive)
333 365
334 drive->waiting_for_dma = 0; 366 drive->waiting_for_dma = 0;
335 /* get DMA command mode */ 367 /* get DMA command mode */
336 dma_cmd = scc_ide_inb(hwif->dma_command); 368 dma_cmd = scc_ide_inb(hwif->dma_base);
337 /* stop DMA */ 369 /* stop DMA */
338 scc_ide_outb(dma_cmd & ~1, hwif->dma_command); 370 scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
339 /* get DMA status */ 371 /* get DMA status */
340 dma_stat = scc_ide_inb(hwif->dma_status); 372 dma_stat = scc_ide_inb(hwif->dma_base + 4);
341 /* clear the INTR & ERROR bits */ 373 /* clear the INTR & ERROR bits */
342 scc_ide_outb(dma_stat | 6, hwif->dma_status); 374 scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
343 /* purge DMA mappings */ 375 /* purge DMA mappings */
344 ide_destroy_dmatable(drive); 376 ide_destroy_dmatable(drive);
345 /* verify good DMA status */ 377 /* verify good DMA status */
@@ -359,6 +391,7 @@ static int __scc_dma_end(ide_drive_t *drive)
359static int scc_dma_end(ide_drive_t *drive) 391static int scc_dma_end(ide_drive_t *drive)
360{ 392{
361 ide_hwif_t *hwif = HWIF(drive); 393 ide_hwif_t *hwif = HWIF(drive);
394 void __iomem *dma_base = (void __iomem *)hwif->dma_base;
362 unsigned long intsts_port = hwif->dma_base + 0x014; 395 unsigned long intsts_port = hwif->dma_base + 0x014;
363 u32 reg; 396 u32 reg;
364 int dma_stat, data_loss = 0; 397 int dma_stat, data_loss = 0;
@@ -397,7 +430,7 @@ static int scc_dma_end(ide_drive_t *drive)
397 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME); 430 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
398 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT); 431 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
399 432
400 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 433 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
401 continue; 434 continue;
402 } 435 }
403 436
@@ -412,7 +445,7 @@ static int scc_dma_end(ide_drive_t *drive)
412 445
413 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT); 446 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
414 447
415 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 448 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
416 continue; 449 continue;
417 } 450 }
418 451
@@ -420,12 +453,12 @@ static int scc_dma_end(ide_drive_t *drive)
420 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME); 453 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
421 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT); 454 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
422 455
423 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 456 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
424 continue; 457 continue;
425 } 458 }
426 459
427 if (reg & INTSTS_ICERR) { 460 if (reg & INTSTS_ICERR) {
428 out_be32((void __iomem *)hwif->dma_command, in_be32((void __iomem *)hwif->dma_command) & ~QCHCD_IOS_SS); 461 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
429 462
430 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME); 463 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
431 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT); 464 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
@@ -553,14 +586,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
553 const struct ide_port_info *d) 586 const struct ide_port_info *d)
554{ 587{
555 struct scc_ports *ports = pci_get_drvdata(dev); 588 struct scc_ports *ports = pci_get_drvdata(dev);
556 ide_hwif_t *hwif = NULL; 589 struct ide_host *host;
557 hw_regs_t hw; 590 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
558 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 591 int i, rc;
559 int i;
560
561 hwif = ide_find_port_slot(d);
562 if (hwif == NULL)
563 return -ENOMEM;
564 592
565 memset(&hw, 0, sizeof(hw)); 593 memset(&hw, 0, sizeof(hw));
566 for (i = 0; i <= 8; i++) 594 for (i = 0; i <= 8; i++)
@@ -568,11 +596,12 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
568 hw.irq = dev->irq; 596 hw.irq = dev->irq;
569 hw.dev = &dev->dev; 597 hw.dev = &dev->dev;
570 hw.chipset = ide_pci; 598 hw.chipset = ide_pci;
571 ide_init_port_hw(hwif, &hw);
572 599
573 idx[0] = hwif->index; 600 rc = ide_host_add(d, hws, &host);
601 if (rc)
602 return rc;
574 603
575 ide_device_add(idx, d); 604 ports->host = host;
576 605
577 return 0; 606 return 0;
578} 607}
@@ -701,6 +730,8 @@ static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
701 /* be sure we're looking at the low order bits */ 730 /* be sure we're looking at the low order bits */
702 scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 731 scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
703 732
733 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
734 tf->feature = scc_ide_inb(io_ports->feature_addr);
704 if (task->tf_flags & IDE_TFLAG_IN_NSECT) 735 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
705 tf->nsect = scc_ide_inb(io_ports->nsect_addr); 736 tf->nsect = scc_ide_inb(io_ports->nsect_addr);
706 if (task->tf_flags & IDE_TFLAG_IN_LBAL) 737 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
@@ -774,16 +805,6 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
774 805
775 ide_set_hwifdata(hwif, ports); 806 ide_set_hwifdata(hwif, ports);
776 807
777 hwif->tf_load = scc_tf_load;
778 hwif->tf_read = scc_tf_read;
779
780 hwif->input_data = scc_input_data;
781 hwif->output_data = scc_output_data;
782
783 hwif->INB = scc_ide_inb;
784 hwif->OUTB = scc_ide_outb;
785 hwif->OUTBSYNC = scc_ide_outbsync;
786
787 hwif->dma_base = dma_base; 808 hwif->dma_base = dma_base;
788 hwif->config_data = ports->ctl; 809 hwif->config_data = ports->ctl;
789} 810}
@@ -824,11 +845,6 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
824{ 845{
825 struct scc_ports *ports = ide_get_hwifdata(hwif); 846 struct scc_ports *ports = ide_get_hwifdata(hwif);
826 847
827 ports->hwif = hwif;
828
829 hwif->dma_command = hwif->dma_base;
830 hwif->dma_status = hwif->dma_base + 0x04;
831
832 /* PTERADD */ 848 /* PTERADD */
833 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); 849 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
834 850
@@ -838,6 +854,21 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
838 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ 854 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
839} 855}
840 856
857static const struct ide_tp_ops scc_tp_ops = {
858 .exec_command = scc_exec_command,
859 .read_status = scc_read_status,
860 .read_altstatus = scc_read_altstatus,
861 .read_sff_dma_status = scc_read_sff_dma_status,
862
863 .set_irq = scc_set_irq,
864
865 .tf_load = scc_tf_load,
866 .tf_read = scc_tf_read,
867
868 .input_data = scc_input_data,
869 .output_data = scc_output_data,
870};
871
841static const struct ide_port_ops scc_port_ops = { 872static const struct ide_port_ops scc_port_ops = {
842 .set_pio_mode = scc_set_pio_mode, 873 .set_pio_mode = scc_set_pio_mode,
843 .set_dma_mode = scc_set_dma_mode, 874 .set_dma_mode = scc_set_dma_mode,
@@ -861,6 +892,7 @@ static const struct ide_dma_ops scc_dma_ops = {
861 .name = name_str, \ 892 .name = name_str, \
862 .init_iops = init_iops_scc, \ 893 .init_iops = init_iops_scc, \
863 .init_hwif = init_hwif_scc, \ 894 .init_hwif = init_hwif_scc, \
895 .tp_ops = &scc_tp_ops, \
864 .port_ops = &scc_port_ops, \ 896 .port_ops = &scc_port_ops, \
865 .dma_ops = &scc_dma_ops, \ 897 .dma_ops = &scc_dma_ops, \
866 .host_flags = IDE_HFLAG_SINGLE, \ 898 .host_flags = IDE_HFLAG_SINGLE, \
@@ -895,7 +927,8 @@ static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_i
895static void __devexit scc_remove(struct pci_dev *dev) 927static void __devexit scc_remove(struct pci_dev *dev)
896{ 928{
897 struct scc_ports *ports = pci_get_drvdata(dev); 929 struct scc_ports *ports = pci_get_drvdata(dev);
898 ide_hwif_t *hwif = ports->hwif; 930 struct ide_host *host = ports->host;
931 ide_hwif_t *hwif = host->ports[0];
899 932
900 if (hwif->dmatable_cpu) { 933 if (hwif->dmatable_cpu) {
901 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES, 934 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -903,7 +936,7 @@ static void __devexit scc_remove(struct pci_dev *dev)
903 hwif->dmatable_cpu = NULL; 936 hwif->dmatable_cpu = NULL;
904 } 937 }
905 938
906 ide_unregister(hwif); 939 ide_host_remove(host);
907 940
908 iounmap((void*)ports->dma); 941 iounmap((void*)ports->dma);
909 iounmap((void*)ports->ctl); 942 iounmap((void*)ports->ctl);
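
Note: the OUTBSYNC/INB/OUTB hooks are gone; scc now supplies a complete ide_tp_ops table (wired in via .tp_ops in the port_info), including exec_command/set_irq variants that flush the posted write by reading an unrelated register back. A rough sketch of that exec_command shape, assuming generic MMIO accessors (writel/readl) in place of the driver's big-endian out_be32/in_be32 and eieio() barriers:

#include <linux/ide.h>
#include <linux/io.h>

static void my_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	writel(cmd, (void __iomem *)hwif->io_ports.command_addr);
	/* read back a harmless register so the command write reaches the chip */
	(void)readl((void __iomem *)(hwif->dma_base + 0x01c));
}
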
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index a1fb20826a5b..127ccb45e261 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -349,9 +349,7 @@ static const struct ide_port_ops svwks_port_ops = {
349 .cable_detect = svwks_cable_detect, 349 .cable_detect = svwks_cable_detect,
350}; 350};
351 351
352#define IDE_HFLAGS_SVWKS \ 352#define IDE_HFLAGS_SVWKS IDE_HFLAG_LEGACY_IRQS
353 (IDE_HFLAG_LEGACY_IRQS | \
354 IDE_HFLAG_ABUSE_SET_DMA_MODE)
355 353
356static const struct ide_port_info serverworks_chipsets[] __devinitdata = { 354static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
357 { /* 0 */ 355 { /* 0 */
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index c79ff5b41088..42eef19a18f1 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -127,7 +127,7 @@ sgiioc4_checkirq(ide_hwif_t * hwif)
127 return 0; 127 return 0;
128} 128}
129 129
130static u8 sgiioc4_INB(unsigned long); 130static u8 sgiioc4_read_status(ide_hwif_t *);
131 131
132static int 132static int
133sgiioc4_clearirq(ide_drive_t * drive) 133sgiioc4_clearirq(ide_drive_t * drive)
@@ -141,18 +141,19 @@ sgiioc4_clearirq(ide_drive_t * drive)
141 intr_reg = readl((void __iomem *)other_ir); 141 intr_reg = readl((void __iomem *)other_ir);
142 if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */ 142 if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
143 /* 143 /*
144 * Using sgiioc4_INB to read the Status register has a side 144 * Using sgiioc4_read_status to read the Status register has a
145 * effect of clearing the interrupt. The first read should 145 * side effect of clearing the interrupt. The first read should
146 * clear it if it is set. The second read should return 146 * clear it if it is set. The second read should return
147 * a "clear" status if it got cleared. If not, then spin 147 * a "clear" status if it got cleared. If not, then spin
148 * for a bit trying to clear it. 148 * for a bit trying to clear it.
149 */ 149 */
150 u8 stat = sgiioc4_INB(io_ports->status_addr); 150 u8 stat = sgiioc4_read_status(hwif);
151 int count = 0; 151 int count = 0;
152 stat = sgiioc4_INB(io_ports->status_addr); 152
153 stat = sgiioc4_read_status(hwif);
153 while ((stat & 0x80) && (count++ < 100)) { 154 while ((stat & 0x80) && (count++ < 100)) {
154 udelay(1); 155 udelay(1);
155 stat = sgiioc4_INB(io_ports->status_addr); 156 stat = sgiioc4_read_status(hwif);
156 } 157 }
157 158
158 if (intr_reg & 0x02) { 159 if (intr_reg & 0x02) {
@@ -304,9 +305,9 @@ sgiioc4_dma_lost_irq(ide_drive_t * drive)
304 ide_dma_lost_irq(drive); 305 ide_dma_lost_irq(drive);
305} 306}
306 307
307static u8 308static u8 sgiioc4_read_status(ide_hwif_t *hwif)
308sgiioc4_INB(unsigned long port)
309{ 309{
310 unsigned long port = hwif->io_ports.status_addr;
310 u8 reg = (u8) readb((void __iomem *) port); 311 u8 reg = (u8) readb((void __iomem *) port);
311 312
312 if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */ 313 if ((port & 0xFFF) == 0x11C) { /* Status register of IOC4 */
@@ -549,6 +550,21 @@ static int sgiioc4_dma_setup(ide_drive_t *drive)
549 return 0; 550 return 0;
550} 551}
551 552
553static const struct ide_tp_ops sgiioc4_tp_ops = {
554 .exec_command = ide_exec_command,
555 .read_status = sgiioc4_read_status,
556 .read_altstatus = ide_read_altstatus,
557 .read_sff_dma_status = ide_read_sff_dma_status,
558
559 .set_irq = ide_set_irq,
560
561 .tf_load = ide_tf_load,
562 .tf_read = ide_tf_read,
563
564 .input_data = ide_input_data,
565 .output_data = ide_output_data,
566};
567
552static const struct ide_port_ops sgiioc4_port_ops = { 568static const struct ide_port_ops sgiioc4_port_ops = {
553 .set_dma_mode = sgiioc4_set_dma_mode, 569 .set_dma_mode = sgiioc4_set_dma_mode,
554 /* reset DMA engine, clear IRQs */ 570 /* reset DMA engine, clear IRQs */
@@ -571,6 +587,7 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = {
571 .name = DRV_NAME, 587 .name = DRV_NAME,
572 .chipset = ide_pci, 588 .chipset = ide_pci,
573 .init_dma = ide_dma_sgiioc4, 589 .init_dma = ide_dma_sgiioc4,
590 .tp_ops = &sgiioc4_tp_ops,
574 .port_ops = &sgiioc4_port_ops, 591 .port_ops = &sgiioc4_port_ops,
575 .dma_ops = &sgiioc4_dma_ops, 592 .dma_ops = &sgiioc4_dma_ops,
576 .host_flags = IDE_HFLAG_MMIO, 593 .host_flags = IDE_HFLAG_MMIO,
@@ -583,10 +600,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
583 unsigned long cmd_base, irqport; 600 unsigned long cmd_base, irqport;
584 unsigned long bar0, cmd_phys_base, ctl; 601 unsigned long bar0, cmd_phys_base, ctl;
585 void __iomem *virt_base; 602 void __iomem *virt_base;
586 ide_hwif_t *hwif; 603 struct ide_host *host;
587 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 604 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
588 hw_regs_t hw;
589 struct ide_port_info d = sgiioc4_port_info; 605 struct ide_port_info d = sgiioc4_port_info;
606 int rc;
590 607
591 /* Get the CmdBlk and CtrlBlk Base Registers */ 608 /* Get the CmdBlk and CtrlBlk Base Registers */
592 bar0 = pci_resource_start(dev, 0); 609 bar0 = pci_resource_start(dev, 0);
@@ -618,30 +635,26 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
618 hw.chipset = ide_pci; 635 hw.chipset = ide_pci;
619 hw.dev = &dev->dev; 636 hw.dev = &dev->dev;
620 637
621 hwif = ide_find_port_slot(&d);
622 if (hwif == NULL)
623 goto err;
624
625 ide_init_port_hw(hwif, &hw);
626
627 /* The IOC4 uses MMIO rather than Port IO. */
628 default_hwif_mmiops(hwif);
629
630 /* Initializing chipset IRQ Registers */ 638 /* Initializing chipset IRQ Registers */
631 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); 639 writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
632 640
633 hwif->INB = &sgiioc4_INB; 641 host = ide_host_alloc(&d, hws);
634 642 if (host == NULL) {
635 idx[0] = hwif->index; 643 rc = -ENOMEM;
644 goto err;
645 }
636 646
637 if (ide_device_add(idx, &d)) 647 rc = ide_host_register(host, &d, hws);
638 return -EIO; 648 if (rc)
649 goto err_free;
639 650
640 return 0; 651 return 0;
652err_free:
653 ide_host_free(host);
641err: 654err:
642 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); 655 release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE);
643 iounmap(virt_base); 656 iounmap(virt_base);
644 return -ENOMEM; 657 return rc;
645} 658}
646 659
647static unsigned int __devinit 660static unsigned int __devinit
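
sgiioc4 is one of several drivers in this series converted to the new ide_tp_ops taskfile-access table: it supplies the full table but overrides only read_status (its status read doubles as the interrupt clear), inheriting the generic helpers for everything else, and the probe switches from ide_find_port_slot()/ide_device_add() to ide_host_alloc()/ide_host_register() with ide_host_free() on the error path. A sketch of the override pattern with hypothetical mydrv_* names; the ide_* helper names are the ones used throughout this patch:

        static u8 mydrv_read_status(ide_hwif_t *hwif)
        {
                /* chip-specific status read, possibly with side effects */
                return readb((void __iomem *)hwif->io_ports.status_addr);
        }

        static const struct ide_tp_ops mydrv_tp_ops = {
                .exec_command           = ide_exec_command,
                .read_status            = mydrv_read_status,   /* the only override */
                .read_altstatus         = ide_read_altstatus,
                .read_sff_dma_status    = ide_read_sff_dma_status,
                .set_irq                = ide_set_irq,
                .tf_load                = ide_tf_load,
                .tf_read                = ide_tf_read,
                .input_data             = ide_input_data,
                .output_data            = ide_output_data,
        };

The core then reaches the driver through hwif->tp_ops->read_status(hwif), the same indirection the setup-pci.c hunks below use for the DMA status register.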
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 6e9d7655d89c..5965a35d94ae 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -334,7 +334,7 @@ static int siimage_io_dma_test_irq(ide_drive_t *drive)
334 unsigned long addr = siimage_selreg(hwif, 1); 334 unsigned long addr = siimage_selreg(hwif, 1);
335 335
336 /* return 1 if INTR asserted */ 336 /* return 1 if INTR asserted */
337 if (hwif->INB(hwif->dma_status) & 4) 337 if (inb(hwif->dma_base + ATA_DMA_STATUS) & 4)
338 return 1; 338 return 1;
339 339
340 /* return 1 if Device INTR asserted */ 340 /* return 1 if Device INTR asserted */
@@ -382,7 +382,7 @@ static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
382 } 382 }
383 383
384 /* return 1 if INTR asserted */ 384 /* return 1 if INTR asserted */
385 if (readb((void __iomem *)hwif->dma_status) & 0x04) 385 if (readb((void __iomem *)(hwif->dma_base + ATA_DMA_STATUS)) & 4)
386 return 1; 386 return 1;
387 387
388 /* return 1 if Device INTR asserted */ 388 /* return 1 if Device INTR asserted */
@@ -601,7 +601,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
601 * Fill in the basic hwif bits 601 * Fill in the basic hwif bits
602 */ 602 */
603 hwif->host_flags |= IDE_HFLAG_MMIO; 603 hwif->host_flags |= IDE_HFLAG_MMIO;
604 default_hwif_mmiops(hwif); 604
605 hwif->hwif_data = addr; 605 hwif->hwif_data = addr;
606 606
607 /* 607 /*
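
This siimage hunk, like the sl82c105 and tc86c001 hunks that follow, drops the cached hwif->dma_status / hwif->dma_command addresses in favour of computing the BM-DMA registers from hwif->dma_base plus the standard offsets. The convention, as a two-line fragment:

        u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);   /* was inb(hwif->dma_status)  */
        u8 dma_cmd  = inb(hwif->dma_base + ATA_DMA_CMD);      /* was inb(hwif->dma_command) */

MMIO variants use readb() on the same offsets, as the siimage_mmio_dma_test_irq() change shows.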
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 6efbde297174..f82a6502c1b7 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -157,9 +157,9 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive)
157 * Was DMA enabled? If so, disable it - we're resetting the 157 * Was DMA enabled? If so, disable it - we're resetting the
158 * host. The IDE layer will be handling the drive for us. 158 * host. The IDE layer will be handling the drive for us.
159 */ 159 */
160 dma_cmd = inb(hwif->dma_command); 160 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
161 if (dma_cmd & 1) { 161 if (dma_cmd & 1) {
162 outb(dma_cmd & ~1, hwif->dma_command); 162 outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
163 printk("sl82c105: DMA was enabled\n"); 163 printk("sl82c105: DMA was enabled\n");
164 } 164 }
165 165
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 9b4b27a4c711..477e19790102 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -63,7 +63,7 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
63 ide_hwif_t *hwif = HWIF(drive); 63 ide_hwif_t *hwif = HWIF(drive);
64 ide_expiry_t *expiry = ide_get_hwifdata(hwif); 64 ide_expiry_t *expiry = ide_get_hwifdata(hwif);
65 ide_hwgroup_t *hwgroup = HWGROUP(drive); 65 ide_hwgroup_t *hwgroup = HWGROUP(drive);
66 u8 dma_stat = inb(hwif->dma_status); 66 u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
67 67
68 /* Restore a higher level driver's expiry handler first. */ 68 /* Restore a higher level driver's expiry handler first. */
69 hwgroup->expiry = expiry; 69 hwgroup->expiry = expiry;
@@ -71,21 +71,24 @@ static int tc86c001_timer_expiry(ide_drive_t *drive)
71 if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */ 71 if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
72 unsigned long sc_base = hwif->config_data; 72 unsigned long sc_base = hwif->config_data;
73 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); 73 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
74 u8 dma_cmd = inb(hwif->dma_command); 74 u8 dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
75 75
76 printk(KERN_WARNING "%s: DMA interrupt possibly stuck, " 76 printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
77 "attempting recovery...\n", drive->name); 77 "attempting recovery...\n", drive->name);
78 78
79 /* Stop DMA */ 79 /* Stop DMA */
80 outb(dma_cmd & ~0x01, hwif->dma_command); 80 outb(dma_cmd & ~0x01, hwif->dma_base + ATA_DMA_CMD);
81 81
82 /* Setup the dummy DMA transfer */ 82 /* Setup the dummy DMA transfer */
83 outw(0, sc_base + 0x0a); /* Sector Count */ 83 outw(0, sc_base + 0x0a); /* Sector Count */
84 outw(0, twcr_port); /* Transfer Word Count 1 or 2 */ 84 outw(0, twcr_port); /* Transfer Word Count 1 or 2 */
85 85
86 /* Start the dummy DMA transfer */ 86 /* Start the dummy DMA transfer */
87 outb(0x00, hwif->dma_command); /* clear R_OR_WCTR for write */ 87
88 outb(0x01, hwif->dma_command); /* set START_STOPBM */ 88 /* clear R_OR_WCTR for write */
89 outb(0x00, hwif->dma_base + ATA_DMA_CMD);
90 /* set START_STOPBM */
91 outb(0x01, hwif->dma_base + ATA_DMA_CMD);
89 92
90 /* 93 /*
91 * If an interrupt was pending, it should come thru shortly. 94 * If an interrupt was pending, it should come thru shortly.
@@ -203,8 +206,7 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = {
203 .init_hwif = init_hwif_tc86c001, 206 .init_hwif = init_hwif_tc86c001,
204 .port_ops = &tc86c001_port_ops, 207 .port_ops = &tc86c001_port_ops,
205 .dma_ops = &tc86c001_dma_ops, 208 .dma_ops = &tc86c001_dma_ops,
206 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD | 209 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD,
207 IDE_HFLAG_ABUSE_SET_DMA_MODE,
208 .pio_mask = ATA_PIO4, 210 .pio_mask = ATA_PIO4,
209 .mwdma_mask = ATA_MWDMA2, 211 .mwdma_mask = ATA_MWDMA2,
210 .udma_mask = ATA_UDMA4, 212 .udma_mask = ATA_UDMA4,
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index e47384c70c40..09dc4803ef9d 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -425,7 +425,6 @@ static const struct ide_port_info via82cxxx_chipset __devinitdata = {
425 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, 425 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
426 .port_ops = &via_port_ops, 426 .port_ops = &via_port_ops,
427 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | 427 .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
428 IDE_HFLAG_ABUSE_SET_DMA_MODE |
429 IDE_HFLAG_POST_SET_MODE | 428 IDE_HFLAG_POST_SET_MODE |
430 IDE_HFLAG_IO_32BIT, 429 IDE_HFLAG_IO_32BIT,
431 .pio_mask = ATA_PIO5, 430 .pio_mask = ATA_PIO5,
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 93fb9067c043..c521bf6e1bf2 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -48,6 +48,8 @@
48#include <asm/mediabay.h> 48#include <asm/mediabay.h>
49#endif 49#endif
50 50
51#define DRV_NAME "ide-pmac"
52
51#undef IDE_PMAC_DEBUG 53#undef IDE_PMAC_DEBUG
52 54
53#define DMA_WAIT_TIMEOUT 50 55#define DMA_WAIT_TIMEOUT 50
@@ -424,7 +426,9 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
424static void 426static void
425pmac_ide_selectproc(ide_drive_t *drive) 427pmac_ide_selectproc(ide_drive_t *drive)
426{ 428{
427 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 429 ide_hwif_t *hwif = drive->hwif;
430 pmac_ide_hwif_t *pmif =
431 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
428 432
429 if (pmif == NULL) 433 if (pmif == NULL)
430 return; 434 return;
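
The same substitution repeats through the rest of pmac.c: pmac_ide_hwif_t no longer hangs off hwif->hwif_data but is looked up from the driver data of the hwif's parent device, which the macio and PCI attach paths below now populate via dev_set_drvdata()/pci_set_drvdata(). Each converted method opens with the same fragment:

        ide_hwif_t *hwif = drive->hwif;
        pmac_ide_hwif_t *pmif =
                (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);

        if (pmif == NULL)       /* parent not (or no longer) bound */
                return;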
@@ -444,7 +448,9 @@ pmac_ide_selectproc(ide_drive_t *drive)
444static void 448static void
445pmac_ide_kauai_selectproc(ide_drive_t *drive) 449pmac_ide_kauai_selectproc(ide_drive_t *drive)
446{ 450{
447 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 451 ide_hwif_t *hwif = drive->hwif;
452 pmac_ide_hwif_t *pmif =
453 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
448 454
449 if (pmif == NULL) 455 if (pmif == NULL)
450 return; 456 return;
@@ -465,7 +471,9 @@ pmac_ide_kauai_selectproc(ide_drive_t *drive)
465static void 471static void
466pmac_ide_do_update_timings(ide_drive_t *drive) 472pmac_ide_do_update_timings(ide_drive_t *drive)
467{ 473{
468 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 474 ide_hwif_t *hwif = drive->hwif;
475 pmac_ide_hwif_t *pmif =
476 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
469 477
470 if (pmif == NULL) 478 if (pmif == NULL)
471 return; 479 return;
@@ -478,12 +486,26 @@ pmac_ide_do_update_timings(ide_drive_t *drive)
478 pmac_ide_selectproc(drive); 486 pmac_ide_selectproc(drive);
479} 487}
480 488
481static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port) 489static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
482{ 490{
483 u32 tmp; 491 writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
484 492 (void)readl((void __iomem *)(hwif->io_ports.data_addr
485 writeb(value, (void __iomem *) port); 493 + IDE_TIMING_CONFIG));
486 tmp = readl((void __iomem *)(hwif->io_ports.data_addr 494}
495
496static void pmac_set_irq(ide_hwif_t *hwif, int on)
497{
498 u8 ctl = ATA_DEVCTL_OBS;
499
500 if (on == 4) { /* hack for SRST */
501 ctl |= 4;
502 on &= ~4;
503 }
504
505 ctl |= on ? 0 : 2;
506
507 writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
508 (void)readl((void __iomem *)(hwif->io_ports.data_addr
487 + IDE_TIMING_CONFIG)); 509 + IDE_TIMING_CONFIG));
488} 510}
489 511
@@ -493,11 +515,13 @@ static void pmac_outbsync(ide_hwif_t *hwif, u8 value, unsigned long port)
493static void 515static void
494pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) 516pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
495{ 517{
518 ide_hwif_t *hwif = drive->hwif;
519 pmac_ide_hwif_t *pmif =
520 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
496 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); 521 struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
497 u32 *timings, t; 522 u32 *timings, t;
498 unsigned accessTicks, recTicks; 523 unsigned accessTicks, recTicks;
499 unsigned accessTime, recTime; 524 unsigned accessTime, recTime;
500 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
501 unsigned int cycle_time; 525 unsigned int cycle_time;
502 526
503 if (pmif == NULL) 527 if (pmif == NULL)
@@ -778,9 +802,11 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
778 802
779static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) 803static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
780{ 804{
805 ide_hwif_t *hwif = drive->hwif;
806 pmac_ide_hwif_t *pmif =
807 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
781 int unit = (drive->select.b.unit & 0x01); 808 int unit = (drive->select.b.unit & 0x01);
782 int ret = 0; 809 int ret = 0;
783 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
784 u32 *timings, *timings2, tl[2]; 810 u32 *timings, *timings2, tl[2];
785 811
786 timings = &pmif->timings[unit]; 812 timings = &pmif->timings[unit];
@@ -852,11 +878,8 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
852/* Suspend call back, should be called after the child devices 878/* Suspend call back, should be called after the child devices
853 * have actually been suspended 879 * have actually been suspended
854 */ 880 */
855static int 881static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
856pmac_ide_do_suspend(ide_hwif_t *hwif)
857{ 882{
858 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
859
860 /* We clear the timings */ 883 /* We clear the timings */
861 pmif->timings[0] = 0; 884 pmif->timings[0] = 0;
862 pmif->timings[1] = 0; 885 pmif->timings[1] = 0;
@@ -884,11 +907,8 @@ pmac_ide_do_suspend(ide_hwif_t *hwif)
884/* Resume call back, should be called before the child devices 907/* Resume call back, should be called before the child devices
885 * are resumed 908 * are resumed
886 */ 909 */
887static int 910static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
888pmac_ide_do_resume(ide_hwif_t *hwif)
889{ 911{
890 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
891
892 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */ 912 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
893 if (!pmif->mediabay) { 913 if (!pmif->mediabay) {
894 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1); 914 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
@@ -916,7 +936,8 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
916 936
917static u8 pmac_ide_cable_detect(ide_hwif_t *hwif) 937static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
918{ 938{
919 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)ide_get_hwifdata(hwif); 939 pmac_ide_hwif_t *pmif =
940 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
920 struct device_node *np = pmif->node; 941 struct device_node *np = pmif->node;
921 const char *cable = of_get_property(np, "cable-type", NULL); 942 const char *cable = of_get_property(np, "cable-type", NULL);
922 943
@@ -936,7 +957,40 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
936 return ATA_CBL_PATA40; 957 return ATA_CBL_PATA40;
937} 958}
938 959
960static void pmac_ide_init_dev(ide_drive_t *drive)
961{
962 ide_hwif_t *hwif = drive->hwif;
963 pmac_ide_hwif_t *pmif =
964 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
965
966 if (pmif->mediabay) {
967#ifdef CONFIG_PMAC_MEDIABAY
968 if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) {
969 drive->noprobe = 0;
970 return;
971 }
972#endif
973 drive->noprobe = 1;
974 }
975}
976
977static const struct ide_tp_ops pmac_tp_ops = {
978 .exec_command = pmac_exec_command,
979 .read_status = ide_read_status,
980 .read_altstatus = ide_read_altstatus,
981 .read_sff_dma_status = ide_read_sff_dma_status,
982
983 .set_irq = pmac_set_irq,
984
985 .tf_load = ide_tf_load,
986 .tf_read = ide_tf_read,
987
988 .input_data = ide_input_data,
989 .output_data = ide_output_data,
990};
991
939static const struct ide_port_ops pmac_ide_ata6_port_ops = { 992static const struct ide_port_ops pmac_ide_ata6_port_ops = {
993 .init_dev = pmac_ide_init_dev,
940 .set_pio_mode = pmac_ide_set_pio_mode, 994 .set_pio_mode = pmac_ide_set_pio_mode,
941 .set_dma_mode = pmac_ide_set_dma_mode, 995 .set_dma_mode = pmac_ide_set_dma_mode,
942 .selectproc = pmac_ide_kauai_selectproc, 996 .selectproc = pmac_ide_kauai_selectproc,
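
The media-bay handling moves out of the probe path and into the new per-drive init_dev hook, wired into every pmac ide_port_ops variant below; the hook simply decides whether a drive should be probed at all. A stripped-down sketch with hypothetical mydrv_* names, where drive_is_reachable() stands in for whatever presence test a driver needs (here, the media-bay check):

        static void mydrv_init_dev(ide_drive_t *drive)
        {
                if (!drive_is_reachable(drive))     /* hypothetical presence test */
                        drive->noprobe = 1;         /* skip this device entirely  */
        }

        static const struct ide_port_ops mydrv_port_ops = {
                .init_dev     = mydrv_init_dev,
                .set_pio_mode = mydrv_set_pio_mode,  /* hypothetical */
                .set_dma_mode = mydrv_set_dma_mode,  /* hypothetical */
        };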
@@ -944,6 +998,7 @@ static const struct ide_port_ops pmac_ide_ata6_port_ops = {
944}; 998};
945 999
946static const struct ide_port_ops pmac_ide_ata4_port_ops = { 1000static const struct ide_port_ops pmac_ide_ata4_port_ops = {
1001 .init_dev = pmac_ide_init_dev,
947 .set_pio_mode = pmac_ide_set_pio_mode, 1002 .set_pio_mode = pmac_ide_set_pio_mode,
948 .set_dma_mode = pmac_ide_set_dma_mode, 1003 .set_dma_mode = pmac_ide_set_dma_mode,
949 .selectproc = pmac_ide_selectproc, 1004 .selectproc = pmac_ide_selectproc,
@@ -951,6 +1006,7 @@ static const struct ide_port_ops pmac_ide_ata4_port_ops = {
951}; 1006};
952 1007
953static const struct ide_port_ops pmac_ide_port_ops = { 1008static const struct ide_port_ops pmac_ide_port_ops = {
1009 .init_dev = pmac_ide_init_dev,
954 .set_pio_mode = pmac_ide_set_pio_mode, 1010 .set_pio_mode = pmac_ide_set_pio_mode,
955 .set_dma_mode = pmac_ide_set_dma_mode, 1011 .set_dma_mode = pmac_ide_set_dma_mode,
956 .selectproc = pmac_ide_selectproc, 1012 .selectproc = pmac_ide_selectproc,
@@ -959,12 +1015,14 @@ static const struct ide_port_ops pmac_ide_port_ops = {
959static const struct ide_dma_ops pmac_dma_ops; 1015static const struct ide_dma_ops pmac_dma_ops;
960 1016
961static const struct ide_port_info pmac_port_info = { 1017static const struct ide_port_info pmac_port_info = {
1018 .name = DRV_NAME,
962 .init_dma = pmac_ide_init_dma, 1019 .init_dma = pmac_ide_init_dma,
963 .chipset = ide_pmac, 1020 .chipset = ide_pmac,
1021 .tp_ops = &pmac_tp_ops,
1022 .port_ops = &pmac_ide_port_ops,
964#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC 1023#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
965 .dma_ops = &pmac_dma_ops, 1024 .dma_ops = &pmac_dma_ops,
966#endif 1025#endif
967 .port_ops = &pmac_ide_port_ops,
968 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | 1026 .host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
969 IDE_HFLAG_POST_SET_MODE | 1027 IDE_HFLAG_POST_SET_MODE |
970 IDE_HFLAG_MMIO | 1028 IDE_HFLAG_MMIO |
@@ -977,13 +1035,15 @@ static const struct ide_port_info pmac_port_info = {
977 * Setup, register & probe an IDE channel driven by this driver, this is 1035 * Setup, register & probe an IDE channel driven by this driver, this is
978 * called by one of the 2 probe functions (macio or PCI). 1036 * called by one of the 2 probe functions (macio or PCI).
979 */ 1037 */
980static int __devinit 1038static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw)
981pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
982{ 1039{
983 struct device_node *np = pmif->node; 1040 struct device_node *np = pmif->node;
984 const int *bidp; 1041 const int *bidp;
985 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 1042 struct ide_host *host;
1043 ide_hwif_t *hwif;
1044 hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
986 struct ide_port_info d = pmac_port_info; 1045 struct ide_port_info d = pmac_port_info;
1046 int rc;
987 1047
988 pmif->broken_dma = pmif->broken_dma_warn = 0; 1048 pmif->broken_dma = pmif->broken_dma_warn = 0;
989 if (of_device_is_compatible(np, "shasta-ata")) { 1049 if (of_device_is_compatible(np, "shasta-ata")) {
@@ -1054,31 +1114,16 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
1054 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY)); 1114 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
1055 } 1115 }
1056 1116
1057 /* Setup MMIO ops */ 1117 printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
1058 default_hwif_mmiops(hwif); 1118 "bus ID %d%s, irq %d\n", model_name[pmif->kind],
1059 hwif->OUTBSYNC = pmac_outbsync; 1119 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
1120 pmif->mediabay ? " (mediabay)" : "", hw->irq);
1060 1121
1061 hwif->hwif_data = pmif; 1122 rc = ide_host_add(&d, hws, &host);
1062 ide_init_port_hw(hwif, hw); 1123 if (rc)
1124 return rc;
1063 1125
1064 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n", 1126 hwif = host->ports[0];
1065 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
1066 pmif->mediabay ? " (mediabay)" : "", hwif->irq);
1067
1068 if (pmif->mediabay) {
1069#ifdef CONFIG_PMAC_MEDIABAY
1070 if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
1071#else
1072 if (1) {
1073#endif
1074 hwif->drives[0].noprobe = 1;
1075 hwif->drives[1].noprobe = 1;
1076 }
1077 }
1078
1079 idx[0] = hwif->index;
1080
1081 ide_device_add(idx, &d);
1082 1127
1083 return 0; 1128 return 0;
1084} 1129}
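
With the hw_regs_t prepared, registration in pmac_ide_setup_device() collapses to a single ide_host_add() call; the hwif is only fetched back afterwards for the DMA setup. Condensed, the new tail of the function is:

        hw_regs_t *hws[] = { hw, NULL, NULL, NULL };
        struct ide_host *host;
        int rc;

        rc = ide_host_add(&d, hws, &host);      /* allocate + register in one step */
        if (rc)
                return rc;

        hwif = host->ports[0];                  /* the single pmac port */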
@@ -1101,7 +1146,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1101{ 1146{
1102 void __iomem *base; 1147 void __iomem *base;
1103 unsigned long regbase; 1148 unsigned long regbase;
1104 ide_hwif_t *hwif;
1105 pmac_ide_hwif_t *pmif; 1149 pmac_ide_hwif_t *pmif;
1106 int irq, rc; 1150 int irq, rc;
1107 hw_regs_t hw; 1151 hw_regs_t hw;
@@ -1110,14 +1154,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1110 if (pmif == NULL) 1154 if (pmif == NULL)
1111 return -ENOMEM; 1155 return -ENOMEM;
1112 1156
1113 hwif = ide_find_port();
1114 if (hwif == NULL) {
1115 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
1116 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
1117 rc = -ENODEV;
1118 goto out_free_pmif;
1119 }
1120
1121 if (macio_resource_count(mdev) == 0) { 1157 if (macio_resource_count(mdev) == 0) {
1122 printk(KERN_WARNING "ide-pmac: no address for %s\n", 1158 printk(KERN_WARNING "ide-pmac: no address for %s\n",
1123 mdev->ofdev.node->full_name); 1159 mdev->ofdev.node->full_name);
@@ -1164,7 +1200,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1164 } else 1200 } else
1165 pmif->dma_regs = NULL; 1201 pmif->dma_regs = NULL;
1166#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ 1202#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1167 dev_set_drvdata(&mdev->ofdev.dev, hwif); 1203 dev_set_drvdata(&mdev->ofdev.dev, pmif);
1168 1204
1169 memset(&hw, 0, sizeof(hw)); 1205 memset(&hw, 0, sizeof(hw));
1170 pmac_ide_init_ports(&hw, pmif->regbase); 1206 pmac_ide_init_ports(&hw, pmif->regbase);
@@ -1172,7 +1208,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1172 hw.dev = &mdev->bus->pdev->dev; 1208 hw.dev = &mdev->bus->pdev->dev;
1173 hw.parent = &mdev->ofdev.dev; 1209 hw.parent = &mdev->ofdev.dev;
1174 1210
1175 rc = pmac_ide_setup_device(pmif, hwif, &hw); 1211 rc = pmac_ide_setup_device(pmif, &hw);
1176 if (rc != 0) { 1212 if (rc != 0) {
 1177		/* The interface is released to the common IDE layer */ 1212		/* The interface is released to the common IDE layer */
1178 dev_set_drvdata(&mdev->ofdev.dev, NULL); 1214 dev_set_drvdata(&mdev->ofdev.dev, NULL);
@@ -1195,12 +1231,13 @@ out_free_pmif:
1195static int 1231static int
1196pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg) 1232pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1197{ 1233{
1198 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); 1234 pmac_ide_hwif_t *pmif =
1199 int rc = 0; 1235 (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1236 int rc = 0;
1200 1237
1201 if (mesg.event != mdev->ofdev.dev.power.power_state.event 1238 if (mesg.event != mdev->ofdev.dev.power.power_state.event
1202 && (mesg.event & PM_EVENT_SLEEP)) { 1239 && (mesg.event & PM_EVENT_SLEEP)) {
1203 rc = pmac_ide_do_suspend(hwif); 1240 rc = pmac_ide_do_suspend(pmif);
1204 if (rc == 0) 1241 if (rc == 0)
1205 mdev->ofdev.dev.power.power_state = mesg; 1242 mdev->ofdev.dev.power.power_state = mesg;
1206 } 1243 }
@@ -1211,11 +1248,12 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1211static int 1248static int
1212pmac_ide_macio_resume(struct macio_dev *mdev) 1249pmac_ide_macio_resume(struct macio_dev *mdev)
1213{ 1250{
1214 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev); 1251 pmac_ide_hwif_t *pmif =
1215 int rc = 0; 1252 (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1216 1253 int rc = 0;
1254
1217 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) { 1255 if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
1218 rc = pmac_ide_do_resume(hwif); 1256 rc = pmac_ide_do_resume(pmif);
1219 if (rc == 0) 1257 if (rc == 0)
1220 mdev->ofdev.dev.power.power_state = PMSG_ON; 1258 mdev->ofdev.dev.power.power_state = PMSG_ON;
1221 } 1259 }
@@ -1229,7 +1267,6 @@ pmac_ide_macio_resume(struct macio_dev *mdev)
1229static int __devinit 1267static int __devinit
1230pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) 1268pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1231{ 1269{
1232 ide_hwif_t *hwif;
1233 struct device_node *np; 1270 struct device_node *np;
1234 pmac_ide_hwif_t *pmif; 1271 pmac_ide_hwif_t *pmif;
1235 void __iomem *base; 1272 void __iomem *base;
@@ -1247,14 +1284,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1247 if (pmif == NULL) 1284 if (pmif == NULL)
1248 return -ENOMEM; 1285 return -ENOMEM;
1249 1286
1250 hwif = ide_find_port();
1251 if (hwif == NULL) {
1252 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
1253 printk(KERN_ERR " %s\n", np->full_name);
1254 rc = -ENODEV;
1255 goto out_free_pmif;
1256 }
1257
1258 if (pci_enable_device(pdev)) { 1287 if (pci_enable_device(pdev)) {
1259 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for " 1288 printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
1260 "%s\n", np->full_name); 1289 "%s\n", np->full_name);
@@ -1284,14 +1313,14 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1284 pmif->kauai_fcr = base; 1313 pmif->kauai_fcr = base;
1285 pmif->irq = pdev->irq; 1314 pmif->irq = pdev->irq;
1286 1315
1287 pci_set_drvdata(pdev, hwif); 1316 pci_set_drvdata(pdev, pmif);
1288 1317
1289 memset(&hw, 0, sizeof(hw)); 1318 memset(&hw, 0, sizeof(hw));
1290 pmac_ide_init_ports(&hw, pmif->regbase); 1319 pmac_ide_init_ports(&hw, pmif->regbase);
1291 hw.irq = pdev->irq; 1320 hw.irq = pdev->irq;
1292 hw.dev = &pdev->dev; 1321 hw.dev = &pdev->dev;
1293 1322
1294 rc = pmac_ide_setup_device(pmif, hwif, &hw); 1323 rc = pmac_ide_setup_device(pmif, &hw);
1295 if (rc != 0) { 1324 if (rc != 0) {
1296 /* The inteface is released to the common IDE layer */ 1325 /* The inteface is released to the common IDE layer */
1297 pci_set_drvdata(pdev, NULL); 1326 pci_set_drvdata(pdev, NULL);
@@ -1310,12 +1339,12 @@ out_free_pmif:
1310static int 1339static int
1311pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) 1340pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1312{ 1341{
1313 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); 1342 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
1314 int rc = 0; 1343 int rc = 0;
1315 1344
1316 if (mesg.event != pdev->dev.power.power_state.event 1345 if (mesg.event != pdev->dev.power.power_state.event
1317 && (mesg.event & PM_EVENT_SLEEP)) { 1346 && (mesg.event & PM_EVENT_SLEEP)) {
1318 rc = pmac_ide_do_suspend(hwif); 1347 rc = pmac_ide_do_suspend(pmif);
1319 if (rc == 0) 1348 if (rc == 0)
1320 pdev->dev.power.power_state = mesg; 1349 pdev->dev.power.power_state = mesg;
1321 } 1350 }
@@ -1326,11 +1355,11 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1326static int 1355static int
1327pmac_ide_pci_resume(struct pci_dev *pdev) 1356pmac_ide_pci_resume(struct pci_dev *pdev)
1328{ 1357{
1329 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev); 1358 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)pci_get_drvdata(pdev);
1330 int rc = 0; 1359 int rc = 0;
1331 1360
1332 if (pdev->dev.power.power_state.event != PM_EVENT_ON) { 1361 if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
1333 rc = pmac_ide_do_resume(hwif); 1362 rc = pmac_ide_do_resume(pmif);
1334 if (rc == 0) 1363 if (rc == 0)
1335 pdev->dev.power.power_state = PMSG_ON; 1364 pdev->dev.power.power_state = PMSG_ON;
1336 } 1365 }
@@ -1421,10 +1450,11 @@ out:
1421static int 1450static int
1422pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq) 1451pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1423{ 1452{
1453 ide_hwif_t *hwif = drive->hwif;
1454 pmac_ide_hwif_t *pmif =
1455 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1424 struct dbdma_cmd *table; 1456 struct dbdma_cmd *table;
1425 int i, count = 0; 1457 int i, count = 0;
1426 ide_hwif_t *hwif = HWIF(drive);
1427 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1428 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs; 1458 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
1429 struct scatterlist *sg; 1459 struct scatterlist *sg;
1430 int wr = (rq_data_dir(rq) == WRITE); 1460 int wr = (rq_data_dir(rq) == WRITE);
@@ -1520,7 +1550,8 @@ static int
1520pmac_ide_dma_setup(ide_drive_t *drive) 1550pmac_ide_dma_setup(ide_drive_t *drive)
1521{ 1551{
1522 ide_hwif_t *hwif = HWIF(drive); 1552 ide_hwif_t *hwif = HWIF(drive);
1523 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data; 1553 pmac_ide_hwif_t *pmif =
1554 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1524 struct request *rq = HWGROUP(drive)->rq; 1555 struct request *rq = HWGROUP(drive)->rq;
1525 u8 unit = (drive->select.b.unit & 0x01); 1556 u8 unit = (drive->select.b.unit & 0x01);
1526 u8 ata4; 1557 u8 ata4;
@@ -1560,7 +1591,9 @@ pmac_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
1560static void 1591static void
1561pmac_ide_dma_start(ide_drive_t *drive) 1592pmac_ide_dma_start(ide_drive_t *drive)
1562{ 1593{
1563 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1594 ide_hwif_t *hwif = drive->hwif;
1595 pmac_ide_hwif_t *pmif =
1596 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1564 volatile struct dbdma_regs __iomem *dma; 1597 volatile struct dbdma_regs __iomem *dma;
1565 1598
1566 dma = pmif->dma_regs; 1599 dma = pmif->dma_regs;
@@ -1576,7 +1609,9 @@ pmac_ide_dma_start(ide_drive_t *drive)
1576static int 1609static int
1577pmac_ide_dma_end (ide_drive_t *drive) 1610pmac_ide_dma_end (ide_drive_t *drive)
1578{ 1611{
1579 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1612 ide_hwif_t *hwif = drive->hwif;
1613 pmac_ide_hwif_t *pmif =
1614 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1580 volatile struct dbdma_regs __iomem *dma; 1615 volatile struct dbdma_regs __iomem *dma;
1581 u32 dstat; 1616 u32 dstat;
1582 1617
@@ -1604,7 +1639,9 @@ pmac_ide_dma_end (ide_drive_t *drive)
1604static int 1639static int
1605pmac_ide_dma_test_irq (ide_drive_t *drive) 1640pmac_ide_dma_test_irq (ide_drive_t *drive)
1606{ 1641{
1607 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1642 ide_hwif_t *hwif = drive->hwif;
1643 pmac_ide_hwif_t *pmif =
1644 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1608 volatile struct dbdma_regs __iomem *dma; 1645 volatile struct dbdma_regs __iomem *dma;
1609 unsigned long status, timeout; 1646 unsigned long status, timeout;
1610 1647
@@ -1664,7 +1701,9 @@ static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
1664static void 1701static void
1665pmac_ide_dma_lost_irq (ide_drive_t *drive) 1702pmac_ide_dma_lost_irq (ide_drive_t *drive)
1666{ 1703{
1667 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1704 ide_hwif_t *hwif = drive->hwif;
1705 pmac_ide_hwif_t *pmif =
1706 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1668 volatile struct dbdma_regs __iomem *dma; 1707 volatile struct dbdma_regs __iomem *dma;
1669 unsigned long status; 1708 unsigned long status;
1670 1709
@@ -1694,7 +1733,8 @@ static const struct ide_dma_ops pmac_dma_ops = {
1694static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif, 1733static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
1695 const struct ide_port_info *d) 1734 const struct ide_port_info *d)
1696{ 1735{
1697 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data; 1736 pmac_ide_hwif_t *pmif =
1737 (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
1698 struct pci_dev *dev = to_pci_dev(hwif->dev); 1738 struct pci_dev *dev = to_pci_dev(hwif->dev);
1699 1739
1700 /* We won't need pci_dev if we switch to generic consistent 1740 /* We won't need pci_dev if we switch to generic consistent
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 65fc08b6b6d0..b15cad58dc81 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -73,15 +73,12 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
73 * @d: IDE port info 73 * @d: IDE port info
74 * 74 *
75 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space. 75 * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
76 * Where a device has a partner that is already in DMA mode we check
77 * and enforce IDE simplex rules.
78 */ 76 */
79 77
80unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d) 78unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
81{ 79{
82 struct pci_dev *dev = to_pci_dev(hwif->dev); 80 struct pci_dev *dev = to_pci_dev(hwif->dev);
83 unsigned long dma_base = 0; 81 unsigned long dma_base = 0;
84 u8 dma_stat = 0;
85 82
86 if (hwif->host_flags & IDE_HFLAG_MMIO) 83 if (hwif->host_flags & IDE_HFLAG_MMIO)
87 return hwif->dma_base; 84 return hwif->dma_base;
@@ -102,11 +99,19 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
102 if (hwif->channel) 99 if (hwif->channel)
103 dma_base += 8; 100 dma_base += 8;
104 101
105 if (d->host_flags & IDE_HFLAG_CS5520) 102 return dma_base;
103}
104EXPORT_SYMBOL_GPL(ide_pci_dma_base);
105
106int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
107{
108 u8 dma_stat;
109
110 if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
106 goto out; 111 goto out;
107 112
108 if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) { 113 if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
109 ide_pci_clear_simplex(dma_base, d->name); 114 ide_pci_clear_simplex(hwif->dma_base, d->name);
110 goto out; 115 goto out;
111 } 116 }
112 117
@@ -120,15 +125,15 @@ unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
120 * we tune the drive then try to grab DMA ownership if we want to be 125 * we tune the drive then try to grab DMA ownership if we want to be
 121	 * the DMA end. This has to become dynamic to handle hot-plug.	 126	 * the DMA end. This has to become dynamic to handle hot-plug.
122 */ 127 */
123 dma_stat = hwif->INB(dma_base + 2); 128 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
124 if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) { 129 if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
125 printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name); 130 printk(KERN_INFO "%s: simplex device: DMA disabled\n", d->name);
126 dma_base = 0; 131 return -1;
127 } 132 }
128out: 133out:
129 return dma_base; 134 return 0;
130} 135}
131EXPORT_SYMBOL_GPL(ide_pci_dma_base); 136EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
132 137
133/* 138/*
134 * Set up BM-DMA capability (PnP BIOS should have done this) 139 * Set up BM-DMA capability (PnP BIOS should have done this)
@@ -284,33 +289,31 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *
284} 289}
285 290
286/** 291/**
287 * ide_hwif_configure - configure an IDE interface 292 * ide_hw_configure - configure a hw_regs_t instance
288 * @dev: PCI device holding interface 293 * @dev: PCI device holding interface
289 * @d: IDE port info 294 * @d: IDE port info
290 * @port: port number 295 * @port: port number
291 * @irq: PCI IRQ 296 * @irq: PCI IRQ
297 * @hw: hw_regs_t instance corresponding to this port
292 * 298 *
293 * Perform the initial set up for the hardware interface structure. This 299 * Perform the initial set up for the hardware interface structure. This
294 * is done per interface port rather than per PCI device. There may be 300 * is done per interface port rather than per PCI device. There may be
295 * more than one port per device. 301 * more than one port per device.
296 * 302 *
297 * Returns the new hardware interface structure, or NULL on a failure 303 * Returns zero on success or an error code.
298 */ 304 */
299 305
300static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, 306static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
301 const struct ide_port_info *d, 307 unsigned int port, int irq, hw_regs_t *hw)
302 unsigned int port, int irq)
303{ 308{
304 unsigned long ctl = 0, base = 0; 309 unsigned long ctl = 0, base = 0;
305 ide_hwif_t *hwif;
306 struct hw_regs_s hw;
307 310
308 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) { 311 if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
309 if (ide_pci_check_iomem(dev, d, 2 * port) || 312 if (ide_pci_check_iomem(dev, d, 2 * port) ||
310 ide_pci_check_iomem(dev, d, 2 * port + 1)) { 313 ide_pci_check_iomem(dev, d, 2 * port + 1)) {
311 printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported " 314 printk(KERN_ERR "%s: I/O baseregs (BIOS) are reported "
312 "as MEM for port %d!\n", d->name, port); 315 "as MEM for port %d!\n", d->name, port);
313 return NULL; 316 return -EINVAL;
314 } 317 }
315 318
316 ctl = pci_resource_start(dev, 2*port+1); 319 ctl = pci_resource_start(dev, 2*port+1);
@@ -324,22 +327,16 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
324 if (!base || !ctl) { 327 if (!base || !ctl) {
325 printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n", 328 printk(KERN_ERR "%s: bad PCI BARs for port %d, skipping\n",
326 d->name, port); 329 d->name, port);
327 return NULL; 330 return -EINVAL;
328 } 331 }
329 332
330 hwif = ide_find_port_slot(d); 333 memset(hw, 0, sizeof(*hw));
331 if (hwif == NULL) 334 hw->irq = irq;
332 return NULL; 335 hw->dev = &dev->dev;
333 336 hw->chipset = d->chipset ? d->chipset : ide_pci;
334 memset(&hw, 0, sizeof(hw)); 337 ide_std_init_ports(hw, base, ctl | 2);
335 hw.irq = irq;
336 hw.dev = &dev->dev;
337 hw.chipset = d->chipset ? d->chipset : ide_pci;
338 ide_std_init_ports(&hw, base, ctl | 2);
339
340 ide_init_port_hw(hwif, &hw);
341 338
342 return hwif; 339 return 0;
343} 340}
344 341
345#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 342#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
@@ -362,7 +359,15 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
362 (dev->class & 0x80))) { 359 (dev->class & 0x80))) {
363 unsigned long base = ide_pci_dma_base(hwif, d); 360 unsigned long base = ide_pci_dma_base(hwif, d);
364 361
365 if (base == 0 || ide_pci_set_master(dev, d->name) < 0) 362 if (base == 0)
363 return -1;
364
365 hwif->dma_base = base;
366
367 if (ide_pci_check_simplex(hwif, d) < 0)
368 return -1;
369
370 if (ide_pci_set_master(dev, d->name) < 0)
366 return -1; 371 return -1;
367 372
368 if (hwif->host_flags & IDE_HFLAG_MMIO) 373 if (hwif->host_flags & IDE_HFLAG_MMIO)
@@ -376,7 +381,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
376 if (ide_allocate_dma_engine(hwif)) 381 if (ide_allocate_dma_engine(hwif))
377 return -1; 382 return -1;
378 383
379 ide_setup_dma(hwif, base); 384 hwif->dma_ops = &sff_dma_ops;
380 } 385 }
381 386
382 return 0; 387 return 0;
@@ -429,7 +434,8 @@ out:
429 * @dev: PCI device 434 * @dev: PCI device
430 * @d: IDE port info 435 * @d: IDE port info
431 * @pciirq: IRQ line 436 * @pciirq: IRQ line
432 * @idx: ATA index table to update 437 * @hw: hw_regs_t instances corresponding to this PCI IDE device
438 * @hws: hw_regs_t pointers table to update
433 * 439 *
434 * Scan the interfaces attached to this device and do any 440 * Scan the interfaces attached to this device and do any
435 * necessary per port setup. Attach the devices and ask the 441 * necessary per port setup. Attach the devices and ask the
@@ -440,10 +446,10 @@ out:
440 * where the chipset setup is not the default PCI IDE one. 446 * where the chipset setup is not the default PCI IDE one.
441 */ 447 */
442 448
443void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int pciirq, u8 *idx) 449void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
450 int pciirq, hw_regs_t *hw, hw_regs_t **hws)
444{ 451{
445 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; 452 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
446 ide_hwif_t *hwif;
447 u8 tmp; 453 u8 tmp;
448 454
449 /* 455 /*
@@ -459,11 +465,10 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
459 continue; /* port not enabled */ 465 continue; /* port not enabled */
460 } 466 }
461 467
462 hwif = ide_hwif_configure(dev, d, port, pciirq); 468 if (ide_hw_configure(dev, d, port, pciirq, hw + port))
463 if (hwif == NULL)
464 continue; 469 continue;
465 470
466 *(idx + port) = hwif->index; 471 *(hws + port) = hw + port;
467 } 472 }
468} 473}
469EXPORT_SYMBOL_GPL(ide_pci_setup_ports); 474EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
@@ -480,7 +485,7 @@ EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
480 */ 485 */
481static int do_ide_setup_pci_device(struct pci_dev *dev, 486static int do_ide_setup_pci_device(struct pci_dev *dev,
482 const struct ide_port_info *d, 487 const struct ide_port_info *d,
483 u8 *idx, u8 noisy) 488 u8 noisy)
484{ 489{
485 int tried_config = 0; 490 int tried_config = 0;
486 int pciirq, ret; 491 int pciirq, ret;
@@ -529,22 +534,24 @@ static int do_ide_setup_pci_device(struct pci_dev *dev,
529 d->name, pciirq); 534 d->name, pciirq);
530 } 535 }
531 536
532 /* FIXME: silent failure can happen */ 537 ret = pciirq;
533
534 ide_pci_setup_ports(dev, d, pciirq, idx);
535out: 538out:
536 return ret; 539 return ret;
537} 540}
538 541
539int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d) 542int ide_setup_pci_device(struct pci_dev *dev, const struct ide_port_info *d)
540{ 543{
541 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 544 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
542 int ret; 545 int ret;
543 546
544 ret = do_ide_setup_pci_device(dev, d, &idx[0], 1); 547 ret = do_ide_setup_pci_device(dev, d, 1);
548
549 if (ret >= 0) {
550 /* FIXME: silent failure can happen */
551 ide_pci_setup_ports(dev, d, ret, &hw[0], &hws[0]);
545 552
546 if (ret >= 0) 553 ret = ide_host_add(d, hws, NULL);
547 ide_device_add(idx, d); 554 }
548 555
549 return ret; 556 return ret;
550} 557}
@@ -555,19 +562,23 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
555{ 562{
556 struct pci_dev *pdev[] = { dev1, dev2 }; 563 struct pci_dev *pdev[] = { dev1, dev2 };
557 int ret, i; 564 int ret, i;
558 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff }; 565 hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
559 566
560 for (i = 0; i < 2; i++) { 567 for (i = 0; i < 2; i++) {
561 ret = do_ide_setup_pci_device(pdev[i], d, &idx[i*2], !i); 568 ret = do_ide_setup_pci_device(pdev[i], d, !i);
569
562 /* 570 /*
563 * FIXME: Mom, mom, they stole me the helper function to undo 571 * FIXME: Mom, mom, they stole me the helper function to undo
564 * do_ide_setup_pci_device() on the first device! 572 * do_ide_setup_pci_device() on the first device!
565 */ 573 */
566 if (ret < 0) 574 if (ret < 0)
567 goto out; 575 goto out;
576
577 /* FIXME: silent failure can happen */
578 ide_pci_setup_ports(pdev[i], d, ret, &hw[i*2], &hws[i*2]);
568 } 579 }
569 580
570 ide_device_add(idx, d); 581 ret = ide_host_add(d, hws, NULL);
571out: 582out:
572 return ret; 583 return ret;
573} 584}
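
Put together, the PCI glue now works in three steps: do_ide_setup_pci_device() returns the IRQ (or a negative error), ide_pci_setup_ports() fills a caller-provided hw_regs_t array plus the matching pointer table, and ide_host_add() registers whatever ports were wired up, replacing the old u8 idx[4] bookkeeping. The single-device path, compressed:

        hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
        int ret = do_ide_setup_pci_device(dev, d, 1);   /* >= 0: the PCI IRQ */

        if (ret >= 0) {
                ide_pci_setup_ports(dev, d, ret, &hw[0], &hws[0]);
                ret = ide_host_add(d, hws, NULL);       /* registers each non-NULL hws[] entry */
        }
        return ret;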
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
index 07ca35c98f96..1cf6487b65ba 100644
--- a/drivers/ieee1394/iso.c
+++ b/drivers/ieee1394/iso.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/mm.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15 16
16#include "hosts.h" 17#include "hosts.h"
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d930c481..7a64aa9b51b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
646 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 646 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
647 647
648 spin_lock_irqsave(&pool->last_cpu_lock, flags); 648 spin_lock_irqsave(&pool->last_cpu_lock, flags);
649 cpu = next_cpu(pool->last_cpu, cpu_online_map); 649 cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
650 if (cpu == NR_CPUS) 650 if (cpu >= nr_cpu_ids)
651 cpu = first_cpu(cpu_online_map); 651 cpu = first_cpu(cpu_online_map);
652 pool->last_cpu = cpu; 652 pool->last_cpu = cpu;
653 spin_unlock_irqrestore(&pool->last_cpu_lock, flags); 653 spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
index 94e444b4ee15..b12b7ee4b6aa 100644
--- a/drivers/input/keyboard/tosakbd.c
+++ b/drivers/input/keyboard/tosakbd.c
@@ -215,8 +215,6 @@ static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
215 unsigned long flags; 215 unsigned long flags;
216 216
217 spin_lock_irqsave(&tosakbd->lock, flags); 217 spin_lock_irqsave(&tosakbd->lock, flags);
218 PGSR1 = (PGSR1 & ~TOSA_GPIO_LOW_STROBE_BIT);
219 PGSR2 = (PGSR2 & ~TOSA_GPIO_HIGH_STROBE_BIT);
220 tosakbd->suspended = 1; 218 tosakbd->suspended = 1;
221 spin_unlock_irqrestore(&tosakbd->lock, flags); 219 spin_unlock_irqrestore(&tosakbd->lock, flags);
222 220
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 091deb9d1c47..c2bd97d29273 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -575,7 +575,8 @@ int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
575 else 575 else
576 skb = iraw_encode(skb, HW_HDR_LEN, 0); 576 skb = iraw_encode(skb, HW_HDR_LEN, 0);
577 if (!skb) { 577 if (!skb) {
578 err("unable to allocate memory for encoding!\n"); 578 dev_err(bcs->cs->dev,
579 "unable to allocate memory for encoding!\n");
579 return -ENOMEM; 580 return -ENOMEM;
580 } 581 }
581 582
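
The gigaset hunks here and in the files that follow all make the same substitution: the module-local err()/warn()/notice() printk wrappers give way to the device-aware dev_err()/dev_warn()/dev_notice() helpers keyed off cs->dev (or bcs->cs->dev when only the B-channel state is in scope), which is also why gigaset.h can drop its notice() macro. Side by side, with a made-up message:

        err("encoding failed: %d", rc);                       /* old: module-name prefix */
        dev_err(bcs->cs->dev, "encoding failed: %d\n", rc);   /* new: device-name prefix */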
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 5255b5e20e13..3f11910c7ccd 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -1050,10 +1050,9 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1050 } 1050 }
1051 1051
1052 /* retrieve block of data to send */ 1052 /* retrieve block of data to send */
1053 ifd->offset = gigaset_isowbuf_getbytes(ubc->isooutbuf, 1053 rc = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
1054 ifd->length); 1054 if (rc < 0) {
1055 if (ifd->offset < 0) { 1055 if (rc == -EBUSY) {
1056 if (ifd->offset == -EBUSY) {
1057 gig_dbg(DEBUG_ISO, 1056 gig_dbg(DEBUG_ISO,
1058 "%s: buffer busy at frame %d", 1057 "%s: buffer busy at frame %d",
1059 __func__, nframe); 1058 __func__, nframe);
@@ -1062,11 +1061,12 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1062 } else { 1061 } else {
1063 dev_err(ucx->bcs->cs->dev, 1062 dev_err(ucx->bcs->cs->dev,
1064 "%s: buffer error %d at frame %d\n", 1063 "%s: buffer error %d at frame %d\n",
1065 __func__, ifd->offset, nframe); 1064 __func__, rc, nframe);
1066 return ifd->offset; 1065 return rc;
1067 } 1066 }
1068 break; 1067 break;
1069 } 1068 }
1069 ifd->offset = rc;
1070 ucx->limit = ubc->isooutbuf->nextread; 1070 ucx->limit = ubc->isooutbuf->nextread;
1071 ifd->status = 0; 1071 ifd->status = 0;
1072 ifd->actual_length = 0; 1072 ifd->actual_length = 0;
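
The submit_iso_write_urb() change stages the gigaset_isowbuf_getbytes() result in a plain signed int before anything touches the URB frame descriptor, so the error test is made on a signed value and only a genuine byte count ever lands in ifd->offset. Reduced to its shape, with the -EBUSY handling condensed:

        int rc = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
        if (rc < 0)
                return rc;              /* errors handled while still signed */
        ifd->offset = rc;               /* store only a valid byte count */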
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 827c32c16795..9d3ce7718e58 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -287,7 +287,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
287 tail = cs->ev_tail; 287 tail = cs->ev_tail;
288 next = (tail + 1) % MAX_EVENTS; 288 next = (tail + 1) % MAX_EVENTS;
289 if (unlikely(next == cs->ev_head)) 289 if (unlikely(next == cs->ev_head))
290 err("event queue full"); 290 dev_err(cs->dev, "event queue full\n");
291 else { 291 else {
292 event = cs->events + tail; 292 event = cs->events + tail;
293 event->type = type; 293 event->type = type;
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index f365993161fc..003752954993 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -106,7 +106,6 @@ enum debuglevel {
106#undef err 106#undef err
107#undef info 107#undef info
108#undef warn 108#undef warn
109#undef notice
110 109
111#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \ 110#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
112 format "\n" , ## arg) 111 format "\n" , ## arg)
@@ -114,8 +113,6 @@ enum debuglevel {
114 format "\n" , ## arg) 113 format "\n" , ## arg)
115#define warn(format, arg...) printk(KERN_WARNING KBUILD_MODNAME ": " \ 114#define warn(format, arg...) printk(KERN_WARNING KBUILD_MODNAME ": " \
116 format "\n" , ## arg) 115 format "\n" , ## arg)
117#define notice(format, arg...) printk(KERN_NOTICE KBUILD_MODNAME ": " \
118 format "\n" , ## arg)
119 116
120#ifdef CONFIG_GIGASET_DEBUG 117#ifdef CONFIG_GIGASET_DEBUG
121 118
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 9e089f06a942..3c127a8cbaf2 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -46,7 +46,8 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
46 return -ENODEV; 46 return -ENODEV;
47 } 47 }
48 if (channel < 0 || channel >= cs->channels) { 48 if (channel < 0 || channel >= cs->channels) {
49 err("%s: invalid channel ID (%d)", __func__, channel); 49 dev_err(cs->dev, "%s: invalid channel ID (%d)\n",
50 __func__, channel);
50 return -ENODEV; 51 return -ENODEV;
51 } 52 }
52 bcs = &cs->bcs[channel]; 53 bcs = &cs->bcs[channel];
@@ -58,11 +59,13 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
58 59
59 if (!len) { 60 if (!len) {
60 if (ack) 61 if (ack)
61 notice("%s: not ACKing empty packet", __func__); 62 dev_notice(cs->dev, "%s: not ACKing empty packet\n",
63 __func__);
62 return 0; 64 return 0;
63 } 65 }
64 if (len > MAX_BUF_SIZE) { 66 if (len > MAX_BUF_SIZE) {
65 err("%s: packet too large (%d bytes)", __func__, len); 67 dev_err(cs->dev, "%s: packet too large (%d bytes)\n",
68 __func__, len);
66 return -EINVAL; 69 return -EINVAL;
67 } 70 }
68 71
@@ -116,8 +119,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
116 gigaset_debugdrivers(); 119 gigaset_debugdrivers();
117 120
118 if (!cs) { 121 if (!cs) {
119 warn("LL tried to access unknown device with nr. %d", 122 err("%s: invalid driver ID (%d)", __func__, cntrl->driver);
120 cntrl->driver);
121 return -ENODEV; 123 return -ENODEV;
122 } 124 }
123 125
@@ -126,7 +128,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
126 gig_dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver: %d, arg: %ld)", 128 gig_dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver: %d, arg: %ld)",
127 cntrl->driver, cntrl->arg); 129 cntrl->driver, cntrl->arg);
128 130
129 warn("ISDN_CMD_IOCTL is not supported."); 131 dev_warn(cs->dev, "ISDN_CMD_IOCTL not supported\n");
130 return -EINVAL; 132 return -EINVAL;
131 133
132 case ISDN_CMD_DIAL: 134 case ISDN_CMD_DIAL:
@@ -138,22 +140,23 @@ static int command_from_LL(isdn_ctrl *cntrl)
138 cntrl->parm.setup.si1, cntrl->parm.setup.si2); 140 cntrl->parm.setup.si1, cntrl->parm.setup.si2);
139 141
140 if (cntrl->arg >= cs->channels) { 142 if (cntrl->arg >= cs->channels) {
141 err("ISDN_CMD_DIAL: invalid channel (%d)", 143 dev_err(cs->dev,
142 (int) cntrl->arg); 144 "ISDN_CMD_DIAL: invalid channel (%d)\n",
145 (int) cntrl->arg);
143 return -EINVAL; 146 return -EINVAL;
144 } 147 }
145 148
146 bcs = cs->bcs + cntrl->arg; 149 bcs = cs->bcs + cntrl->arg;
147 150
148 if (!gigaset_get_channel(bcs)) { 151 if (!gigaset_get_channel(bcs)) {
149 err("ISDN_CMD_DIAL: channel not free"); 152 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
150 return -EBUSY; 153 return -EBUSY;
151 } 154 }
152 155
153 sp = kmalloc(sizeof *sp, GFP_ATOMIC); 156 sp = kmalloc(sizeof *sp, GFP_ATOMIC);
154 if (!sp) { 157 if (!sp) {
155 gigaset_free_channel(bcs); 158 gigaset_free_channel(bcs);
156 err("ISDN_CMD_DIAL: out of memory"); 159 dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n");
157 return -ENOMEM; 160 return -ENOMEM;
158 } 161 }
159 *sp = cntrl->parm.setup; 162 *sp = cntrl->parm.setup;
@@ -173,8 +176,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
173 gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD"); 176 gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD");
174 177
175 if (cntrl->arg >= cs->channels) { 178 if (cntrl->arg >= cs->channels) {
176 err("ISDN_CMD_ACCEPTD: invalid channel (%d)", 179 dev_err(cs->dev,
177 (int) cntrl->arg); 180 "ISDN_CMD_ACCEPTD: invalid channel (%d)\n",
181 (int) cntrl->arg);
178 return -EINVAL; 182 return -EINVAL;
179 } 183 }
180 184
@@ -196,8 +200,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
196 (int) cntrl->arg); 200 (int) cntrl->arg);
197 201
198 if (cntrl->arg >= cs->channels) { 202 if (cntrl->arg >= cs->channels) {
199 err("ISDN_CMD_HANGUP: invalid channel (%u)", 203 dev_err(cs->dev,
200 (unsigned) cntrl->arg); 204 "ISDN_CMD_HANGUP: invalid channel (%d)\n",
205 (int) cntrl->arg);
201 return -EINVAL; 206 return -EINVAL;
202 } 207 }
203 208
@@ -224,8 +229,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
224 cntrl->arg & 0xff, (cntrl->arg >> 8)); 229 cntrl->arg & 0xff, (cntrl->arg >> 8));
225 230
226 if ((cntrl->arg & 0xff) >= cs->channels) { 231 if ((cntrl->arg & 0xff) >= cs->channels) {
227 err("ISDN_CMD_SETL2: invalid channel (%u)", 232 dev_err(cs->dev,
228 (unsigned) cntrl->arg & 0xff); 233 "ISDN_CMD_SETL2: invalid channel (%d)\n",
234 (int) cntrl->arg & 0xff);
229 return -EINVAL; 235 return -EINVAL;
230 } 236 }
231 237
@@ -244,14 +250,16 @@ static int command_from_LL(isdn_ctrl *cntrl)
244 cntrl->arg & 0xff, (cntrl->arg >> 8)); 250 cntrl->arg & 0xff, (cntrl->arg >> 8));
245 251
246 if ((cntrl->arg & 0xff) >= cs->channels) { 252 if ((cntrl->arg & 0xff) >= cs->channels) {
247 err("ISDN_CMD_SETL3: invalid channel (%u)", 253 dev_err(cs->dev,
248 (unsigned) cntrl->arg & 0xff); 254 "ISDN_CMD_SETL3: invalid channel (%d)\n",
255 (int) cntrl->arg & 0xff);
249 return -EINVAL; 256 return -EINVAL;
250 } 257 }
251 258
252 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) { 259 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
253 err("ISDN_CMD_SETL3: invalid protocol %lu", 260 dev_err(cs->dev,
254 cntrl->arg >> 8); 261 "ISDN_CMD_SETL3: invalid protocol %lu\n",
262 cntrl->arg >> 8);
255 return -EINVAL; 263 return -EINVAL;
256 } 264 }
257 265
@@ -262,8 +270,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
262 case ISDN_CMD_ALERT: 270 case ISDN_CMD_ALERT:
263 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME 271 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME
264 if (cntrl->arg >= cs->channels) { 272 if (cntrl->arg >= cs->channels) {
265 err("ISDN_CMD_ALERT: invalid channel (%d)", 273 dev_err(cs->dev,
266 (int) cntrl->arg); 274 "ISDN_CMD_ALERT: invalid channel (%d)\n",
275 (int) cntrl->arg);
267 return -EINVAL; 276 return -EINVAL;
268 } 277 }
269 //bcs = cs->bcs + cntrl->arg; 278 //bcs = cs->bcs + cntrl->arg;
@@ -295,7 +304,8 @@ static int command_from_LL(isdn_ctrl *cntrl)
295 gig_dbg(DEBUG_ANY, "ISDN_CMD_GETSIL"); 304 gig_dbg(DEBUG_ANY, "ISDN_CMD_GETSIL");
296 break; 305 break;
297 default: 306 default:
298 err("unknown command %d from LL", cntrl->command); 307 dev_err(cs->dev, "unknown command %d from LL\n",
308 cntrl->command);
299 return -EINVAL; 309 return -EINVAL;
300 } 310 }
301 311
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index af195b07c191..521951a898ec 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -197,7 +197,7 @@ static void if_close(struct tty_struct *tty, struct file *filp)
197 mutex_lock(&cs->mutex); 197 mutex_lock(&cs->mutex);
198 198
199 if (!cs->open_count) 199 if (!cs->open_count)
200 warn("%s: device not opened", __func__); 200 dev_warn(cs->dev, "%s: device not opened\n", __func__);
201 else { 201 else {
202 if (!--cs->open_count) { 202 if (!--cs->open_count) {
203 spin_lock_irqsave(&cs->lock, flags); 203 spin_lock_irqsave(&cs->lock, flags);
@@ -232,7 +232,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
232 return -ERESTARTSYS; // FIXME -EINTR? 232 return -ERESTARTSYS; // FIXME -EINTR?
233 233
234 if (!cs->open_count) 234 if (!cs->open_count)
235 warn("%s: device not opened", __func__); 235 dev_warn(cs->dev, "%s: device not opened\n", __func__);
236 else { 236 else {
237 retval = 0; 237 retval = 0;
238 switch (cmd) { 238 switch (cmd) {
@@ -364,9 +364,9 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
364 return -ERESTARTSYS; // FIXME -EINTR? 364 return -ERESTARTSYS; // FIXME -EINTR?
365 365
366 if (!cs->open_count) 366 if (!cs->open_count)
367 warn("%s: device not opened", __func__); 367 dev_warn(cs->dev, "%s: device not opened\n", __func__);
368 else if (cs->mstate != MS_LOCKED) { 368 else if (cs->mstate != MS_LOCKED) {
369 warn("can't write to unlocked device"); 369 dev_warn(cs->dev, "can't write to unlocked device\n");
370 retval = -EBUSY; 370 retval = -EBUSY;
371 } else if (!cs->connected) { 371 } else if (!cs->connected) {
372 gig_dbg(DEBUG_ANY, "can't write to unplugged device"); 372 gig_dbg(DEBUG_ANY, "can't write to unplugged device");
@@ -398,9 +398,9 @@ static int if_write_room(struct tty_struct *tty)
398 return -ERESTARTSYS; // FIXME -EINTR? 398 return -ERESTARTSYS; // FIXME -EINTR?
399 399
400 if (!cs->open_count) 400 if (!cs->open_count)
401 warn("%s: device not opened", __func__); 401 dev_warn(cs->dev, "%s: device not opened\n", __func__);
402 else if (cs->mstate != MS_LOCKED) { 402 else if (cs->mstate != MS_LOCKED) {
403 warn("can't write to unlocked device"); 403 dev_warn(cs->dev, "can't write to unlocked device\n");
404 retval = -EBUSY; 404 retval = -EBUSY;
405 } else if (!cs->connected) { 405 } else if (!cs->connected) {
406 gig_dbg(DEBUG_ANY, "can't write to unplugged device"); 406 gig_dbg(DEBUG_ANY, "can't write to unplugged device");
@@ -430,9 +430,9 @@ static int if_chars_in_buffer(struct tty_struct *tty)
430 return -ERESTARTSYS; // FIXME -EINTR? 430 return -ERESTARTSYS; // FIXME -EINTR?
431 431
432 if (!cs->open_count) 432 if (!cs->open_count)
433 warn("%s: device not opened", __func__); 433 dev_warn(cs->dev, "%s: device not opened\n", __func__);
434 else if (cs->mstate != MS_LOCKED) { 434 else if (cs->mstate != MS_LOCKED) {
435 warn("can't write to unlocked device"); 435 dev_warn(cs->dev, "can't write to unlocked device\n");
436 retval = -EBUSY; 436 retval = -EBUSY;
437 } else if (!cs->connected) { 437 } else if (!cs->connected) {
438 gig_dbg(DEBUG_ANY, "can't write to unplugged device"); 438 gig_dbg(DEBUG_ANY, "can't write to unplugged device");
@@ -460,7 +460,7 @@ static void if_throttle(struct tty_struct *tty)
460 mutex_lock(&cs->mutex); 460 mutex_lock(&cs->mutex);
461 461
462 if (!cs->open_count) 462 if (!cs->open_count)
463 warn("%s: device not opened", __func__); 463 dev_warn(cs->dev, "%s: device not opened\n", __func__);
464 else { 464 else {
465 //FIXME 465 //FIXME
466 } 466 }
@@ -483,7 +483,7 @@ static void if_unthrottle(struct tty_struct *tty)
483 mutex_lock(&cs->mutex); 483 mutex_lock(&cs->mutex);
484 484
485 if (!cs->open_count) 485 if (!cs->open_count)
486 warn("%s: device not opened", __func__); 486 dev_warn(cs->dev, "%s: device not opened\n", __func__);
487 else { 487 else {
488 //FIXME 488 //FIXME
489 } 489 }
@@ -510,7 +510,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
510 mutex_lock(&cs->mutex); 510 mutex_lock(&cs->mutex);
511 511
512 if (!cs->open_count) { 512 if (!cs->open_count) {
513 warn("%s: device not opened", __func__); 513 dev_warn(cs->dev, "%s: device not opened\n", __func__);
514 goto out; 514 goto out;
515 } 515 }
516 516
@@ -623,7 +623,8 @@ void gigaset_if_init(struct cardstate *cs)
623 if (!IS_ERR(cs->tty_dev)) 623 if (!IS_ERR(cs->tty_dev))
624 dev_set_drvdata(cs->tty_dev, cs); 624 dev_set_drvdata(cs->tty_dev, cs);
625 else { 625 else {
626 warn("could not register device to the tty subsystem"); 626 dev_warn(cs->dev,
627 "could not register device to the tty subsystem\n");
627 cs->tty_dev = NULL; 628 cs->tty_dev = NULL;
628 } 629 }
629 mutex_unlock(&cs->mutex); 630 mutex_unlock(&cs->mutex);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 77d20ab0cd4d..4661830a49db 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -498,8 +498,9 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
498 498
499 if (status) { 499 if (status) {
500 ucs->busy = 0; 500 ucs->busy = 0;
501 err("could not submit urb (error %d)\n", 501 dev_err(cs->dev,
502 -status); 502 "could not submit urb (error %d)\n",
503 -status);
503 cb->len = 0; /* skip urb => remove cb+wakeup 504 cb->len = 0; /* skip urb => remove cb+wakeup
504 in next loop cycle */ 505 in next loop cycle */
505 } 506 }
@@ -670,7 +671,7 @@ static int write_modem(struct cardstate *cs)
670 spin_unlock_irqrestore(&cs->lock, flags); 671 spin_unlock_irqrestore(&cs->lock, flags);
671 672
672 if (ret) { 673 if (ret) {
673 err("could not submit urb (error %d)\n", -ret); 674 dev_err(cs->dev, "could not submit urb (error %d)\n", -ret);
674 ucs->busy = 0; 675 ucs->busy = 0;
675 } 676 }
676 677
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 86a369bc57d6..9556262dda5a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -103,6 +103,14 @@ config LEDS_HP6XX
103 This option enables led support for the handheld 103 This option enables led support for the handheld
104 HP Jornada 620/660/680/690. 104 HP Jornada 620/660/680/690.
105 105
106config LEDS_PCA9532
107 tristate "LED driver for PCA9532 dimmer"
108 depends on LEDS_CLASS && I2C && INPUT && EXPERIMENTAL
109 help
 110	  This option enables support for the NXP PCA9532
 111	  LED controller. It is generally only useful
 112	  as a platform driver.
113
106config LEDS_GPIO 114config LEDS_GPIO
107 tristate "LED Support for GPIO connected LEDs" 115 tristate "LED Support for GPIO connected LEDs"
108 depends on LEDS_CLASS && GENERIC_GPIO 116 depends on LEDS_CLASS && GENERIC_GPIO
@@ -147,6 +155,14 @@ config LEDS_CLEVO_MAIL
147 To compile this driver as a module, choose M here: the 155 To compile this driver as a module, choose M here: the
148 module will be called leds-clevo-mail. 156 module will be called leds-clevo-mail.
149 157
158config LEDS_PCA955X
159 tristate "LED Support for PCA955x I2C chips"
160 depends on LEDS_CLASS && I2C
161 help
162 This option enables support for LEDs connected to PCA955x
163 LED driver chips accessed via the I2C bus. Supported
164 devices include PCA9550, PCA9551, PCA9552, and PCA9553.
165
150comment "LED Triggers" 166comment "LED Triggers"
151 167
152config LEDS_TRIGGERS 168config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 973d626f5f4a..ff7982b44565 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -16,11 +16,13 @@ obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
16obj-$(CONFIG_LEDS_H1940) += leds-h1940.o 16obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
17obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o 17obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
18obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o 18obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
19obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
19obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 20obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
20obj-$(CONFIG_LEDS_CM_X270) += leds-cm-x270.o 21obj-$(CONFIG_LEDS_CM_X270) += leds-cm-x270.o
21obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o 22obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
22obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o 23obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
23obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 24obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
25obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
24 26
25# LED Triggers 27# LED Triggers
26obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o 28obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 0f242b3f09b6..f910eaffe3a6 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -111,16 +111,17 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
111 flags); 111 flags);
112 if (led_cdev->trigger->deactivate) 112 if (led_cdev->trigger->deactivate)
113 led_cdev->trigger->deactivate(led_cdev); 113 led_cdev->trigger->deactivate(led_cdev);
114 led_cdev->trigger = NULL;
114 led_set_brightness(led_cdev, LED_OFF); 115 led_set_brightness(led_cdev, LED_OFF);
115 } 116 }
116 if (trigger) { 117 if (trigger) {
117 write_lock_irqsave(&trigger->leddev_list_lock, flags); 118 write_lock_irqsave(&trigger->leddev_list_lock, flags);
118 list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs); 119 list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
119 write_unlock_irqrestore(&trigger->leddev_list_lock, flags); 120 write_unlock_irqrestore(&trigger->leddev_list_lock, flags);
121 led_cdev->trigger = trigger;
120 if (trigger->activate) 122 if (trigger->activate)
121 trigger->activate(led_cdev); 123 trigger->activate(led_cdev);
122 } 124 }
123 led_cdev->trigger = trigger;
124} 125}
125EXPORT_SYMBOL_GPL(led_trigger_set); 126EXPORT_SYMBOL_GPL(led_trigger_set);
126 127
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 28db6c1444ed..52297c3ab246 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -37,7 +37,7 @@ static int __init pwmled_probe(struct platform_device *pdev)
37{ 37{
38 const struct gpio_led_platform_data *pdata; 38 const struct gpio_led_platform_data *pdata;
39 struct pwmled *leds; 39 struct pwmled *leds;
40 unsigned i; 40 int i;
41 int status; 41 int status;
42 42
43 pdata = pdev->dev.platform_data; 43 pdata = pdev->dev.platform_data;
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index bcec42230389..73c705021686 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -23,7 +23,8 @@
23/* 23/*
24 * Green led. 24 * Green led.
25 */ 25 */
26void h1940_greenled_set(struct led_classdev *led_dev, enum led_brightness value) 26static void h1940_greenled_set(struct led_classdev *led_dev,
27 enum led_brightness value)
27{ 28{
28 switch (value) { 29 switch (value) {
29 case LED_HALF: 30 case LED_HALF:
@@ -52,7 +53,8 @@ static struct led_classdev h1940_greenled = {
52/* 53/*
53 * Red led. 54 * Red led.
54 */ 55 */
55void h1940_redled_set(struct led_classdev *led_dev, enum led_brightness value) 56static void h1940_redled_set(struct led_classdev *led_dev,
57 enum led_brightness value)
56{ 58{
57 switch (value) { 59 switch (value) {
58 case LED_HALF: 60 case LED_HALF:
@@ -82,7 +84,8 @@ static struct led_classdev h1940_redled = {
82 * Blue led. 84 * Blue led.
83 * (it can only be blue flashing led) 85 * (it can only be blue flashing led)
84 */ 86 */
85void h1940_blueled_set(struct led_classdev *led_dev, enum led_brightness value) 87static void h1940_blueled_set(struct led_classdev *led_dev,
88 enum led_brightness value)
86{ 89{
87 if (value) { 90 if (value) {
88 /* flashing Blue */ 91 /* flashing Blue */
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
new file mode 100644
index 000000000000..4064d4f6b33b
--- /dev/null
+++ b/drivers/leds/leds-pca9532.c
@@ -0,0 +1,337 @@
1/*
2 * pca9532.c - 16-bit Led dimmer
3 *
4 * Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/i2c.h>
16#include <linux/leds.h>
17#include <linux/input.h>
18#include <linux/mutex.h>
19#include <linux/leds-pca9532.h>
20
21static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
22I2C_CLIENT_INSMOD_1(pca9532);
23
24#define PCA9532_REG_PSC(i) (0x2+(i)*2)
25#define PCA9532_REG_PWM(i) (0x3+(i)*2)
26#define PCA9532_REG_LS0 0x6
27#define LED_REG(led) ((led>>2)+PCA9532_REG_LS0)
28#define LED_NUM(led) (led & 0x3)
29
30#define ldev_to_led(c) container_of(c, struct pca9532_led, ldev)
31
32struct pca9532_data {
33 struct i2c_client *client;
34 struct pca9532_led leds[16];
35 struct mutex update_lock;
36 struct input_dev *idev;
37 u8 pwm[2];
38 u8 psc[2];
39};
40
41static int pca9532_probe(struct i2c_client *client,
42 const struct i2c_device_id *id);
43static int pca9532_remove(struct i2c_client *client);
44
45static const struct i2c_device_id pca9532_id[] = {
46 { "pca9532", 0 },
47 { }
48};
49
50MODULE_DEVICE_TABLE(i2c, pca9532_id);
51
52static struct i2c_driver pca9532_driver = {
53 .driver = {
54 .name = "pca9532",
55 },
56 .probe = pca9532_probe,
57 .remove = pca9532_remove,
58 .id_table = pca9532_id,
59};
60
 61/* We have two PWM/blinker engines, but 16 possible LEDs to drive.
 62 * Additionally, the clever Thecus people use one PWM to drive the
 63 * beeper. So, as a compromise, we set one PWM to the average of the
 64 * values requested by all LEDs that are not simply ON/OFF.
 65 */
66static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink,
67 enum led_brightness value)
68{
69 int a = 0, b = 0, i = 0;
70 struct pca9532_data *data = i2c_get_clientdata(client);
71 for (i = 0; i < 16; i++) {
72 if (data->leds[i].type == PCA9532_TYPE_LED &&
73 data->leds[i].state == PCA9532_PWM0+pwm) {
74 a++;
75 b += data->leds[i].ldev.brightness;
76 }
77 }
78 if (a == 0) {
79 dev_err(&client->dev,
80 "fear of division by zero %d/%d, wanted %d\n",
81 b, a, value);
82 return -EINVAL;
83 }
84 b = b/a;
85 if (b > 0xFF)
86 return -EINVAL;
87 mutex_lock(&data->update_lock);
88 data->pwm[pwm] = b;
89 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
90 data->pwm[pwm]);
91 data->psc[pwm] = blink;
92 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
93 data->psc[pwm]);
94 mutex_unlock(&data->update_lock);
95 return 0;
96}
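The comment above describes a compromise: every LED routed to a PWM engine contributes its requested brightness, and the engine is programmed with the average. As an illustration only, here is a minimal standalone C sketch of that arithmetic; the three brightness requests are made-up values, and the real function walks data->leds[] under the update lock instead.

#include <stdio.h>

/* Minimal sketch of the averaging performed by pca9532_setpwm() above.
 * The requested[] brightness values are hypothetical. */
int main(void)
{
	int requested[] = { 40, 200, 90 };	/* LEDs currently routed to PWM0 */
	int count = sizeof(requested) / sizeof(requested[0]);
	int sum = 0, i;

	for (i = 0; i < count; i++)
		sum += requested[i];

	if (count == 0)		/* the driver returns -EINVAL in this case */
		return 1;

	printf("PWM0 register value: %d\n", sum / count);	/* prints 110 */
	return 0;
}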
97
98/* Set LED routing */
99static void pca9532_setled(struct pca9532_led *led)
100{
101 struct i2c_client *client = led->client;
102 struct pca9532_data *data = i2c_get_clientdata(client);
103 char reg;
104
105 mutex_lock(&data->update_lock);
106 reg = i2c_smbus_read_byte_data(client, LED_REG(led->id));
107 /* zero led bits */
108 reg = reg & ~(0x3<<LED_NUM(led->id)*2);
109 /* set the new value */
110 reg = reg | (led->state << LED_NUM(led->id)*2);
111 i2c_smbus_write_byte_data(client, LED_REG(led->id), reg);
112 mutex_unlock(&data->update_lock);
113}
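pca9532_setled() relies on each LS register packing four LEDs at two bits apiece, which is what the LED_REG()/LED_NUM() macros near the top of the file encode. A small standalone sketch of that read-modify-write, using a hypothetical LED number and register value:

#include <stdio.h>

#define PCA9532_REG_LS0	0x6
#define LED_REG(led)	(((led) >> 2) + PCA9532_REG_LS0)
#define LED_NUM(led)	((led) & 0x3)

int main(void)
{
	int id = 10;			/* hypothetical LED number, 0..15 */
	unsigned char reg = 0xff;	/* pretend current LS register contents */
	unsigned char state = 0x2;	/* e.g. route the LED to PWM0 */

	reg &= ~(0x3 << (LED_NUM(id) * 2));	/* clear the LED's 2 bits */
	reg |= state << (LED_NUM(id) * 2);	/* set the new routing */

	printf("LED %d lives in LS register 0x%x, new value 0x%02x\n",
	       id, LED_REG(id), reg);		/* register 0x8, value 0xef */
	return 0;
}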
114
115static void pca9532_set_brightness(struct led_classdev *led_cdev,
116 enum led_brightness value)
117{
118 int err = 0;
119 struct pca9532_led *led = ldev_to_led(led_cdev);
120
121 if (value == LED_OFF)
122 led->state = PCA9532_OFF;
123 else if (value == LED_FULL)
124 led->state = PCA9532_ON;
125 else {
126 led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
127 err = pca9532_setpwm(led->client, 0, 0, value);
128 if (err)
129 return; /* XXX: led api doesn't allow error code? */
130 }
131 pca9532_setled(led);
132}
133
134static int pca9532_set_blink(struct led_classdev *led_cdev,
135 unsigned long *delay_on, unsigned long *delay_off)
136{
137 struct pca9532_led *led = ldev_to_led(led_cdev);
138 struct i2c_client *client = led->client;
139 int psc;
140
141 if (*delay_on == 0 && *delay_off == 0) {
 142		/* the LED core asks us to choose a blink rate */
143 *delay_on = 1000;
144 *delay_off = 1000;
145 }
146 if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6)
147 return -EINVAL;
148
149 /* Thecus specific: only use PSC/PWM 0 */
150 psc = (*delay_on * 152-1)/1000;
151 return pca9532_setpwm(client, 0, psc, led_cdev->brightness);
152}
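The factor of 152 in the prescaler arithmetic above corresponds to the PCA9532 blink period of (PSC + 1) / 152 seconds. A standalone sketch of the conversion, using the default 1000 ms delay as an example:

#include <stdio.h>

/* Sketch of the PSC arithmetic used above, assuming the usual PCA9532
 * relation blink period = (PSC + 1) / 152 seconds. Values are examples. */
int main(void)
{
	unsigned long delay_on = 1000;			/* ms, as passed by the LED core */
	int psc = (delay_on * 152 - 1) / 1000;		/* integer math as in the driver */

	printf("delay_on=%lums -> PSC=%d -> period=%.3fs\n",
	       delay_on, psc, (psc + 1) / 152.0);	/* PSC 151 -> 1.000 s */
	return 0;
}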
153
154int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code,
155 int value)
156{
157 struct pca9532_data *data = input_get_drvdata(dev);
158
 159	if (type != EV_SND || (code != SND_BELL && code != SND_TONE))
160 return -1;
161
162 /* XXX: allow different kind of beeps with psc/pwm modifications */
163 if (value > 1 && value < 32767)
164 data->pwm[1] = 127;
165 else
166 data->pwm[1] = 0;
167
168 dev_info(&dev->dev, "setting beep to %d \n", data->pwm[1]);
169 mutex_lock(&data->update_lock);
170 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
171 data->pwm[1]);
172 mutex_unlock(&data->update_lock);
173
174 return 0;
175}
176
177static int pca9532_configure(struct i2c_client *client,
178 struct pca9532_data *data, struct pca9532_platform_data *pdata)
179{
180 int i, err = 0;
181
182 for (i = 0; i < 2; i++) {
183 data->pwm[i] = pdata->pwm[i];
184 data->psc[i] = pdata->psc[i];
185 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(i),
186 data->pwm[i]);
187 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(i),
188 data->psc[i]);
189 }
190
191 for (i = 0; i < 16; i++) {
192 struct pca9532_led *led = &data->leds[i];
193 struct pca9532_led *pled = &pdata->leds[i];
194 led->client = client;
195 led->id = i;
196 led->type = pled->type;
197 switch (led->type) {
198 case PCA9532_TYPE_NONE:
199 break;
200 case PCA9532_TYPE_LED:
201 led->state = pled->state;
202 led->name = pled->name;
203 led->ldev.name = led->name;
204 led->ldev.brightness = LED_OFF;
205 led->ldev.brightness_set = pca9532_set_brightness;
206 led->ldev.blink_set = pca9532_set_blink;
207 if (led_classdev_register(&client->dev,
208 &led->ldev) < 0) {
209 dev_err(&client->dev,
210 "couldn't register LED %s\n",
211 led->name);
212 goto exit;
213 }
214 pca9532_setled(led);
215 break;
216 case PCA9532_TYPE_N2100_BEEP:
217 BUG_ON(data->idev);
218 led->state = PCA9532_PWM1;
219 pca9532_setled(led);
220 data->idev = input_allocate_device();
221 if (data->idev == NULL) {
222 err = -ENOMEM;
223 goto exit;
224 }
225 data->idev->name = pled->name;
226 data->idev->phys = "i2c/pca9532";
227 data->idev->id.bustype = BUS_HOST;
228 data->idev->id.vendor = 0x001f;
229 data->idev->id.product = 0x0001;
230 data->idev->id.version = 0x0100;
231 data->idev->evbit[0] = BIT_MASK(EV_SND);
232 data->idev->sndbit[0] = BIT_MASK(SND_BELL) |
233 BIT_MASK(SND_TONE);
234 data->idev->event = pca9532_event;
235 input_set_drvdata(data->idev, data);
236 err = input_register_device(data->idev);
237 if (err) {
238 input_free_device(data->idev);
239 data->idev = NULL;
240 goto exit;
241 }
242 break;
243 }
244 }
245 return 0;
246
247exit:
248 if (i > 0)
249 for (i = i - 1; i >= 0; i--)
250 switch (data->leds[i].type) {
251 case PCA9532_TYPE_NONE:
252 break;
253 case PCA9532_TYPE_LED:
254 led_classdev_unregister(&data->leds[i].ldev);
255 break;
256 case PCA9532_TYPE_N2100_BEEP:
257 if (data->idev != NULL) {
258 input_unregister_device(data->idev);
259 input_free_device(data->idev);
260 data->idev = NULL;
261 }
262 break;
263 }
264
265 return err;
266
267}
268
269static int pca9532_probe(struct i2c_client *client,
270 const struct i2c_device_id *id)
271{
272 struct pca9532_data *data = i2c_get_clientdata(client);
273 struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data;
274
275 if (!i2c_check_functionality(client->adapter,
276 I2C_FUNC_SMBUS_BYTE_DATA))
277 return -EIO;
278
279 data = kzalloc(sizeof(struct pca9532_data), GFP_KERNEL);
280 if (!data)
281 return -ENOMEM;
282
283 dev_info(&client->dev, "setting platform data\n");
284 i2c_set_clientdata(client, data);
285 data->client = client;
286 mutex_init(&data->update_lock);
287
288 if (pca9532_pdata == NULL)
289 return -EIO;
290
291 pca9532_configure(client, data, pca9532_pdata);
292 return 0;
293
294}
295
296static int pca9532_remove(struct i2c_client *client)
297{
298 struct pca9532_data *data = i2c_get_clientdata(client);
299 int i;
300 for (i = 0; i < 16; i++)
301 switch (data->leds[i].type) {
302 case PCA9532_TYPE_NONE:
303 break;
304 case PCA9532_TYPE_LED:
305 led_classdev_unregister(&data->leds[i].ldev);
306 break;
307 case PCA9532_TYPE_N2100_BEEP:
308 if (data->idev != NULL) {
309 input_unregister_device(data->idev);
310 input_free_device(data->idev);
311 data->idev = NULL;
312 }
313 break;
314 }
315
316 kfree(data);
317 i2c_set_clientdata(client, NULL);
318 return 0;
319}
320
321static int __init pca9532_init(void)
322{
323 return i2c_add_driver(&pca9532_driver);
324}
325
326static void __exit pca9532_exit(void)
327{
328 i2c_del_driver(&pca9532_driver);
329}
330
331MODULE_AUTHOR("Riku Voipio <riku.voipio@movial.fi>");
332MODULE_LICENSE("GPL");
333MODULE_DESCRIPTION("PCA 9532 LED dimmer");
334
335module_init(pca9532_init);
336module_exit(pca9532_exit);
337
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
new file mode 100644
index 000000000000..146c06972863
--- /dev/null
+++ b/drivers/leds/leds-pca955x.c
@@ -0,0 +1,384 @@
1/*
2 * Copyright 2007-2008 Extreme Engineering Solutions, Inc.
3 *
4 * Author: Nate Case <ncase@xes-inc.com>
5 *
6 * This file is subject to the terms and conditions of version 2 of
7 * the GNU General Public License. See the file COPYING in the main
8 * directory of this archive for more details.
9 *
10 * LED driver for various PCA955x I2C LED drivers
11 *
12 * Supported devices:
13 *
14 * Device Description 7-bit slave address
15 * ------ ----------- -------------------
16 * PCA9550 2-bit driver 0x60 .. 0x61
17 * PCA9551 8-bit driver 0x60 .. 0x67
18 * PCA9552 16-bit driver 0x60 .. 0x67
19 * PCA9553/01 4-bit driver 0x62
20 * PCA9553/02 4-bit driver 0x63
21 *
22 * Philips PCA955x LED driver chips follow a register map as shown below:
23 *
24 * Control Register Description
25 * ---------------- -----------
26 * 0x0 Input register 0
27 * ..
28 * NUM_INPUT_REGS - 1 Last Input register X
29 *
30 * NUM_INPUT_REGS Frequency prescaler 0
31 * NUM_INPUT_REGS + 1 PWM register 0
32 * NUM_INPUT_REGS + 2 Frequency prescaler 1
33 * NUM_INPUT_REGS + 3 PWM register 1
34 *
35 * NUM_INPUT_REGS + 4 LED selector 0
36 * NUM_INPUT_REGS + 4
37 * + NUM_LED_REGS - 1 Last LED selector
38 *
39 * where NUM_INPUT_REGS and NUM_LED_REGS vary depending on how many
40 * bits the chip supports.
41 */
42
43#include <linux/module.h>
44#include <linux/delay.h>
45#include <linux/string.h>
46#include <linux/ctype.h>
47#include <linux/leds.h>
48#include <linux/err.h>
49#include <linux/i2c.h>
50#include <linux/workqueue.h>
51
52/* LED select registers determine the source that drives LED outputs */
53#define PCA955X_LS_LED_ON 0x0 /* Output LOW */
54#define PCA955X_LS_LED_OFF 0x1 /* Output HI-Z */
55#define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */
56#define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */
57
58enum pca955x_type {
59 pca9550,
60 pca9551,
61 pca9552,
62 pca9553,
63};
64
65struct pca955x_chipdef {
66 int bits;
67 u8 slv_addr; /* 7-bit slave address mask */
68 int slv_addr_shift; /* Number of bits to ignore */
69};
70
71static struct pca955x_chipdef pca955x_chipdefs[] = {
72 [pca9550] = {
73 .bits = 2,
74 .slv_addr = /* 110000x */ 0x60,
75 .slv_addr_shift = 1,
76 },
77 [pca9551] = {
78 .bits = 8,
79 .slv_addr = /* 1100xxx */ 0x60,
80 .slv_addr_shift = 3,
81 },
82 [pca9552] = {
83 .bits = 16,
84 .slv_addr = /* 1100xxx */ 0x60,
85 .slv_addr_shift = 3,
86 },
87 [pca9553] = {
88 .bits = 4,
89 .slv_addr = /* 110001x */ 0x62,
90 .slv_addr_shift = 1,
91 },
92};
93
94static const struct i2c_device_id pca955x_id[] = {
95 { "pca9550", pca9550 },
96 { "pca9551", pca9551 },
97 { "pca9552", pca9552 },
98 { "pca9553", pca9553 },
99 { }
100};
101MODULE_DEVICE_TABLE(i2c, pca955x_id);
102
103struct pca955x_led {
104 struct pca955x_chipdef *chipdef;
105 struct i2c_client *client;
106 struct work_struct work;
107 spinlock_t lock;
108 enum led_brightness brightness;
109 struct led_classdev led_cdev;
110 int led_num; /* 0 .. 15 potentially */
111 char name[32];
112};
113
114/* 8 bits per input register */
115static inline int pca95xx_num_input_regs(int bits)
116{
117 return (bits + 7) / 8;
118}
119
120/* 4 bits per LED selector register */
121static inline int pca95xx_num_led_regs(int bits)
122{
123 return (bits + 3) / 4;
124}
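With these helpers, the register map quoted in the header comment becomes concrete numbers. A standalone sketch computing the control-register offsets for a 16-bit chip such as the PCA9552 (illustrative only; the write helpers below use the same offsets):

#include <stdio.h>

static int num_input_regs(int bits) { return (bits + 7) / 8; }
static int num_led_regs(int bits) { return (bits + 3) / 4; }

int main(void)
{
	int bits = 16;				/* e.g. PCA9552 */
	int in = num_input_regs(bits);		/* 2 input registers */

	printf("PSC0=0x%x PWM0=0x%x PSC1=0x%x PWM1=0x%x LS0=0x%x..0x%x\n",
	       in, in + 1, in + 2, in + 3,
	       in + 4, in + 4 + num_led_regs(bits) - 1);
	/* PSC0=0x2 PWM0=0x3 PSC1=0x4 PWM1=0x5 LS0=0x6..0x9 */
	return 0;
}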
125
126/*
127 * Return an LED selector register value based on an existing one, with
128 * the appropriate 2-bit state value set for the given LED number (0-3).
129 */
130static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
131{
132 return (oldval & (~(0x3 << (led_num << 1)))) |
133 ((state & 0x3) << (led_num << 1));
134}
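A quick worked call of the selector math above: starting from 0x55 (four LEDs all set to PCA955X_LS_LED_OFF, the same value the probe routine later writes to turn everything off) and rerouting one LED to BLINK0. Standalone sketch with hypothetical values:

#include <stdio.h>

#define PCA955X_LS_LED_OFF	0x1
#define PCA955X_LS_BLINK0	0x2

static unsigned char ledsel(unsigned char oldval, int led_num, int state)
{
	return (oldval & ~(0x3 << (led_num << 1))) |
	       ((state & 0x3) << (led_num << 1));
}

int main(void)
{
	unsigned char ls = 0x55;	/* four LEDs all OFF (0b01_01_01_01) */

	/* route LED 2 of this bank to PWM0 blinking, leave the rest alone */
	ls = ledsel(ls, 2, PCA955X_LS_BLINK0);
	printf("new LS value: 0x%02x\n", ls);	/* prints 0x65 */
	return 0;
}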
135
136/*
137 * Write to frequency prescaler register, used to program the
138 * period of the PWM output. period = (PSCx + 1) / 38
139 */
140static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
141{
142 struct pca955x_led *pca955x = i2c_get_clientdata(client);
143
144 i2c_smbus_write_byte_data(client,
145 pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
146 val);
147}
148
149/*
150 * Write to PWM register, which determines the duty cycle of the
151 * output. LED is OFF when the count is less than the value of this
152 * register, and ON when it is greater. If PWMx == 0, LED is always OFF.
153 *
154 * Duty cycle is (256 - PWMx) / 256
155 */
156static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
157{
158 struct pca955x_led *pca955x = i2c_get_clientdata(client);
159
160 i2c_smbus_write_byte_data(client,
161 pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
162 val);
163}
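The two formulas quoted in the comments above can be checked numerically. The probe routine later programs PWM0 with 255 - LED_HALF (LED_HALF being 127 in the LED class API), giving the 50% duty cycle it advertises, and PSC = 0 for the shortest period. A standalone sketch of both calculations:

#include <stdio.h>

int main(void)
{
	int led_half = 127;		/* LED_HALF in the leds API */
	int pwm = 255 - led_half;	/* value the probe writes to PWM0 */
	int psc = 0;			/* fastest prescaler, as in the probe */

	printf("duty cycle = (256 - %d) / 256 = %.2f\n", pwm, (256 - pwm) / 256.0);
	printf("period     = (%d + 1) / 38   = %.3f s\n", psc, (psc + 1) / 38.0);
	return 0;	/* 0.50 duty, 0.026 s period */
}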
164
165/*
166 * Write to LED selector register, which determines the source that
167 * drives the LED output.
168 */
169static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
170{
171 struct pca955x_led *pca955x = i2c_get_clientdata(client);
172
173 i2c_smbus_write_byte_data(client,
174 pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
175 val);
176}
177
178/*
179 * Read the LED selector register, which determines the source that
180 * drives the LED output.
181 */
182static u8 pca955x_read_ls(struct i2c_client *client, int n)
183{
184 struct pca955x_led *pca955x = i2c_get_clientdata(client);
185
186 return (u8) i2c_smbus_read_byte_data(client,
187 pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
188}
189
190static void pca955x_led_work(struct work_struct *work)
191{
192 struct pca955x_led *pca955x;
193 u8 ls;
194 int chip_ls; /* which LSx to use (0-3 potentially) */
195 int ls_led; /* which set of bits within LSx to use (0-3) */
196
197 pca955x = container_of(work, struct pca955x_led, work);
198 chip_ls = pca955x->led_num / 4;
199 ls_led = pca955x->led_num % 4;
200
201 ls = pca955x_read_ls(pca955x->client, chip_ls);
202
203 switch (pca955x->brightness) {
204 case LED_FULL:
205 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
206 break;
207 case LED_OFF:
208 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_OFF);
209 break;
210 case LED_HALF:
211 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK0);
212 break;
213 default:
214 /*
215 * Use PWM1 for all other values. This has the unwanted
216 * side effect of making all LEDs on the chip share the
217 * same brightness level if set to a value other than
218 * OFF, HALF, or FULL. But, this is probably better than
219 * just turning off for all other values.
220 */
221 pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
222 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
223 break;
224 }
225
226 pca955x_write_ls(pca955x->client, chip_ls, ls);
227}
228
229void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
230{
231 struct pca955x_led *pca955x;
232
233 pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
234
235 spin_lock(&pca955x->lock);
236 pca955x->brightness = value;
237
238 /*
239 * Must use workqueue for the actual I/O since I2C operations
240 * can sleep.
241 */
242 schedule_work(&pca955x->work);
243
244 spin_unlock(&pca955x->lock);
245}
246
247static int __devinit pca955x_probe(struct i2c_client *client,
248 const struct i2c_device_id *id)
249{
250 struct pca955x_led *pca955x;
251 int i;
252 int err = -ENODEV;
253 struct pca955x_chipdef *chip;
254 struct i2c_adapter *adapter;
255 struct led_platform_data *pdata;
256
257 chip = &pca955x_chipdefs[id->driver_data];
258 adapter = to_i2c_adapter(client->dev.parent);
259 pdata = client->dev.platform_data;
260
261 /* Make sure the slave address / chip type combo given is possible */
262 if ((client->addr & ~((1 << chip->slv_addr_shift) - 1)) !=
263 chip->slv_addr) {
264 dev_err(&client->dev, "invalid slave address %02x\n",
265 client->addr);
266 return -ENODEV;
267 }
268
269 printk(KERN_INFO "leds-pca955x: Using %s %d-bit LED driver at "
270 "slave address 0x%02x\n",
271 id->name, chip->bits, client->addr);
272
273 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
274 return -EIO;
275
276 if (pdata) {
277 if (pdata->num_leds != chip->bits) {
278 dev_err(&client->dev, "board info claims %d LEDs"
279 " on a %d-bit chip\n",
280 pdata->num_leds, chip->bits);
281 return -ENODEV;
282 }
283 }
284
285 for (i = 0; i < chip->bits; i++) {
286 pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL);
287 if (!pca955x) {
288 err = -ENOMEM;
289 goto exit;
290 }
291
292 pca955x->chipdef = chip;
293 pca955x->client = client;
294 pca955x->led_num = i;
295 /* Platform data can specify LED names and default triggers */
296 if (pdata) {
297 if (pdata->leds[i].name)
298 snprintf(pca955x->name, 32, "pca955x:%s",
299 pdata->leds[i].name);
300 if (pdata->leds[i].default_trigger)
301 pca955x->led_cdev.default_trigger =
302 pdata->leds[i].default_trigger;
303 } else {
304 snprintf(pca955x->name, 32, "pca955x:%d", i);
305 }
306 spin_lock_init(&pca955x->lock);
307
308 pca955x->led_cdev.name = pca955x->name;
309 pca955x->led_cdev.brightness_set =
310 pca955x_led_set;
311
312 /*
313 * Client data is a pointer to the _first_ pca955x_led
314 * struct
315 */
316 if (i == 0)
317 i2c_set_clientdata(client, pca955x);
318
319 INIT_WORK(&(pca955x->work), pca955x_led_work);
320
321 led_classdev_register(&client->dev, &(pca955x->led_cdev));
322 }
323
324 /* Turn off LEDs */
325 for (i = 0; i < pca95xx_num_led_regs(chip->bits); i++)
326 pca955x_write_ls(client, i, 0x55);
327
328 /* PWM0 is used for half brightness or 50% duty cycle */
329 pca955x_write_pwm(client, 0, 255-LED_HALF);
330
331 /* PWM1 is used for variable brightness, default to OFF */
332 pca955x_write_pwm(client, 1, 0);
333
334 /* Set to fast frequency so we do not see flashing */
335 pca955x_write_psc(client, 0, 0);
336 pca955x_write_psc(client, 1, 0);
337
338 return 0;
339exit:
340 return err;
341}
342
343static int __devexit pca955x_remove(struct i2c_client *client)
344{
345 struct pca955x_led *pca955x = i2c_get_clientdata(client);
346 int leds = pca955x->chipdef->bits;
347 int i;
348
349 for (i = 0; i < leds; i++) {
350 led_classdev_unregister(&(pca955x->led_cdev));
351 cancel_work_sync(&(pca955x->work));
352 kfree(pca955x);
353 pca955x = pca955x + 1;
354 }
355
356 return 0;
357}
358
359static struct i2c_driver pca955x_driver = {
360 .driver = {
361 .name = "leds-pca955x",
362 .owner = THIS_MODULE,
363 },
364 .probe = pca955x_probe,
365 .remove = __devexit_p(pca955x_remove),
366 .id_table = pca955x_id,
367};
368
369static int __init pca955x_leds_init(void)
370{
371 return i2c_add_driver(&pca955x_driver);
372}
373
374static void __exit pca955x_leds_exit(void)
375{
376 i2c_del_driver(&pca955x_driver);
377}
378
379module_init(pca955x_leds_init);
380module_exit(pca955x_leds_exit);
381
382MODULE_AUTHOR("Nate Case <ncase@xes-inc.com>");
383MODULE_DESCRIPTION("PCA955x LED driver");
384MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/pvrusb2/pvrusb2-dvb.c b/drivers/media/video/pvrusb2/pvrusb2-dvb.c
index 6ec4bf81fc7f..77b3c3385066 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-dvb.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/kthread.h> 21#include <linux/kthread.h>
22#include <linux/freezer.h> 22#include <linux/freezer.h>
23#include <linux/mm.h>
23#include "dvbdev.h" 24#include "dvbdev.h"
24#include "pvrusb2-debug.h" 25#include "pvrusb2-debug.h"
25#include "pvrusb2-hdw-internal.h" 26#include "pvrusb2-hdw-internal.h"
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.c b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
index 05a1376405e7..b4824782d858 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-ioread.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
@@ -22,6 +22,7 @@
22#include "pvrusb2-debug.h" 22#include "pvrusb2-debug.h"
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/mm.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/mutex.h> 27#include <linux/mutex.h>
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 7388d0cee3d4..5646a6a32939 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/version.h> 15#include <linux/version.h>
16#include <linux/mm.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/usb.h> 19#include <linux/usb.h>
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index 0a88c44ace00..b7b05842cf28 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/mm.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21 22
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 489d7c5c4965..8774c670e668 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -243,29 +243,41 @@ static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
243 243
244 /* create user entries for this device */ 244 /* create user entries for this device */
245 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); 245 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
246 if (tmp && (tmp != i2o_dev)) 246 if (tmp && (tmp != i2o_dev)) {
247 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, 247 rc = sysfs_create_link(&i2o_dev->device.kobj,
248 "user"); 248 &tmp->device.kobj, "user");
249 if (rc)
250 goto unreg_dev;
251 }
249 252
250 /* create user entries refering to this device */ 253 /* create user entries refering to this device */
251 list_for_each_entry(tmp, &c->devices, list) 254 list_for_each_entry(tmp, &c->devices, list)
252 if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid) 255 if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
253 && (tmp != i2o_dev)) 256 && (tmp != i2o_dev)) {
254 sysfs_create_link(&tmp->device.kobj, 257 rc = sysfs_create_link(&tmp->device.kobj,
255 &i2o_dev->device.kobj, "user"); 258 &i2o_dev->device.kobj, "user");
259 if (rc)
260 goto rmlink1;
261 }
256 262
257 /* create parent entries for this device */ 263 /* create parent entries for this device */
258 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); 264 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
259 if (tmp && (tmp != i2o_dev)) 265 if (tmp && (tmp != i2o_dev)) {
260 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, 266 rc = sysfs_create_link(&i2o_dev->device.kobj,
261 "parent"); 267 &tmp->device.kobj, "parent");
268 if (rc)
269 goto rmlink1;
270 }
262 271
263 /* create parent entries refering to this device */ 272 /* create parent entries refering to this device */
264 list_for_each_entry(tmp, &c->devices, list) 273 list_for_each_entry(tmp, &c->devices, list)
265 if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) 274 if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
266 && (tmp != i2o_dev)) 275 && (tmp != i2o_dev)) {
267 sysfs_create_link(&tmp->device.kobj, 276 rc = sysfs_create_link(&tmp->device.kobj,
268 &i2o_dev->device.kobj, "parent"); 277 &i2o_dev->device.kobj, "parent");
278 if (rc)
279 goto rmlink2;
280 }
269 281
270 i2o_driver_notify_device_add_all(i2o_dev); 282 i2o_driver_notify_device_add_all(i2o_dev);
271 283
@@ -273,6 +285,24 @@ static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
273 285
274 return 0; 286 return 0;
275 287
288rmlink2:
 289	/* If link creation failed halfway through, walk the whole list to
 290	 * clean up. Removing a link that was never created is harmless,
 291	 * because sysfs_remove_link() copes with a missing link.
 292	 */
293 list_for_each_entry(tmp, &c->devices, list) {
294 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
295 sysfs_remove_link(&tmp->device.kobj, "parent");
296 }
297 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
298rmlink1:
299 list_for_each_entry(tmp, &c->devices, list)
300 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
301 sysfs_remove_link(&tmp->device.kobj, "user");
302 sysfs_remove_link(&i2o_dev->device.kobj, "user");
303unreg_dev:
304 list_del(&i2o_dev->list);
305 device_unregister(&i2o_dev->device);
276err: 306err:
277 kfree(i2o_dev); 307 kfree(i2o_dev);
278 return rc; 308 return rc;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 260bade0a5ec..9f93c29fed35 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -5,6 +5,10 @@
5menu "Multifunction device drivers" 5menu "Multifunction device drivers"
6 depends on HAS_IOMEM 6 depends on HAS_IOMEM
7 7
8config MFD_CORE
9 tristate
10 default n
11
8config MFD_SM501 12config MFD_SM501
9 tristate "Support for Silicon Motion SM501" 13 tristate "Support for Silicon Motion SM501"
10 ---help--- 14 ---help---
@@ -38,6 +42,13 @@ config HTC_PASIC3
38 HTC Magician devices, respectively. Actual functionality is 42 HTC Magician devices, respectively. Actual functionality is
39 handled by the leds-pasic3 and ds1wm drivers. 43 handled by the leds-pasic3 and ds1wm drivers.
40 44
45config MFD_TC6393XB
46 bool "Support Toshiba TC6393XB"
47 depends on HAVE_GPIO_LIB
48 select MFD_CORE
49 help
50 Support for Toshiba Mobile IO Controller TC6393XB
51
41endmenu 52endmenu
42 53
43menu "Multimedia Capabilities Port drivers" 54menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index eef4e26807df..33daa2f45dd8 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -8,6 +8,10 @@ obj-$(CONFIG_MFD_ASIC3) += asic3.o
8obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o 8obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
9obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o 9obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
10 10
11obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o
12
13obj-$(CONFIG_MFD_CORE) += mfd-core.o
14
11obj-$(CONFIG_MCP) += mcp-core.o 15obj-$(CONFIG_MCP) += mcp-core.o
12obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o 16obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
13obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o 17obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
new file mode 100644
index 000000000000..d7d88ce053a6
--- /dev/null
+++ b/drivers/mfd/mfd-core.c
@@ -0,0 +1,114 @@
1/*
2 * drivers/mfd/mfd-core.c
3 *
4 * core MFD support
5 * Copyright (c) 2006 Ian Molton
6 * Copyright (c) 2007,2008 Dmitry Baryshkov
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/kernel.h>
15#include <linux/platform_device.h>
16#include <linux/mfd/core.h>
17
18static int mfd_add_device(struct platform_device *parent,
19 const struct mfd_cell *cell,
20 struct resource *mem_base,
21 int irq_base)
22{
23 struct resource res[cell->num_resources];
24 struct platform_device *pdev;
25 int ret = -ENOMEM;
26 int r;
27
28 pdev = platform_device_alloc(cell->name, parent->id);
29 if (!pdev)
30 goto fail_alloc;
31
32 pdev->dev.parent = &parent->dev;
33
34 ret = platform_device_add_data(pdev,
35 cell, sizeof(struct mfd_cell));
36 if (ret)
37 goto fail_device;
38
 39	memset(res, 0, sizeof(res));
40 for (r = 0; r < cell->num_resources; r++) {
41 res[r].name = cell->resources[r].name;
42 res[r].flags = cell->resources[r].flags;
43
44 /* Find out base to use */
45 if (cell->resources[r].flags & IORESOURCE_MEM) {
46 res[r].parent = mem_base;
47 res[r].start = mem_base->start +
48 cell->resources[r].start;
49 res[r].end = mem_base->start +
50 cell->resources[r].end;
51 } else if (cell->resources[r].flags & IORESOURCE_IRQ) {
52 res[r].start = irq_base +
53 cell->resources[r].start;
54 res[r].end = irq_base +
55 cell->resources[r].end;
56 } else {
57 res[r].parent = cell->resources[r].parent;
58 res[r].start = cell->resources[r].start;
59 res[r].end = cell->resources[r].end;
60 }
61 }
62
63 platform_device_add_resources(pdev, res, cell->num_resources);
64
65 ret = platform_device_add(pdev);
66 if (ret)
67 goto fail_device;
68
69 return 0;
70
71/* platform_device_del(pdev); */
72fail_device:
73 platform_device_put(pdev);
74fail_alloc:
75 return ret;
76}
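mfd_add_device() treats a cell's IORESOURCE_MEM entries as offsets into the parent's memory window and its IORESOURCE_IRQ entries as offsets from irq_base. A standalone sketch of that translation, with a made-up window address and IRQ base and a simplified stand-in for struct resource:

#include <stdio.h>

struct res { unsigned long start, end; int is_mem; };

int main(void)
{
	unsigned long mem_base = 0x10000000;	/* hypothetical parent window */
	int irq_base = 96;			/* hypothetical IRQ base */

	/* cell-relative resources, e.g. a config window and one IRQ line */
	struct res cell[] = { { 0x0100, 0x01ff, 1 }, { 0, 0, 0 } };
	int i;

	for (i = 0; i < 2; i++) {
		if (cell[i].is_mem)
			printf("MEM 0x%lx-0x%lx\n", mem_base + cell[i].start,
			       mem_base + cell[i].end);
		else
			printf("IRQ %lu\n", irq_base + cell[i].start);
	}
	return 0;	/* MEM 0x10000100-0x100001ff, IRQ 96 */
}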
77
78int mfd_add_devices(
79 struct platform_device *parent,
80 const struct mfd_cell *cells, int n_devs,
81 struct resource *mem_base,
82 int irq_base)
83{
84 int i;
85 int ret = 0;
86
87 for (i = 0; i < n_devs; i++) {
88 ret = mfd_add_device(parent, cells + i, mem_base, irq_base);
89 if (ret)
90 break;
91 }
92
93 if (ret)
94 mfd_remove_devices(parent);
95
96 return ret;
97}
98EXPORT_SYMBOL(mfd_add_devices);
99
100static int mfd_remove_devices_fn(struct device *dev, void *unused)
101{
102 platform_device_unregister(
103 container_of(dev, struct platform_device, dev));
104 return 0;
105}
106
107void mfd_remove_devices(struct platform_device *parent)
108{
109 device_for_each_child(&parent->dev, NULL, mfd_remove_devices_fn);
110}
111EXPORT_SYMBOL(mfd_remove_devices);
112
113MODULE_LICENSE("GPL");
114MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
new file mode 100644
index 000000000000..2d87501b6fd4
--- /dev/null
+++ b/drivers/mfd/tc6393xb.c
@@ -0,0 +1,600 @@
1/*
2 * Toshiba TC6393XB SoC support
3 *
4 * Copyright(c) 2005-2006 Chris Humbert
5 * Copyright(c) 2005 Dirk Opfer
6 * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
7 * Copyright(c) 2007 Dmitry Baryshkov
8 *
9 * Based on code written by Sharp/Lineo for 2.4 kernels
10 * Based on locomo.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/platform_device.h>
22#include <linux/fb.h>
23#include <linux/clk.h>
24#include <linux/mfd/core.h>
25#include <linux/mfd/tmio.h>
26#include <linux/mfd/tc6393xb.h>
27#include <linux/gpio.h>
28
29#define SCR_REVID 0x08 /* b Revision ID */
30#define SCR_ISR 0x50 /* b Interrupt Status */
31#define SCR_IMR 0x52 /* b Interrupt Mask */
32#define SCR_IRR 0x54 /* b Interrupt Routing */
33#define SCR_GPER 0x60 /* w GP Enable */
34#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
35#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
36#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
37#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
38#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
39#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
40#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
41#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */
42#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
43#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
44#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
45#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
46#define SCR_CCR 0x98 /* w Clock Control */
47#define SCR_PLL2CR 0x9a /* w PLL2 Control */
48#define SCR_PLL1CR 0x9c /* l PLL1 Control */
49#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
50#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
51#define SCR_FER 0xe0 /* b Function Enable */
52#define SCR_MCR 0xe4 /* w Mode Control */
53#define SCR_CONFIG 0xfc /* b Configuration Control */
54#define SCR_DEBUG 0xff /* b Debug */
55
56#define SCR_CCR_CK32K BIT(0)
57#define SCR_CCR_USBCK BIT(1)
58#define SCR_CCR_UNK1 BIT(4)
59#define SCR_CCR_MCLK_MASK (7 << 8)
60#define SCR_CCR_MCLK_OFF (0 << 8)
61#define SCR_CCR_MCLK_12 (1 << 8)
62#define SCR_CCR_MCLK_24 (2 << 8)
63#define SCR_CCR_MCLK_48 (3 << 8)
64#define SCR_CCR_HCLK_MASK (3 << 12)
65#define SCR_CCR_HCLK_24 (0 << 12)
66#define SCR_CCR_HCLK_48 (1 << 12)
67
68#define SCR_FER_USBEN BIT(0) /* USB host enable */
69#define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */
70#define SCR_FER_SLCDEN BIT(2) /* SLCD enable */
71
72#define SCR_MCR_RDY_MASK (3 << 0)
73#define SCR_MCR_RDY_OPENDRAIN (0 << 0)
74#define SCR_MCR_RDY_TRISTATE (1 << 0)
75#define SCR_MCR_RDY_PUSHPULL (2 << 0)
76#define SCR_MCR_RDY_UNK BIT(2)
77#define SCR_MCR_RDY_EN BIT(3)
78#define SCR_MCR_INT_MASK (3 << 4)
79#define SCR_MCR_INT_OPENDRAIN (0 << 4)
80#define SCR_MCR_INT_TRISTATE (1 << 4)
81#define SCR_MCR_INT_PUSHPULL (2 << 4)
82#define SCR_MCR_INT_UNK BIT(6)
83#define SCR_MCR_INT_EN BIT(7)
84/* bits 8 - 16 are unknown */
85
86#define TC_GPIO_BIT(i) (1 << (i & 0x7))
87
88/*--------------------------------------------------------------------------*/
89
90struct tc6393xb {
91 void __iomem *scr;
92
93 struct gpio_chip gpio;
94
95 struct clk *clk; /* 3,6 Mhz */
96
97 spinlock_t lock; /* protects RMW cycles */
98
99 struct {
100 u8 fer;
101 u16 ccr;
102 u8 gpi_bcr[3];
103 u8 gpo_dsr[3];
104 u8 gpo_doecr[3];
105 } suspend_state;
106
107 struct resource rscr;
108 struct resource *iomem;
109 int irq;
110 int irq_base;
111};
112
113enum {
114 TC6393XB_CELL_NAND,
115};
116
117/*--------------------------------------------------------------------------*/
118
119static int tc6393xb_nand_enable(struct platform_device *nand)
120{
121 struct platform_device *dev = to_platform_device(nand->dev.parent);
122 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
123 unsigned long flags;
124
125 spin_lock_irqsave(&tc6393xb->lock, flags);
126
127 /* SMD buffer on */
128 dev_dbg(&dev->dev, "SMD buffer on\n");
129 iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
130
131 spin_unlock_irqrestore(&tc6393xb->lock, flags);
132
133 return 0;
134}
135
136static struct resource __devinitdata tc6393xb_nand_resources[] = {
137 {
138 .name = TMIO_NAND_CONFIG,
139 .start = 0x0100,
140 .end = 0x01ff,
141 .flags = IORESOURCE_MEM,
142 },
143 {
144 .name = TMIO_NAND_CONTROL,
145 .start = 0x1000,
146 .end = 0x1007,
147 .flags = IORESOURCE_MEM,
148 },
149 {
150 .name = TMIO_NAND_IRQ,
151 .start = IRQ_TC6393_NAND,
152 .end = IRQ_TC6393_NAND,
153 .flags = IORESOURCE_IRQ,
154 },
155};
156
157static struct mfd_cell __devinitdata tc6393xb_cells[] = {
158 [TC6393XB_CELL_NAND] = {
159 .name = "tmio-nand",
160 .enable = tc6393xb_nand_enable,
161 .num_resources = ARRAY_SIZE(tc6393xb_nand_resources),
162 .resources = tc6393xb_nand_resources,
163 },
164};
165
166/*--------------------------------------------------------------------------*/
167
168static int tc6393xb_gpio_get(struct gpio_chip *chip,
169 unsigned offset)
170{
171 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
172
173 /* XXX: does dsr also represent inputs? */
174 return ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
175 & TC_GPIO_BIT(offset);
176}
177
178static void __tc6393xb_gpio_set(struct gpio_chip *chip,
179 unsigned offset, int value)
180{
181 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
182 u8 dsr;
183
184 dsr = ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));
185 if (value)
186 dsr |= TC_GPIO_BIT(offset);
187 else
188 dsr &= ~TC_GPIO_BIT(offset);
189
190 iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8));
191}
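The GPIO helpers spread the 16 lines across byte-wide DSR/DOECR registers: offset / 8 selects the register and TC_GPIO_BIT() the bit inside it. A standalone sketch of the indexing for one hypothetical line:

#include <stdio.h>

#define TC_GPIO_BIT(i)	(1 << ((i) & 0x7))

int main(void)
{
	unsigned offset = 11;	/* hypothetical GPIO line on the chip */

	printf("GPIO %u -> SCR_GPO_DSR(%u), bit mask 0x%02x\n",
	       offset, offset / 8, (unsigned)TC_GPIO_BIT(offset));
	return 0;	/* register index 1, mask 0x08 */
}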
192
193static void tc6393xb_gpio_set(struct gpio_chip *chip,
194 unsigned offset, int value)
195{
196 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
197 unsigned long flags;
198
199 spin_lock_irqsave(&tc6393xb->lock, flags);
200
201 __tc6393xb_gpio_set(chip, offset, value);
202
203 spin_unlock_irqrestore(&tc6393xb->lock, flags);
204}
205
206static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
207 unsigned offset)
208{
209 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
210 unsigned long flags;
211 u8 doecr;
212
213 spin_lock_irqsave(&tc6393xb->lock, flags);
214
215 doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
216 doecr &= ~TC_GPIO_BIT(offset);
217 iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
218
219 spin_unlock_irqrestore(&tc6393xb->lock, flags);
220
221 return 0;
222}
223
224static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
225 unsigned offset, int value)
226{
227 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
228 unsigned long flags;
229 u8 doecr;
230
231 spin_lock_irqsave(&tc6393xb->lock, flags);
232
233 __tc6393xb_gpio_set(chip, offset, value);
234
235 doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
236 doecr |= TC_GPIO_BIT(offset);
237 iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
238
239 spin_unlock_irqrestore(&tc6393xb->lock, flags);
240
241 return 0;
242}
243
244static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
245{
246 tc6393xb->gpio.label = "tc6393xb";
247 tc6393xb->gpio.base = gpio_base;
248 tc6393xb->gpio.ngpio = 16;
249 tc6393xb->gpio.set = tc6393xb_gpio_set;
250 tc6393xb->gpio.get = tc6393xb_gpio_get;
251 tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
252 tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
253
254 return gpiochip_add(&tc6393xb->gpio);
255}
256
257/*--------------------------------------------------------------------------*/
258
259static void
260tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
261{
262 struct tc6393xb *tc6393xb = get_irq_data(irq);
263 unsigned int isr;
264 unsigned int i, irq_base;
265
266 irq_base = tc6393xb->irq_base;
267
268 while ((isr = ioread8(tc6393xb->scr + SCR_ISR) &
269 ~ioread8(tc6393xb->scr + SCR_IMR)))
270 for (i = 0; i < TC6393XB_NR_IRQS; i++) {
271 if (isr & (1 << i))
272 generic_handle_irq(irq_base + i);
273 }
274}
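The chained handler keeps re-reading the status register and dispatches one sub-interrupt per pending, unmasked bit. The demultiplexing reduces to the loop below; the register values in this standalone sketch are made up:

#include <stdio.h>

int main(void)
{
	unsigned int isr = 0x45;	/* pretend status: bits 0, 2, 6 raised */
	unsigned int imr = 0x40;	/* pretend mask: bit 6 masked off */
	unsigned int pending = isr & ~imr;
	int i;

	for (i = 0; i < 8; i++)
		if (pending & (1 << i))
			printf("dispatch sub-irq %d\n", i);	/* 0 and 2 */
	return 0;
}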
275
276static void tc6393xb_irq_ack(unsigned int irq)
277{
278}
279
280static void tc6393xb_irq_mask(unsigned int irq)
281{
282 struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
283 unsigned long flags;
284 u8 imr;
285
286 spin_lock_irqsave(&tc6393xb->lock, flags);
287 imr = ioread8(tc6393xb->scr + SCR_IMR);
288 imr |= 1 << (irq - tc6393xb->irq_base);
289 iowrite8(imr, tc6393xb->scr + SCR_IMR);
290 spin_unlock_irqrestore(&tc6393xb->lock, flags);
291}
292
293static void tc6393xb_irq_unmask(unsigned int irq)
294{
295 struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
296 unsigned long flags;
297 u8 imr;
298
299 spin_lock_irqsave(&tc6393xb->lock, flags);
300 imr = ioread8(tc6393xb->scr + SCR_IMR);
301 imr &= ~(1 << (irq - tc6393xb->irq_base));
302 iowrite8(imr, tc6393xb->scr + SCR_IMR);
303 spin_unlock_irqrestore(&tc6393xb->lock, flags);
304}
305
306static struct irq_chip tc6393xb_chip = {
307 .name = "tc6393xb",
308 .ack = tc6393xb_irq_ack,
309 .mask = tc6393xb_irq_mask,
310 .unmask = tc6393xb_irq_unmask,
311};
312
313static void tc6393xb_attach_irq(struct platform_device *dev)
314{
315 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
316 unsigned int irq, irq_base;
317
318 irq_base = tc6393xb->irq_base;
319
320 for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
321 set_irq_chip(irq, &tc6393xb_chip);
322 set_irq_chip_data(irq, tc6393xb);
323 set_irq_handler(irq, handle_edge_irq);
324 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
325 }
326
327 set_irq_type(tc6393xb->irq, IRQT_FALLING);
328 set_irq_data(tc6393xb->irq, tc6393xb);
329 set_irq_chained_handler(tc6393xb->irq, tc6393xb_irq);
330}
331
332static void tc6393xb_detach_irq(struct platform_device *dev)
333{
334 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
335 unsigned int irq, irq_base;
336
337 set_irq_chained_handler(tc6393xb->irq, NULL);
338 set_irq_data(tc6393xb->irq, NULL);
339
340 irq_base = tc6393xb->irq_base;
341
342 for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
343 set_irq_flags(irq, 0);
344 set_irq_chip(irq, NULL);
345 set_irq_chip_data(irq, NULL);
346 }
347}
348
349/*--------------------------------------------------------------------------*/
350
351static int tc6393xb_hw_init(struct platform_device *dev)
352{
353 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
354 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
355 int i;
356
357 iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER);
358 iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
359 iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR);
360 iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
361 SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
362 BIT(15), tc6393xb->scr + SCR_MCR);
363 iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
364 iowrite8(0, tc6393xb->scr + SCR_IRR);
365 iowrite8(0xbf, tc6393xb->scr + SCR_IMR);
366
367 for (i = 0; i < 3; i++) {
368 iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
369 tc6393xb->scr + SCR_GPO_DSR(i));
370 iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
371 tc6393xb->scr + SCR_GPO_DOECR(i));
372 iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
373 tc6393xb->scr + SCR_GPI_BCR(i));
374 }
375
376 return 0;
377}
378
379static int __devinit tc6393xb_probe(struct platform_device *dev)
380{
381 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
382 struct tc6393xb *tc6393xb;
383 struct resource *iomem;
384 struct resource *rscr;
385 int retval, temp;
386 int i;
387
388 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
389 if (!iomem)
390 return -EINVAL;
391
392 tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL);
393 if (!tc6393xb) {
394 retval = -ENOMEM;
395 goto err_kzalloc;
396 }
397
398 spin_lock_init(&tc6393xb->lock);
399
400 platform_set_drvdata(dev, tc6393xb);
401 tc6393xb->iomem = iomem;
402 tc6393xb->irq = platform_get_irq(dev, 0);
403 tc6393xb->irq_base = tcpd->irq_base;
404
405 tc6393xb->clk = clk_get(&dev->dev, "GPIO27_CLK" /* "CK3P6MI" */);
406 if (IS_ERR(tc6393xb->clk)) {
407 retval = PTR_ERR(tc6393xb->clk);
408 goto err_clk_get;
409 }
410
411 rscr = &tc6393xb->rscr;
412 rscr->name = "tc6393xb-core";
413 rscr->start = iomem->start;
414 rscr->end = iomem->start + 0xff;
415 rscr->flags = IORESOURCE_MEM;
416
417 retval = request_resource(iomem, rscr);
418 if (retval)
419 goto err_request_scr;
420
421 tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
422 if (!tc6393xb->scr) {
423 retval = -ENOMEM;
424 goto err_ioremap;
425 }
426
427 retval = clk_enable(tc6393xb->clk);
428 if (retval)
429 goto err_clk_enable;
430
431 retval = tcpd->enable(dev);
432 if (retval)
433 goto err_enable;
434
435 tc6393xb->suspend_state.fer = 0;
436 for (i = 0; i < 3; i++) {
437 tc6393xb->suspend_state.gpo_dsr[i] =
438 (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff;
439 tc6393xb->suspend_state.gpo_doecr[i] =
440 (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff;
441 }
442 /*
443 * It may be necessary to change this back to
 444	 * platform-dependent code
445 */
446 tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 |
447 SCR_CCR_HCLK_48;
448
449 retval = tc6393xb_hw_init(dev);
450 if (retval)
451 goto err_hw_init;
452
453 printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
454 ioread8(tc6393xb->scr + SCR_REVID),
455 (unsigned long) iomem->start, tc6393xb->irq);
456
457 tc6393xb->gpio.base = -1;
458
459 if (tcpd->gpio_base >= 0) {
460 retval = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
461 if (retval)
462 goto err_gpio_add;
463 }
464
465 if (tc6393xb->irq)
466 tc6393xb_attach_irq(dev);
467
468 tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
469
470 retval = mfd_add_devices(dev,
471 tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
472 iomem, tcpd->irq_base);
473
474 if (!retval)
475 return 0;
476 if (tc6393xb->irq)
477 tc6393xb_detach_irq(dev);
478
479err_gpio_add:
480 if (tc6393xb->gpio.base != -1)
481 temp = gpiochip_remove(&tc6393xb->gpio);
482err_hw_init:
483 tcpd->disable(dev);
484err_clk_enable:
485 clk_disable(tc6393xb->clk);
486err_enable:
487 iounmap(tc6393xb->scr);
488err_ioremap:
489 release_resource(&tc6393xb->rscr);
490err_request_scr:
491 clk_put(tc6393xb->clk);
492err_clk_get:
493 kfree(tc6393xb);
494err_kzalloc:
495 return retval;
496}
497
498static int __devexit tc6393xb_remove(struct platform_device *dev)
499{
500 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
501 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
502 int ret;
503
504 mfd_remove_devices(dev);
505
506 if (tc6393xb->irq)
507 tc6393xb_detach_irq(dev);
508
509 if (tc6393xb->gpio.base != -1) {
510 ret = gpiochip_remove(&tc6393xb->gpio);
511 if (ret) {
512 dev_err(&dev->dev, "Can't remove gpio chip: %d\n", ret);
513 return ret;
514 }
515 }
516
517 ret = tcpd->disable(dev);
518
519 clk_disable(tc6393xb->clk);
520
521 iounmap(tc6393xb->scr);
522
523 release_resource(&tc6393xb->rscr);
524
525 platform_set_drvdata(dev, NULL);
526
527 clk_put(tc6393xb->clk);
528
529 kfree(tc6393xb);
530
531 return ret;
532}
533
534#ifdef CONFIG_PM
535static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
536{
537 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
538 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
539 int i;
540
541
542 tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR);
543 tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER);
544
545 for (i = 0; i < 3; i++) {
546 tc6393xb->suspend_state.gpo_dsr[i] =
547 ioread8(tc6393xb->scr + SCR_GPO_DSR(i));
548 tc6393xb->suspend_state.gpo_doecr[i] =
549 ioread8(tc6393xb->scr + SCR_GPO_DOECR(i));
550 tc6393xb->suspend_state.gpi_bcr[i] =
551 ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
552 }
553
554 return tcpd->suspend(dev);
555}
556
557static int tc6393xb_resume(struct platform_device *dev)
558{
559 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
560 int ret = tcpd->resume(dev);
561
562 if (ret)
563 return ret;
564
565 return tc6393xb_hw_init(dev);
566}
567#else
568#define tc6393xb_suspend NULL
569#define tc6393xb_resume NULL
570#endif
571
572static struct platform_driver tc6393xb_driver = {
573 .probe = tc6393xb_probe,
574 .remove = __devexit_p(tc6393xb_remove),
575 .suspend = tc6393xb_suspend,
576 .resume = tc6393xb_resume,
577
578 .driver = {
579 .name = "tc6393xb",
580 .owner = THIS_MODULE,
581 },
582};
583
584static int __init tc6393xb_init(void)
585{
586 return platform_driver_register(&tc6393xb_driver);
587}
588
589static void __exit tc6393xb_exit(void)
590{
591 platform_driver_unregister(&tc6393xb_driver);
592}
593
594subsys_initcall(tc6393xb_init);
595module_exit(tc6393xb_exit);
596
597MODULE_LICENSE("GPL");
598MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
599MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
600MODULE_ALIAS("platform:tc6393xb");
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
index 5b5a14dab3d3..6aa5294dfec4 100644
--- a/drivers/misc/atmel_pwm.c
+++ b/drivers/misc/atmel_pwm.c
@@ -211,8 +211,7 @@ int pwm_clk_alloc(unsigned prescale, unsigned div)
211 if ((mr & 0xffff) == 0) { 211 if ((mr & 0xffff) == 0) {
212 mr |= val; 212 mr |= val;
213 ret = PWM_CPR_CLKA; 213 ret = PWM_CPR_CLKA;
214 } 214 } else if ((mr & (0xffff << 16)) == 0) {
215 if ((mr & (0xffff << 16)) == 0) {
216 mr |= val << 16; 215 mr |= val << 16;
217 ret = PWM_CPR_CLKB; 216 ret = PWM_CPR_CLKB;
218 } 217 }
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 08256ed0d9a6..579b01ff82d4 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore)
229 int last_IRQ_count = 0; 229 int last_IRQ_count = 0;
230 int new_IRQ_count; 230 int new_IRQ_count;
231 int force_IRQ = 0; 231 int force_IRQ = 0;
232 cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
232 233
233 /* this thread was marked active by xpc_hb_init() */ 234 /* this thread was marked active by xpc_hb_init() */
234 235
235 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); 236 set_cpus_allowed_ptr(current, cpumask);
236 237
237 /* set our heartbeating to other partitions into motion */ 238 /* set our heartbeating to other partitions into motion */
238 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); 239 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index d6b9b486417c..a067fe436301 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -21,13 +21,17 @@
21#define RESULT_UNSUP_HOST 2 21#define RESULT_UNSUP_HOST 2
22#define RESULT_UNSUP_CARD 3 22#define RESULT_UNSUP_CARD 3
23 23
24#define BUFFER_SIZE (PAGE_SIZE * 4) 24#define BUFFER_ORDER 2
25#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
25 26
26struct mmc_test_card { 27struct mmc_test_card {
27 struct mmc_card *card; 28 struct mmc_card *card;
28 29
29 u8 scratch[BUFFER_SIZE]; 30 u8 scratch[BUFFER_SIZE];
30 u8 *buffer; 31 u8 *buffer;
32#ifdef CONFIG_HIGHMEM
33 struct page *highmem;
34#endif
31}; 35};
32 36
33/*******************************************************************/ 37/*******************************************************************/
@@ -384,14 +388,16 @@ static int mmc_test_transfer(struct mmc_test_card *test,
384 int ret, i; 388 int ret, i;
385 unsigned long flags; 389 unsigned long flags;
386 390
391 BUG_ON(blocks * blksz > BUFFER_SIZE);
392
387 if (write) { 393 if (write) {
388 for (i = 0;i < blocks * blksz;i++) 394 for (i = 0;i < blocks * blksz;i++)
389 test->scratch[i] = i; 395 test->scratch[i] = i;
390 } else { 396 } else {
391 memset(test->scratch, 0, BUFFER_SIZE); 397 memset(test->scratch, 0, blocks * blksz);
392 } 398 }
393 local_irq_save(flags); 399 local_irq_save(flags);
394 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 400 sg_copy_from_buffer(sg, sg_len, test->scratch, blocks * blksz);
395 local_irq_restore(flags); 401 local_irq_restore(flags);
396 402
397 ret = mmc_test_set_blksize(test, blksz); 403 ret = mmc_test_set_blksize(test, blksz);
@@ -438,7 +444,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
438 } 444 }
439 } else { 445 } else {
440 local_irq_save(flags); 446 local_irq_save(flags);
441 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 447 sg_copy_to_buffer(sg, sg_len, test->scratch, blocks * blksz);
442 local_irq_restore(flags); 448 local_irq_restore(flags);
443 for (i = 0;i < blocks * blksz;i++) { 449 for (i = 0;i < blocks * blksz;i++) {
444 if (test->scratch[i] != (u8)i) 450 if (test->scratch[i] != (u8)i)
@@ -799,6 +805,157 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
799 return 0; 805 return 0;
800} 806}
801 807
808static int mmc_test_bigsg_write(struct mmc_test_card *test)
809{
810 int ret;
811 unsigned int size;
812 struct scatterlist sg;
813
814 if (test->card->host->max_blk_count == 1)
815 return RESULT_UNSUP_HOST;
816
817 size = PAGE_SIZE * 2;
818 size = min(size, test->card->host->max_req_size);
819 size = min(size, test->card->host->max_seg_size);
820 size = min(size, test->card->host->max_blk_count * 512);
821
822 memset(test->buffer, 0, BUFFER_SIZE);
823
824 if (size < 1024)
825 return RESULT_UNSUP_HOST;
826
827 sg_init_table(&sg, 1);
828 sg_init_one(&sg, test->buffer, BUFFER_SIZE);
829
830 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
831 if (ret)
832 return ret;
833
834 return 0;
835}
836
837static int mmc_test_bigsg_read(struct mmc_test_card *test)
838{
839 int ret, i;
840 unsigned int size;
841 struct scatterlist sg;
842
843 if (test->card->host->max_blk_count == 1)
844 return RESULT_UNSUP_HOST;
845
846 size = PAGE_SIZE * 2;
847 size = min(size, test->card->host->max_req_size);
848 size = min(size, test->card->host->max_seg_size);
849 size = min(size, test->card->host->max_blk_count * 512);
850
851 if (size < 1024)
852 return RESULT_UNSUP_HOST;
853
854 memset(test->buffer, 0xCD, BUFFER_SIZE);
855
856 sg_init_table(&sg, 1);
857 sg_init_one(&sg, test->buffer, BUFFER_SIZE);
858 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
859 if (ret)
860 return ret;
861
862 /* mmc_test_transfer() doesn't check for read overflows */
863 for (i = size;i < BUFFER_SIZE;i++) {
864 if (test->buffer[i] != 0xCD)
865 return RESULT_FAIL;
866 }
867
868 return 0;
869}
870
871#ifdef CONFIG_HIGHMEM
872
873static int mmc_test_write_high(struct mmc_test_card *test)
874{
875 int ret;
876 struct scatterlist sg;
877
878 sg_init_table(&sg, 1);
879 sg_set_page(&sg, test->highmem, 512, 0);
880
881 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
882 if (ret)
883 return ret;
884
885 return 0;
886}
887
888static int mmc_test_read_high(struct mmc_test_card *test)
889{
890 int ret;
891 struct scatterlist sg;
892
893 sg_init_table(&sg, 1);
894 sg_set_page(&sg, test->highmem, 512, 0);
895
896 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
897 if (ret)
898 return ret;
899
900 return 0;
901}
902
903static int mmc_test_multi_write_high(struct mmc_test_card *test)
904{
905 int ret;
906 unsigned int size;
907 struct scatterlist sg;
908
909 if (test->card->host->max_blk_count == 1)
910 return RESULT_UNSUP_HOST;
911
912 size = PAGE_SIZE * 2;
913 size = min(size, test->card->host->max_req_size);
914 size = min(size, test->card->host->max_seg_size);
915 size = min(size, test->card->host->max_blk_count * 512);
916
917 if (size < 1024)
918 return RESULT_UNSUP_HOST;
919
920 sg_init_table(&sg, 1);
921 sg_set_page(&sg, test->highmem, size, 0);
922
923 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
924 if (ret)
925 return ret;
926
927 return 0;
928}
929
930static int mmc_test_multi_read_high(struct mmc_test_card *test)
931{
932 int ret;
933 unsigned int size;
934 struct scatterlist sg;
935
936 if (test->card->host->max_blk_count == 1)
937 return RESULT_UNSUP_HOST;
938
939 size = PAGE_SIZE * 2;
940 size = min(size, test->card->host->max_req_size);
941 size = min(size, test->card->host->max_seg_size);
942 size = min(size, test->card->host->max_blk_count * 512);
943
944 if (size < 1024)
945 return RESULT_UNSUP_HOST;
946
947 sg_init_table(&sg, 1);
948 sg_set_page(&sg, test->highmem, size, 0);
949
950 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
951 if (ret)
952 return ret;
953
954 return 0;
955}
956
957#endif /* CONFIG_HIGHMEM */
958
802static const struct mmc_test_case mmc_test_cases[] = { 959static const struct mmc_test_case mmc_test_cases[] = {
803 { 960 {
804 .name = "Basic write (no data verification)", 961 .name = "Basic write (no data verification)",
@@ -913,6 +1070,53 @@ static const struct mmc_test_case mmc_test_cases[] = {
913 .name = "Correct xfer_size at read (midway failure)", 1070 .name = "Correct xfer_size at read (midway failure)",
914 .run = mmc_test_multi_xfersize_read, 1071 .run = mmc_test_multi_xfersize_read,
915 }, 1072 },
1073
1074 {
1075 .name = "Over-sized SG list write",
1076 .prepare = mmc_test_prepare_write,
1077 .run = mmc_test_bigsg_write,
1078 .cleanup = mmc_test_cleanup,
1079 },
1080
1081 {
1082 .name = "Over-sized SG list read",
1083 .prepare = mmc_test_prepare_read,
1084 .run = mmc_test_bigsg_read,
1085 .cleanup = mmc_test_cleanup,
1086 },
1087
1088#ifdef CONFIG_HIGHMEM
1089
1090 {
1091 .name = "Highmem write",
1092 .prepare = mmc_test_prepare_write,
1093 .run = mmc_test_write_high,
1094 .cleanup = mmc_test_cleanup,
1095 },
1096
1097 {
1098 .name = "Highmem read",
1099 .prepare = mmc_test_prepare_read,
1100 .run = mmc_test_read_high,
1101 .cleanup = mmc_test_cleanup,
1102 },
1103
1104 {
1105 .name = "Multi-block highmem write",
1106 .prepare = mmc_test_prepare_write,
1107 .run = mmc_test_multi_write_high,
1108 .cleanup = mmc_test_cleanup,
1109 },
1110
1111 {
1112 .name = "Multi-block highmem read",
1113 .prepare = mmc_test_prepare_read,
1114 .run = mmc_test_multi_read_high,
1115 .cleanup = mmc_test_cleanup,
1116 },
1117
1118#endif /* CONFIG_HIGHMEM */
1119
916}; 1120};
917 1121
918static struct mutex mmc_test_lock; 1122static struct mutex mmc_test_lock;
@@ -1014,12 +1218,23 @@ static ssize_t mmc_test_store(struct device *dev,
1014 test->card = card; 1218 test->card = card;
1015 1219
1016 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); 1220 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
1221#ifdef CONFIG_HIGHMEM
1222 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
1223#endif
1224
1225#ifdef CONFIG_HIGHMEM
1226 if (test->buffer && test->highmem) {
1227#else
1017 if (test->buffer) { 1228 if (test->buffer) {
1229#endif
1018 mutex_lock(&mmc_test_lock); 1230 mutex_lock(&mmc_test_lock);
1019 mmc_test_run(test, testcase); 1231 mmc_test_run(test, testcase);
1020 mutex_unlock(&mmc_test_lock); 1232 mutex_unlock(&mmc_test_lock);
1021 } 1233 }
1022 1234
1235#ifdef CONFIG_HIGHMEM
1236 __free_pages(test->highmem, BUFFER_ORDER);
1237#endif
1023 kfree(test->buffer); 1238 kfree(test->buffer);
1024 kfree(test); 1239 kfree(test);
1025 1240
@@ -1041,6 +1256,8 @@ static int mmc_test_probe(struct mmc_card *card)
1041 if (ret) 1256 if (ret)
1042 return ret; 1257 return ret;
1043 1258
1259 dev_info(&card->dev, "Card claimed for testing.\n");
1260
1044 return 0; 1261 return 0;
1045} 1262}
1046 1263
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731ddefdc1b..3dee97e7d165 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
148 printk(KERN_WARNING "%s: unable to allocate " 148 printk(KERN_WARNING "%s: unable to allocate "
149 "bounce buffer\n", mmc_card_name(card)); 149 "bounce buffer\n", mmc_card_name(card));
150 } else { 150 } else {
151 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); 151 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
152 blk_queue_max_sectors(mq->queue, bouncesz / 512); 152 blk_queue_max_sectors(mq->queue, bouncesz / 512);
153 blk_queue_max_phys_segments(mq->queue, bouncesz / 512); 153 blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
154 blk_queue_max_hw_segments(mq->queue, bouncesz / 512); 154 blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
290 } 290 }
291} 291}
292 292
293static void copy_sg(struct scatterlist *dst, unsigned int dst_len, 293/*
294 struct scatterlist *src, unsigned int src_len) 294 * Prepare the sg list(s) to be handed off to the host driver
295{ 295 */
296 unsigned int chunk;
297 char *dst_buf, *src_buf;
298 unsigned int dst_size, src_size;
299
300 dst_buf = NULL;
301 src_buf = NULL;
302 dst_size = 0;
303 src_size = 0;
304
305 while (src_len) {
306 BUG_ON(dst_len == 0);
307
308 if (dst_size == 0) {
309 dst_buf = sg_virt(dst);
310 dst_size = dst->length;
311 }
312
313 if (src_size == 0) {
314 src_buf = sg_virt(src);
315 src_size = src->length;
316 }
317
318 chunk = min(dst_size, src_size);
319
320 memcpy(dst_buf, src_buf, chunk);
321
322 dst_buf += chunk;
323 src_buf += chunk;
324 dst_size -= chunk;
325 src_size -= chunk;
326
327 if (dst_size == 0) {
328 dst++;
329 dst_len--;
330 }
331
332 if (src_size == 0) {
333 src++;
334 src_len--;
335 }
336 }
337}
338
339unsigned int mmc_queue_map_sg(struct mmc_queue *mq) 296unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
340{ 297{
341 unsigned int sg_len; 298 unsigned int sg_len;
299 size_t buflen;
300 struct scatterlist *sg;
301 int i;
342 302
343 if (!mq->bounce_buf) 303 if (!mq->bounce_buf)
344 return blk_rq_map_sg(mq->queue, mq->req, mq->sg); 304 return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
349 309
350 mq->bounce_sg_len = sg_len; 310 mq->bounce_sg_len = sg_len;
351 311
352 /* 312 buflen = 0;
353 * Shortcut in the event we only get a single entry. 313 for_each_sg(mq->bounce_sg, sg, sg_len, i)
354 */ 314 buflen += sg->length;
355 if (sg_len == 1) {
356 memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
357 return 1;
358 }
359 315
360 sg_init_one(mq->sg, mq->bounce_buf, 0); 316 sg_init_one(mq->sg, mq->bounce_buf, buflen);
361
362 while (sg_len) {
363 mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
364 sg_len--;
365 }
366 317
367 return 1; 318 return 1;
368} 319}
369 320
321/*
322 * If writing, bounce the data to the buffer before the request
323 * is sent to the host driver
324 */
370void mmc_queue_bounce_pre(struct mmc_queue *mq) 325void mmc_queue_bounce_pre(struct mmc_queue *mq)
371{ 326{
327 unsigned long flags;
328
372 if (!mq->bounce_buf) 329 if (!mq->bounce_buf)
373 return; 330 return;
374 331
375 if (mq->bounce_sg_len == 1)
376 return;
377 if (rq_data_dir(mq->req) != WRITE) 332 if (rq_data_dir(mq->req) != WRITE)
378 return; 333 return;
379 334
380 copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len); 335 local_irq_save(flags);
336 sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
337 mq->bounce_buf, mq->sg[0].length);
338 local_irq_restore(flags);
381} 339}
382 340
341/*
342 * If reading, bounce the data from the buffer after the request
343 * has been handled by the host driver
344 */
383void mmc_queue_bounce_post(struct mmc_queue *mq) 345void mmc_queue_bounce_post(struct mmc_queue *mq)
384{ 346{
347 unsigned long flags;
348
385 if (!mq->bounce_buf) 349 if (!mq->bounce_buf)
386 return; 350 return;
387 351
388 if (mq->bounce_sg_len == 1)
389 return;
390 if (rq_data_dir(mq->req) != READ) 352 if (rq_data_dir(mq->req) != READ)
391 return; 353 return;
392 354
393 copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1); 355 local_irq_save(flags);
356 sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
357 mq->bounce_buf, mq->sg[0].length);
358 local_irq_restore(flags);
394} 359}
395 360
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 3f15eb204895..99b20917cc0f 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1043,7 +1043,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
1043 goto out6; 1043 goto out6;
1044 } 1044 }
1045 1045
1046 platform_set_drvdata(pdev, mmc); 1046 platform_set_drvdata(pdev, host);
1047 1047
1048 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" 1048 printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X"
1049 " (mode=%s)\n", pdev->id, host->iobase, 1049 " (mode=%s)\n", pdev->id, host->iobase,
@@ -1087,13 +1087,10 @@ out0:
1087 1087
1088static int __devexit au1xmmc_remove(struct platform_device *pdev) 1088static int __devexit au1xmmc_remove(struct platform_device *pdev)
1089{ 1089{
1090 struct mmc_host *mmc = platform_get_drvdata(pdev); 1090 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1091 struct au1xmmc_host *host;
1092
1093 if (mmc) {
1094 host = mmc_priv(mmc);
1095 1091
1096 mmc_remove_host(mmc); 1092 if (host) {
1093 mmc_remove_host(host->mmc);
1097 1094
1098#ifdef CONFIG_LEDS_CLASS 1095#ifdef CONFIG_LEDS_CLASS
1099 if (host->platdata && host->platdata->led) 1096 if (host->platdata && host->platdata->led)
@@ -1101,8 +1098,8 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
1101#endif 1098#endif
1102 1099
1103 if (host->platdata && host->platdata->cd_setup && 1100 if (host->platdata && host->platdata->cd_setup &&
1104 !(mmc->caps & MMC_CAP_NEEDS_POLL)) 1101 !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
1105 host->platdata->cd_setup(mmc, 0); 1102 host->platdata->cd_setup(host->mmc, 0);
1106 1103
1107 au_writel(0, HOST_ENABLE(host)); 1104 au_writel(0, HOST_ENABLE(host));
1108 au_writel(0, HOST_CONFIG(host)); 1105 au_writel(0, HOST_CONFIG(host));
@@ -1122,16 +1119,49 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)
1122 release_resource(host->ioarea); 1119 release_resource(host->ioarea);
1123 kfree(host->ioarea); 1120 kfree(host->ioarea);
1124 1121
1125 mmc_free_host(mmc); 1122 mmc_free_host(host->mmc);
1123 platform_set_drvdata(pdev, NULL);
1126 } 1124 }
1127 return 0; 1125 return 0;
1128} 1126}
1129 1127
1128#ifdef CONFIG_PM
1129static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1130{
1131 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1132 int ret;
1133
1134 ret = mmc_suspend_host(host->mmc, state);
1135 if (ret)
1136 return ret;
1137
1138 au_writel(0, HOST_CONFIG2(host));
1139 au_writel(0, HOST_CONFIG(host));
1140 au_writel(0xffffffff, HOST_STATUS(host));
1141 au_writel(0, HOST_ENABLE(host));
1142 au_sync();
1143
1144 return 0;
1145}
1146
1147static int au1xmmc_resume(struct platform_device *pdev)
1148{
1149 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1150
1151 au1xmmc_reset_controller(host);
1152
1153 return mmc_resume_host(host->mmc);
1154}
1155#else
1156#define au1xmmc_suspend NULL
1157#define au1xmmc_resume NULL
1158#endif
1159
1130static struct platform_driver au1xmmc_driver = { 1160static struct platform_driver au1xmmc_driver = {
1131 .probe = au1xmmc_probe, 1161 .probe = au1xmmc_probe,
1132 .remove = au1xmmc_remove, 1162 .remove = au1xmmc_remove,
1133 .suspend = NULL, 1163 .suspend = au1xmmc_suspend,
1134 .resume = NULL, 1164 .resume = au1xmmc_resume,
1135 .driver = { 1165 .driver = {
1136 .name = DRIVER_NAME, 1166 .name = DRIVER_NAME,
1137 .owner = THIS_MODULE, 1167 .owner = THIS_MODULE,
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index d39f59738866..a8e18fe53077 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -177,7 +177,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
177 if (dalgn) 177 if (dalgn)
178 DALGN |= (1 << host->dma); 178 DALGN |= (1 << host->dma);
179 else 179 else
180 DALGN &= (1 << host->dma); 180 DALGN &= ~(1 << host->dma);
181 DDADR(host->dma) = host->sg_dma; 181 DDADR(host->dma) = host->sg_dma;
182 DCSR(host->dma) = DCSR_RUN; 182 DCSR(host->dma) = DCSR_RUN;
183} 183}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 6a1e4994b724..be550c26da68 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1331,21 +1331,30 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1331 return ret; 1331 return ret;
1332} 1332}
1333 1333
1334static void s3cmci_shutdown(struct platform_device *pdev)
1335{
1336 struct mmc_host *mmc = platform_get_drvdata(pdev);
1337 struct s3cmci_host *host = mmc_priv(mmc);
1338
1339 if (host->irq_cd >= 0)
1340 free_irq(host->irq_cd, host);
1341
1342 mmc_remove_host(mmc);
1343 clk_disable(host->clk);
1344}
1345
1334static int __devexit s3cmci_remove(struct platform_device *pdev) 1346static int __devexit s3cmci_remove(struct platform_device *pdev)
1335{ 1347{
1336 struct mmc_host *mmc = platform_get_drvdata(pdev); 1348 struct mmc_host *mmc = platform_get_drvdata(pdev);
1337 struct s3cmci_host *host = mmc_priv(mmc); 1349 struct s3cmci_host *host = mmc_priv(mmc);
1338 1350
1339 mmc_remove_host(mmc); 1351 s3cmci_shutdown(pdev);
1340 1352
1341 clk_disable(host->clk);
1342 clk_put(host->clk); 1353 clk_put(host->clk);
1343 1354
1344 tasklet_disable(&host->pio_tasklet); 1355 tasklet_disable(&host->pio_tasklet);
1345 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); 1356 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client);
1346 1357
1347 if (host->irq_cd >= 0)
1348 free_irq(host->irq_cd, host);
1349 free_irq(host->irq, host); 1358 free_irq(host->irq, host);
1350 1359
1351 iounmap(host->base); 1360 iounmap(host->base);
@@ -1355,17 +1364,17 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
1355 return 0; 1364 return 0;
1356} 1365}
1357 1366
1358static int __devinit s3cmci_probe_2410(struct platform_device *dev) 1367static int __devinit s3cmci_2410_probe(struct platform_device *dev)
1359{ 1368{
1360 return s3cmci_probe(dev, 0); 1369 return s3cmci_probe(dev, 0);
1361} 1370}
1362 1371
1363static int __devinit s3cmci_probe_2412(struct platform_device *dev) 1372static int __devinit s3cmci_2412_probe(struct platform_device *dev)
1364{ 1373{
1365 return s3cmci_probe(dev, 1); 1374 return s3cmci_probe(dev, 1);
1366} 1375}
1367 1376
1368static int __devinit s3cmci_probe_2440(struct platform_device *dev) 1377static int __devinit s3cmci_2440_probe(struct platform_device *dev)
1369{ 1378{
1370 return s3cmci_probe(dev, 1); 1379 return s3cmci_probe(dev, 1);
1371} 1380}
@@ -1392,29 +1401,32 @@ static int s3cmci_resume(struct platform_device *dev)
1392#endif /* CONFIG_PM */ 1401#endif /* CONFIG_PM */
1393 1402
1394 1403
1395static struct platform_driver s3cmci_driver_2410 = { 1404static struct platform_driver s3cmci_2410_driver = {
1396 .driver.name = "s3c2410-sdi", 1405 .driver.name = "s3c2410-sdi",
1397 .driver.owner = THIS_MODULE, 1406 .driver.owner = THIS_MODULE,
1398 .probe = s3cmci_probe_2410, 1407 .probe = s3cmci_2410_probe,
1399 .remove = __devexit_p(s3cmci_remove), 1408 .remove = __devexit_p(s3cmci_remove),
1409 .shutdown = s3cmci_shutdown,
1400 .suspend = s3cmci_suspend, 1410 .suspend = s3cmci_suspend,
1401 .resume = s3cmci_resume, 1411 .resume = s3cmci_resume,
1402}; 1412};
1403 1413
1404static struct platform_driver s3cmci_driver_2412 = { 1414static struct platform_driver s3cmci_2412_driver = {
1405 .driver.name = "s3c2412-sdi", 1415 .driver.name = "s3c2412-sdi",
1406 .driver.owner = THIS_MODULE, 1416 .driver.owner = THIS_MODULE,
1407 .probe = s3cmci_probe_2412, 1417 .probe = s3cmci_2412_probe,
1408 .remove = __devexit_p(s3cmci_remove), 1418 .remove = __devexit_p(s3cmci_remove),
1419 .shutdown = s3cmci_shutdown,
1409 .suspend = s3cmci_suspend, 1420 .suspend = s3cmci_suspend,
1410 .resume = s3cmci_resume, 1421 .resume = s3cmci_resume,
1411}; 1422};
1412 1423
1413static struct platform_driver s3cmci_driver_2440 = { 1424static struct platform_driver s3cmci_2440_driver = {
1414 .driver.name = "s3c2440-sdi", 1425 .driver.name = "s3c2440-sdi",
1415 .driver.owner = THIS_MODULE, 1426 .driver.owner = THIS_MODULE,
1416 .probe = s3cmci_probe_2440, 1427 .probe = s3cmci_2440_probe,
1417 .remove = __devexit_p(s3cmci_remove), 1428 .remove = __devexit_p(s3cmci_remove),
1429 .shutdown = s3cmci_shutdown,
1418 .suspend = s3cmci_suspend, 1430 .suspend = s3cmci_suspend,
1419 .resume = s3cmci_resume, 1431 .resume = s3cmci_resume,
1420}; 1432};
@@ -1422,17 +1434,17 @@ static struct platform_driver s3cmci_driver_2440 = {
1422 1434
1423static int __init s3cmci_init(void) 1435static int __init s3cmci_init(void)
1424{ 1436{
1425 platform_driver_register(&s3cmci_driver_2410); 1437 platform_driver_register(&s3cmci_2410_driver);
1426 platform_driver_register(&s3cmci_driver_2412); 1438 platform_driver_register(&s3cmci_2412_driver);
1427 platform_driver_register(&s3cmci_driver_2440); 1439 platform_driver_register(&s3cmci_2440_driver);
1428 return 0; 1440 return 0;
1429} 1441}
1430 1442
1431static void __exit s3cmci_exit(void) 1443static void __exit s3cmci_exit(void)
1432{ 1444{
1433 platform_driver_unregister(&s3cmci_driver_2410); 1445 platform_driver_unregister(&s3cmci_2410_driver);
1434 platform_driver_unregister(&s3cmci_driver_2412); 1446 platform_driver_unregister(&s3cmci_2412_driver);
1435 platform_driver_unregister(&s3cmci_driver_2440); 1447 platform_driver_unregister(&s3cmci_2440_driver);
1436} 1448}
1437 1449
1438module_init(s3cmci_init); 1450module_init(s3cmci_init);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 17701c3da733..c3a5db72ddd7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -173,119 +173,95 @@ static void sdhci_led_control(struct led_classdev *led,
173 * * 173 * *
174\*****************************************************************************/ 174\*****************************************************************************/
175 175
176static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
177{
178 return sg_virt(host->cur_sg);
179}
180
181static inline int sdhci_next_sg(struct sdhci_host* host)
182{
183 /*
184 * Skip to next SG entry.
185 */
186 host->cur_sg++;
187 host->num_sg--;
188
189 /*
190 * Any entries left?
191 */
192 if (host->num_sg > 0) {
193 host->offset = 0;
194 host->remain = host->cur_sg->length;
195 }
196
197 return host->num_sg;
198}
199
200static void sdhci_read_block_pio(struct sdhci_host *host) 176static void sdhci_read_block_pio(struct sdhci_host *host)
201{ 177{
202 int blksize, chunk_remain; 178 unsigned long flags;
203 u32 data; 179 size_t blksize, len, chunk;
204 char *buffer; 180 u32 scratch;
205 int size; 181 u8 *buf;
206 182
207 DBG("PIO reading\n"); 183 DBG("PIO reading\n");
208 184
209 blksize = host->data->blksz; 185 blksize = host->data->blksz;
210 chunk_remain = 0; 186 chunk = 0;
211 data = 0;
212 187
213 buffer = sdhci_sg_to_buffer(host) + host->offset; 188 local_irq_save(flags);
214 189
215 while (blksize) { 190 while (blksize) {
216 if (chunk_remain == 0) { 191 if (!sg_miter_next(&host->sg_miter))
217 data = readl(host->ioaddr + SDHCI_BUFFER); 192 BUG();
218 chunk_remain = min(blksize, 4);
219 }
220 193
221 size = min(host->remain, chunk_remain); 194 len = min(host->sg_miter.length, blksize);
222 195
223 chunk_remain -= size; 196 blksize -= len;
224 blksize -= size; 197 host->sg_miter.consumed = len;
225 host->offset += size;
226 host->remain -= size;
227 198
228 while (size) { 199 buf = host->sg_miter.addr;
229 *buffer = data & 0xFF;
230 buffer++;
231 data >>= 8;
232 size--;
233 }
234 200
235 if (host->remain == 0) { 201 while (len) {
236 if (sdhci_next_sg(host) == 0) { 202 if (chunk == 0) {
237 BUG_ON(blksize != 0); 203 scratch = readl(host->ioaddr + SDHCI_BUFFER);
238 return; 204 chunk = 4;
239 } 205 }
240 buffer = sdhci_sg_to_buffer(host); 206
207 *buf = scratch & 0xFF;
208
209 buf++;
210 scratch >>= 8;
211 chunk--;
212 len--;
241 } 213 }
242 } 214 }
215
216 sg_miter_stop(&host->sg_miter);
217
218 local_irq_restore(flags);
243} 219}
244 220
245static void sdhci_write_block_pio(struct sdhci_host *host) 221static void sdhci_write_block_pio(struct sdhci_host *host)
246{ 222{
247 int blksize, chunk_remain; 223 unsigned long flags;
248 u32 data; 224 size_t blksize, len, chunk;
249 char *buffer; 225 u32 scratch;
250 int bytes, size; 226 u8 *buf;
251 227
252 DBG("PIO writing\n"); 228 DBG("PIO writing\n");
253 229
254 blksize = host->data->blksz; 230 blksize = host->data->blksz;
255 chunk_remain = 4; 231 chunk = 0;
256 data = 0; 232 scratch = 0;
257 233
258 bytes = 0; 234 local_irq_save(flags);
259 buffer = sdhci_sg_to_buffer(host) + host->offset;
260 235
261 while (blksize) { 236 while (blksize) {
262 size = min(host->remain, chunk_remain); 237 if (!sg_miter_next(&host->sg_miter))
263 238 BUG();
264 chunk_remain -= size;
265 blksize -= size;
266 host->offset += size;
267 host->remain -= size;
268
269 while (size) {
270 data >>= 8;
271 data |= (u32)*buffer << 24;
272 buffer++;
273 size--;
274 }
275 239
276 if (chunk_remain == 0) { 240 len = min(host->sg_miter.length, blksize);
277 writel(data, host->ioaddr + SDHCI_BUFFER); 241
278 chunk_remain = min(blksize, 4); 242 blksize -= len;
279 } 243 host->sg_miter.consumed = len;
244
245 buf = host->sg_miter.addr;
280 246
281 if (host->remain == 0) { 247 while (len) {
282 if (sdhci_next_sg(host) == 0) { 248 scratch |= (u32)*buf << (chunk * 8);
283 BUG_ON(blksize != 0); 249
284 return; 250 buf++;
251 chunk++;
252 len--;
253
254 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
255 writel(scratch, host->ioaddr + SDHCI_BUFFER);
256 chunk = 0;
257 scratch = 0;
285 } 258 }
286 buffer = sdhci_sg_to_buffer(host);
287 } 259 }
288 } 260 }
261
262 sg_miter_stop(&host->sg_miter);
263
264 local_irq_restore(flags);
289} 265}
290 266
291static void sdhci_transfer_pio(struct sdhci_host *host) 267static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -294,7 +270,7 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
294 270
295 BUG_ON(!host->data); 271 BUG_ON(!host->data);
296 272
297 if (host->num_sg == 0) 273 if (host->blocks == 0)
298 return; 274 return;
299 275
300 if (host->data->flags & MMC_DATA_READ) 276 if (host->data->flags & MMC_DATA_READ)
@@ -308,7 +284,8 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
308 else 284 else
309 sdhci_write_block_pio(host); 285 sdhci_write_block_pio(host);
310 286
311 if (host->num_sg == 0) 287 host->blocks--;
288 if (host->blocks == 0)
312 break; 289 break;
313 } 290 }
314 291
@@ -389,6 +366,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
389 if (offset) { 366 if (offset) {
390 if (data->flags & MMC_DATA_WRITE) { 367 if (data->flags & MMC_DATA_WRITE) {
391 buffer = sdhci_kmap_atomic(sg, &flags); 368 buffer = sdhci_kmap_atomic(sg, &flags);
369 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
392 memcpy(align, buffer, offset); 370 memcpy(align, buffer, offset);
393 sdhci_kunmap_atomic(buffer, &flags); 371 sdhci_kunmap_atomic(buffer, &flags);
394 } 372 }
@@ -510,6 +488,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
510 size = 4 - (sg_dma_address(sg) & 0x3); 488 size = 4 - (sg_dma_address(sg) & 0x3);
511 489
512 buffer = sdhci_kmap_atomic(sg, &flags); 490 buffer = sdhci_kmap_atomic(sg, &flags);
491 WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3));
513 memcpy(buffer, align, size); 492 memcpy(buffer, align, size);
514 sdhci_kunmap_atomic(buffer, &flags); 493 sdhci_kunmap_atomic(buffer, &flags);
515 494
@@ -687,7 +666,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
687 WARN_ON(1); 666 WARN_ON(1);
688 host->flags &= ~SDHCI_USE_DMA; 667 host->flags &= ~SDHCI_USE_DMA;
689 } else { 668 } else {
690 WARN_ON(count != 1); 669 WARN_ON(sg_cnt != 1);
691 writel(sg_dma_address(data->sg), 670 writel(sg_dma_address(data->sg),
692 host->ioaddr + SDHCI_DMA_ADDRESS); 671 host->ioaddr + SDHCI_DMA_ADDRESS);
693 } 672 }
@@ -711,11 +690,9 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
711 } 690 }
712 691
713 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 692 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
714 host->cur_sg = data->sg; 693 sg_miter_start(&host->sg_miter,
715 host->num_sg = data->sg_len; 694 data->sg, data->sg_len, SG_MITER_ATOMIC);
716 695 host->blocks = data->blocks;
717 host->offset = 0;
718 host->remain = host->cur_sg->length;
719 } 696 }
720 697
721 /* We do not handle DMA boundaries, so set it to max (512 KiB) */ 698 /* We do not handle DMA boundaries, so set it to max (512 KiB) */
@@ -1581,9 +1558,15 @@ int sdhci_add_host(struct sdhci_host *host)
1581 } 1558 }
1582 } 1559 }
1583 1560
1584 /* XXX: Hack to get MMC layer to avoid highmem */ 1561 /*
1585 if (!(host->flags & SDHCI_USE_DMA)) 1562 * If we use DMA, then it's up to the caller to set the DMA
1586 mmc_dev(host->mmc)->dma_mask = NULL; 1563 * mask, but PIO does not need the hw shim so we set a new
1564 * mask here in that case.
1565 */
1566 if (!(host->flags & SDHCI_USE_DMA)) {
1567 host->dma_mask = DMA_BIT_MASK(64);
1568 mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
1569 }
1587 1570
1588 host->max_clk = 1571 host->max_clk =
1589 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1572 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 5bb355281765..a06bf8b89343 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -212,6 +212,7 @@ struct sdhci_host {
212 212
213 /* Internal data */ 213 /* Internal data */
214 struct mmc_host *mmc; /* MMC structure */ 214 struct mmc_host *mmc; /* MMC structure */
215 u64 dma_mask; /* custom DMA mask */
215 216
216#ifdef CONFIG_LEDS_CLASS 217#ifdef CONFIG_LEDS_CLASS
217 struct led_classdev led; /* LED control */ 218 struct led_classdev led; /* LED control */
@@ -238,10 +239,8 @@ struct sdhci_host {
238 struct mmc_data *data; /* Current data request */ 239 struct mmc_data *data; /* Current data request */
239 unsigned int data_early:1; /* Data finished before cmd */ 240 unsigned int data_early:1; /* Data finished before cmd */
240 241
241 struct scatterlist *cur_sg; /* We're working on this */ 242 struct sg_mapping_iter sg_miter; /* SG state for PIO */
242 int num_sg; /* Entries left */ 243 unsigned int blocks; /* remaining PIO blocks */
243 int offset; /* Offset into current sg */
244 int remain; /* Bytes left in current */
245 244
246 int sg_count; /* Mapped sg entries */ 245 int sg_count; /* Mapped sg entries */
247 246
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index c42f4b83f686..3fcf92130aa4 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/mm.h>
18#include <linux/major.h> 19#include <linux/major.h>
19#include <linux/mtd/mtd.h> 20#include <linux/mtd/mtd.h>
20#include <linux/mtd/map.h> 21#include <linux/mtd/map.h>
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index cb663ef245d5..fc8529bedfdf 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -20,9 +20,11 @@
20 20
21#include <linux/mtd/nand.h> 21#include <linux/mtd/nand.h>
22#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/gpio.h>
23 24
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/mach-types.h>
26 28
27#include <asm/arch/hardware.h> 29#include <asm/arch/hardware.h>
28#include <asm/arch/pxa-regs.h> 30#include <asm/arch/pxa-regs.h>
@@ -30,20 +32,6 @@
30#define GPIO_NAND_CS (11) 32#define GPIO_NAND_CS (11)
31#define GPIO_NAND_RB (89) 33#define GPIO_NAND_RB (89)
32 34
33/* This macro needed to ensure in-order operation of GPIO and local
34 * bus. Without both asm command and dummy uncached read there're
35 * states when NAND access is broken. I've looked for such macro(s) in
36 * include/asm-arm but found nothing approptiate.
37 * dmac_clean_range is close, but is makes cache invalidation
38 * unnecessary here and it cannot be used in module
39 */
40#define DRAIN_WB() \
41 do { \
42 unsigned char dummy; \
43 asm volatile ("mcr p15, 0, r0, c7, c10, 4":::"r0"); \
44 dummy=*((unsigned char*)UNCACHED_ADDR); \
45 } while(0)
46
47/* MTD structure for CM-X270 board */ 35/* MTD structure for CM-X270 board */
48static struct mtd_info *cmx270_nand_mtd; 36static struct mtd_info *cmx270_nand_mtd;
49 37
@@ -103,14 +91,14 @@ static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
103 91
104static inline void nand_cs_on(void) 92static inline void nand_cs_on(void)
105{ 93{
106 GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); 94 gpio_set_value(GPIO_NAND_CS, 0);
107} 95}
108 96
109static void nand_cs_off(void) 97static void nand_cs_off(void)
110{ 98{
111 DRAIN_WB(); 99 dsb();
112 100
113 GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); 101 gpio_set_value(GPIO_NAND_CS, 1);
114} 102}
115 103
116/* 104/*
@@ -122,7 +110,7 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
122 struct nand_chip* this = mtd->priv; 110 struct nand_chip* this = mtd->priv;
123 unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; 111 unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
124 112
125 DRAIN_WB(); 113 dsb();
126 114
127 if (ctrl & NAND_CTRL_CHANGE) { 115 if (ctrl & NAND_CTRL_CHANGE) {
128 if ( ctrl & NAND_ALE ) 116 if ( ctrl & NAND_ALE )
@@ -139,12 +127,12 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
139 nand_cs_off(); 127 nand_cs_off();
140 } 128 }
141 129
142 DRAIN_WB(); 130 dsb();
143 this->IO_ADDR_W = (void __iomem*)nandaddr; 131 this->IO_ADDR_W = (void __iomem*)nandaddr;
144 if (dat != NAND_CMD_NONE) 132 if (dat != NAND_CMD_NONE)
145 writel((dat << 16), this->IO_ADDR_W); 133 writel((dat << 16), this->IO_ADDR_W);
146 134
147 DRAIN_WB(); 135 dsb();
148} 136}
149 137
150/* 138/*
@@ -152,9 +140,9 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
152 */ 140 */
153static int cmx270_device_ready(struct mtd_info *mtd) 141static int cmx270_device_ready(struct mtd_info *mtd)
154{ 142{
155 DRAIN_WB(); 143 dsb();
156 144
157 return (GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB)); 145 return (gpio_get_value(GPIO_NAND_RB));
158} 146}
159 147
160/* 148/*
@@ -168,20 +156,40 @@ static int cmx270_init(void)
168 int mtd_parts_nb = 0; 156 int mtd_parts_nb = 0;
169 int ret; 157 int ret;
170 158
159 if (!machine_is_armcore())
160 return -ENODEV;
161
162 ret = gpio_request(GPIO_NAND_CS, "NAND CS");
163 if (ret) {
164 pr_warning("CM-X270: failed to request NAND CS gpio\n");
165 return ret;
166 }
167
168 gpio_direction_output(GPIO_NAND_CS, 1);
169
170 ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
171 if (ret) {
172 pr_warning("CM-X270: failed to request NAND R/B gpio\n");
173 goto err_gpio_request;
174 }
175
176 gpio_direction_input(GPIO_NAND_RB);
177
171 /* Allocate memory for MTD device structure and private data */ 178 /* Allocate memory for MTD device structure and private data */
172 cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + 179 cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) +
173 sizeof(struct nand_chip), 180 sizeof(struct nand_chip),
174 GFP_KERNEL); 181 GFP_KERNEL);
175 if (!cmx270_nand_mtd) { 182 if (!cmx270_nand_mtd) {
176 printk("Unable to allocate CM-X270 NAND MTD device structure.\n"); 183 pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n");
177 return -ENOMEM; 184 ret = -ENOMEM;
185 goto err_kzalloc;
178 } 186 }
179 187
180 cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); 188 cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
181 if (!cmx270_nand_io) { 189 if (!cmx270_nand_io) {
182 printk("Unable to ioremap NAND device\n"); 190 pr_debug("Unable to ioremap NAND device\n");
183 ret = -EINVAL; 191 ret = -EINVAL;
184 goto err1; 192 goto err_ioremap;
185 } 193 }
186 194
187 /* Get pointer to private data */ 195 /* Get pointer to private data */
@@ -209,9 +217,9 @@ static int cmx270_init(void)
209 217
210 /* Scan to find existence of the device */ 218 /* Scan to find existence of the device */
211 if (nand_scan (cmx270_nand_mtd, 1)) { 219 if (nand_scan (cmx270_nand_mtd, 1)) {
212 printk(KERN_NOTICE "No NAND device\n"); 220 pr_notice("No NAND device\n");
213 ret = -ENXIO; 221 ret = -ENXIO;
214 goto err2; 222 goto err_scan;
215 } 223 }
216 224
217#ifdef CONFIG_MTD_CMDLINE_PARTS 225#ifdef CONFIG_MTD_CMDLINE_PARTS
@@ -229,18 +237,22 @@ static int cmx270_init(void)
229 } 237 }
230 238
231 /* Register the partitions */ 239 /* Register the partitions */
232 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 240 pr_notice("Using %s partition definition\n", part_type);
233 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); 241 ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
234 if (ret) 242 if (ret)
235 goto err2; 243 goto err_scan;
236 244
237 /* Return happy */ 245 /* Return happy */
238 return 0; 246 return 0;
239 247
240err2: 248err_scan:
241 iounmap(cmx270_nand_io); 249 iounmap(cmx270_nand_io);
242err1: 250err_ioremap:
243 kfree(cmx270_nand_mtd); 251 kfree(cmx270_nand_mtd);
252err_kzalloc:
253 gpio_free(GPIO_NAND_RB);
254err_gpio_request:
255 gpio_free(GPIO_NAND_CS);
244 256
245 return ret; 257 return ret;
246 258
@@ -255,6 +267,9 @@ static void cmx270_cleanup(void)
255 /* Release resources, unregister device */ 267 /* Release resources, unregister device */
256 nand_release(cmx270_nand_mtd); 268 nand_release(cmx270_nand_mtd);
257 269
270 gpio_free(GPIO_NAND_RB);
271 gpio_free(GPIO_NAND_CS);
272
258 iounmap(cmx270_nand_io); 273 iounmap(cmx270_nand_io);
259 274
260 /* Free the MTD device structure */ 275 /* Free the MTD device structure */
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 869544b8c05c..9c0f56b3c518 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4067,8 +4067,6 @@ static void e1000_netpoll(struct net_device *netdev)
4067 disable_irq(adapter->pdev->irq); 4067 disable_irq(adapter->pdev->irq);
4068 e1000_intr(adapter->pdev->irq, netdev); 4068 e1000_intr(adapter->pdev->irq, netdev);
4069 4069
4070 e1000_clean_tx_irq(adapter);
4071
4072 enable_irq(adapter->pdev->irq); 4070 enable_irq(adapter->pdev->irq);
4073} 4071}
4074#endif 4072#endif
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index e141a1513f07..ea3a09aaa844 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/mm.h>
36#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
37 38
38#include <linux/mlx4/cmd.h> 39#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 711e4a8948e0..5257cf464f1a 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1829,9 +1829,6 @@ static int sky2_down(struct net_device *dev)
1829 if (netif_msg_ifdown(sky2)) 1829 if (netif_msg_ifdown(sky2))
1830 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 1830 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1831 1831
1832 /* Stop more packets from being queued */
1833 netif_stop_queue(dev);
1834
1835 /* Disable port IRQ */ 1832 /* Disable port IRQ */
1836 imask = sky2_read32(hw, B0_IMSK); 1833 imask = sky2_read32(hw, B0_IMSK);
1837 imask &= ~portirq_msk[port]; 1834 imask &= ~portirq_msk[port];
@@ -1887,8 +1884,6 @@ static int sky2_down(struct net_device *dev)
1887 1884
1888 sky2_phy_power_down(hw, port); 1885 sky2_phy_power_down(hw, port);
1889 1886
1890 netif_carrier_off(dev);
1891
1892 /* turn off LED's */ 1887 /* turn off LED's */
1893 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 1888 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1894 1889
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index f2051b209da2..2040965d7724 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -308,7 +308,7 @@ static void smc_reset(struct net_device *dev)
308 * can't handle it then there will be no recovery except for 308 * can't handle it then there will be no recovery except for
309 * a hard reset or power cycle 309 * a hard reset or power cycle
310 */ 310 */
311 if (nowait) 311 if (lp->cfg.flags & SMC91X_NOWAIT)
312 cfg |= CONFIG_NO_WAIT; 312 cfg |= CONFIG_NO_WAIT;
313 313
314 /* 314 /*
@@ -1939,8 +1939,11 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1939 if (retval) 1939 if (retval)
1940 goto err_out; 1940 goto err_out;
1941 1941
1942#ifdef SMC_USE_PXA_DMA 1942#ifdef CONFIG_ARCH_PXA
1943 { 1943# ifdef SMC_USE_PXA_DMA
1944 lp->cfg.flags |= SMC91X_USE_DMA;
1945# endif
1946 if (lp->cfg.flags & SMC91X_USE_DMA) {
1944 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, 1947 int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
1945 smc_pxa_dma_irq, NULL); 1948 smc_pxa_dma_irq, NULL);
1946 if (dma >= 0) 1949 if (dma >= 0)
@@ -1980,7 +1983,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1980 } 1983 }
1981 1984
1982err_out: 1985err_out:
1983#ifdef SMC_USE_PXA_DMA 1986#ifdef CONFIG_ARCH_PXA
1984 if (retval && dev->dma != (unsigned char)-1) 1987 if (retval && dev->dma != (unsigned char)-1)
1985 pxa_free_dma(dev->dma); 1988 pxa_free_dma(dev->dma);
1986#endif 1989#endif
@@ -2050,9 +2053,11 @@ static int smc_enable_device(struct platform_device *pdev)
2050 return 0; 2053 return 0;
2051} 2054}
2052 2055
2053static int smc_request_attrib(struct platform_device *pdev) 2056static int smc_request_attrib(struct platform_device *pdev,
2057 struct net_device *ndev)
2054{ 2058{
2055 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2059 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2060 struct smc_local *lp = netdev_priv(ndev);
2056 2061
2057 if (!res) 2062 if (!res)
2058 return 0; 2063 return 0;
@@ -2063,9 +2068,11 @@ static int smc_request_attrib(struct platform_device *pdev)
2063 return 0; 2068 return 0;
2064} 2069}
2065 2070
2066static void smc_release_attrib(struct platform_device *pdev) 2071static void smc_release_attrib(struct platform_device *pdev,
2072 struct net_device *ndev)
2067{ 2073{
2068 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); 2074 struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2075 struct smc_local *lp = netdev_priv(ndev);
2069 2076
2070 if (res) 2077 if (res)
2071 release_mem_region(res->start, ATTRIB_SIZE); 2078 release_mem_region(res->start, ATTRIB_SIZE);
@@ -2123,27 +2130,14 @@ static int smc_drv_probe(struct platform_device *pdev)
2123 struct net_device *ndev; 2130 struct net_device *ndev;
2124 struct resource *res, *ires; 2131 struct resource *res, *ires;
2125 unsigned int __iomem *addr; 2132 unsigned int __iomem *addr;
2133 unsigned long irq_flags = SMC_IRQ_FLAGS;
2126 int ret; 2134 int ret;
2127 2135
2128 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2129 if (!res)
2130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2131 if (!res) {
2132 ret = -ENODEV;
2133 goto out;
2134 }
2135
2136
2137 if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
2138 ret = -EBUSY;
2139 goto out;
2140 }
2141
2142 ndev = alloc_etherdev(sizeof(struct smc_local)); 2136 ndev = alloc_etherdev(sizeof(struct smc_local));
2143 if (!ndev) { 2137 if (!ndev) {
2144 printk("%s: could not allocate device.\n", CARDNAME); 2138 printk("%s: could not allocate device.\n", CARDNAME);
2145 ret = -ENOMEM; 2139 ret = -ENOMEM;
2146 goto out_release_io; 2140 goto out;
2147 } 2141 }
2148 SET_NETDEV_DEV(ndev, &pdev->dev); 2142 SET_NETDEV_DEV(ndev, &pdev->dev);
2149 2143
@@ -2152,37 +2146,47 @@ static int smc_drv_probe(struct platform_device *pdev)
2152 */ 2146 */
2153 2147
2154 lp = netdev_priv(ndev); 2148 lp = netdev_priv(ndev);
2155 lp->cfg.irq_flags = SMC_IRQ_FLAGS;
2156 2149
2157#ifdef SMC_DYNAMIC_BUS_CONFIG 2150 if (pd) {
2158 if (pd)
2159 memcpy(&lp->cfg, pd, sizeof(lp->cfg)); 2151 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2160 else { 2152 lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
2161 lp->cfg.flags = SMC91X_USE_8BIT; 2153 } else {
2162 lp->cfg.flags |= SMC91X_USE_16BIT; 2154 lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
2163 lp->cfg.flags |= SMC91X_USE_32BIT; 2155 lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
2156 lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
2157 lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
2164 } 2158 }
2165 2159
2166 lp->cfg.flags &= ~(SMC_CAN_USE_8BIT ? 0 : SMC91X_USE_8BIT);
2167 lp->cfg.flags &= ~(SMC_CAN_USE_16BIT ? 0 : SMC91X_USE_16BIT);
2168 lp->cfg.flags &= ~(SMC_CAN_USE_32BIT ? 0 : SMC91X_USE_32BIT);
2169#endif
2170
2171 ndev->dma = (unsigned char)-1; 2160 ndev->dma = (unsigned char)-1;
2172 2161
2162 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2163 if (!res)
2164 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2165 if (!res) {
2166 ret = -ENODEV;
2167 goto out_free_netdev;
2168 }
2169
2170
2171 if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
2172 ret = -EBUSY;
2173 goto out_free_netdev;
2174 }
2175
2173 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2176 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2174 if (!ires) { 2177 if (!ires) {
2175 ret = -ENODEV; 2178 ret = -ENODEV;
2176 goto out_free_netdev; 2179 goto out_release_io;
2177 } 2180 }
2178 2181
2179 ndev->irq = ires->start; 2182 ndev->irq = ires->start;
2180 if (SMC_IRQ_FLAGS == -1)
2181 lp->cfg.irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2182 2183
2183 ret = smc_request_attrib(pdev); 2184 if (ires->flags & IRQF_TRIGGER_MASK)
2185 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2186
2187 ret = smc_request_attrib(pdev, ndev);
2184 if (ret) 2188 if (ret)
2185 goto out_free_netdev; 2189 goto out_release_io;
2186#if defined(CONFIG_SA1100_ASSABET) 2190#if defined(CONFIG_SA1100_ASSABET)
2187 NCR_0 |= NCR_ENET_OSC_EN; 2191 NCR_0 |= NCR_ENET_OSC_EN;
2188#endif 2192#endif
@@ -2197,7 +2201,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2197 goto out_release_attrib; 2201 goto out_release_attrib;
2198 } 2202 }
2199 2203
2200#ifdef SMC_USE_PXA_DMA 2204#ifdef CONFIG_ARCH_PXA
2201 { 2205 {
2202 struct smc_local *lp = netdev_priv(ndev); 2206 struct smc_local *lp = netdev_priv(ndev);
2203 lp->device = &pdev->dev; 2207 lp->device = &pdev->dev;
@@ -2205,7 +2209,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2205 } 2209 }
2206#endif 2210#endif
2207 2211
2208 ret = smc_probe(ndev, addr, lp->cfg.irq_flags); 2212 ret = smc_probe(ndev, addr, irq_flags);
2209 if (ret != 0) 2213 if (ret != 0)
2210 goto out_iounmap; 2214 goto out_iounmap;
2211 2215
@@ -2217,11 +2221,11 @@ static int smc_drv_probe(struct platform_device *pdev)
2217 platform_set_drvdata(pdev, NULL); 2221 platform_set_drvdata(pdev, NULL);
2218 iounmap(addr); 2222 iounmap(addr);
2219 out_release_attrib: 2223 out_release_attrib:
2220 smc_release_attrib(pdev); 2224 smc_release_attrib(pdev, ndev);
2221 out_free_netdev:
2222 free_netdev(ndev);
2223 out_release_io: 2225 out_release_io:
2224 release_mem_region(res->start, SMC_IO_EXTENT); 2226 release_mem_region(res->start, SMC_IO_EXTENT);
2227 out_free_netdev:
2228 free_netdev(ndev);
2225 out: 2229 out:
2226 printk("%s: not found (%d).\n", CARDNAME, ret); 2230 printk("%s: not found (%d).\n", CARDNAME, ret);
2227 2231
@@ -2240,14 +2244,14 @@ static int smc_drv_remove(struct platform_device *pdev)
2240 2244
2241 free_irq(ndev->irq, ndev); 2245 free_irq(ndev->irq, ndev);
2242 2246
2243#ifdef SMC_USE_PXA_DMA 2247#ifdef CONFIG_ARCH_PXA
2244 if (ndev->dma != (unsigned char)-1) 2248 if (ndev->dma != (unsigned char)-1)
2245 pxa_free_dma(ndev->dma); 2249 pxa_free_dma(ndev->dma);
2246#endif 2250#endif
2247 iounmap(lp->base); 2251 iounmap(lp->base);
2248 2252
2249 smc_release_datacs(pdev,ndev); 2253 smc_release_datacs(pdev,ndev);
2250 smc_release_attrib(pdev); 2254 smc_release_attrib(pdev,ndev);
2251 2255
2252 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); 2256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2253 if (!res) 2257 if (!res)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 8606818653f8..22209b6f1405 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -40,23 +40,46 @@
40 * Define your architecture specific bus configuration parameters here. 40 * Define your architecture specific bus configuration parameters here.
41 */ 41 */
42 42
43#if defined(CONFIG_ARCH_LUBBOCK) 43#if defined(CONFIG_ARCH_LUBBOCK) ||\
44 defined(CONFIG_MACH_MAINSTONE) ||\
45 defined(CONFIG_MACH_ZYLONITE) ||\
46 defined(CONFIG_MACH_LITTLETON)
44 47
45/* We can only do 16-bit reads and writes in the static memory space. */ 48#include <asm/mach-types.h>
46#define SMC_CAN_USE_8BIT 0 49
50/* Now the bus width is specified in the platform data
51 * pretend here to support all I/O access types
52 */
53#define SMC_CAN_USE_8BIT 1
47#define SMC_CAN_USE_16BIT 1 54#define SMC_CAN_USE_16BIT 1
48#define SMC_CAN_USE_32BIT 0 55#define SMC_CAN_USE_32BIT 1
49#define SMC_NOWAIT 1 56#define SMC_NOWAIT 1
50 57
51/* The first two address lines aren't connected... */ 58#define SMC_IO_SHIFT (lp->io_shift)
52#define SMC_IO_SHIFT 2
53 59
60#define SMC_inb(a, r) readb((a) + (r))
54#define SMC_inw(a, r) readw((a) + (r)) 61#define SMC_inw(a, r) readw((a) + (r))
55#define SMC_outw(v, a, r) writew(v, (a) + (r)) 62#define SMC_inl(a, r) readl((a) + (r))
63#define SMC_outb(v, a, r) writeb(v, (a) + (r))
64#define SMC_outl(v, a, r) writel(v, (a) + (r))
56#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 65#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
57#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 66#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
67#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
68#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
58#define SMC_IRQ_FLAGS (-1) /* from resource */ 69#define SMC_IRQ_FLAGS (-1) /* from resource */
59 70
71/* We actually can't write halfwords properly if not word aligned */
72static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
73{
74 if (machine_is_mainstone() && reg & 2) {
75 unsigned int v = val << 16;
76 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
77 writel(v, ioaddr + (reg & ~2));
78 } else {
79 writew(val, ioaddr + reg);
80 }
81}
82
60#elif defined(CONFIG_BLACKFIN) 83#elif defined(CONFIG_BLACKFIN)
61 84
62#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH 85#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
@@ -195,7 +218,6 @@
195#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 218#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
196 219
197#elif defined(CONFIG_ARCH_INNOKOM) || \ 220#elif defined(CONFIG_ARCH_INNOKOM) || \
198 defined(CONFIG_MACH_MAINSTONE) || \
199 defined(CONFIG_ARCH_PXA_IDP) || \ 221 defined(CONFIG_ARCH_PXA_IDP) || \
200 defined(CONFIG_ARCH_RAMSES) || \ 222 defined(CONFIG_ARCH_RAMSES) || \
201 defined(CONFIG_ARCH_PCM027) 223 defined(CONFIG_ARCH_PCM027)
@@ -229,22 +251,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
229 } 251 }
230} 252}
231 253
232#elif defined(CONFIG_MACH_ZYLONITE)
233
234#define SMC_CAN_USE_8BIT 1
235#define SMC_CAN_USE_16BIT 1
236#define SMC_CAN_USE_32BIT 0
237#define SMC_IO_SHIFT 0
238#define SMC_NOWAIT 1
239#define SMC_USE_PXA_DMA 1
240#define SMC_inb(a, r) readb((a) + (r))
241#define SMC_inw(a, r) readw((a) + (r))
242#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
243#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
244#define SMC_outb(v, a, r) writeb(v, (a) + (r))
245#define SMC_outw(v, a, r) writew(v, (a) + (r))
246#define SMC_IRQ_FLAGS (-1) /* from resource */
247
248#elif defined(CONFIG_ARCH_OMAP) 254#elif defined(CONFIG_ARCH_OMAP)
249 255
250/* We can only do 16-bit reads and writes in the static memory space. */ 256/* We can only do 16-bit reads and writes in the static memory space. */
@@ -454,7 +460,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
454#define RPC_LSA_DEFAULT RPC_LED_100_10 460#define RPC_LSA_DEFAULT RPC_LED_100_10
455#define RPC_LSB_DEFAULT RPC_LED_TX_RX 461#define RPC_LSB_DEFAULT RPC_LED_TX_RX
456 462
457#define SMC_DYNAMIC_BUS_CONFIG
458#endif 463#endif
459 464
460 465
@@ -493,7 +498,7 @@ struct smc_local {
493 498
494 spinlock_t lock; 499 spinlock_t lock;
495 500
496#ifdef SMC_USE_PXA_DMA 501#ifdef CONFIG_ARCH_PXA
497 /* DMA needs the physical address of the chip */ 502 /* DMA needs the physical address of the chip */
498 u_long physaddr; 503 u_long physaddr;
499 struct device *device; 504 struct device *device;
@@ -501,20 +506,17 @@ struct smc_local {
501 void __iomem *base; 506 void __iomem *base;
502 void __iomem *datacs; 507 void __iomem *datacs;
503 508
509 /* the low address lines on some platforms aren't connected... */
510 int io_shift;
511
504 struct smc91x_platdata cfg; 512 struct smc91x_platdata cfg;
505}; 513};
506 514
507#ifdef SMC_DYNAMIC_BUS_CONFIG 515#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT)
508#define SMC_8BIT(p) (((p)->cfg.flags & SMC91X_USE_8BIT) && SMC_CAN_USE_8BIT) 516#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT)
509#define SMC_16BIT(p) (((p)->cfg.flags & SMC91X_USE_16BIT) && SMC_CAN_USE_16BIT) 517#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT)
510#define SMC_32BIT(p) (((p)->cfg.flags & SMC91X_USE_32BIT) && SMC_CAN_USE_32BIT)
511#else
512#define SMC_8BIT(p) SMC_CAN_USE_8BIT
513#define SMC_16BIT(p) SMC_CAN_USE_16BIT
514#define SMC_32BIT(p) SMC_CAN_USE_32BIT
515#endif
516 518
517#ifdef SMC_USE_PXA_DMA 519#ifdef CONFIG_ARCH_PXA
518/* 520/*
519 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is 521 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
520 * always happening in irq context so no need to worry about races. TX is 522 * always happening in irq context so no need to worry about races. TX is
@@ -608,7 +610,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
608{ 610{
609 DCSR(dma) = 0; 611 DCSR(dma) = 0;
610} 612}
611#endif /* SMC_USE_PXA_DMA */ 613#endif /* CONFIG_ARCH_PXA */
612 614
613 615
614/* 616/*
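
With SMC_DYNAMIC_BUS_CONFIG gone, the bus width on these PXA machines is no longer a per-board compile-time choice: the board supplies struct smc91x_platdata and the driver masks SMC91X_USE_8BIT/16BIT/32BIT at runtime, with the address-line shift read from lp->io_shift rather than a constant. A hedged sketch of what a board file might register; only the "smc91x-regs" resource name and the flag macros come from the driver, the addresses, IRQ number and flag combination are made up for illustration:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>

static struct smc91x_platdata board_smc91x_info = {
        .flags  = SMC91X_USE_16BIT | SMC91X_USE_32BIT,
};

static struct resource board_smc91x_resources[] = {
        [0] = {
                .name   = "smc91x-regs",
                .start  = 0x14000300,           /* illustrative chip-select address */
                .end    = 0x14000300 + 0xff,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
                .start  = 99,                   /* illustrative IRQ number */
                .end    = 99,
                .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
        },
};

static struct platform_device board_smc91x_device = {
        .name           = "smc91x",
        .id             = 0,
        .dev            = {
                .platform_data  = &board_smc91x_info,
        },
        .num_resources  = ARRAY_SIZE(board_smc91x_resources),
        .resource       = board_smc91x_resources,
};

The IRQ trigger bit in the resource matters here because SMC_IRQ_FLAGS is (-1), i.e. "from resource", on these machines.
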
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index e45402adac3f..e0f884034c9f 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -219,7 +219,8 @@ config PCMCIA_SA1111
219config PCMCIA_PXA2XX 219config PCMCIA_PXA2XX
220 tristate "PXA2xx support" 220 tristate "PXA2xx support"
221 depends on ARM && ARCH_PXA && PCMCIA 221 depends on ARM && ARCH_PXA && PCMCIA
222 depends on ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL || MACH_ARMCORE 222 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
223 || MACH_ARMCORE || ARCH_PXA_PALM)
223 help 224 help
224 Say Y here to include support for the PXA2xx PCMCIA controller 225 Say Y here to include support for the PXA2xx PCMCIA controller
225 226
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 85c6cc931f97..269a9e913ba2 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -72,4 +72,5 @@ pxa2xx_cs-$(CONFIG_ARCH_LUBBOCK) += pxa2xx_lubbock.o sa1111_generic.o
72pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o 72pxa2xx_cs-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
73pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o 73pxa2xx_cs-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
74pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o 74pxa2xx_cs-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x270.o
75pxa2xx_cs-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
75 76
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index c21f9a9c3e3f..a34284b1482a 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/mm.h>
31#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
32#include <linux/of_platform.h> 33#include <linux/of_platform.h>
33 34
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index f123fce65f2e..bb95db7d2b76 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -5,83 +5,60 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Compulab Ltd., 2003, 2007 8 * Compulab Ltd., 2003, 2007, 2008
9 * Mike Rapoport <mike@compulab.co.il> 9 * Mike Rapoport <mike@compulab.co.il>
10 * 10 *
11 */ 11 */
12 12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/platform_device.h> 13#include <linux/platform_device.h>
16#include <linux/irq.h> 14#include <linux/irq.h>
17#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gpio.h>
18 17
19#include <pcmcia/ss.h>
20#include <asm/hardware.h>
21#include <asm/mach-types.h> 18#include <asm/mach-types.h>
22
23#include <asm/arch/pxa-regs.h> 19#include <asm/arch/pxa-regs.h>
24#include <asm/arch/pxa2xx-gpio.h>
25#include <asm/arch/cm-x270.h>
26 20
27#include "soc_common.h" 21#include "soc_common.h"
28 22
23#define GPIO_PCMCIA_S0_CD_VALID (84)
24#define GPIO_PCMCIA_S0_RDYINT (82)
25#define GPIO_PCMCIA_RESET (53)
26
27#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
28#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
29
30
29static struct pcmcia_irqs irqs[] = { 31static struct pcmcia_irqs irqs[] = {
30 { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" }, 32 { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
31 { 1, PCMCIA_S1_CD_VALID, "PCMCIA1 CD" },
32}; 33};
33 34
34static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 35static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
35{ 36{
36 GPSR(GPIO48_nPOE) = GPIO_bit(GPIO48_nPOE) | 37 int ret = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset");
37 GPIO_bit(GPIO49_nPWE) | 38 if (ret)
38 GPIO_bit(GPIO50_nPIOR) | 39 return ret;
39 GPIO_bit(GPIO51_nPIOW) | 40 gpio_direction_output(GPIO_PCMCIA_RESET, 0);
40 GPIO_bit(GPIO85_nPCE_1) | 41
41 GPIO_bit(GPIO54_nPCE_2); 42 skt->irq = PCMCIA_S0_RDYINT;
42 43 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
43 pxa_gpio_mode(GPIO48_nPOE_MD); 44 if (!ret)
44 pxa_gpio_mode(GPIO49_nPWE_MD); 45 gpio_free(GPIO_PCMCIA_RESET);
45 pxa_gpio_mode(GPIO50_nPIOR_MD); 46
46 pxa_gpio_mode(GPIO51_nPIOW_MD); 47 return ret;
47 pxa_gpio_mode(GPIO85_nPCE_1_MD);
48 pxa_gpio_mode(GPIO54_nPCE_2_MD);
49 pxa_gpio_mode(GPIO55_nPREG_MD);
50 pxa_gpio_mode(GPIO56_nPWAIT_MD);
51 pxa_gpio_mode(GPIO57_nIOIS16_MD);
52
53 /* Reset signal */
54 pxa_gpio_mode(GPIO53_nPCE_2 | GPIO_OUT);
55 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
56
57 set_irq_type(PCMCIA_S0_CD_VALID, IRQ_TYPE_EDGE_BOTH);
58 set_irq_type(PCMCIA_S1_CD_VALID, IRQ_TYPE_EDGE_BOTH);
59
60 /* irq's for slots: */
61 set_irq_type(PCMCIA_S0_RDYINT, IRQ_TYPE_EDGE_FALLING);
62 set_irq_type(PCMCIA_S1_RDYINT, IRQ_TYPE_EDGE_FALLING);
63
64 skt->irq = (skt->nr == 0) ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
65 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
66} 48}
67 49
68static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt) 50static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt)
69{ 51{
70 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); 52 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
71 53 gpio_free(GPIO_PCMCIA_RESET);
72 set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_CD_VALID), IRQ_TYPE_NONE);
73 set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_CD_VALID), IRQ_TYPE_NONE);
74
75 set_irq_type(IRQ_TO_GPIO(PCMCIA_S0_RDYINT), IRQ_TYPE_NONE);
76 set_irq_type(IRQ_TO_GPIO(PCMCIA_S1_RDYINT), IRQ_TYPE_NONE);
77} 54}
78 55
79 56
80static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt, 57static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
81 struct pcmcia_state *state) 58 struct pcmcia_state *state)
82{ 59{
83 state->detect = (PCC_DETECT(skt->nr) == 0) ? 1 : 0; 60 state->detect = (gpio_get_value(GPIO_PCMCIA_S0_CD_VALID) == 0) ? 1 : 0;
84 state->ready = (PCC_READY(skt->nr) == 0) ? 0 : 1; 61 state->ready = (gpio_get_value(GPIO_PCMCIA_S0_RDYINT) == 0) ? 0 : 1;
85 state->bvd1 = 1; 62 state->bvd1 = 1;
86 state->bvd2 = 1; 63 state->bvd2 = 1;
87 state->vs_3v = 0; 64 state->vs_3v = 0;
@@ -93,32 +70,16 @@ static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
93static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, 70static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
94 const socket_state_t *state) 71 const socket_state_t *state)
95{ 72{
96 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
97 pxa_gpio_mode(GPIO49_nPWE | GPIO_OUT);
98
99 switch (skt->nr) { 73 switch (skt->nr) {
100 case 0: 74 case 0:
101 if (state->flags & SS_RESET) { 75 if (state->flags & SS_RESET) {
102 GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE); 76 gpio_set_value(GPIO_PCMCIA_RESET, 1);
103 GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
104 udelay(10);
105 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
106 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
107 }
108 break;
109 case 1:
110 if (state->flags & SS_RESET) {
111 GPCR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
112 GPSR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2);
113 udelay(10); 77 udelay(10);
114 GPCR(GPIO53_nPCE_2) = GPIO_bit(GPIO53_nPCE_2); 78 gpio_set_value(GPIO_PCMCIA_RESET, 0);
115 GPSR(GPIO49_nPWE) = GPIO_bit(GPIO49_nPWE);
116 } 79 }
117 break; 80 break;
118 } 81 }
119 82
120 pxa_gpio_mode(GPIO49_nPWE_MD);
121
122 return 0; 83 return 0;
123} 84}
124 85
@@ -139,7 +100,7 @@ static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
139 .configure_socket = cmx270_pcmcia_configure_socket, 100 .configure_socket = cmx270_pcmcia_configure_socket,
140 .socket_init = cmx270_pcmcia_socket_init, 101 .socket_init = cmx270_pcmcia_socket_init,
141 .socket_suspend = cmx270_pcmcia_socket_suspend, 102 .socket_suspend = cmx270_pcmcia_socket_suspend,
142 .nr = 2, 103 .nr = 1,
143}; 104};
144 105
145static struct platform_device *cmx270_pcmcia_device; 106static struct platform_device *cmx270_pcmcia_device;
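
The cm-x270 rewrite above swaps direct GPSR/GPCR pokes and pxa_gpio_mode() for the generic gpiolib calls. A minimal sketch of that call sequence on its own; the GPIO number and label are placeholders rather than board facts:

#include <linux/gpio.h>
#include <linux/delay.h>

#define EXAMPLE_GPIO_RESET      53      /* placeholder GPIO number */

static int example_reset_init(void)
{
        int ret = gpio_request(EXAMPLE_GPIO_RESET, "PCCard reset");

        if (ret)
                return ret;
        return gpio_direction_output(EXAMPLE_GPIO_RESET, 0);   /* deasserted */
}

static void example_reset_pulse(void)
{
        gpio_set_value(EXAMPLE_GPIO_RESET, 1);  /* assert reset */
        udelay(10);
        gpio_set_value(EXAMPLE_GPIO_RESET, 0);  /* release */
}

static void example_reset_exit(void)
{
        gpio_free(EXAMPLE_GPIO_RESET);
}

gpio_request() can fail if another driver already owns the line, which is why the hw_init path above propagates its return value instead of assuming success.
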
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
new file mode 100644
index 000000000000..4abde190c1f5
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -0,0 +1,118 @@
1/*
2 * linux/drivers/pcmcia/pxa2xx_palmtx.c
3 *
4 * Driver for Palm T|X PCMCIA
5 *
6 * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16
17#include <asm/mach-types.h>
18
19#include <asm/arch/gpio.h>
20#include <asm/arch/palmtx.h>
21
22#include "soc_common.h"
23
24static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
25{
26 skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY);
27 return 0;
28}
29
30static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
31{
32}
33
34static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
35 struct pcmcia_state *state)
36{
37 state->detect = 1; /* always inserted */
38 state->ready = !!gpio_get_value(GPIO_NR_PALMTX_PCMCIA_READY);
39 state->bvd1 = 1;
40 state->bvd2 = 1;
41 state->wrprot = 0;
42 state->vs_3v = 1;
43 state->vs_Xv = 0;
44}
45
46static int
47palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
48 const socket_state_t *state)
49{
50 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER1, 1);
51 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER2, 1);
52 gpio_set_value(GPIO_NR_PALMTX_PCMCIA_RESET,
53 !!(state->flags & SS_RESET));
54
55 return 0;
56}
57
58static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
59{
60}
61
62static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
63{
64}
65
66static struct pcmcia_low_level palmtx_pcmcia_ops = {
67 .owner = THIS_MODULE,
68
69 .first = 0,
70 .nr = 1,
71
72 .hw_init = palmtx_pcmcia_hw_init,
73 .hw_shutdown = palmtx_pcmcia_hw_shutdown,
74
75 .socket_state = palmtx_pcmcia_socket_state,
76 .configure_socket = palmtx_pcmcia_configure_socket,
77
78 .socket_init = palmtx_pcmcia_socket_init,
79 .socket_suspend = palmtx_pcmcia_socket_suspend,
80};
81
82static struct platform_device *palmtx_pcmcia_device;
83
84static int __init palmtx_pcmcia_init(void)
85{
86 int ret;
87
88 if (!machine_is_palmtx())
89 return -ENODEV;
90
91 palmtx_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
92 if (!palmtx_pcmcia_device)
93 return -ENOMEM;
94
95 ret = platform_device_add_data(palmtx_pcmcia_device, &palmtx_pcmcia_ops,
96 sizeof(palmtx_pcmcia_ops));
97
98 if (!ret)
99 ret = platform_device_add(palmtx_pcmcia_device);
100
101 if (ret)
102 platform_device_put(palmtx_pcmcia_device);
103
104 return ret;
105}
106
107static void __exit palmtx_pcmcia_exit(void)
108{
109 platform_device_unregister(palmtx_pcmcia_device);
110}
111
112fs_initcall(palmtx_pcmcia_init);
113module_exit(palmtx_pcmcia_exit);
114
115MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
116MODULE_DESCRIPTION("PCMCIA support for Palm T|X");
117MODULE_ALIAS("platform:pxa2xx-pcmcia");
118MODULE_LICENSE("GPL");
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 58c806e9c58a..4d17d384578d 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -49,4 +49,10 @@ config BATTERY_OLPC
49 help 49 help
50 Say Y to enable support for the battery on the OLPC laptop. 50 Say Y to enable support for the battery on the OLPC laptop.
51 51
52config BATTERY_PALMTX
53 tristate "Palm T|X battery"
54 depends on MACH_PALMTX
55 help
56 Say Y to enable support for the battery in Palm T|X.
57
52endif # POWER_SUPPLY 58endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 6413ded5fe5f..6f43a54ee420 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_APM_POWER) += apm_power.o
20obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o 20obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
21obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o 21obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
22obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o 22obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
23obj-$(CONFIG_BATTERY_PALMTX) += palmtx_battery.o
diff --git a/drivers/power/palmtx_battery.c b/drivers/power/palmtx_battery.c
new file mode 100644
index 000000000000..244bb273a637
--- /dev/null
+++ b/drivers/power/palmtx_battery.c
@@ -0,0 +1,198 @@
1/*
2 * linux/drivers/power/palmtx_battery.c
3 *
4 * Battery measurement code for Palm T|X Handheld computer
5 *
6 * based on tosa_battery.c
7 *
8 * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/power_supply.h>
18#include <linux/wm97xx.h>
19#include <linux/delay.h>
20#include <linux/spinlock.h>
21#include <linux/interrupt.h>
22#include <linux/gpio.h>
23
24#include <asm/mach-types.h>
25#include <asm/arch/palmtx.h>
26
27static DEFINE_MUTEX(bat_lock);
28static struct work_struct bat_work;
29struct mutex work_lock;
30int bat_status = POWER_SUPPLY_STATUS_DISCHARGING;
31
32static unsigned long palmtx_read_bat(struct power_supply *bat_ps)
33{
34 return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data,
35 WM97XX_AUX_ID3) * 1000 / 414;
36}
37
38static unsigned long palmtx_read_temp(struct power_supply *bat_ps)
39{
40 return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data,
41 WM97XX_AUX_ID2);
42}
43
44static int palmtx_bat_get_property(struct power_supply *bat_ps,
45 enum power_supply_property psp,
46 union power_supply_propval *val)
47{
48 switch (psp) {
49 case POWER_SUPPLY_PROP_STATUS:
50 val->intval = bat_status;
51 break;
52 case POWER_SUPPLY_PROP_TECHNOLOGY:
53 val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
54 break;
55 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
56 val->intval = palmtx_read_bat(bat_ps);
57 break;
58 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
59 case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
60 val->intval = PALMTX_BAT_MAX_VOLTAGE;
61 break;
62 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
63 val->intval = PALMTX_BAT_MIN_VOLTAGE;
64 break;
65 case POWER_SUPPLY_PROP_TEMP:
66 val->intval = palmtx_read_temp(bat_ps);
67 break;
68 case POWER_SUPPLY_PROP_PRESENT:
69 val->intval = 1;
70 break;
71 default:
72 return -EINVAL;
73 }
74 return 0;
75}
76
77static void palmtx_bat_external_power_changed(struct power_supply *bat_ps)
78{
79 schedule_work(&bat_work);
80}
81
82static char *status_text[] = {
83 [POWER_SUPPLY_STATUS_UNKNOWN] = "Unknown",
84 [POWER_SUPPLY_STATUS_CHARGING] = "Charging",
85 [POWER_SUPPLY_STATUS_DISCHARGING] = "Discharging",
86};
87
88static void palmtx_bat_update(struct power_supply *bat_ps)
89{
90 int old_status = bat_status;
91
92 mutex_lock(&work_lock);
93
94 bat_status = gpio_get_value(GPIO_NR_PALMTX_POWER_DETECT) ?
95 POWER_SUPPLY_STATUS_CHARGING :
96 POWER_SUPPLY_STATUS_DISCHARGING;
97
98 if (old_status != bat_status) {
99 pr_debug("%s %s -> %s\n", bat_ps->name,
100 status_text[old_status],
101 status_text[bat_status]);
102 power_supply_changed(bat_ps);
103 }
104
105 mutex_unlock(&work_lock);
106}
107
108static enum power_supply_property palmtx_bat_main_props[] = {
109 POWER_SUPPLY_PROP_STATUS,
110 POWER_SUPPLY_PROP_TECHNOLOGY,
111 POWER_SUPPLY_PROP_VOLTAGE_NOW,
112 POWER_SUPPLY_PROP_VOLTAGE_MAX,
113 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
114 POWER_SUPPLY_PROP_TEMP,
115 POWER_SUPPLY_PROP_PRESENT,
116};
117
118struct power_supply bat_ps = {
119 .name = "main-battery",
120 .type = POWER_SUPPLY_TYPE_BATTERY,
121 .properties = palmtx_bat_main_props,
122 .num_properties = ARRAY_SIZE(palmtx_bat_main_props),
123 .get_property = palmtx_bat_get_property,
124 .external_power_changed = palmtx_bat_external_power_changed,
125 .use_for_apm = 1,
126};
127
128static void palmtx_bat_work(struct work_struct *work)
129{
130 palmtx_bat_update(&bat_ps);
131}
132
133#ifdef CONFIG_PM
134static int palmtx_bat_suspend(struct platform_device *dev, pm_message_t state)
135{
136 flush_scheduled_work();
137 return 0;
138}
139
140static int palmtx_bat_resume(struct platform_device *dev)
141{
142 schedule_work(&bat_work);
143 return 0;
144}
145#else
146#define palmtx_bat_suspend NULL
147#define palmtx_bat_resume NULL
148#endif
149
150static int __devinit palmtx_bat_probe(struct platform_device *dev)
151{
152 int ret = 0;
153
154 if (!machine_is_palmtx())
155 return -ENODEV;
156
157 mutex_init(&work_lock);
158
159 INIT_WORK(&bat_work, palmtx_bat_work);
160
161 ret = power_supply_register(&dev->dev, &bat_ps);
162 if (!ret)
163 schedule_work(&bat_work);
164
165 return ret;
166}
167
168static int __devexit palmtx_bat_remove(struct platform_device *dev)
169{
170 power_supply_unregister(&bat_ps);
171 return 0;
172}
173
174static struct platform_driver palmtx_bat_driver = {
175 .driver.name = "wm97xx-battery",
176 .driver.owner = THIS_MODULE,
177 .probe = palmtx_bat_probe,
178 .remove = __devexit_p(palmtx_bat_remove),
179 .suspend = palmtx_bat_suspend,
180 .resume = palmtx_bat_resume,
181};
182
183static int __init palmtx_bat_init(void)
184{
185 return platform_driver_register(&palmtx_bat_driver);
186}
187
188static void __exit palmtx_bat_exit(void)
189{
190 platform_driver_unregister(&palmtx_bat_driver);
191}
192
193module_init(palmtx_bat_init);
194module_exit(palmtx_bat_exit);
195
196MODULE_LICENSE("GPL");
197MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
198MODULE_DESCRIPTION("Palm T|X battery driver");
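
palmtx_read_bat() is a plain linear scaling of the WM97xx auxiliary ADC sample. A worked example with a made-up raw value, assuming only the 414 divisor from the code above (the units are whatever PALMTX_BAT_MAX/MIN_VOLTAGE in palmtx.h use):

static unsigned long example_scale(unsigned long raw)
{
        return raw * 1000 / 414;        /* e.g. raw = 1656 gives 1656 * 1000 / 414 = 4000 */
}
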
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index fc85bf2e4a97..90ab73825401 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -273,6 +273,25 @@ comment "SPI RTC drivers"
273 273
274if SPI_MASTER 274if SPI_MASTER
275 275
276config RTC_DRV_M41T94
277 tristate "ST M41T94"
278 help
279 If you say yes here you will get support for the
280 ST M41T94 SPI RTC chip.
281
282 This driver can also be built as a module. If so, the module
283 will be called rtc-m41t94.
284
285config RTC_DRV_DS1305
286 tristate "Dallas/Maxim DS1305/DS1306"
287 help
288 Select this driver to get support for the Dallas/Maxim DS1305
289 and DS1306 real time clock chips. These support a trickle
290 charger, alarms, and NVRAM in addition to the clock.
291
292 This driver can also be built as a module. If so, the module
293 will be called rtc-ds1305.
294
276config RTC_DRV_MAX6902 295config RTC_DRV_MAX6902
277 tristate "Maxim MAX6902" 296 tristate "Maxim MAX6902"
278 help 297 help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index b5d9d67df887..18622ef84cab 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
24obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o 24obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
25obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o 25obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
26obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o 26obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
27obj-$(CONFIG_RTC_DRV_DS1305) += rtc-ds1305.o
27obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o 28obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
28obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o 29obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o
29obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o 30obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
34obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o 35obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
35obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o 36obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
36obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o 37obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
38obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
37obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o 39obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
38obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o 40obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
39obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o 41obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 9c3db934cc24..cd32d05db773 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -171,8 +171,10 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
171 | BIN2BCD(tm.tm_mday) << 24 171 | BIN2BCD(tm.tm_mday) << 24
172 | AT91_RTC_DATEEN | AT91_RTC_MTHEN); 172 | AT91_RTC_DATEEN | AT91_RTC_MTHEN);
173 173
174 if (alrm->enabled) 174 if (alrm->enabled) {
175 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
175 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); 176 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
177 }
176 178
177 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 179 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
178 at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, 180 at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
@@ -191,28 +193,22 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
191 193
192 pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg); 194 pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);
193 195
196 /* important: scrub old status before enabling IRQs */
194 switch (cmd) { 197 switch (cmd) {
195 case RTC_AIE_OFF: /* alarm off */ 198 case RTC_AIE_OFF: /* alarm off */
196 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM); 199 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
197 break; 200 break;
198 case RTC_AIE_ON: /* alarm on */ 201 case RTC_AIE_ON: /* alarm on */
202 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
199 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); 203 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
200 break; 204 break;
201 case RTC_UIE_OFF: /* update off */ 205 case RTC_UIE_OFF: /* update off */
202 case RTC_PIE_OFF: /* periodic off */
203 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); 206 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
204 break; 207 break;
205 case RTC_UIE_ON: /* update on */ 208 case RTC_UIE_ON: /* update on */
206 case RTC_PIE_ON: /* periodic on */ 209 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
207 at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV); 210 at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
208 break; 211 break;
209 case RTC_IRQP_READ: /* read periodic alarm frequency */
210 ret = put_user(AT91_RTC_FREQ, (unsigned long *) arg);
211 break;
212 case RTC_IRQP_SET: /* set periodic alarm frequency */
213 if (arg != AT91_RTC_FREQ)
214 ret = -EINVAL;
215 break;
216 default: 212 default:
217 ret = -ENOIOCTLCMD; 213 ret = -ENOIOCTLCMD;
218 break; 214 break;
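
The at91 hunks reduce to the rule in the added comment: ack any stale event in AT91_RTC_SCCR before unmasking it in AT91_RTC_IER, otherwise enabling the interrupt can immediately re-deliver an old alarm or second-tick event. Distilled into a helper (a sketch built from the two calls above, assuming the at91 headers the driver already includes):

static void example_rtc_enable_event(unsigned int mask)
{
        at91_sys_write(AT91_RTC_SCCR, mask);    /* clear pending status first */
        at91_sys_write(AT91_RTC_IER, mask);     /* then unmask the interrupt */
}

The RTC_AIE_ON and RTC_UIE_ON cases are exactly this sequence, with AT91_RTC_ALARM and AT91_RTC_SECEV respectively.
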
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index d7bb9bac71df..6ea349aba3ba 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,25 +36,9 @@
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/mod_devicetable.h> 37#include <linux/mod_devicetable.h>
38 38
39#ifdef CONFIG_HPET_EMULATE_RTC
40#include <asm/hpet.h>
41#endif
42
43/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ 39/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
44#include <asm-generic/rtc.h> 40#include <asm-generic/rtc.h>
45 41
46#ifndef CONFIG_HPET_EMULATE_RTC
47#define is_hpet_enabled() 0
48#define hpet_set_alarm_time(hrs, min, sec) do { } while (0)
49#define hpet_set_periodic_freq(arg) 0
50#define hpet_mask_rtc_irq_bit(arg) do { } while (0)
51#define hpet_set_rtc_irq_bit(arg) do { } while (0)
52#define hpet_rtc_timer_init() do { } while (0)
53#define hpet_register_irq_handler(h) 0
54#define hpet_unregister_irq_handler(h) do { } while (0)
55extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
56#endif
57
58struct cmos_rtc { 42struct cmos_rtc {
59 struct rtc_device *rtc; 43 struct rtc_device *rtc;
60 struct device *dev; 44 struct device *dev;
@@ -93,6 +77,72 @@ static inline int is_intr(u8 rtc_intr)
93 77
94/*----------------------------------------------------------------*/ 78/*----------------------------------------------------------------*/
95 79
80/* Much modern x86 hardware has HPETs (10+ MHz timers) which, because
81 * many BIOS programmers don't set up "sane mode" IRQ routing, are mostly
82 * used in a broken "legacy replacement" mode. The breakage includes
83 * HPET #1 hijacking the IRQ for this RTC, and being unavailable for
84 * other (better) use.
85 *
86 * When that broken mode is in use, platform glue provides a partial
87 * emulation of hardware RTC IRQ facilities using HPET #1. We don't
88 * want to use HPET for anything except those IRQs though...
89 */
90#ifdef CONFIG_HPET_EMULATE_RTC
91#include <asm/hpet.h>
92#else
93
94static inline int is_hpet_enabled(void)
95{
96 return 0;
97}
98
99static inline int hpet_mask_rtc_irq_bit(unsigned long mask)
100{
101 return 0;
102}
103
104static inline int hpet_set_rtc_irq_bit(unsigned long mask)
105{
106 return 0;
107}
108
109static inline int
110hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
111{
112 return 0;
113}
114
115static inline int hpet_set_periodic_freq(unsigned long freq)
116{
117 return 0;
118}
119
120static inline int hpet_rtc_dropped_irq(void)
121{
122 return 0;
123}
124
125static inline int hpet_rtc_timer_init(void)
126{
127 return 0;
128}
129
130extern irq_handler_t hpet_rtc_interrupt;
131
132static inline int hpet_register_irq_handler(irq_handler_t handler)
133{
134 return 0;
135}
136
137static inline int hpet_unregister_irq_handler(irq_handler_t handler)
138{
139 return 0;
140}
141
142#endif
143
144/*----------------------------------------------------------------*/
145
96static int cmos_read_time(struct device *dev, struct rtc_time *t) 146static int cmos_read_time(struct device *dev, struct rtc_time *t)
97{ 147{
98 /* REVISIT: if the clock has a "century" register, use 148 /* REVISIT: if the clock has a "century" register, use
@@ -185,11 +235,56 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
185 return 0; 235 return 0;
186} 236}
187 237
238static void cmos_checkintr(struct cmos_rtc *cmos, unsigned char rtc_control)
239{
240 unsigned char rtc_intr;
241
242 /* NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
243 * allegedly some older rtcs need that to handle irqs properly
244 */
245 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
246
247 if (is_hpet_enabled())
248 return;
249
250 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
251 if (is_intr(rtc_intr))
252 rtc_update_irq(cmos->rtc, 1, rtc_intr);
253}
254
255static void cmos_irq_enable(struct cmos_rtc *cmos, unsigned char mask)
256{
257 unsigned char rtc_control;
258
259 /* flush any pending IRQ status, notably for update irqs,
260 * before we enable new IRQs
261 */
262 rtc_control = CMOS_READ(RTC_CONTROL);
263 cmos_checkintr(cmos, rtc_control);
264
265 rtc_control |= mask;
266 CMOS_WRITE(rtc_control, RTC_CONTROL);
267 hpet_set_rtc_irq_bit(mask);
268
269 cmos_checkintr(cmos, rtc_control);
270}
271
272static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
273{
274 unsigned char rtc_control;
275
276 rtc_control = CMOS_READ(RTC_CONTROL);
277 rtc_control &= ~mask;
278 CMOS_WRITE(rtc_control, RTC_CONTROL);
279 hpet_mask_rtc_irq_bit(mask);
280
281 cmos_checkintr(cmos, rtc_control);
282}
283
188static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) 284static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
189{ 285{
190 struct cmos_rtc *cmos = dev_get_drvdata(dev); 286 struct cmos_rtc *cmos = dev_get_drvdata(dev);
191 unsigned char mon, mday, hrs, min, sec; 287 unsigned char mon, mday, hrs, min, sec;
192 unsigned char rtc_control, rtc_intr;
193 288
194 if (!is_valid_irq(cmos->irq)) 289 if (!is_valid_irq(cmos->irq))
195 return -EIO; 290 return -EIO;
@@ -213,17 +308,10 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
213 sec = t->time.tm_sec; 308 sec = t->time.tm_sec;
214 sec = (sec < 60) ? BIN2BCD(sec) : 0xff; 309 sec = (sec < 60) ? BIN2BCD(sec) : 0xff;
215 310
216 hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec);
217 spin_lock_irq(&rtc_lock); 311 spin_lock_irq(&rtc_lock);
218 312
219 /* next rtc irq must not be from previous alarm setting */ 313 /* next rtc irq must not be from previous alarm setting */
220 rtc_control = CMOS_READ(RTC_CONTROL); 314 cmos_irq_disable(cmos, RTC_AIE);
221 rtc_control &= ~RTC_AIE;
222 CMOS_WRITE(rtc_control, RTC_CONTROL);
223 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
224 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
225 if (is_intr(rtc_intr))
226 rtc_update_irq(cmos->rtc, 1, rtc_intr);
227 315
228 /* update alarm */ 316 /* update alarm */
229 CMOS_WRITE(hrs, RTC_HOURS_ALARM); 317 CMOS_WRITE(hrs, RTC_HOURS_ALARM);
@@ -237,14 +325,13 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
237 CMOS_WRITE(mon, cmos->mon_alrm); 325 CMOS_WRITE(mon, cmos->mon_alrm);
238 } 326 }
239 327
240 if (t->enabled) { 328 /* FIXME the HPET alarm glue currently ignores day_alrm
241 rtc_control |= RTC_AIE; 329 * and mon_alrm ...
242 CMOS_WRITE(rtc_control, RTC_CONTROL); 330 */
243 rtc_intr = CMOS_READ(RTC_INTR_FLAGS); 331 hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec);
244 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF; 332
245 if (is_intr(rtc_intr)) 333 if (t->enabled)
246 rtc_update_irq(cmos->rtc, 1, rtc_intr); 334 cmos_irq_enable(cmos, RTC_AIE);
247 }
248 335
249 spin_unlock_irq(&rtc_lock); 336 spin_unlock_irq(&rtc_lock);
250 337
@@ -267,8 +354,8 @@ static int cmos_irq_set_freq(struct device *dev, int freq)
267 f = 16 - f; 354 f = 16 - f;
268 355
269 spin_lock_irqsave(&rtc_lock, flags); 356 spin_lock_irqsave(&rtc_lock, flags);
270 if (!hpet_set_periodic_freq(freq)) 357 hpet_set_periodic_freq(freq);
271 CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT); 358 CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
272 spin_unlock_irqrestore(&rtc_lock, flags); 359 spin_unlock_irqrestore(&rtc_lock, flags);
273 360
274 return 0; 361 return 0;
@@ -277,26 +364,17 @@ static int cmos_irq_set_freq(struct device *dev, int freq)
277static int cmos_irq_set_state(struct device *dev, int enabled) 364static int cmos_irq_set_state(struct device *dev, int enabled)
278{ 365{
279 struct cmos_rtc *cmos = dev_get_drvdata(dev); 366 struct cmos_rtc *cmos = dev_get_drvdata(dev);
280 unsigned char rtc_control, rtc_intr;
281 unsigned long flags; 367 unsigned long flags;
282 368
283 if (!is_valid_irq(cmos->irq)) 369 if (!is_valid_irq(cmos->irq))
284 return -ENXIO; 370 return -ENXIO;
285 371
286 spin_lock_irqsave(&rtc_lock, flags); 372 spin_lock_irqsave(&rtc_lock, flags);
287 rtc_control = CMOS_READ(RTC_CONTROL);
288 373
289 if (enabled) 374 if (enabled)
290 rtc_control |= RTC_PIE; 375 cmos_irq_enable(cmos, RTC_PIE);
291 else 376 else
292 rtc_control &= ~RTC_PIE; 377 cmos_irq_disable(cmos, RTC_PIE);
293
294 CMOS_WRITE(rtc_control, RTC_CONTROL);
295
296 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
297 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
298 if (is_intr(rtc_intr))
299 rtc_update_irq(cmos->rtc, 1, rtc_intr);
300 378
301 spin_unlock_irqrestore(&rtc_lock, flags); 379 spin_unlock_irqrestore(&rtc_lock, flags);
302 return 0; 380 return 0;
@@ -308,7 +386,6 @@ static int
308cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 386cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
309{ 387{
310 struct cmos_rtc *cmos = dev_get_drvdata(dev); 388 struct cmos_rtc *cmos = dev_get_drvdata(dev);
311 unsigned char rtc_control, rtc_intr;
312 unsigned long flags; 389 unsigned long flags;
313 390
314 switch (cmd) { 391 switch (cmd) {
@@ -316,51 +393,29 @@ cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
316 case RTC_AIE_ON: 393 case RTC_AIE_ON:
317 case RTC_UIE_OFF: 394 case RTC_UIE_OFF:
318 case RTC_UIE_ON: 395 case RTC_UIE_ON:
319 case RTC_PIE_OFF:
320 case RTC_PIE_ON:
321 if (!is_valid_irq(cmos->irq)) 396 if (!is_valid_irq(cmos->irq))
322 return -EINVAL; 397 return -EINVAL;
323 break; 398 break;
399 /* PIE ON/OFF is handled by cmos_irq_set_state() */
324 default: 400 default:
325 return -ENOIOCTLCMD; 401 return -ENOIOCTLCMD;
326 } 402 }
327 403
328 spin_lock_irqsave(&rtc_lock, flags); 404 spin_lock_irqsave(&rtc_lock, flags);
329 rtc_control = CMOS_READ(RTC_CONTROL);
330 switch (cmd) { 405 switch (cmd) {
331 case RTC_AIE_OFF: /* alarm off */ 406 case RTC_AIE_OFF: /* alarm off */
332 rtc_control &= ~RTC_AIE; 407 cmos_irq_disable(cmos, RTC_AIE);
333 hpet_mask_rtc_irq_bit(RTC_AIE);
334 break; 408 break;
335 case RTC_AIE_ON: /* alarm on */ 409 case RTC_AIE_ON: /* alarm on */
336 rtc_control |= RTC_AIE; 410 cmos_irq_enable(cmos, RTC_AIE);
337 hpet_set_rtc_irq_bit(RTC_AIE);
338 break; 411 break;
339 case RTC_UIE_OFF: /* update off */ 412 case RTC_UIE_OFF: /* update off */
340 rtc_control &= ~RTC_UIE; 413 cmos_irq_disable(cmos, RTC_UIE);
341 hpet_mask_rtc_irq_bit(RTC_UIE);
342 break; 414 break;
343 case RTC_UIE_ON: /* update on */ 415 case RTC_UIE_ON: /* update on */
344 rtc_control |= RTC_UIE; 416 cmos_irq_enable(cmos, RTC_UIE);
345 hpet_set_rtc_irq_bit(RTC_UIE);
346 break;
347 case RTC_PIE_OFF: /* periodic off */
348 rtc_control &= ~RTC_PIE;
349 hpet_mask_rtc_irq_bit(RTC_PIE);
350 break;
351 case RTC_PIE_ON: /* periodic on */
352 rtc_control |= RTC_PIE;
353 hpet_set_rtc_irq_bit(RTC_PIE);
354 break; 417 break;
355 } 418 }
356 if (!is_hpet_enabled())
357 CMOS_WRITE(rtc_control, RTC_CONTROL);
358
359 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
360 rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
361 if (is_intr(rtc_intr))
362 rtc_update_irq(cmos->rtc, 1, rtc_intr);
363
364 spin_unlock_irqrestore(&rtc_lock, flags); 419 spin_unlock_irqrestore(&rtc_lock, flags);
365 return 0; 420 return 0;
366} 421}
@@ -502,27 +557,29 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
502 u8 rtc_control; 557 u8 rtc_control;
503 558
504 spin_lock(&rtc_lock); 559 spin_lock(&rtc_lock);
505 /* 560
506 * In this case it is HPET RTC interrupt handler 561 /* When the HPET interrupt handler calls us, the interrupt
507 * calling us, with the interrupt information 562 * status is passed as arg1 instead of the irq number. But
508 * passed as arg1, instead of irq. 563 * always clear irq status, even when HPET is in the way.
564 *
565 * Note that HPET and RTC are almost certainly out of phase,
566 * giving different IRQ status ...
509 */ 567 */
568 irqstat = CMOS_READ(RTC_INTR_FLAGS);
569 rtc_control = CMOS_READ(RTC_CONTROL);
510 if (is_hpet_enabled()) 570 if (is_hpet_enabled())
511 irqstat = (unsigned long)irq & 0xF0; 571 irqstat = (unsigned long)irq & 0xF0;
512 else { 572 irqstat &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
513 irqstat = CMOS_READ(RTC_INTR_FLAGS);
514 rtc_control = CMOS_READ(RTC_CONTROL);
515 irqstat &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
516 }
517 573
518 /* All Linux RTC alarms should be treated as if they were oneshot. 574 /* All Linux RTC alarms should be treated as if they were oneshot.
519 * Similar code may be needed in system wakeup paths, in case the 575 * Similar code may be needed in system wakeup paths, in case the
520 * alarm woke the system. 576 * alarm woke the system.
521 */ 577 */
522 if (irqstat & RTC_AIE) { 578 if (irqstat & RTC_AIE) {
523 rtc_control = CMOS_READ(RTC_CONTROL);
524 rtc_control &= ~RTC_AIE; 579 rtc_control &= ~RTC_AIE;
525 CMOS_WRITE(rtc_control, RTC_CONTROL); 580 CMOS_WRITE(rtc_control, RTC_CONTROL);
581 hpet_mask_rtc_irq_bit(RTC_AIE);
582
526 CMOS_READ(RTC_INTR_FLAGS); 583 CMOS_READ(RTC_INTR_FLAGS);
527 } 584 }
528 spin_unlock(&rtc_lock); 585 spin_unlock(&rtc_lock);
@@ -629,18 +686,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
629 * do something about other clock frequencies. 686 * do something about other clock frequencies.
630 */ 687 */
631 cmos_rtc.rtc->irq_freq = 1024; 688 cmos_rtc.rtc->irq_freq = 1024;
632 if (!hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq)) 689 hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
633 CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT); 690 CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
691
692 /* disable irqs */
693 cmos_irq_disable(&cmos_rtc, RTC_PIE | RTC_AIE | RTC_UIE);
634 694
635 /* disable irqs.
636 *
637 * NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
638 * allegedly some older rtcs need that to handle irqs properly
639 */
640 rtc_control = CMOS_READ(RTC_CONTROL); 695 rtc_control = CMOS_READ(RTC_CONTROL);
641 rtc_control &= ~(RTC_PIE | RTC_AIE | RTC_UIE);
642 CMOS_WRITE(rtc_control, RTC_CONTROL);
643 CMOS_READ(RTC_INTR_FLAGS);
644 696
645 spin_unlock_irq(&rtc_lock); 697 spin_unlock_irq(&rtc_lock);
646 698
@@ -687,7 +739,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
687 goto cleanup2; 739 goto cleanup2;
688 } 740 }
689 741
690 pr_info("%s: alarms up to one %s%s\n", 742 pr_info("%s: alarms up to one %s%s%s\n",
691 cmos_rtc.rtc->dev.bus_id, 743 cmos_rtc.rtc->dev.bus_id,
692 is_valid_irq(rtc_irq) 744 is_valid_irq(rtc_irq)
693 ? (cmos_rtc.mon_alrm 745 ? (cmos_rtc.mon_alrm
@@ -695,8 +747,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
695 : (cmos_rtc.day_alrm 747 : (cmos_rtc.day_alrm
696 ? "month" : "day")) 748 ? "month" : "day"))
697 : "no", 749 : "no",
698 cmos_rtc.century ? ", y3k" : "" 750 cmos_rtc.century ? ", y3k" : "",
699 ); 751 is_hpet_enabled() ? ", hpet irqs" : "");
700 752
701 return 0; 753 return 0;
702 754
@@ -713,13 +765,8 @@ cleanup0:
713 765
714static void cmos_do_shutdown(void) 766static void cmos_do_shutdown(void)
715{ 767{
716 unsigned char rtc_control;
717
718 spin_lock_irq(&rtc_lock); 768 spin_lock_irq(&rtc_lock);
719 rtc_control = CMOS_READ(RTC_CONTROL); 769 cmos_irq_disable(&cmos_rtc, RTC_IRQMASK);
720 rtc_control &= ~(RTC_PIE|RTC_AIE|RTC_UIE);
721 CMOS_WRITE(rtc_control, RTC_CONTROL);
722 CMOS_READ(RTC_INTR_FLAGS);
723 spin_unlock_irq(&rtc_lock); 770 spin_unlock_irq(&rtc_lock);
724} 771}
725 772
@@ -760,17 +807,17 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
760 spin_lock_irq(&rtc_lock); 807 spin_lock_irq(&rtc_lock);
761 cmos->suspend_ctrl = tmp = CMOS_READ(RTC_CONTROL); 808 cmos->suspend_ctrl = tmp = CMOS_READ(RTC_CONTROL);
762 if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) { 809 if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) {
763 unsigned char irqstat; 810 unsigned char mask;
764 811
765 if (do_wake) 812 if (do_wake)
766 tmp &= ~(RTC_PIE|RTC_UIE); 813 mask = RTC_IRQMASK & ~RTC_AIE;
767 else 814 else
768 tmp &= ~(RTC_PIE|RTC_AIE|RTC_UIE); 815 mask = RTC_IRQMASK;
816 tmp &= ~mask;
769 CMOS_WRITE(tmp, RTC_CONTROL); 817 CMOS_WRITE(tmp, RTC_CONTROL);
770 irqstat = CMOS_READ(RTC_INTR_FLAGS); 818 hpet_mask_rtc_irq_bit(mask);
771 irqstat &= (tmp & RTC_IRQMASK) | RTC_IRQF; 819
772 if (is_intr(irqstat)) 820 cmos_checkintr(cmos, tmp);
773 rtc_update_irq(cmos->rtc, 1, irqstat);
774 } 821 }
775 spin_unlock_irq(&rtc_lock); 822 spin_unlock_irq(&rtc_lock);
776 823
@@ -796,7 +843,8 @@ static int cmos_resume(struct device *dev)
796 unsigned char tmp = cmos->suspend_ctrl; 843 unsigned char tmp = cmos->suspend_ctrl;
797 844
798 /* re-enable any irqs previously active */ 845 /* re-enable any irqs previously active */
799 if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) { 846 if (tmp & RTC_IRQMASK) {
847 unsigned char mask;
800 848
801 if (cmos->enabled_wake) { 849 if (cmos->enabled_wake) {
802 if (cmos->wake_off) 850 if (cmos->wake_off)
@@ -807,18 +855,28 @@ static int cmos_resume(struct device *dev)
807 } 855 }
808 856
809 spin_lock_irq(&rtc_lock); 857 spin_lock_irq(&rtc_lock);
810 CMOS_WRITE(tmp, RTC_CONTROL); 858 do {
811 tmp = CMOS_READ(RTC_INTR_FLAGS); 859 CMOS_WRITE(tmp, RTC_CONTROL);
812 tmp &= (cmos->suspend_ctrl & RTC_IRQMASK) | RTC_IRQF; 860 hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
813 if (is_intr(tmp)) 861
814 rtc_update_irq(cmos->rtc, 1, tmp); 862 mask = CMOS_READ(RTC_INTR_FLAGS);
863 mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
864 if (!is_hpet_enabled() || !is_intr(mask))
865 break;
866
867 /* force one-shot behavior if HPET blocked
868 * the wake alarm's irq
869 */
870 rtc_update_irq(cmos->rtc, 1, mask);
871 tmp &= ~RTC_AIE;
872 hpet_mask_rtc_irq_bit(RTC_AIE);
873 } while (mask & RTC_AIE);
815 spin_unlock_irq(&rtc_lock); 874 spin_unlock_irq(&rtc_lock);
816 } 875 }
817 876
818 pr_debug("%s: resume, ctrl %02x\n", 877 pr_debug("%s: resume, ctrl %02x\n",
819 cmos_rtc.rtc->dev.bus_id, 878 cmos_rtc.rtc->dev.bus_id,
820 cmos->suspend_ctrl); 879 tmp);
821
822 880
823 return 0; 881 return 0;
824} 882}
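
The new cmos_irq_enable()/cmos_irq_disable() helpers fold together three things the old open-coded paths kept repeating: the RTC_CONTROL update, the matching HPET shadow call, and the INTR_FLAGS read that cmos_checkintr() performs to ack and forward any pending event. A sketch of a caller, assuming it sits next to the helpers in rtc-cmos.c and so sees rtc_lock and struct cmos_rtc:

static void example_toggle_alarm_irq(struct cmos_rtc *cmos, int on)
{
        unsigned long flags;

        spin_lock_irqsave(&rtc_lock, flags);    /* the helpers expect rtc_lock held */
        if (on)
                cmos_irq_enable(cmos, RTC_AIE);
        else
                cmos_irq_disable(cmos, RTC_AIE);
        spin_unlock_irqrestore(&rtc_lock, flags);
}

cmos_irq_set_state() above is this same shape with RTC_PIE.
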
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0114a78b7cbb..0a870b7e5c32 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -209,7 +209,7 @@ static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
209 return (data != 0) ? (POLLIN | POLLRDNORM) : 0; 209 return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
210} 210}
211 211
212static int rtc_dev_ioctl(struct inode *inode, struct file *file, 212static long rtc_dev_ioctl(struct file *file,
213 unsigned int cmd, unsigned long arg) 213 unsigned int cmd, unsigned long arg)
214{ 214{
215 int err = 0; 215 int err = 0;
@@ -219,6 +219,10 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
219 struct rtc_wkalrm alarm; 219 struct rtc_wkalrm alarm;
220 void __user *uarg = (void __user *) arg; 220 void __user *uarg = (void __user *) arg;
221 221
222 err = mutex_lock_interruptible(&rtc->ops_lock);
223 if (err)
224 return -EBUSY;
225
222 /* check that the calling task has appropriate permissions 226 /* check that the calling task has appropriate permissions
223 * for certain ioctls. doing this check here is useful 227 * for certain ioctls. doing this check here is useful
224 * to avoid duplicate code in each driver. 228 * to avoid duplicate code in each driver.
@@ -227,26 +231,31 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
227 case RTC_EPOCH_SET: 231 case RTC_EPOCH_SET:
228 case RTC_SET_TIME: 232 case RTC_SET_TIME:
229 if (!capable(CAP_SYS_TIME)) 233 if (!capable(CAP_SYS_TIME))
230 return -EACCES; 234 err = -EACCES;
231 break; 235 break;
232 236
233 case RTC_IRQP_SET: 237 case RTC_IRQP_SET:
234 if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE)) 238 if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
235 return -EACCES; 239 err = -EACCES;
236 break; 240 break;
237 241
238 case RTC_PIE_ON: 242 case RTC_PIE_ON:
239 if (rtc->irq_freq > rtc->max_user_freq && 243 if (rtc->irq_freq > rtc->max_user_freq &&
240 !capable(CAP_SYS_RESOURCE)) 244 !capable(CAP_SYS_RESOURCE))
241 return -EACCES; 245 err = -EACCES;
242 break; 246 break;
243 } 247 }
244 248
249 if (err)
250 goto done;
251
245 /* try the driver's ioctl interface */ 252 /* try the driver's ioctl interface */
246 if (ops->ioctl) { 253 if (ops->ioctl) {
247 err = ops->ioctl(rtc->dev.parent, cmd, arg); 254 err = ops->ioctl(rtc->dev.parent, cmd, arg);
248 if (err != -ENOIOCTLCMD) 255 if (err != -ENOIOCTLCMD) {
256 mutex_unlock(&rtc->ops_lock);
249 return err; 257 return err;
258 }
250 } 259 }
251 260
252 /* if the driver does not provide the ioctl interface 261 /* if the driver does not provide the ioctl interface
@@ -265,15 +274,19 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
265 274
266 switch (cmd) { 275 switch (cmd) {
267 case RTC_ALM_READ: 276 case RTC_ALM_READ:
277 mutex_unlock(&rtc->ops_lock);
278
268 err = rtc_read_alarm(rtc, &alarm); 279 err = rtc_read_alarm(rtc, &alarm);
269 if (err < 0) 280 if (err < 0)
270 return err; 281 return err;
271 282
272 if (copy_to_user(uarg, &alarm.time, sizeof(tm))) 283 if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
273 return -EFAULT; 284 err = -EFAULT;
274 break; 285 return err;
275 286
276 case RTC_ALM_SET: 287 case RTC_ALM_SET:
288 mutex_unlock(&rtc->ops_lock);
289
277 if (copy_from_user(&alarm.time, uarg, sizeof(tm))) 290 if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
278 return -EFAULT; 291 return -EFAULT;
279 292
@@ -321,24 +334,26 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
321 } 334 }
322 } 335 }
323 336
324 err = rtc_set_alarm(rtc, &alarm); 337 return rtc_set_alarm(rtc, &alarm);
325 break;
326 338
327 case RTC_RD_TIME: 339 case RTC_RD_TIME:
340 mutex_unlock(&rtc->ops_lock);
341
328 err = rtc_read_time(rtc, &tm); 342 err = rtc_read_time(rtc, &tm);
329 if (err < 0) 343 if (err < 0)
330 return err; 344 return err;
331 345
332 if (copy_to_user(uarg, &tm, sizeof(tm))) 346 if (copy_to_user(uarg, &tm, sizeof(tm)))
333 return -EFAULT; 347 err = -EFAULT;
334 break; 348 return err;
335 349
336 case RTC_SET_TIME: 350 case RTC_SET_TIME:
351 mutex_unlock(&rtc->ops_lock);
352
337 if (copy_from_user(&tm, uarg, sizeof(tm))) 353 if (copy_from_user(&tm, uarg, sizeof(tm)))
338 return -EFAULT; 354 return -EFAULT;
339 355
340 err = rtc_set_time(rtc, &tm); 356 return rtc_set_time(rtc, &tm);
341 break;
342 357
343 case RTC_PIE_ON: 358 case RTC_PIE_ON:
344 err = rtc_irq_set_state(rtc, NULL, 1); 359 err = rtc_irq_set_state(rtc, NULL, 1);
@@ -376,34 +391,37 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
376 break; 391 break;
377#endif 392#endif
378 case RTC_WKALM_SET: 393 case RTC_WKALM_SET:
394 mutex_unlock(&rtc->ops_lock);
379 if (copy_from_user(&alarm, uarg, sizeof(alarm))) 395 if (copy_from_user(&alarm, uarg, sizeof(alarm)))
380 return -EFAULT; 396 return -EFAULT;
381 397
382 err = rtc_set_alarm(rtc, &alarm); 398 return rtc_set_alarm(rtc, &alarm);
383 break;
384 399
385 case RTC_WKALM_RD: 400 case RTC_WKALM_RD:
401 mutex_unlock(&rtc->ops_lock);
386 err = rtc_read_alarm(rtc, &alarm); 402 err = rtc_read_alarm(rtc, &alarm);
387 if (err < 0) 403 if (err < 0)
388 return err; 404 return err;
389 405
390 if (copy_to_user(uarg, &alarm, sizeof(alarm))) 406 if (copy_to_user(uarg, &alarm, sizeof(alarm)))
391 return -EFAULT; 407 err = -EFAULT;
392 break; 408 return err;
393 409
394#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 410#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
395 case RTC_UIE_OFF: 411 case RTC_UIE_OFF:
396 clear_uie(rtc); 412 clear_uie(rtc);
397 return 0; 413 break;
398 414
399 case RTC_UIE_ON: 415 case RTC_UIE_ON:
400 return set_uie(rtc); 416 err = set_uie(rtc);
401#endif 417#endif
402 default: 418 default:
403 err = -ENOTTY; 419 err = -ENOTTY;
404 break; 420 break;
405 } 421 }
406 422
423done:
424 mutex_unlock(&rtc->ops_lock);
407 return err; 425 return err;
408} 426}
409 427
@@ -432,7 +450,7 @@ static const struct file_operations rtc_dev_fops = {
432 .llseek = no_llseek, 450 .llseek = no_llseek,
433 .read = rtc_dev_read, 451 .read = rtc_dev_read,
434 .poll = rtc_dev_poll, 452 .poll = rtc_dev_poll,
435 .ioctl = rtc_dev_ioctl, 453 .unlocked_ioctl = rtc_dev_ioctl,
436 .open = rtc_dev_open, 454 .open = rtc_dev_open,
437 .release = rtc_dev_release, 455 .release = rtc_dev_release,
438 .fasync = rtc_dev_fasync, 456 .fasync = rtc_dev_fasync,
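
The rtc-dev change has two parts: the file operation becomes unlocked_ioctl, so the BKL no longer serializes it, and rtc->ops_lock is taken on entry but must be released before calling any rtc core helper (rtc_read_time(), rtc_set_alarm(), ...) that takes ops_lock itself. A condensed sketch of that shape, assuming rtc-dev.c's existing includes and the -EBUSY convention the patch uses for an interrupted lock:

static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct rtc_device *rtc = file->private_data;
        struct rtc_time tm;
        long err;

        if (mutex_lock_interruptible(&rtc->ops_lock))
                return -EBUSY;

        switch (cmd) {
        case RTC_RD_TIME:
                mutex_unlock(&rtc->ops_lock);   /* rtc_read_time() takes ops_lock itself */
                err = rtc_read_time(rtc, &tm);
                if (!err && copy_to_user((void __user *)arg, &tm, sizeof(tm)))
                        err = -EFAULT;
                return err;
        default:
                err = -ENOTTY;
                break;
        }

        mutex_unlock(&rtc->ops_lock);
        return err;
}
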
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
new file mode 100644
index 000000000000..b91d02a3ace9
--- /dev/null
+++ b/drivers/rtc/rtc-ds1305.c
@@ -0,0 +1,847 @@
1/*
2 * rtc-ds1305.c -- driver for DS1305 and DS1306 SPI RTC chips
3 *
4 * Copyright (C) 2008 David Brownell
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/bcd.h>
14#include <linux/rtc.h>
15#include <linux/workqueue.h>
16
17#include <linux/spi/spi.h>
18#include <linux/spi/ds1305.h>
19
20
21/*
22 * Registers ... mask DS1305_WRITE into register address to write,
23 * otherwise you're reading it. All non-bitmask values are BCD.
24 */
25#define DS1305_WRITE 0x80
26
27
28/* RTC date/time ... the main special cases are that we:
29 * - Need fancy "hours" encoding in 12hour mode
30 * - Don't rely on the "day-of-week" field (or tm_wday)
31 * - Are a 21st-century clock (2000 <= year < 2100)
32 */
33#define DS1305_RTC_LEN 7 /* bytes for RTC regs */
34
35#define DS1305_SEC 0x00 /* register addresses */
36#define DS1305_MIN 0x01
37#define DS1305_HOUR 0x02
38# define DS1305_HR_12 0x40 /* set == 12 hr mode */
39# define DS1305_HR_PM 0x20 /* set == PM (12hr mode) */
40#define DS1305_WDAY 0x03
41#define DS1305_MDAY 0x04
42#define DS1305_MON 0x05
43#define DS1305_YEAR 0x06
44
45
46/* The two alarms have only sec/min/hour/wday fields (ALM_LEN).
47 * DS1305_ALM_DISABLE disables a match field (some combos are bad).
48 *
49 * NOTE that since we don't use WDAY, we limit ourselves to alarms
50 * only one day into the future (vs potentially up to a week).
51 *
52 * NOTE ALSO that while we could generate once-a-second IRQs (UIE), we
53 * don't currently support them. We'd either need to do it only when
54 * no alarm is pending (not the standard model), or to use the second
55 * alarm (implying that this is a DS1305 not DS1306, *and* that either
56 * it's wired up a second IRQ we know, or that INTCN is set)
57 */
58#define DS1305_ALM_LEN 4 /* bytes for ALM regs */
59#define DS1305_ALM_DISABLE 0x80
60
61#define DS1305_ALM0(r) (0x07 + (r)) /* register addresses */
62#define DS1305_ALM1(r) (0x0b + (r))
63
64
65/* three control registers */
66#define DS1305_CONTROL_LEN 3 /* bytes of control regs */
67
68#define DS1305_CONTROL 0x0f /* register addresses */
69# define DS1305_nEOSC 0x80 /* low enables oscillator */
70# define DS1305_WP 0x40 /* write protect */
71# define DS1305_INTCN 0x04 /* clear == only int0 used */
72# define DS1306_1HZ 0x04 /* enable 1Hz output */
73# define DS1305_AEI1 0x02 /* enable ALM1 IRQ */
74# define DS1305_AEI0 0x01 /* enable ALM0 IRQ */
75#define DS1305_STATUS 0x10
76/* status has just AEIx bits, mirrored as IRQFx */
77#define DS1305_TRICKLE 0x11
78/* trickle bits are defined in <linux/spi/ds1305.h> */
79
80/* a bunch of NVRAM */
81#define DS1305_NVRAM_LEN 96 /* bytes of NVRAM */
82
83#define DS1305_NVRAM 0x20 /* register addresses */
84
85
86struct ds1305 {
87 struct spi_device *spi;
88 struct rtc_device *rtc;
89
90 struct work_struct work;
91
92 unsigned long flags;
93#define FLAG_EXITING 0
94
95 bool hr12;
96 u8 ctrl[DS1305_CONTROL_LEN];
97};
98
99
100/*----------------------------------------------------------------------*/
101
102/*
103 * Utilities ... tolerate 12-hour AM/PM notation in case of non-Linux
104 * software (like a bootloader) which may require it.
105 */
106
107static unsigned bcd2hour(u8 bcd)
108{
109 if (bcd & DS1305_HR_12) {
110 unsigned hour = 0;
111
112 bcd &= ~DS1305_HR_12;
113 if (bcd & DS1305_HR_PM) {
114 hour = 12;
115 bcd &= ~DS1305_HR_PM;
116 }
117 hour += BCD2BIN(bcd);
118 return hour - 1;
119 }
120 return BCD2BIN(bcd);
121}
122
123static u8 hour2bcd(bool hr12, int hour)
124{
125 if (hr12) {
126 hour++;
127 if (hour <= 12)
128 return DS1305_HR_12 | BIN2BCD(hour);
129 hour -= 12;
130 return DS1305_HR_12 | DS1305_HR_PM | BIN2BCD(hour);
131 }
132 return BIN2BCD(hour);
133}
134
135/*----------------------------------------------------------------------*/
136
137/*
138 * Interface to RTC framework
139 */
140
141#ifdef CONFIG_RTC_INTF_DEV
142
143/*
144 * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
145 */
146static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
147{
148 struct ds1305 *ds1305 = dev_get_drvdata(dev);
149 u8 buf[2];
150 int status = -ENOIOCTLCMD;
151
152 buf[0] = DS1305_WRITE | DS1305_CONTROL;
153 buf[1] = ds1305->ctrl[0];
154
155 switch (cmd) {
156 case RTC_AIE_OFF:
157 status = 0;
158 if (!(buf[1] & DS1305_AEI0))
159 goto done;
160 buf[1] &= ~DS1305_AEI0;
161 break;
162
163 case RTC_AIE_ON:
164 status = 0;
165 if (ds1305->ctrl[0] & DS1305_AEI0)
166 goto done;
167 buf[1] |= DS1305_AEI0;
168 break;
169 }
170 if (status == 0) {
171 status = spi_write_then_read(ds1305->spi, buf, sizeof buf,
172 NULL, 0);
173 if (status >= 0)
174 ds1305->ctrl[0] = buf[1];
175 }
176
177done:
178 return status;
179}
180
181#else
182#define ds1305_ioctl NULL
183#endif
184
185/*
186 * Get/set of date and time is pretty normal.
187 */
188
189static int ds1305_get_time(struct device *dev, struct rtc_time *time)
190{
191 struct ds1305 *ds1305 = dev_get_drvdata(dev);
192 u8 addr = DS1305_SEC;
193 u8 buf[DS1305_RTC_LEN];
194 int status;
195
196 /* Use write-then-read to get all the date/time registers
197 * since dma from stack is nonportable
198 */
199 status = spi_write_then_read(ds1305->spi, &addr, sizeof addr,
200 buf, sizeof buf);
201 if (status < 0)
202 return status;
203
204 dev_vdbg(dev, "%s: %02x %02x %02x, %02x %02x %02x %02x\n",
205 "read", buf[0], buf[1], buf[2], buf[3],
206 buf[4], buf[5], buf[6]);
207
208 /* Decode the registers */
209 time->tm_sec = BCD2BIN(buf[DS1305_SEC]);
210 time->tm_min = BCD2BIN(buf[DS1305_MIN]);
211 time->tm_hour = bcd2hour(buf[DS1305_HOUR]);
212 time->tm_wday = buf[DS1305_WDAY] - 1;
213 time->tm_mday = BCD2BIN(buf[DS1305_MDAY]);
214 time->tm_mon = BCD2BIN(buf[DS1305_MON]) - 1;
215 time->tm_year = BCD2BIN(buf[DS1305_YEAR]) + 100;
216
217 dev_vdbg(dev, "%s secs=%d, mins=%d, "
218 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
219 "read", time->tm_sec, time->tm_min,
220 time->tm_hour, time->tm_mday,
221 time->tm_mon, time->tm_year, time->tm_wday);
222
223 /* Time may not be set */
224 return rtc_valid_tm(time);
225}
226
227static int ds1305_set_time(struct device *dev, struct rtc_time *time)
228{
229 struct ds1305 *ds1305 = dev_get_drvdata(dev);
230 u8 buf[1 + DS1305_RTC_LEN];
231 u8 *bp = buf;
232
233 dev_vdbg(dev, "%s secs=%d, mins=%d, "
234 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
235 "write", time->tm_sec, time->tm_min,
236 time->tm_hour, time->tm_mday,
237 time->tm_mon, time->tm_year, time->tm_wday);
238
239 /* Write registers starting at the first time/date address. */
240 *bp++ = DS1305_WRITE | DS1305_SEC;
241
242 *bp++ = BIN2BCD(time->tm_sec);
243 *bp++ = BIN2BCD(time->tm_min);
244 *bp++ = hour2bcd(ds1305->hr12, time->tm_hour);
245 *bp++ = (time->tm_wday < 7) ? (time->tm_wday + 1) : 1;
246 *bp++ = BIN2BCD(time->tm_mday);
247 *bp++ = BIN2BCD(time->tm_mon + 1);
248 *bp++ = BIN2BCD(time->tm_year - 100);
249
250 dev_dbg(dev, "%s: %02x %02x %02x, %02x %02x %02x %02x\n",
251 "write", buf[1], buf[2], buf[3],
252 buf[4], buf[5], buf[6], buf[7]);
253
254 /* use write-then-read since dma from stack is nonportable */
255 return spi_write_then_read(ds1305->spi, buf, sizeof buf,
256 NULL, 0);
257}
258
259/*
260 * Get/set of alarm is a bit funky:
261 *
262 * - First there's the inherent raciness of getting the (partitioned)
263 * status of an alarm that could trigger while we're reading parts
264 * of that status.
265 *
266 * - Second there's its limited range (we could increase it a bit by
267 * relying on WDAY), which means it will easily roll over.
268 *
269 * - Third there's the choice of two alarms and alarm signals.
270 * Here we use ALM0 and expect that nINT0 (open drain) is used;
271 * that's the only real option for DS1306 runtime alarms, and is
272 * natural on DS1305.
273 *
274 * - Fourth, there's also ALM1, and a second interrupt signal:
275 * + On DS1305 ALM1 uses nINT1 (when INTCN=1) else nINT0;
276 * + On DS1306 ALM1 only uses INT1 (an active high pulse)
277 * and it won't work when VCC1 is active.
278 *
279 * So to be most general, we should probably set both alarms to the
280 * same value, letting ALM1 be the wakeup event source on DS1306
281 * and handling several wiring options on DS1305.
282 *
283 * - Fifth, we support the polled mode (as well as possible; why not?)
284 * even when no interrupt line is wired to an IRQ.
285 */
286
287/*
288 * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
289 */
290static int ds1305_get_alarm(struct device *dev, struct rtc_wkalrm *alm)
291{
292 struct ds1305 *ds1305 = dev_get_drvdata(dev);
293 struct spi_device *spi = ds1305->spi;
294 u8 addr;
295 int status;
296 u8 buf[DS1305_ALM_LEN];
297
298 /* Refresh control register cache BEFORE reading ALM0 registers,
299 * since reading alarm registers acks any pending IRQ. That
300 * makes returning "pending" status a bit of a lie, but that bit
301 * of EFI status is at best fragile anyway (given IRQ handlers).
302 */
303 addr = DS1305_CONTROL;
304 status = spi_write_then_read(spi, &addr, sizeof addr,
305 ds1305->ctrl, sizeof ds1305->ctrl);
306 if (status < 0)
307 return status;
308
309 alm->enabled = !!(ds1305->ctrl[0] & DS1305_AEI0);
310 alm->pending = !!(ds1305->ctrl[1] & DS1305_AEI0);
311
312 /* get and check ALM0 registers */
313 addr = DS1305_ALM0(DS1305_SEC);
314 status = spi_write_then_read(spi, &addr, sizeof addr,
315 buf, sizeof buf);
316 if (status < 0)
317 return status;
318
319 dev_vdbg(dev, "%s: %02x %02x %02x %02x\n",
320 "alm0 read", buf[DS1305_SEC], buf[DS1305_MIN],
321 buf[DS1305_HOUR], buf[DS1305_WDAY]);
322
323 if ((DS1305_ALM_DISABLE & buf[DS1305_SEC])
324 || (DS1305_ALM_DISABLE & buf[DS1305_MIN])
325 || (DS1305_ALM_DISABLE & buf[DS1305_HOUR]))
326 return -EIO;
327
328 /* Stuff these values into alm->time and let RTC framework code
329 * fill in the rest ... and also handle rollover to tomorrow when
330 * that's needed.
331 */
332 alm->time.tm_sec = BCD2BIN(buf[DS1305_SEC]);
333 alm->time.tm_min = BCD2BIN(buf[DS1305_MIN]);
334 alm->time.tm_hour = bcd2hour(buf[DS1305_HOUR]);
335 alm->time.tm_mday = -1;
336 alm->time.tm_mon = -1;
337 alm->time.tm_year = -1;
338 /* next three fields are unused by Linux */
339 alm->time.tm_wday = -1;
340 alm->time.tm_yday = -1;
341 alm->time.tm_isdst = -1;
342
343 return 0;
344}
345
346/*
347 * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
348 */
349static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
350{
351 struct ds1305 *ds1305 = dev_get_drvdata(dev);
352 struct spi_device *spi = ds1305->spi;
353 unsigned long now, later;
354 struct rtc_time tm;
355 int status;
356 u8 buf[1 + DS1305_ALM_LEN];
357
358 /* convert desired alarm to time_t */
359 status = rtc_tm_to_time(&alm->time, &later);
360 if (status < 0)
361 return status;
362
363 /* Read current time as time_t */
364 status = ds1305_get_time(dev, &tm);
365 if (status < 0)
366 return status;
367 status = rtc_tm_to_time(&tm, &now);
368 if (status < 0)
369 return status;
370
371 /* make sure alarm fires within the next 24 hours */
372 if (later <= now)
373 return -EINVAL;
374 if ((later - now) > 24 * 60 * 60)
375 return -EDOM;
376
377 /* disable a currently-enabled alarm so it can't fire while its registers are rewritten */
378 if (ds1305->ctrl[0] & DS1305_AEI0) {
379 ds1305->ctrl[0] &= ~DS1305_AEI0;
380
381 buf[0] = DS1305_WRITE | DS1305_CONTROL;
382 buf[1] = ds1305->ctrl[0];
383 status = spi_write_then_read(ds1305->spi, buf, 2, NULL, 0);
384 if (status < 0)
385 return status;
386 }
387
388 /* write alarm */
389 buf[0] = DS1305_WRITE | DS1305_ALM0(DS1305_SEC);
390 buf[1 + DS1305_SEC] = BIN2BCD(alm->time.tm_sec);
391 buf[1 + DS1305_MIN] = BIN2BCD(alm->time.tm_min);
392 buf[1 + DS1305_HOUR] = hour2bcd(ds1305->hr12, alm->time.tm_hour);
393 buf[1 + DS1305_WDAY] = DS1305_ALM_DISABLE;
394
395 dev_dbg(dev, "%s: %02x %02x %02x %02x\n",
396 "alm0 write", buf[1 + DS1305_SEC], buf[1 + DS1305_MIN],
397 buf[1 + DS1305_HOUR], buf[1 + DS1305_WDAY]);
398
399 status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0);
400 if (status < 0)
401 return status;
402
403 /* enable alarm if requested */
404 if (alm->enabled) {
405 ds1305->ctrl[0] |= DS1305_AEI0;
406
407 buf[0] = DS1305_WRITE | DS1305_CONTROL;
408 buf[1] = ds1305->ctrl[0];
409 status = spi_write_then_read(ds1305->spi, buf, 2, NULL, 0);
410 }
411
412 return status;
413}
414
415#ifdef CONFIG_PROC_FS
416
417static int ds1305_proc(struct device *dev, struct seq_file *seq)
418{
419 struct ds1305 *ds1305 = dev_get_drvdata(dev);
420 char *diodes = "no";
421 char *resistors = "";
422
423 /* ctrl[2] is treated as read-only; no locking needed */
424 if ((ds1305->ctrl[2] & 0xf0) == DS1305_TRICKLE_MAGIC) {
425 switch (ds1305->ctrl[2] & 0x0c) {
426 case DS1305_TRICKLE_DS2:
427 diodes = "2 diodes, ";
428 break;
429 case DS1305_TRICKLE_DS1:
430 diodes = "1 diode, ";
431 break;
432 default:
433 goto done;
434 }
435 switch (ds1305->ctrl[2] & 0x03) {
436 case DS1305_TRICKLE_2K:
437 resistors = "2k Ohm";
438 break;
439 case DS1305_TRICKLE_4K:
440 resistors = "4k Ohm";
441 break;
442 case DS1305_TRICKLE_8K:
443 resistors = "8k Ohm";
444 break;
445 default:
446 diodes = "no";
447 break;
448 }
449 }
450
451done:
452 return seq_printf(seq,
453 "trickle_charge\t: %s%s\n",
454 diodes, resistors);
455}
456
457#else
458#define ds1305_proc NULL
459#endif
460
461static const struct rtc_class_ops ds1305_ops = {
462 .ioctl = ds1305_ioctl,
463 .read_time = ds1305_get_time,
464 .set_time = ds1305_set_time,
465 .read_alarm = ds1305_get_alarm,
466 .set_alarm = ds1305_set_alarm,
467 .proc = ds1305_proc,
468};
469
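/* Workqueue handler: bottom half of the alarm IRQ.  With the rtc
 * ops_lock held it clears the AEI enables and alarm status over SPI,
 * then re-enables the IRQ (unless the driver is exiting) and notifies
 * the RTC core.
 */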
470static void ds1305_work(struct work_struct *work)
471{
472 struct ds1305 *ds1305 = container_of(work, struct ds1305, work);
473 struct mutex *lock = &ds1305->rtc->ops_lock;
474 struct spi_device *spi = ds1305->spi;
475 u8 buf[3];
476 int status;
477
478 /* lock to protect ds1305->ctrl */
479 mutex_lock(lock);
480
481 /* Disable the IRQ, and clear its status ... for now, we "know"
482 * that if more than one alarm is active, they're in sync.
483 * Note that reading ALM data registers also clears IRQ status.
484 */
485 ds1305->ctrl[0] &= ~(DS1305_AEI1 | DS1305_AEI0);
486 ds1305->ctrl[1] = 0;
487
488 buf[0] = DS1305_WRITE | DS1305_CONTROL;
489 buf[1] = ds1305->ctrl[0];
490 buf[2] = 0;
491
492 status = spi_write_then_read(spi, buf, sizeof buf,
493 NULL, 0);
494 if (status < 0)
495 dev_dbg(&spi->dev, "clear irq --> %d\n", status);
496
497 mutex_unlock(lock);
498
499 if (!test_bit(FLAG_EXITING, &ds1305->flags))
500 enable_irq(spi->irq);
501
502 /* rtc_update_irq() requires an IRQ-disabled context */
503 local_irq_disable();
504 rtc_update_irq(ds1305->rtc, 1, RTC_AF | RTC_IRQF);
505 local_irq_enable();
506}
507
508/*
509 * This "real" IRQ handler hands off to a workqueue mostly to allow
510 * mutex locking for ds1305->ctrl ... unlike I2C, we could issue async
511 * I/O requests in IRQ context (to clear the IRQ status).
512 */
513static irqreturn_t ds1305_irq(int irq, void *p)
514{
515 struct ds1305 *ds1305 = p;
516
517 disable_irq(irq);
518 schedule_work(&ds1305->work);
519 return IRQ_HANDLED;
520}
521
522/*----------------------------------------------------------------------*/
523
524/*
525 * Interface for NVRAM
526 */
527
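/* Build a two-transfer SPI message: a one-byte register address,
 * then the data transfer itself, issued back-to-back under a single
 * chip select.
 */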
528static void msg_init(struct spi_message *m, struct spi_transfer *x,
529 u8 *addr, size_t count, char *tx, char *rx)
530{
531 spi_message_init(m);
532 memset(x, 0, 2 * sizeof(*x));
533
534 x->tx_buf = addr;
535 x->len = 1;
536 spi_message_add_tail(x, m);
537
538 x++;
539
540 x->tx_buf = tx;
541 x->rx_buf = rx;
542 x->len = count;
543 spi_message_add_tail(x, m);
544}
545
546static ssize_t
547ds1305_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
548 char *buf, loff_t off, size_t count)
549{
550 struct spi_device *spi;
551 u8 addr;
552 struct spi_message m;
553 struct spi_transfer x[2];
554 int status;
555
556 spi = container_of(kobj, struct spi_device, dev.kobj);
557
558 if (unlikely(off >= DS1305_NVRAM_LEN))
559 return 0;
560 if (count >= DS1305_NVRAM_LEN)
561 count = DS1305_NVRAM_LEN;
562 if ((off + count) > DS1305_NVRAM_LEN)
563 count = DS1305_NVRAM_LEN - off;
564 if (unlikely(!count))
565 return count;
566
567 addr = DS1305_NVRAM + off;
568 msg_init(&m, x, &addr, count, NULL, buf);
569
570 status = spi_sync(spi, &m);
571 if (status < 0)
572 dev_err(&spi->dev, "nvram %s error %d\n", "read", status);
573 return (status < 0) ? status : count;
574}
575
576static ssize_t
577ds1305_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
578 char *buf, loff_t off, size_t count)
579{
580 struct spi_device *spi;
581 u8 addr;
582 struct spi_message m;
583 struct spi_transfer x[2];
584 int status;
585
586 spi = container_of(kobj, struct spi_device, dev.kobj);
587
588 if (unlikely(off >= DS1305_NVRAM_LEN))
589 return -EFBIG;
590 if (count >= DS1305_NVRAM_LEN)
591 count = DS1305_NVRAM_LEN;
592 if ((off + count) > DS1305_NVRAM_LEN)
593 count = DS1305_NVRAM_LEN - off;
594 if (unlikely(!count))
595 return count;
596
597 addr = (DS1305_WRITE | DS1305_NVRAM) + off;
598 msg_init(&m, x, &addr, count, buf, NULL);
599
600 status = spi_sync(spi, &m);
601 if (status < 0)
602 dev_err(&spi->dev, "nvram %s error %d\n", "write", status);
603 return (status < 0) ? status : count;
604}
605
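/* Expose the chip's 96 bytes of NVRAM as a read/write sysfs binary
 * attribute ("nvram") on the SPI device.
 */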
606static struct bin_attribute nvram = {
607 .attr.name = "nvram",
608 .attr.mode = S_IRUGO | S_IWUSR,
609 .attr.owner = THIS_MODULE,
610 .read = ds1305_nvram_read,
611 .write = ds1305_nvram_write,
612 .size = DS1305_NVRAM_LEN,
613};
614
615/*----------------------------------------------------------------------*/
616
617/*
618 * Interface to SPI stack
619 */
620
621static int __devinit ds1305_probe(struct spi_device *spi)
622{
623 struct ds1305 *ds1305;
624 struct rtc_device *rtc;
625 int status;
626 u8 addr, value;
627 struct ds1305_platform_data *pdata = spi->dev.platform_data;
628 bool write_ctrl = false;
629
630 /* Sanity check board setup data. This may be hooked up
631 * in 3wire mode, but we don't care. Note that unless
632 * there's an inverter in place, this needs SPI_CS_HIGH!
633 */
634 if ((spi->bits_per_word && spi->bits_per_word != 8)
635 || (spi->max_speed_hz > 2000000)
636 || !(spi->mode & SPI_CPHA))
637 return -EINVAL;
638
639 /* set up driver data */
640 ds1305 = kzalloc(sizeof *ds1305, GFP_KERNEL);
641 if (!ds1305)
642 return -ENOMEM;
643 ds1305->spi = spi;
644 spi_set_drvdata(spi, ds1305);
645
646 /* read and cache control registers */
647 addr = DS1305_CONTROL;
648 status = spi_write_then_read(spi, &addr, sizeof addr,
649 ds1305->ctrl, sizeof ds1305->ctrl);
650 if (status < 0) {
651 dev_dbg(&spi->dev, "can't %s, %d\n",
652 "read", status);
653 goto fail0;
654 }
655
656 dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n",
657 "read", ds1305->ctrl[0],
658 ds1305->ctrl[1], ds1305->ctrl[2]);
659
660 /* Sanity check register values ... partially compensating for the
661 * fact that SPI has no device handshake. A pullup on MISO would
662 * make these tests fail; but not all systems will have one. If
663 * some register is neither 0x00 nor 0xff, a chip is likely there.
664 */
665 if ((ds1305->ctrl[0] & 0x38) != 0 || (ds1305->ctrl[1] & 0xfc) != 0) {
666 dev_dbg(&spi->dev, "RTC chip is not present\n");
667 status = -ENODEV;
668 goto fail0;
669 }
670 if (ds1305->ctrl[2] == 0)
671 dev_dbg(&spi->dev, "chip may not be present\n");
672
673 /* enable writes if needed ... if we were paranoid it would
674 * make sense to enable them only when absolutely necessary.
675 */
676 if (ds1305->ctrl[0] & DS1305_WP) {
677 u8 buf[2];
678
679 ds1305->ctrl[0] &= ~DS1305_WP;
680
681 buf[0] = DS1305_WRITE | DS1305_CONTROL;
682 buf[1] = ds1305->ctrl[0];
683 status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0);
684
685 dev_dbg(&spi->dev, "clear WP --> %d\n", status);
686 if (status < 0)
687 goto fail0;
688 }
689
690 /* on DS1305, maybe start oscillator; like most low power
691 * oscillators, it may take a second to stabilize
692 */
693 if (ds1305->ctrl[0] & DS1305_nEOSC) {
694 ds1305->ctrl[0] &= ~DS1305_nEOSC;
695 write_ctrl = true;
696 dev_warn(&spi->dev, "SET TIME!\n");
697 }
698
699 /* ack any pending IRQs */
700 if (ds1305->ctrl[1]) {
701 ds1305->ctrl[1] = 0;
702 write_ctrl = true;
703 }
704
705 /* this may need one-time (re)init */
706 if (pdata) {
707 /* maybe enable trickle charge */
708 if (((ds1305->ctrl[2] & 0xf0) != DS1305_TRICKLE_MAGIC)) {
709 ds1305->ctrl[2] = DS1305_TRICKLE_MAGIC
710 | pdata->trickle;
711 write_ctrl = true;
712 }
713
714 /* on DS1306, configure 1 Hz signal */
715 if (pdata->is_ds1306) {
716 if (pdata->en_1hz) {
717 if (!(ds1305->ctrl[0] & DS1306_1HZ)) {
718 ds1305->ctrl[0] |= DS1306_1HZ;
719 write_ctrl = true;
720 }
721 } else {
722 if (ds1305->ctrl[0] & DS1306_1HZ) {
723 ds1305->ctrl[0] &= ~DS1306_1HZ;
724 write_ctrl = true;
725 }
726 }
727 }
728 }
729
730 if (write_ctrl) {
731 u8 buf[4];
732
733 buf[0] = DS1305_WRITE | DS1305_CONTROL;
734 buf[1] = ds1305->ctrl[0];
735 buf[2] = ds1305->ctrl[1];
736 buf[3] = ds1305->ctrl[2];
737 status = spi_write_then_read(spi, buf, sizeof buf, NULL, 0);
738 if (status < 0) {
739 dev_dbg(&spi->dev, "can't %s, %d\n",
740 "write", status);
741 goto fail0;
742 }
743
744 dev_dbg(&spi->dev, "ctrl %s: %02x %02x %02x\n",
745 "write", ds1305->ctrl[0],
746 ds1305->ctrl[1], ds1305->ctrl[2]);
747 }
748
749 /* see if non-Linux software set up AM/PM mode */
750 addr = DS1305_HOUR;
751 status = spi_write_then_read(spi, &addr, sizeof addr,
752 &value, sizeof value);
753 if (status < 0) {
754 dev_dbg(&spi->dev, "read HOUR --> %d\n", status);
755 goto fail0;
756 }
757
758 ds1305->hr12 = (DS1305_HR_12 & value) != 0;
759 if (ds1305->hr12)
760 dev_dbg(&spi->dev, "AM/PM\n");
761
762 /* register RTC ... from here on, ds1305->ctrl needs locking */
763 rtc = rtc_device_register("ds1305", &spi->dev,
764 &ds1305_ops, THIS_MODULE);
765 if (IS_ERR(rtc)) {
766 status = PTR_ERR(rtc);
767 dev_dbg(&spi->dev, "register rtc --> %d\n", status);
768 goto fail0;
769 }
770 ds1305->rtc = rtc;
771
772 /* Maybe set up alarm IRQ; be ready to handle it triggering right
773 * away. NOTE that we don't share this. The signal is active low,
774 * and we can't ack it before a SPI message delay. We temporarily
775 * disable the IRQ until it's acked, which lets us work with more
776 * IRQ trigger modes (not all IRQ controllers can do falling edge).
777 */
778 if (spi->irq) {
779 INIT_WORK(&ds1305->work, ds1305_work);
780 status = request_irq(spi->irq, ds1305_irq,
781 0, dev_name(&rtc->dev), ds1305);
782 if (status < 0) {
783 dev_dbg(&spi->dev, "request_irq %d --> %d\n",
784 spi->irq, status);
785 goto fail1;
786 }
787 }
788
789 /* export NVRAM */
790 status = sysfs_create_bin_file(&spi->dev.kobj, &nvram);
791 if (status < 0) {
792 dev_dbg(&spi->dev, "register nvram --> %d\n", status);
793 goto fail2;
794 }
795
796 return 0;
797
798fail2:
799 free_irq(spi->irq, ds1305);
800fail1:
801 rtc_device_unregister(rtc);
802fail0:
803 kfree(ds1305);
804 return status;
805}
806
807static int __devexit ds1305_remove(struct spi_device *spi)
808{
809 struct ds1305 *ds1305 = spi_get_drvdata(spi);
810
811 sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
812
813 /* carefully shut down irq and workqueue, if present */
814 if (spi->irq) {
815 set_bit(FLAG_EXITING, &ds1305->flags);
816 free_irq(spi->irq, ds1305);
817 flush_scheduled_work();
818 }
819
820 rtc_device_unregister(ds1305->rtc);
821 spi_set_drvdata(spi, NULL);
822 kfree(ds1305);
823 return 0;
824}
825
826static struct spi_driver ds1305_driver = {
827 .driver.name = "rtc-ds1305",
828 .driver.owner = THIS_MODULE,
829 .probe = ds1305_probe,
830 .remove = __devexit_p(ds1305_remove),
831 /* REVISIT add suspend/resume */
832};
833
834static int __init ds1305_init(void)
835{
836 return spi_register_driver(&ds1305_driver);
837}
838module_init(ds1305_init);
839
840static void __exit ds1305_exit(void)
841{
842 spi_unregister_driver(&ds1305_driver);
843}
844module_exit(ds1305_exit);
845
846MODULE_DESCRIPTION("RTC driver for DS1305 and DS1306 chips");
847MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 0a19c06019be..24bc1689fc74 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -13,21 +13,21 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/module.h> 16#include <linux/bcd.h>
17#include <linux/i2c.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/rtc.h>
19#include <linux/slab.h> 22#include <linux/slab.h>
20#include <linux/smp_lock.h> 23#include <linux/smp_lock.h>
21#include <linux/string.h> 24#include <linux/string.h>
22#include <linux/i2c.h>
23#include <linux/rtc.h>
24#include <linux/bcd.h>
25#ifdef CONFIG_RTC_DRV_M41T80_WDT 25#ifdef CONFIG_RTC_DRV_M41T80_WDT
26#include <linux/miscdevice.h>
27#include <linux/watchdog.h>
28#include <linux/reboot.h>
29#include <linux/fs.h> 26#include <linux/fs.h>
30#include <linux/ioctl.h> 27#include <linux/ioctl.h>
28#include <linux/miscdevice.h>
29#include <linux/reboot.h>
30#include <linux/watchdog.h>
31#endif 31#endif
32 32
33#define M41T80_REG_SSEC 0 33#define M41T80_REG_SSEC 0
@@ -631,14 +631,12 @@ static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
631 return -EFAULT; 631 return -EFAULT;
632 632
633 if (rv & WDIOS_DISABLECARD) { 633 if (rv & WDIOS_DISABLECARD) {
634 printk(KERN_INFO 634 pr_info("rtc-m41t80: disable watchdog\n");
635 "rtc-m41t80: disable watchdog\n");
636 wdt_disable(); 635 wdt_disable();
637 } 636 }
638 637
639 if (rv & WDIOS_ENABLECARD) { 638 if (rv & WDIOS_ENABLECARD) {
640 printk(KERN_INFO 639 pr_info("rtc-m41t80: enable watchdog\n");
641 "rtc-m41t80: enable watchdog\n");
642 wdt_ping(); 640 wdt_ping();
643 } 641 }
644 642
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
new file mode 100644
index 000000000000..9b19499c829e
--- /dev/null
+++ b/drivers/rtc/rtc-m41t94.c
@@ -0,0 +1,173 @@
1/*
2 * Driver for ST M41T94 SPI RTC
3 *
4 * Copyright (C) 2008 Kim B. Heino
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/platform_device.h>
14#include <linux/rtc.h>
15#include <linux/spi/spi.h>
16#include <linux/bcd.h>
17
18#define M41T94_REG_SECONDS 0x01
19#define M41T94_REG_MINUTES 0x02
20#define M41T94_REG_HOURS 0x03
21#define M41T94_REG_WDAY 0x04
22#define M41T94_REG_DAY 0x05
23#define M41T94_REG_MONTH 0x06
24#define M41T94_REG_YEAR 0x07
25#define M41T94_REG_HT 0x0c
26
27#define M41T94_BIT_HALT 0x40
28#define M41T94_BIT_STOP 0x80
29#define M41T94_BIT_CB 0x40
30#define M41T94_BIT_CEB 0x80
31
32static int m41t94_set_time(struct device *dev, struct rtc_time *tm)
33{
34 struct spi_device *spi = to_spi_device(dev);
35 u8 buf[8]; /* write cmd + 7 registers */
36
37 dev_dbg(dev, "%s secs=%d, mins=%d, "
38 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
39 "write", tm->tm_sec, tm->tm_min,
40 tm->tm_hour, tm->tm_mday,
41 tm->tm_mon, tm->tm_year, tm->tm_wday);
42
43 buf[0] = 0x80 | M41T94_REG_SECONDS; /* write time + date */
44 buf[M41T94_REG_SECONDS] = BIN2BCD(tm->tm_sec);
45 buf[M41T94_REG_MINUTES] = BIN2BCD(tm->tm_min);
46 buf[M41T94_REG_HOURS] = BIN2BCD(tm->tm_hour);
47 buf[M41T94_REG_WDAY] = BIN2BCD(tm->tm_wday + 1);
48 buf[M41T94_REG_DAY] = BIN2BCD(tm->tm_mday);
49 buf[M41T94_REG_MONTH] = BIN2BCD(tm->tm_mon + 1);
50
51 buf[M41T94_REG_HOURS] |= M41T94_BIT_CEB;
52 if (tm->tm_year >= 100)
53 buf[M41T94_REG_HOURS] |= M41T94_BIT_CB;
54 buf[M41T94_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
55
56 return spi_write(spi, buf, 8);
57}
58
59static int m41t94_read_time(struct device *dev, struct rtc_time *tm)
60{
61 struct spi_device *spi = to_spi_device(dev);
62 u8 buf[2];
63 int ret, hour;
64
65 /* clear halt update bit */
66 ret = spi_w8r8(spi, M41T94_REG_HT);
67 if (ret < 0)
68 return ret;
69 if (ret & M41T94_BIT_HALT) {
70 buf[0] = 0x80 | M41T94_REG_HT;
71 buf[1] = ret & ~M41T94_BIT_HALT;
72 spi_write(spi, buf, 2);
73 }
74
75 /* clear stop bit */
76 ret = spi_w8r8(spi, M41T94_REG_SECONDS);
77 if (ret < 0)
78 return ret;
79 if (ret & M41T94_BIT_STOP) {
80 buf[0] = 0x80 | M41T94_REG_SECONDS;
81 buf[1] = ret & ~M41T94_BIT_STOP;
82 spi_write(spi, buf, 2);
83 }
84
85 tm->tm_sec = BCD2BIN(spi_w8r8(spi, M41T94_REG_SECONDS));
86 tm->tm_min = BCD2BIN(spi_w8r8(spi, M41T94_REG_MINUTES));
87 hour = spi_w8r8(spi, M41T94_REG_HOURS);
88 tm->tm_hour = BCD2BIN(hour & 0x3f);
89 tm->tm_wday = BCD2BIN(spi_w8r8(spi, M41T94_REG_WDAY)) - 1;
90 tm->tm_mday = BCD2BIN(spi_w8r8(spi, M41T94_REG_DAY));
91 tm->tm_mon = BCD2BIN(spi_w8r8(spi, M41T94_REG_MONTH)) - 1;
92 tm->tm_year = BCD2BIN(spi_w8r8(spi, M41T94_REG_YEAR));
93 if ((hour & M41T94_BIT_CB) || !(hour & M41T94_BIT_CEB))
94 tm->tm_year += 100;
95
96 dev_dbg(dev, "%s secs=%d, mins=%d, "
97 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
98 "read", tm->tm_sec, tm->tm_min,
99 tm->tm_hour, tm->tm_mday,
100 tm->tm_mon, tm->tm_year, tm->tm_wday);
101
102 /* initial clock setting can be undefined */
103 return rtc_valid_tm(tm);
104}
105
106static const struct rtc_class_ops m41t94_rtc_ops = {
107 .read_time = m41t94_read_time,
108 .set_time = m41t94_set_time,
109};
110
111static struct spi_driver m41t94_driver;
112
113static int __devinit m41t94_probe(struct spi_device *spi)
114{
115 struct rtc_device *rtc;
116 int res;
117
118 spi->bits_per_word = 8;
119 spi_setup(spi);
120
121 res = spi_w8r8(spi, M41T94_REG_SECONDS);
122 if (res < 0) {
123 dev_err(&spi->dev, "not found.\n");
124 return res;
125 }
126
127 rtc = rtc_device_register(m41t94_driver.driver.name,
128 &spi->dev, &m41t94_rtc_ops, THIS_MODULE);
129 if (IS_ERR(rtc))
130 return PTR_ERR(rtc);
131
132 dev_set_drvdata(&spi->dev, rtc);
133
134 return 0;
135}
136
137static int __devexit m41t94_remove(struct spi_device *spi)
138{
139 struct rtc_device *rtc = platform_get_drvdata(spi);
140
141 if (rtc)
142 rtc_device_unregister(rtc);
143
144 return 0;
145}
146
147static struct spi_driver m41t94_driver = {
148 .driver = {
149 .name = "rtc-m41t94",
150 .bus = &spi_bus_type,
151 .owner = THIS_MODULE,
152 },
153 .probe = m41t94_probe,
154 .remove = __devexit_p(m41t94_remove),
155};
156
157static __init int m41t94_init(void)
158{
159 return spi_register_driver(&m41t94_driver);
160}
161
162module_init(m41t94_init);
163
164static __exit void m41t94_exit(void)
165{
166 spi_unregister_driver(&m41t94_driver);
167}
168
169module_exit(m41t94_exit);
170
171MODULE_AUTHOR("Kim B. Heino <Kim.Heino@bluegiga.com>");
172MODULE_DESCRIPTION("Driver for ST M41T94 SPI RTC");
173MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index eb23d8423f42..8876605d4d4b 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -92,18 +92,6 @@
92#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr)) 92#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr))
93 93
94 94
95/* platform_bus isn't hotpluggable, so for static linkage it'd be safe
96 * to get rid of probe() and remove() code ... too bad the driver struct
97 * remembers probe(), that's about 25% of the runtime footprint!!
98 */
99#ifndef MODULE
100#undef __devexit
101#undef __devexit_p
102#define __devexit __exit
103#define __devexit_p __exit_p
104#endif
105
106
107/* we rely on the rtc framework to handle locking (rtc->ops_lock), 95/* we rely on the rtc framework to handle locking (rtc->ops_lock),
108 * so the only other requirement is that register accesses which 96 * so the only other requirement is that register accesses which
109 * require BUSY to be clear are made with IRQs locally disabled 97 * require BUSY to be clear are made with IRQs locally disabled
@@ -324,7 +312,7 @@ static struct rtc_class_ops omap_rtc_ops = {
324static int omap_rtc_alarm; 312static int omap_rtc_alarm;
325static int omap_rtc_timer; 313static int omap_rtc_timer;
326 314
327static int __devinit omap_rtc_probe(struct platform_device *pdev) 315static int __init omap_rtc_probe(struct platform_device *pdev)
328{ 316{
329 struct resource *res, *mem; 317 struct resource *res, *mem;
330 struct rtc_device *rtc; 318 struct rtc_device *rtc;
@@ -440,7 +428,7 @@ fail:
440 return -EIO; 428 return -EIO;
441} 429}
442 430
443static int __devexit omap_rtc_remove(struct platform_device *pdev) 431static int __exit omap_rtc_remove(struct platform_device *pdev)
444{ 432{
445 struct rtc_device *rtc = platform_get_drvdata(pdev);; 433 struct rtc_device *rtc = platform_get_drvdata(pdev);;
446 434
@@ -498,8 +486,7 @@ static void omap_rtc_shutdown(struct platform_device *pdev)
498 486
499MODULE_ALIAS("platform:omap_rtc"); 487MODULE_ALIAS("platform:omap_rtc");
500static struct platform_driver omap_rtc_driver = { 488static struct platform_driver omap_rtc_driver = {
501 .probe = omap_rtc_probe, 489 .remove = __exit_p(omap_rtc_remove),
502 .remove = __devexit_p(omap_rtc_remove),
503 .suspend = omap_rtc_suspend, 490 .suspend = omap_rtc_suspend,
504 .resume = omap_rtc_resume, 491 .resume = omap_rtc_resume,
505 .shutdown = omap_rtc_shutdown, 492 .shutdown = omap_rtc_shutdown,
@@ -511,7 +498,7 @@ static struct platform_driver omap_rtc_driver = {
511 498
512static int __init rtc_init(void) 499static int __init rtc_init(void)
513{ 500{
514 return platform_driver_register(&omap_rtc_driver); 501 return platform_driver_probe(&omap_rtc_driver, omap_rtc_probe);
515} 502}
516module_init(rtc_init); 503module_init(rtc_init);
517 504
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 3d09d8f0b1f0..d388c662bf4b 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -2,6 +2,7 @@
2 * drivers/rtc/rtc-pcf8583.c 2 * drivers/rtc/rtc-pcf8583.c
3 * 3 *
4 * Copyright (C) 2000 Russell King 4 * Copyright (C) 2000 Russell King
5 * Copyright (C) 2008 Wolfram Sang & Juergen Beisert, Pengutronix
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -14,7 +15,6 @@
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/i2c.h> 16#include <linux/i2c.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/string.h>
18#include <linux/rtc.h> 18#include <linux/rtc.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
@@ -27,7 +27,6 @@ struct rtc_mem {
27}; 27};
28 28
29struct pcf8583 { 29struct pcf8583 {
30 struct i2c_client client;
31 struct rtc_device *rtc; 30 struct rtc_device *rtc;
32 unsigned char ctrl; 31 unsigned char ctrl;
33}; 32};
@@ -40,10 +39,6 @@ struct pcf8583 {
40#define CTRL_ALARM 0x02 39#define CTRL_ALARM 0x02
41#define CTRL_TIMER 0x01 40#define CTRL_TIMER 0x01
42 41
43static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END };
44
45/* Module parameters */
46I2C_CLIENT_INSMOD;
47 42
48static struct i2c_driver pcf8583_driver; 43static struct i2c_driver pcf8583_driver;
49 44
@@ -269,106 +264,60 @@ static const struct rtc_class_ops pcf8583_rtc_ops = {
269 .set_time = pcf8583_rtc_set_time, 264 .set_time = pcf8583_rtc_set_time,
270}; 265};
271 266
272static int pcf8583_probe(struct i2c_adapter *adap, int addr, int kind); 267static int pcf8583_probe(struct i2c_client *client,
273 268 const struct i2c_device_id *id)
274static int pcf8583_attach(struct i2c_adapter *adap)
275{
276 return i2c_probe(adap, &addr_data, pcf8583_probe);
277}
278
279static int pcf8583_detach(struct i2c_client *client)
280{
281 int err;
282 struct pcf8583 *pcf = i2c_get_clientdata(client);
283 struct rtc_device *rtc = pcf->rtc;
284
285 if (rtc)
286 rtc_device_unregister(rtc);
287
288 if ((err = i2c_detach_client(client)))
289 return err;
290
291 kfree(pcf);
292 return 0;
293}
294
295static struct i2c_driver pcf8583_driver = {
296 .driver = {
297 .name = "pcf8583",
298 },
299 .id = I2C_DRIVERID_PCF8583,
300 .attach_adapter = pcf8583_attach,
301 .detach_client = pcf8583_detach,
302};
303
304static int pcf8583_probe(struct i2c_adapter *adap, int addr, int kind)
305{ 269{
306 struct pcf8583 *pcf; 270 struct pcf8583 *pcf8583;
307 struct i2c_client *client;
308 struct rtc_device *rtc;
309 unsigned char buf[1], ad[1] = { 0 };
310 int err; 271 int err;
311 struct i2c_msg msgs[2] = {
312 {
313 .addr = addr,
314 .flags = 0,
315 .len = 1,
316 .buf = ad,
317 }, {
318 .addr = addr,
319 .flags = I2C_M_RD,
320 .len = 1,
321 .buf = buf,
322 }
323 };
324 272
325 if (!i2c_check_functionality(adap, I2C_FUNC_I2C)) 273 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
326 return 0; 274 return -ENODEV;
327 275
328 pcf = kzalloc(sizeof(*pcf), GFP_KERNEL); 276 pcf8583 = kzalloc(sizeof(struct pcf8583), GFP_KERNEL);
329 if (!pcf) 277 if (!pcf8583)
330 return -ENOMEM; 278 return -ENOMEM;
331 279
332 client = &pcf->client; 280 pcf8583->rtc = rtc_device_register(pcf8583_driver.driver.name,
281 &client->dev, &pcf8583_rtc_ops, THIS_MODULE);
333 282
334 client->addr = addr; 283 if (IS_ERR(pcf8583->rtc)) {
335 client->adapter = adap; 284 err = PTR_ERR(pcf8583->rtc);
336 client->driver = &pcf8583_driver;
337
338 strlcpy(client->name, pcf8583_driver.driver.name, I2C_NAME_SIZE);
339
340 if (i2c_transfer(client->adapter, msgs, 2) != 2) {
341 err = -EIO;
342 goto exit_kfree; 285 goto exit_kfree;
343 } 286 }
344 287
345 err = i2c_attach_client(client); 288 i2c_set_clientdata(client, pcf8583);
346 289 return 0;
347 if (err)
348 goto exit_kfree;
349
350 rtc = rtc_device_register(pcf8583_driver.driver.name, &client->dev,
351 &pcf8583_rtc_ops, THIS_MODULE);
352 290
353 if (IS_ERR(rtc)) { 291exit_kfree:
354 err = PTR_ERR(rtc); 292 kfree(pcf8583);
355 goto exit_detach; 293 return err;
356 } 294}
357 295
358 pcf->rtc = rtc; 296static int __devexit pcf8583_remove(struct i2c_client *client)
359 i2c_set_clientdata(client, pcf); 297{
360 set_ctrl(client, buf[0]); 298 struct pcf8583 *pcf8583 = i2c_get_clientdata(client);
361 299
300 if (pcf8583->rtc)
301 rtc_device_unregister(pcf8583->rtc);
302 kfree(pcf8583);
362 return 0; 303 return 0;
304}
363 305
364exit_detach: 306static const struct i2c_device_id pcf8583_id[] = {
365 i2c_detach_client(client); 307 { "pcf8583", 0 },
366 308 { }
367exit_kfree: 309};
368 kfree(pcf); 310MODULE_DEVICE_TABLE(i2c, pcf8583_id);
369 311
370 return err; 312static struct i2c_driver pcf8583_driver = {
371} 313 .driver = {
314 .name = "pcf8583",
315 .owner = THIS_MODULE,
316 },
317 .probe = pcf8583_probe,
318 .remove = __devexit_p(pcf8583_remove),
319 .id_table = pcf8583_id,
320};
372 321
373static __init int pcf8583_init(void) 322static __init int pcf8583_init(void)
374{ 323{
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index fed86e507fdf..54b1ebb01502 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -36,10 +36,8 @@ static struct resource *s3c_rtc_mem;
36static void __iomem *s3c_rtc_base; 36static void __iomem *s3c_rtc_base;
37static int s3c_rtc_alarmno = NO_IRQ; 37static int s3c_rtc_alarmno = NO_IRQ;
38static int s3c_rtc_tickno = NO_IRQ; 38static int s3c_rtc_tickno = NO_IRQ;
39static int s3c_rtc_freq = 1;
40 39
41static DEFINE_SPINLOCK(s3c_rtc_pie_lock); 40static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
42static unsigned int tick_count;
43 41
44/* IRQ Handlers */ 42/* IRQ Handlers */
45 43
@@ -55,7 +53,7 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
55{ 53{
56 struct rtc_device *rdev = id; 54 struct rtc_device *rdev = id;
57 55
58 rtc_update_irq(rdev, tick_count++, RTC_PF | RTC_IRQF); 56 rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
59 return IRQ_HANDLED; 57 return IRQ_HANDLED;
60} 58}
61 59
@@ -74,35 +72,37 @@ static void s3c_rtc_setaie(int to)
74 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 72 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
75} 73}
76 74
77static void s3c_rtc_setpie(int to) 75static int s3c_rtc_setpie(struct device *dev, int enabled)
78{ 76{
79 unsigned int tmp; 77 unsigned int tmp;
80 78
81 pr_debug("%s: pie=%d\n", __func__, to); 79 pr_debug("%s: pie=%d\n", __func__, enabled);
82 80
83 spin_lock_irq(&s3c_rtc_pie_lock); 81 spin_lock_irq(&s3c_rtc_pie_lock);
84 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; 82 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
85 83
86 if (to) 84 if (enabled)
87 tmp |= S3C2410_TICNT_ENABLE; 85 tmp |= S3C2410_TICNT_ENABLE;
88 86
89 writeb(tmp, s3c_rtc_base + S3C2410_TICNT); 87 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
90 spin_unlock_irq(&s3c_rtc_pie_lock); 88 spin_unlock_irq(&s3c_rtc_pie_lock);
89
90 return 0;
91} 91}
92 92
93static void s3c_rtc_setfreq(int freq) 93static int s3c_rtc_setfreq(struct device *dev, int freq)
94{ 94{
95 unsigned int tmp; 95 unsigned int tmp;
96 96
97 spin_lock_irq(&s3c_rtc_pie_lock); 97 spin_lock_irq(&s3c_rtc_pie_lock);
98 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
99
100 s3c_rtc_freq = freq;
101 98
99 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
102 tmp |= (128 / freq)-1; 100 tmp |= (128 / freq)-1;
103 101
104 writeb(tmp, s3c_rtc_base + S3C2410_TICNT); 102 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
105 spin_unlock_irq(&s3c_rtc_pie_lock); 103 spin_unlock_irq(&s3c_rtc_pie_lock);
104
105 return 0;
106} 106}
107 107
108/* Time read/write */ 108/* Time read/write */
@@ -267,12 +267,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
267 267
268 writeb(alrm_en, base + S3C2410_RTCALM); 268 writeb(alrm_en, base + S3C2410_RTCALM);
269 269
270 if (0) { 270 s3c_rtc_setaie(alrm->enabled);
271 alrm_en = readb(base + S3C2410_RTCALM);
272 alrm_en &= ~S3C2410_RTCALM_ALMEN;
273 writeb(alrm_en, base + S3C2410_RTCALM);
274 disable_irq_wake(s3c_rtc_alarmno);
275 }
276 271
277 if (alrm->enabled) 272 if (alrm->enabled)
278 enable_irq_wake(s3c_rtc_alarmno); 273 enable_irq_wake(s3c_rtc_alarmno);
@@ -282,59 +277,12 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
282 return 0; 277 return 0;
283} 278}
284 279
285static int s3c_rtc_ioctl(struct device *dev,
286 unsigned int cmd, unsigned long arg)
287{
288 unsigned int ret = -ENOIOCTLCMD;
289
290 switch (cmd) {
291 case RTC_AIE_OFF:
292 case RTC_AIE_ON:
293 s3c_rtc_setaie((cmd == RTC_AIE_ON) ? 1 : 0);
294 ret = 0;
295 break;
296
297 case RTC_PIE_OFF:
298 case RTC_PIE_ON:
299 tick_count = 0;
300 s3c_rtc_setpie((cmd == RTC_PIE_ON) ? 1 : 0);
301 ret = 0;
302 break;
303
304 case RTC_IRQP_READ:
305 ret = put_user(s3c_rtc_freq, (unsigned long __user *)arg);
306 break;
307
308 case RTC_IRQP_SET:
309 if (!is_power_of_2(arg)) {
310 ret = -EINVAL;
311 goto exit;
312 }
313
314 pr_debug("s3c2410_rtc: setting frequency %ld\n", arg);
315
316 s3c_rtc_setfreq(arg);
317 ret = 0;
318 break;
319
320 case RTC_UIE_ON:
321 case RTC_UIE_OFF:
322 ret = -EINVAL;
323 }
324
325 exit:
326 return ret;
327}
328
329static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) 280static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
330{ 281{
331 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); 282 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
332 283
333 seq_printf(seq, "periodic_IRQ\t: %s\n", 284 seq_printf(seq, "periodic_IRQ\t: %s\n",
334 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" ); 285 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" );
335
336 seq_printf(seq, "periodic_freq\t: %d\n", s3c_rtc_freq);
337
338 return 0; 286 return 0;
339} 287}
340 288
@@ -374,7 +322,7 @@ static void s3c_rtc_release(struct device *dev)
374 322
375 /* do not clear AIE here, it may be needed for wake */ 323 /* do not clear AIE here, it may be needed for wake */
376 324
377 s3c_rtc_setpie(0); 325 s3c_rtc_setpie(dev, 0);
378 free_irq(s3c_rtc_alarmno, rtc_dev); 326 free_irq(s3c_rtc_alarmno, rtc_dev);
379 free_irq(s3c_rtc_tickno, rtc_dev); 327 free_irq(s3c_rtc_tickno, rtc_dev);
380} 328}
@@ -382,11 +330,12 @@ static void s3c_rtc_release(struct device *dev)
382static const struct rtc_class_ops s3c_rtcops = { 330static const struct rtc_class_ops s3c_rtcops = {
383 .open = s3c_rtc_open, 331 .open = s3c_rtc_open,
384 .release = s3c_rtc_release, 332 .release = s3c_rtc_release,
385 .ioctl = s3c_rtc_ioctl,
386 .read_time = s3c_rtc_gettime, 333 .read_time = s3c_rtc_gettime,
387 .set_time = s3c_rtc_settime, 334 .set_time = s3c_rtc_settime,
388 .read_alarm = s3c_rtc_getalarm, 335 .read_alarm = s3c_rtc_getalarm,
389 .set_alarm = s3c_rtc_setalarm, 336 .set_alarm = s3c_rtc_setalarm,
337 .irq_set_freq = s3c_rtc_setfreq,
338 .irq_set_state = s3c_rtc_setpie,
390 .proc = s3c_rtc_proc, 339 .proc = s3c_rtc_proc,
391}; 340};
392 341
@@ -430,14 +379,14 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
430 } 379 }
431} 380}
432 381
433static int s3c_rtc_remove(struct platform_device *dev) 382static int __devexit s3c_rtc_remove(struct platform_device *dev)
434{ 383{
435 struct rtc_device *rtc = platform_get_drvdata(dev); 384 struct rtc_device *rtc = platform_get_drvdata(dev);
436 385
437 platform_set_drvdata(dev, NULL); 386 platform_set_drvdata(dev, NULL);
438 rtc_device_unregister(rtc); 387 rtc_device_unregister(rtc);
439 388
440 s3c_rtc_setpie(0); 389 s3c_rtc_setpie(&dev->dev, 0);
441 s3c_rtc_setaie(0); 390 s3c_rtc_setaie(0);
442 391
443 iounmap(s3c_rtc_base); 392 iounmap(s3c_rtc_base);
@@ -447,7 +396,7 @@ static int s3c_rtc_remove(struct platform_device *dev)
447 return 0; 396 return 0;
448} 397}
449 398
450static int s3c_rtc_probe(struct platform_device *pdev) 399static int __devinit s3c_rtc_probe(struct platform_device *pdev)
451{ 400{
452 struct rtc_device *rtc; 401 struct rtc_device *rtc;
453 struct resource *res; 402 struct resource *res;
@@ -504,7 +453,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
504 pr_debug("s3c2410_rtc: RTCCON=%02x\n", 453 pr_debug("s3c2410_rtc: RTCCON=%02x\n",
505 readb(s3c_rtc_base + S3C2410_RTCCON)); 454 readb(s3c_rtc_base + S3C2410_RTCCON));
506 455
507 s3c_rtc_setfreq(s3c_rtc_freq); 456 s3c_rtc_setfreq(&pdev->dev, 1);
508 457
509 /* register RTC and exit */ 458 /* register RTC and exit */
510 459
@@ -560,7 +509,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
560 509
561static struct platform_driver s3c2410_rtcdrv = { 510static struct platform_driver s3c2410_rtcdrv = {
562 .probe = s3c_rtc_probe, 511 .probe = s3c_rtc_probe,
563 .remove = s3c_rtc_remove, 512 .remove = __devexit_p(s3c_rtc_remove),
564 .suspend = s3c_rtc_suspend, 513 .suspend = s3c_rtc_suspend,
565 .resume = s3c_rtc_resume, 514 .resume = s3c_rtc_resume,
566 .driver = { 515 .driver = {
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index be9c70d0b193..884b635f028b 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for NEC VR4100 series Real Time Clock unit. 2 * Driver for NEC VR4100 series Real Time Clock unit.
3 * 3 *
4 * Copyright (C) 2003-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 4 * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -34,7 +34,7 @@
34 34
35MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>"); 35MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
36MODULE_DESCRIPTION("NEC VR4100 series RTC driver"); 36MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL v2");
38 38
39/* RTC 1 registers */ 39/* RTC 1 registers */
40#define ETIMELREG 0x00 40#define ETIMELREG 0x00
@@ -82,7 +82,6 @@ static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
82 82
83static DEFINE_SPINLOCK(rtc_lock); 83static DEFINE_SPINLOCK(rtc_lock);
84static char rtc_name[] = "RTC"; 84static char rtc_name[] = "RTC";
85static unsigned long periodic_frequency;
86static unsigned long periodic_count; 85static unsigned long periodic_count;
87static unsigned int alarm_enabled; 86static unsigned int alarm_enabled;
88static int aie_irq = -1; 87static int aie_irq = -1;
@@ -207,10 +206,37 @@ static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
207 return 0; 206 return 0;
208} 207}
209 208
210static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 209static int vr41xx_rtc_irq_set_freq(struct device *dev, int freq)
211{ 210{
212 unsigned long count; 211 unsigned long count;
213 212
213 count = RTC_FREQUENCY;
214 do_div(count, freq);
215
216 periodic_count = count;
217
218 spin_lock_irq(&rtc_lock);
219
220 rtc1_write(RTCL1LREG, count);
221 rtc1_write(RTCL1HREG, count >> 16);
222
223 spin_unlock_irq(&rtc_lock);
224
225 return 0;
226}
227
228static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
229{
230 if (enabled)
231 enable_irq(pie_irq);
232 else
233 disable_irq(pie_irq);
234
235 return 0;
236}
237
238static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
239{
214 switch (cmd) { 240 switch (cmd) {
215 case RTC_AIE_ON: 241 case RTC_AIE_ON:
216 spin_lock_irq(&rtc_lock); 242 spin_lock_irq(&rtc_lock);
@@ -232,33 +258,6 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
232 258
233 spin_unlock_irq(&rtc_lock); 259 spin_unlock_irq(&rtc_lock);
234 break; 260 break;
235 case RTC_PIE_ON:
236 enable_irq(pie_irq);
237 break;
238 case RTC_PIE_OFF:
239 disable_irq(pie_irq);
240 break;
241 case RTC_IRQP_READ:
242 return put_user(periodic_frequency, (unsigned long __user *)arg);
243 break;
244 case RTC_IRQP_SET:
245 if (arg > MAX_PERIODIC_RATE)
246 return -EINVAL;
247
248 periodic_frequency = arg;
249
250 count = RTC_FREQUENCY;
251 do_div(count, arg);
252
253 periodic_count = count;
254
255 spin_lock_irq(&rtc_lock);
256
257 rtc1_write(RTCL1LREG, count);
258 rtc1_write(RTCL1HREG, count >> 16);
259
260 spin_unlock_irq(&rtc_lock);
261 break;
262 case RTC_EPOCH_READ: 261 case RTC_EPOCH_READ:
263 return put_user(epoch, (unsigned long __user *)arg); 262 return put_user(epoch, (unsigned long __user *)arg);
264 case RTC_EPOCH_SET: 263 case RTC_EPOCH_SET:
@@ -309,6 +308,8 @@ static const struct rtc_class_ops vr41xx_rtc_ops = {
309 .set_time = vr41xx_rtc_set_time, 308 .set_time = vr41xx_rtc_set_time,
310 .read_alarm = vr41xx_rtc_read_alarm, 309 .read_alarm = vr41xx_rtc_read_alarm,
311 .set_alarm = vr41xx_rtc_set_alarm, 310 .set_alarm = vr41xx_rtc_set_alarm,
311 .irq_set_freq = vr41xx_rtc_irq_set_freq,
312 .irq_set_state = vr41xx_rtc_irq_set_state,
312}; 313};
313 314
314static int __devinit rtc_probe(struct platform_device *pdev) 315static int __devinit rtc_probe(struct platform_device *pdev)
@@ -346,6 +347,8 @@ static int __devinit rtc_probe(struct platform_device *pdev)
346 goto err_iounmap_all; 347 goto err_iounmap_all;
347 } 348 }
348 349
350 rtc->max_user_freq = MAX_PERIODIC_RATE;
351
349 spin_lock_irq(&rtc_lock); 352 spin_lock_irq(&rtc_lock);
350 353
351 rtc1_write(ECMPLREG, 0); 354 rtc1_write(ECMPLREG, 0);
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f843c1383a4b..538552495d48 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -84,7 +84,6 @@ typedef struct ide_scsi_obj {
84 struct Scsi_Host *host; 84 struct Scsi_Host *host;
85 85
86 struct ide_atapi_pc *pc; /* Current packet command */ 86 struct ide_atapi_pc *pc; /* Current packet command */
87 unsigned long flags; /* Status/Action flags */
88 unsigned long transform; /* SCSI cmd translation layer */ 87 unsigned long transform; /* SCSI cmd translation layer */
89 unsigned long log; /* log flags */ 88 unsigned long log; /* log flags */
90} idescsi_scsi_t; 89} idescsi_scsi_t;
@@ -126,23 +125,14 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
126} 125}
127 126
128/* 127/*
129 * Per ATAPI device status bits.
130 */
131#define IDESCSI_DRQ_INTERRUPT 0 /* DRQ interrupt device */
132
133/*
134 * ide-scsi requests.
135 */
136#define IDESCSI_PC_RQ 90
137
138/*
139 * PIO data transfer routine using the scatter gather table. 128 * PIO data transfer routine using the scatter gather table.
140 */ 129 */
141static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc, 130static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
142 unsigned int bcount, int write) 131 unsigned int bcount, int write)
143{ 132{
144 ide_hwif_t *hwif = drive->hwif; 133 ide_hwif_t *hwif = drive->hwif;
145 xfer_func_t *xf = write ? hwif->output_data : hwif->input_data; 134 const struct ide_tp_ops *tp_ops = hwif->tp_ops;
135 xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
146 char *buf; 136 char *buf;
147 int count; 137 int count;
148 138
@@ -228,7 +218,6 @@ static int idescsi_check_condition(ide_drive_t *drive,
228 rq->cmd_type = REQ_TYPE_SENSE; 218 rq->cmd_type = REQ_TYPE_SENSE;
229 rq->cmd_flags |= REQ_PREEMPT; 219 rq->cmd_flags |= REQ_PREEMPT;
230 pc->timeout = jiffies + WAIT_READY; 220 pc->timeout = jiffies + WAIT_READY;
231 pc->callback = ide_scsi_callback;
232 /* NOTE! Save the failed packet command in "rq->buffer" */ 221 /* NOTE! Save the failed packet command in "rq->buffer" */
233 rq->buffer = (void *) failed_cmd->special; 222 rq->buffer = (void *) failed_cmd->special;
234 pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd; 223 pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
@@ -237,6 +226,7 @@ static int idescsi_check_condition(ide_drive_t *drive,
237 ide_scsi_hex_dump(pc->c, 6); 226 ide_scsi_hex_dump(pc->c, 6);
238 } 227 }
239 rq->rq_disk = scsi->disk; 228 rq->rq_disk = scsi->disk;
229 memcpy(rq->cmd, pc->c, 12);
240 ide_do_drive_cmd(drive, rq); 230 ide_do_drive_cmd(drive, rq);
241 return 0; 231 return 0;
242} 232}
@@ -246,10 +236,9 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
246{ 236{
247 ide_hwif_t *hwif = drive->hwif; 237 ide_hwif_t *hwif = drive->hwif;
248 238
249 if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) 239 if (hwif->tp_ops->read_status(hwif) & (BUSY_STAT | DRQ_STAT))
250 /* force an abort */ 240 /* force an abort */
251 hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE, 241 hwif->tp_ops->exec_command(hwif, WIN_IDLEIMMEDIATE);
252 hwif->io_ports.command_addr);
253 242
254 rq->errors++; 243 rq->errors++;
255 244
@@ -421,10 +410,6 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
421 410
422 if (blk_sense_request(rq) || blk_special_request(rq)) { 411 if (blk_sense_request(rq) || blk_special_request(rq)) {
423 struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special; 412 struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;
424 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
425
426 if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags))
427 pc->flags |= PC_FLAG_DRQ_INTERRUPT;
428 413
429 if (drive->using_dma && !idescsi_map_sg(drive, pc)) 414 if (drive->using_dma && !idescsi_map_sg(drive, pc))
430 pc->flags |= PC_FLAG_DMA_OK; 415 pc->flags |= PC_FLAG_DMA_OK;
@@ -460,11 +445,14 @@ static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
460static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi) 445static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi)
461{ 446{
462 if (drive->id && (drive->id->config & 0x0060) == 0x20) 447 if (drive->id && (drive->id->config & 0x0060) == 0x20)
463 set_bit (IDESCSI_DRQ_INTERRUPT, &scsi->flags); 448 set_bit(IDE_AFLAG_DRQ_INTERRUPT, &drive->atapi_flags);
464 clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); 449 clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
465#if IDESCSI_DEBUG_LOG 450#if IDESCSI_DEBUG_LOG
466 set_bit(IDESCSI_LOG_CMD, &scsi->log); 451 set_bit(IDESCSI_LOG_CMD, &scsi->log);
467#endif /* IDESCSI_DEBUG_LOG */ 452#endif /* IDESCSI_DEBUG_LOG */
453
454 drive->pc_callback = ide_scsi_callback;
455
468 idescsi_add_settings(drive); 456 idescsi_add_settings(drive);
469} 457}
470 458
@@ -616,7 +604,6 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
616 pc->scsi_cmd = cmd; 604 pc->scsi_cmd = cmd;
617 pc->done = done; 605 pc->done = done;
618 pc->timeout = jiffies + cmd->timeout_per_command; 606 pc->timeout = jiffies + cmd->timeout_per_command;
619 pc->callback = ide_scsi_callback;
620 607
621 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { 608 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
622 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); 609 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -631,6 +618,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
631 rq->special = (char *) pc; 618 rq->special = (char *) pc;
632 rq->cmd_type = REQ_TYPE_SPECIAL; 619 rq->cmd_type = REQ_TYPE_SPECIAL;
633 spin_unlock_irq(host->host_lock); 620 spin_unlock_irq(host->host_lock);
621 memcpy(rq->cmd, pc->c, 12);
634 blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL); 622 blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
635 spin_lock_irq(host->host_lock); 623 spin_lock_irq(host->host_lock);
636 return 0; 624 return 0;
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 2c87db98cdfb..f9cf70151366 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -7,6 +7,7 @@
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/mm.h>
10#include <linux/init.h> 11#include <linux/init.h>
11 12
12#include <asm/irq.h> 13#include <asm/irq.h>
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 27f34a9f9cb7..a97f1ae11f78 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1293,7 +1293,18 @@ receive_chars(struct uart_8250_port *up, unsigned int *status)
1293 char flag; 1293 char flag;
1294 1294
1295 do { 1295 do {
1296 ch = serial_inp(up, UART_RX); 1296 if (likely(lsr & UART_LSR_DR))
1297 ch = serial_inp(up, UART_RX);
1298 else
1299 /*
1300 * Intel 82571 has a Serial Over Lan device that will
1301 * set UART_LSR_BI without setting UART_LSR_DR when
1302 * it receives a break. To avoid reading from the
1303 * receive buffer without UART_LSR_DR bit set, we
1304 * just force the read character to be 0
1305 */
1306 ch = 0;
1307
1297 flag = TTY_NORMAL; 1308 flag = TTY_NORMAL;
1298 up->port.icount.rx++; 1309 up->port.icount.rx++;
1299 1310
@@ -1342,7 +1353,7 @@ receive_chars(struct uart_8250_port *up, unsigned int *status)
1342 1353
1343ignore_char: 1354ignore_char:
1344 lsr = serial_inp(up, UART_LSR); 1355 lsr = serial_inp(up, UART_LSR);
1345 } while ((lsr & UART_LSR_DR) && (max_count-- > 0)); 1356 } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
1346 spin_unlock(&up->port.lock); 1357 spin_unlock(&up->port.lock);
1347 tty_flip_buffer_push(tty); 1358 tty_flip_buffer_push(tty);
1348 spin_lock(&up->port.lock); 1359 spin_lock(&up->port.lock);
@@ -1425,7 +1436,7 @@ serial8250_handle_port(struct uart_8250_port *up)
1425 1436
1426 DEBUG_INTR("status = %x...", status); 1437 DEBUG_INTR("status = %x...", status);
1427 1438
1428 if (status & UART_LSR_DR) 1439 if (status & (UART_LSR_DR | UART_LSR_BI))
1429 receive_chars(up, &status); 1440 receive_chars(up, &status);
1430 check_modem_status(up); 1441 check_modem_status(up);
1431 if (status & UART_LSR_THRE) 1442 if (status & UART_LSR_THRE)
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c
index 4eb7437a404a..0416ad3bc127 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/serial/8250_gsc.c
@@ -119,3 +119,5 @@ int __init probe_serial_gsc(void)
119} 119}
120 120
121module_init(probe_serial_gsc); 121module_init(probe_serial_gsc);
122
123MODULE_LICENSE("GPL");
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 1b36087665a2..c2f23933155b 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -767,6 +767,9 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
767#define PCI_SUBDEVICE_ID_POCTAL232 0x0308 767#define PCI_SUBDEVICE_ID_POCTAL232 0x0308
768#define PCI_SUBDEVICE_ID_POCTAL422 0x0408 768#define PCI_SUBDEVICE_ID_POCTAL422 0x0408
769 769
770/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
771#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
772
770/* 773/*
771 * Master list of serial port init/setup/exit quirks. 774 * Master list of serial port init/setup/exit quirks.
772 * This does not describe the general nature of the port. 775 * This does not describe the general nature of the port.
@@ -882,6 +885,15 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
882 }, 885 },
883 { 886 {
884 .vendor = PCI_VENDOR_ID_PLX, 887 .vendor = PCI_VENDOR_ID_PLX,
888 .device = PCI_DEVICE_ID_PLX_9050,
889 .subvendor = PCI_VENDOR_ID_PLX,
890 .subdevice = PCI_SUBDEVICE_ID_UNKNOWN_0x1584,
891 .init = pci_plx9050_init,
892 .setup = pci_default_setup,
893 .exit = __devexit_p(pci_plx9050_exit),
894 },
895 {
896 .vendor = PCI_VENDOR_ID_PLX,
885 .device = PCI_DEVICE_ID_PLX_ROMULUS, 897 .device = PCI_DEVICE_ID_PLX_ROMULUS,
886 .subvendor = PCI_VENDOR_ID_PLX, 898 .subvendor = PCI_VENDOR_ID_PLX,
887 .subdevice = PCI_DEVICE_ID_PLX_ROMULUS, 899 .subdevice = PCI_DEVICE_ID_PLX_ROMULUS,
@@ -2197,6 +2209,11 @@ static struct pci_device_id serial_pci_tbl[] = {
2197 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077, 2209 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077,
2198 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2210 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2199 pbn_b2_4_921600 }, 2211 pbn_b2_4_921600 },
2212 /* Unknown card - subdevice 0x1584 */
2213 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2214 PCI_VENDOR_ID_PLX,
2215 PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0,
2216 pbn_b0_4_115200 },
2200 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, 2217 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
2201 PCI_SUBVENDOR_ID_KEYSPAN, 2218 PCI_SUBVENDOR_ID_KEYSPAN,
2202 PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0, 2219 PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0,
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 8fc7451c0049..3b4a14e355c1 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -942,22 +942,6 @@ config SERIAL_IP22_ZILOG_CONSOLE
942 depends on SERIAL_IP22_ZILOG=y 942 depends on SERIAL_IP22_ZILOG=y
943 select SERIAL_CORE_CONSOLE 943 select SERIAL_CORE_CONSOLE
944 944
945config V850E_UART
946 bool "NEC V850E on-chip UART support"
947 depends on V850E_MA1 || V850E_ME2 || V850E_TEG || V850E2_ANNA || V850E_AS85EP1
948 select SERIAL_CORE
949 default y
950
951config V850E_UARTB
952 bool
953 depends on V850E_UART && V850E_ME2
954 default y
955
956config V850E_UART_CONSOLE
957 bool "Use NEC V850E on-chip UART for console"
958 depends on V850E_UART
959 select SERIAL_CORE_CONSOLE
960
961config SERIAL_SH_SCI 945config SERIAL_SH_SCI
962 tristate "SuperH SCI(F) serial port support" 946 tristate "SuperH SCI(F) serial port support"
963 depends on SUPERH || H8300 947 depends on SUPERH || H8300
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 93e407ee08b9..a4f86927a74b 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -201,6 +201,10 @@ static void cpm_uart_int_tx(struct uart_port *port)
201 cpm_uart_tx_pump(port); 201 cpm_uart_tx_pump(port);
202} 202}
203 203
204#ifdef CONFIG_CONSOLE_POLL
205static int serial_polled;
206#endif
207
204/* 208/*
205 * Receive characters 209 * Receive characters
206 */ 210 */
@@ -222,6 +226,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
222 */ 226 */
223 bdp = pinfo->rx_cur; 227 bdp = pinfo->rx_cur;
224 for (;;) { 228 for (;;) {
229#ifdef CONFIG_CONSOLE_POLL
230 if (unlikely(serial_polled)) {
231 serial_polled = 0;
232 return;
233 }
234#endif
225 /* get status */ 235 /* get status */
226 status = in_be16(&bdp->cbd_sc); 236 status = in_be16(&bdp->cbd_sc);
227 /* If this one is empty, return happy */ 237 /* If this one is empty, return happy */
@@ -253,7 +263,12 @@ static void cpm_uart_int_rx(struct uart_port *port)
253 goto handle_error; 263 goto handle_error;
254 if (uart_handle_sysrq_char(port, ch)) 264 if (uart_handle_sysrq_char(port, ch))
255 continue; 265 continue;
256 266#ifdef CONFIG_CONSOLE_POLL
267 if (unlikely(serial_polled)) {
268 serial_polled = 0;
269 return;
270 }
271#endif
257 error_return: 272 error_return:
258 tty_insert_flip_char(tty, ch, flg); 273 tty_insert_flip_char(tty, ch, flg);
259 274
@@ -420,10 +435,13 @@ static void cpm_uart_shutdown(struct uart_port *port)
420 } 435 }
421 436
422 /* Shut them really down and reinit buffer descriptors */ 437 /* Shut them really down and reinit buffer descriptors */
423 if (IS_SMC(pinfo)) 438 if (IS_SMC(pinfo)) {
439 out_be16(&pinfo->smcup->smc_brkcr, 0);
424 cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX); 440 cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
425 else 441 } else {
442 out_be16(&pinfo->sccup->scc_brkcr, 0);
426 cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX); 443 cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX);
444 }
427 445
428 cpm_uart_initbd(pinfo); 446 cpm_uart_initbd(pinfo);
429 } 447 }
@@ -539,9 +557,11 @@ static void cpm_uart_set_termios(struct uart_port *port,
539 * enables, because we want to put them back if they were 557 * enables, because we want to put them back if they were
540 * present. 558 * present.
541 */ 559 */
-	prev_mode = in_be16(&smcp->smc_smcmr);
-	out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval | SMCMR_SM_UART);
-	setbits16(&smcp->smc_smcmr, (prev_mode & (SMCMR_REN | SMCMR_TEN)));
+	prev_mode = in_be16(&smcp->smc_smcmr) & (SMCMR_REN | SMCMR_TEN);
+	/* Output in *one* operation, so we don't interrupt RX/TX if they
+	 * were already enabled. */
+	out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval |
+		 SMCMR_SM_UART | prev_mode);
545 } else { 565 } else {
546 out_be16(&sccp->scc_psmr, (sbits << 12) | scval); 566 out_be16(&sccp->scc_psmr, (sbits << 12) | scval);
547 } 567 }
@@ -865,6 +885,80 @@ static void cpm_uart_config_port(struct uart_port *port, int flags)
865 cpm_uart_request_port(port); 885 cpm_uart_request_port(port);
866 } 886 }
867} 887}
888
889#ifdef CONFIG_CONSOLE_POLL
890/* Serial polling routines for writing and reading from the uart while
891 * in an interrupt or debug context.
892 */
893
894#define GDB_BUF_SIZE 512 /* power of 2, please */
895
896static char poll_buf[GDB_BUF_SIZE];
897static char *pollp;
898static int poll_chars;
899
900static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
901{
902 u_char c, *cp;
903 volatile cbd_t *bdp;
904 int i;
905
906 /* Get the address of the host memory buffer.
907 */
908 bdp = pinfo->rx_cur;
909 while (bdp->cbd_sc & BD_SC_EMPTY)
910 ;
911
912 /* If the buffer address is in the CPM DPRAM, don't
913 * convert it.
914 */
915 cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo);
916
917 if (obuf) {
918 i = c = bdp->cbd_datlen;
919 while (i-- > 0)
920 *obuf++ = *cp++;
921 } else
922 c = *cp;
923 bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID);
924 bdp->cbd_sc |= BD_SC_EMPTY;
925
926 if (bdp->cbd_sc & BD_SC_WRAP)
927 bdp = pinfo->rx_bd_base;
928 else
929 bdp++;
930 pinfo->rx_cur = (cbd_t *)bdp;
931
932 return (int)c;
933}
934
935static int cpm_get_poll_char(struct uart_port *port)
936{
937 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
938
939 if (!serial_polled) {
940 serial_polled = 1;
941 poll_chars = 0;
942 }
943 if (poll_chars <= 0) {
944 poll_chars = poll_wait_key(poll_buf, pinfo);
945 pollp = poll_buf;
946 }
947 poll_chars--;
948 return *pollp++;
949}
950
951static void cpm_put_poll_char(struct uart_port *port,
952 unsigned char c)
953{
954 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
955 static char ch[2];
956
957 ch[0] = (char)c;
958 cpm_uart_early_write(pinfo->port.line, ch, 1);
959}
960#endif /* CONFIG_CONSOLE_POLL */
961
868static struct uart_ops cpm_uart_pops = { 962static struct uart_ops cpm_uart_pops = {
869 .tx_empty = cpm_uart_tx_empty, 963 .tx_empty = cpm_uart_tx_empty,
870 .set_mctrl = cpm_uart_set_mctrl, 964 .set_mctrl = cpm_uart_set_mctrl,
@@ -882,6 +976,10 @@ static struct uart_ops cpm_uart_pops = {
882 .request_port = cpm_uart_request_port, 976 .request_port = cpm_uart_request_port,
883 .config_port = cpm_uart_config_port, 977 .config_port = cpm_uart_config_port,
884 .verify_port = cpm_uart_verify_port, 978 .verify_port = cpm_uart_verify_port,
979#ifdef CONFIG_CONSOLE_POLL
980 .poll_get_char = cpm_get_poll_char,
981 .poll_put_char = cpm_put_poll_char,
982#endif
885}; 983};
886 984
887struct uart_cpm_port cpm_uart_ports[UART_NR]; 985struct uart_cpm_port cpm_uart_ports[UART_NR];
@@ -1105,12 +1203,14 @@ static int __init cpm_uart_console_setup(struct console *co, char *options)
1105 udbg_putc = NULL; 1203 udbg_putc = NULL;
1106#endif 1204#endif
1107 1205
1108 cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
1109
1110 if (IS_SMC(pinfo)) { 1206 if (IS_SMC(pinfo)) {
1207 out_be16(&pinfo->smcup->smc_brkcr, 0);
1208 cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX);
1111 clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX); 1209 clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX);
1112 clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN); 1210 clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN);
1113 } else { 1211 } else {
1212 out_be16(&pinfo->sccup->scc_brkcr, 0);
1213 cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX);
1114 clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX); 1214 clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
1115 clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 1215 clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
1116 } 1216 }
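
The cpm_uart changes above wire up CONFIG_CONSOLE_POLL: poll_get_char/poll_put_char let a debugger such as KGDB drive the port with interrupts off, and the serial_polled flag makes the normal RX interrupt handler back out so it does not consume the same buffer descriptors. A minimal sketch of the contract those two callbacks have to meet (the my_rx_ready/my_rx_byte/my_tx_byte helpers are stubs invented for the illustration, not CPM code):

#include <linux/serial_core.h>

/* Hypothetical hardware helpers, stubbed so the sketch is self-contained. */
static inline int my_rx_ready(struct uart_port *port) { return 1; }
static inline unsigned char my_rx_byte(struct uart_port *port) { return 0; }
static inline void my_tx_byte(struct uart_port *port, unsigned char c) { }

#ifdef CONFIG_CONSOLE_POLL
/*
 * The poll hooks run with interrupts off, may not sleep or take locks,
 * and must not fight the regular ISR over the same hardware buffers -
 * hence the serial_polled handshake in the drivers above.
 */
static int my_poll_get_char(struct uart_port *port)
{
	while (!my_rx_ready(port))
		;			/* busy-wait for a character */
	return my_rx_byte(port);
}

static void my_poll_put_char(struct uart_port *port, unsigned char c)
{
	my_tx_byte(port, c);		/* helper busy-waits until sent */
}
#endif

static struct uart_ops my_pops = {
	/* ...the usual tx_empty/startup/shutdown/... callbacks... */
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= my_poll_get_char,
	.poll_put_char	= my_poll_put_char,
#endif
};
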
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index a81d2c2ff8a2..6042b87797a1 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -642,6 +642,26 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
642 spin_unlock_irqrestore(&dport->port.lock, flags); 642 spin_unlock_irqrestore(&dport->port.lock, flags);
643} 643}
644 644
645/*
646 * Hack alert!
647 * Required solely so that the initial PROM-based console
648 * works undisturbed in parallel with this one.
649 */
650static void dz_pm(struct uart_port *uport, unsigned int state,
651 unsigned int oldstate)
652{
653 struct dz_port *dport = to_dport(uport);
654 unsigned long flags;
655
656 spin_lock_irqsave(&dport->port.lock, flags);
657 if (state < 3)
658 dz_start_tx(&dport->port);
659 else
660 dz_stop_tx(&dport->port);
661 spin_unlock_irqrestore(&dport->port.lock, flags);
662}
663
664
645static const char *dz_type(struct uart_port *uport) 665static const char *dz_type(struct uart_port *uport)
646{ 666{
647 return "DZ"; 667 return "DZ";
@@ -738,6 +758,7 @@ static struct uart_ops dz_ops = {
738 .startup = dz_startup, 758 .startup = dz_startup,
739 .shutdown = dz_shutdown, 759 .shutdown = dz_shutdown,
740 .set_termios = dz_set_termios, 760 .set_termios = dz_set_termios,
761 .pm = dz_pm,
741 .type = dz_type, 762 .type = dz_type,
742 .release_port = dz_release_port, 763 .release_port = dz_release_port,
743 .request_port = dz_request_port, 764 .request_port = dz_request_port,
@@ -861,7 +882,10 @@ static int __init dz_console_setup(struct console *co, char *options)
861 if (ret) 882 if (ret)
862 return ret; 883 return ret;
863 884
885 spin_lock_init(&dport->port.lock); /* For dz_pm(). */
886
864 dz_reset(dport); 887 dz_reset(dport);
888 dz_pm(uport, 0, -1);
865 889
866 if (options) 890 if (options)
867 uart_parse_options(options, &baud, &parity, &bits, &flow); 891 uart_parse_options(options, &baud, &parity, &bits, &flow);
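
The dz changes move transmitter enable/disable into a .pm callback so the PROM console can keep sharing the port while Linux considers it powered down, and console_setup now initialises the port lock and calls dz_pm(uport, 0, -1) once to bring the line up. A hedged sketch of the general shape of such a hook, using the state encoding seen here (0 = on, 3 = off) and hypothetical my_enable_tx/my_disable_tx helpers:

#include <linux/serial_core.h>

/* Hypothetical hardware helpers standing in for the register writes. */
static inline void my_enable_tx(struct uart_port *port)  { }
static inline void my_disable_tx(struct uart_port *port) { }

/*
 * Illustrative .pm hook: the serial core passes state 0 when the port
 * should be fully powered and 3 when it may be switched off; anything
 * in between is treated as "keep it usable", as dz_pm and zs_pm do.
 */
static void my_uart_pm(struct uart_port *port, unsigned int state,
		       unsigned int oldstate)
{
	if (state < 3)
		my_enable_tx(port);
	else
		my_disable_tx(port);
}
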
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index c9f53e71f252..61d3ade5286c 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -921,6 +921,10 @@ static int mpsc_make_ready(struct mpsc_port_info *pi)
921 return 0; 921 return 0;
922} 922}
923 923
924#ifdef CONFIG_CONSOLE_POLL
925static int serial_polled;
926#endif
927
924/* 928/*
925 ****************************************************************************** 929 ******************************************************************************
926 * 930 *
@@ -956,7 +960,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
956 while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) 960 while (!((cmdstat = be32_to_cpu(rxre->cmdstat))
957 & SDMA_DESC_CMDSTAT_O)) { 961 & SDMA_DESC_CMDSTAT_O)) {
958 bytes_in = be16_to_cpu(rxre->bytecnt); 962 bytes_in = be16_to_cpu(rxre->bytecnt);
959 963#ifdef CONFIG_CONSOLE_POLL
964 if (unlikely(serial_polled)) {
965 serial_polled = 0;
966 return 0;
967 }
968#endif
960 /* Following use of tty struct directly is deprecated */ 969 /* Following use of tty struct directly is deprecated */
961 if (unlikely(tty_buffer_request_room(tty, bytes_in) 970 if (unlikely(tty_buffer_request_room(tty, bytes_in)
962 < bytes_in)) { 971 < bytes_in)) {
@@ -1017,6 +1026,12 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
1017 if (uart_handle_sysrq_char(&pi->port, *bp)) { 1026 if (uart_handle_sysrq_char(&pi->port, *bp)) {
1018 bp++; 1027 bp++;
1019 bytes_in--; 1028 bytes_in--;
1029#ifdef CONFIG_CONSOLE_POLL
1030 if (unlikely(serial_polled)) {
1031 serial_polled = 0;
1032 return 0;
1033 }
1034#endif
1020 goto next_frame; 1035 goto next_frame;
1021 } 1036 }
1022 1037
@@ -1519,6 +1534,133 @@ static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1519 1534
1520 return rc; 1535 return rc;
1521} 1536}
1537#ifdef CONFIG_CONSOLE_POLL
1538/* Serial polling routines for writing and reading from the uart while
1539 * in an interrupt or debug context.
1540 */
1541
1542static char poll_buf[2048];
1543static int poll_ptr;
1544static int poll_cnt;
1545static void mpsc_put_poll_char(struct uart_port *port,
1546 unsigned char c);
1547
1548static int mpsc_get_poll_char(struct uart_port *port)
1549{
1550 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1551 struct mpsc_rx_desc *rxre;
1552 u32 cmdstat, bytes_in, i;
1553 u8 *bp;
1554
1555 if (!serial_polled)
1556 serial_polled = 1;
1557
1558 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
1559
1560 if (poll_cnt) {
1561 poll_cnt--;
1562 return poll_buf[poll_ptr++];
1563 }
1564 poll_ptr = 0;
1565 poll_cnt = 0;
1566
1567 while (poll_cnt == 0) {
1568 rxre = (struct mpsc_rx_desc *)(pi->rxr +
1569 (pi->rxr_posn*MPSC_RXRE_SIZE));
1570 dma_cache_sync(pi->port.dev, (void *)rxre,
1571 MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
1572#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1573 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1574 invalidate_dcache_range((ulong)rxre,
1575 (ulong)rxre + MPSC_RXRE_SIZE);
1576#endif
1577 /*
1578 * Loop through Rx descriptors handling ones that have
1579 * been completed.
1580 */
1581 while (poll_cnt == 0 &&
1582 !((cmdstat = be32_to_cpu(rxre->cmdstat)) &
1583 SDMA_DESC_CMDSTAT_O)){
1584 bytes_in = be16_to_cpu(rxre->bytecnt);
1585 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
1586 dma_cache_sync(pi->port.dev, (void *) bp,
1587 MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
1588#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1589 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1590 invalidate_dcache_range((ulong)bp,
1591 (ulong)bp + MPSC_RXBE_SIZE);
1592#endif
1593 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
1594 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
1595 !(cmdstat & pi->port.ignore_status_mask)) {
1596 poll_buf[poll_cnt] = *bp;
1597 poll_cnt++;
1598 } else {
1599 for (i = 0; i < bytes_in; i++) {
1600 poll_buf[poll_cnt] = *bp++;
1601 poll_cnt++;
1602 }
1603 pi->port.icount.rx += bytes_in;
1604 }
1605 rxre->bytecnt = cpu_to_be16(0);
1606 wmb();
1607 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
1608 SDMA_DESC_CMDSTAT_EI |
1609 SDMA_DESC_CMDSTAT_F |
1610 SDMA_DESC_CMDSTAT_L);
1611 wmb();
1612 dma_cache_sync(pi->port.dev, (void *)rxre,
1613 MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
1614#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1615 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1616 flush_dcache_range((ulong)rxre,
1617 (ulong)rxre + MPSC_RXRE_SIZE);
1618#endif
1619
1620 /* Advance to next descriptor */
1621 pi->rxr_posn = (pi->rxr_posn + 1) &
1622 (MPSC_RXR_ENTRIES - 1);
1623 rxre = (struct mpsc_rx_desc *)(pi->rxr +
1624 (pi->rxr_posn * MPSC_RXRE_SIZE));
1625 dma_cache_sync(pi->port.dev, (void *)rxre,
1626 MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
1627#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1628 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1629 invalidate_dcache_range((ulong)rxre,
1630 (ulong)rxre + MPSC_RXRE_SIZE);
1631#endif
1632 }
1633
1634 /* Restart rx engine, if its stopped */
1635 if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
1636 mpsc_start_rx(pi);
1637 }
1638 if (poll_cnt) {
1639 poll_cnt--;
1640 return poll_buf[poll_ptr++];
1641 }
1642
1643 return 0;
1644}
1645
1646
1647static void mpsc_put_poll_char(struct uart_port *port,
1648 unsigned char c)
1649{
1650 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1651 u32 data;
1652
1653 data = readl(pi->mpsc_base + MPSC_MPCR);
1654 writeb(c, pi->mpsc_base + MPSC_CHR_1);
1655 mb();
1656 data = readl(pi->mpsc_base + MPSC_CHR_2);
1657 data |= MPSC_CHR_2_TTCS;
1658 writel(data, pi->mpsc_base + MPSC_CHR_2);
1659 mb();
1660
1661 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS);
1662}
1663#endif
1522 1664
1523static struct uart_ops mpsc_pops = { 1665static struct uart_ops mpsc_pops = {
1524 .tx_empty = mpsc_tx_empty, 1666 .tx_empty = mpsc_tx_empty,
@@ -1537,6 +1679,10 @@ static struct uart_ops mpsc_pops = {
1537 .request_port = mpsc_request_port, 1679 .request_port = mpsc_request_port,
1538 .config_port = mpsc_config_port, 1680 .config_port = mpsc_config_port,
1539 .verify_port = mpsc_verify_port, 1681 .verify_port = mpsc_verify_port,
1682#ifdef CONFIG_CONSOLE_POLL
1683 .poll_get_char = mpsc_get_poll_char,
1684 .poll_put_char = mpsc_put_poll_char,
1685#endif
1540}; 1686};
1541 1687
1542/* 1688/*
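
mpsc_get_poll_char drains one completed Rx descriptor into poll_buf and then hands the bytes to the debugger one at a time, with poll_cnt/poll_ptr tracking how much of the last refill is still unread. A small standalone sketch of that refill-once, drain-byte-by-byte pattern (fill_from_hardware is a made-up stand-in for the descriptor walk):

#include <stdio.h>
#include <string.h>

static char poll_buf[2048];
static int poll_ptr, poll_cnt;

/* Stand-in for copying one completed Rx descriptor's payload. */
static int fill_from_hardware(char *buf, int max)
{
	const char *frame = "hello";
	int n = (int)strlen(frame);

	if (n > max)
		n = max;
	memcpy(buf, frame, n);
	return n;
}

static int get_poll_char(void)
{
	if (poll_cnt == 0) {			/* buffer empty: refill it */
		poll_cnt = fill_from_hardware(poll_buf, sizeof(poll_buf));
		poll_ptr = 0;
	}
	poll_cnt--;
	return poll_buf[poll_ptr++];
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		putchar(get_poll_char());
	putchar('\n');
	return 0;
}
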
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
index bd45b6230fd8..9e6a873f8203 100644
--- a/drivers/serial/zs.c
+++ b/drivers/serial/zs.c
@@ -787,7 +787,6 @@ static int zs_startup(struct uart_port *uport)
787 zport->regs[1] &= ~RxINT_MASK; 787 zport->regs[1] &= ~RxINT_MASK;
788 zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB; 788 zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB;
789 zport->regs[3] |= RxENABLE; 789 zport->regs[3] |= RxENABLE;
790 zport->regs[5] |= TxENAB;
791 zport->regs[15] |= BRKIE; 790 zport->regs[15] |= BRKIE;
792 write_zsreg(zport, R1, zport->regs[1]); 791 write_zsreg(zport, R1, zport->regs[1]);
793 write_zsreg(zport, R3, zport->regs[3]); 792 write_zsreg(zport, R3, zport->regs[3]);
@@ -814,7 +813,6 @@ static void zs_shutdown(struct uart_port *uport)
814 813
815 spin_lock_irqsave(&scc->zlock, flags); 814 spin_lock_irqsave(&scc->zlock, flags);
816 815
817 zport->regs[5] &= ~TxENAB;
818 zport->regs[3] &= ~RxENABLE; 816 zport->regs[3] &= ~RxENABLE;
819 write_zsreg(zport, R5, zport->regs[5]); 817 write_zsreg(zport, R5, zport->regs[5]);
820 write_zsreg(zport, R3, zport->regs[3]); 818 write_zsreg(zport, R3, zport->regs[3]);
@@ -959,6 +957,23 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
959 spin_unlock_irqrestore(&scc->zlock, flags); 957 spin_unlock_irqrestore(&scc->zlock, flags);
960} 958}
961 959
960/*
961 * Hack alert!
962 * Required solely so that the initial PROM-based console
963 * works undisturbed in parallel with this one.
964 */
965static void zs_pm(struct uart_port *uport, unsigned int state,
966 unsigned int oldstate)
967{
968 struct zs_port *zport = to_zport(uport);
969
970 if (state < 3)
971 zport->regs[5] |= TxENAB;
972 else
973 zport->regs[5] &= ~TxENAB;
974 write_zsreg(zport, R5, zport->regs[5]);
975}
976
962 977
963static const char *zs_type(struct uart_port *uport) 978static const char *zs_type(struct uart_port *uport)
964{ 979{
@@ -1041,6 +1056,7 @@ static struct uart_ops zs_ops = {
1041 .startup = zs_startup, 1056 .startup = zs_startup,
1042 .shutdown = zs_shutdown, 1057 .shutdown = zs_shutdown,
1043 .set_termios = zs_set_termios, 1058 .set_termios = zs_set_termios,
1059 .pm = zs_pm,
1044 .type = zs_type, 1060 .type = zs_type,
1045 .release_port = zs_release_port, 1061 .release_port = zs_release_port,
1046 .request_port = zs_request_port, 1062 .request_port = zs_request_port,
@@ -1190,6 +1206,7 @@ static int __init zs_console_setup(struct console *co, char *options)
1190 return ret; 1206 return ret;
1191 1207
1192 zs_reset(zport); 1208 zs_reset(zport);
1209 zs_pm(uport, 0, -1);
1193 1210
1194 if (options) 1211 if (options)
1195 uart_parse_options(options, &baud, &parity, &bits, &flow); 1212 uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 66ec5d8808de..2303521b4f09 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -49,25 +49,26 @@ config SPI_MASTER
49 controller and the protocol drivers for the SPI slave chips 49 controller and the protocol drivers for the SPI slave chips
50 that are connected. 50 that are connected.
51 51
52if SPI_MASTER
53
52comment "SPI Master Controller Drivers" 54comment "SPI Master Controller Drivers"
53 depends on SPI_MASTER
54 55
55config SPI_ATMEL 56config SPI_ATMEL
56 tristate "Atmel SPI Controller" 57 tristate "Atmel SPI Controller"
57 depends on (ARCH_AT91 || AVR32) && SPI_MASTER 58 depends on (ARCH_AT91 || AVR32)
58 help 59 help
59 This selects a driver for the Atmel SPI Controller, present on 60 This selects a driver for the Atmel SPI Controller, present on
60 many AT32 (AVR32) and AT91 (ARM) chips. 61 many AT32 (AVR32) and AT91 (ARM) chips.
61 62
62config SPI_BFIN 63config SPI_BFIN
63 tristate "SPI controller driver for ADI Blackfin5xx" 64 tristate "SPI controller driver for ADI Blackfin5xx"
64 depends on SPI_MASTER && BLACKFIN 65 depends on BLACKFIN
65 help 66 help
66 This is the SPI controller master driver for Blackfin 5xx processor. 67 This is the SPI controller master driver for Blackfin 5xx processor.
67 68
68config SPI_AU1550 69config SPI_AU1550
69 tristate "Au1550/Au12x0 SPI Controller" 70 tristate "Au1550/Au12x0 SPI Controller"
70 depends on SPI_MASTER && (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL 71 depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
71 select SPI_BITBANG 72 select SPI_BITBANG
72 help 73 help
73 If you say yes to this option, support will be included for the 74 If you say yes to this option, support will be included for the
@@ -78,7 +79,6 @@ config SPI_AU1550
78 79
79config SPI_BITBANG 80config SPI_BITBANG
80 tristate "Bitbanging SPI master" 81 tristate "Bitbanging SPI master"
81 depends on SPI_MASTER && EXPERIMENTAL
82 help 82 help
83 With a few GPIO pins, your system can bitbang the SPI protocol. 83 With a few GPIO pins, your system can bitbang the SPI protocol.
84 Select this to get SPI support through I/O pins (GPIO, parallel 84 Select this to get SPI support through I/O pins (GPIO, parallel
@@ -92,7 +92,7 @@ config SPI_BITBANG
92 92
93config SPI_BUTTERFLY 93config SPI_BUTTERFLY
94 tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)" 94 tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)"
95 depends on SPI_MASTER && PARPORT && EXPERIMENTAL 95 depends on PARPORT
96 select SPI_BITBANG 96 select SPI_BITBANG
97 help 97 help
98 This uses a custom parallel port cable to connect to an AVR 98 This uses a custom parallel port cable to connect to an AVR
@@ -102,14 +102,14 @@ config SPI_BUTTERFLY
102 102
103config SPI_IMX 103config SPI_IMX
104 tristate "Freescale iMX SPI controller" 104 tristate "Freescale iMX SPI controller"
105 depends on SPI_MASTER && ARCH_IMX && EXPERIMENTAL 105 depends on ARCH_IMX && EXPERIMENTAL
106 help 106 help
107 This enables using the Freescale iMX SPI controller in master 107 This enables using the Freescale iMX SPI controller in master
108 mode. 108 mode.
109 109
110config SPI_LM70_LLP 110config SPI_LM70_LLP
111 tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)" 111 tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
112 depends on SPI_MASTER && PARPORT && EXPERIMENTAL 112 depends on PARPORT && EXPERIMENTAL
113 select SPI_BITBANG 113 select SPI_BITBANG
114 help 114 help
115 This driver supports the NS LM70 LLP Evaluation Board, 115 This driver supports the NS LM70 LLP Evaluation Board,
@@ -118,14 +118,14 @@ config SPI_LM70_LLP
118 118
119config SPI_MPC52xx_PSC 119config SPI_MPC52xx_PSC
120 tristate "Freescale MPC52xx PSC SPI controller" 120 tristate "Freescale MPC52xx PSC SPI controller"
121 depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL 121 depends on PPC_MPC52xx && EXPERIMENTAL
122 help 122 help
123 This enables using the Freescale MPC52xx Programmable Serial 123 This enables using the Freescale MPC52xx Programmable Serial
124 Controller in master SPI mode. 124 Controller in master SPI mode.
125 125
126config SPI_MPC83xx 126config SPI_MPC83xx
127 tristate "Freescale MPC83xx/QUICC Engine SPI controller" 127 tristate "Freescale MPC83xx/QUICC Engine SPI controller"
128 depends on SPI_MASTER && (PPC_83xx || QUICC_ENGINE) && EXPERIMENTAL 128 depends on (PPC_83xx || QUICC_ENGINE) && EXPERIMENTAL
129 help 129 help
130 This enables using the Freescale MPC83xx and QUICC Engine SPI 130 This enables using the Freescale MPC83xx and QUICC Engine SPI
131 controllers in master mode. 131 controllers in master mode.
@@ -137,21 +137,21 @@ config SPI_MPC83xx
137 137
138config SPI_OMAP_UWIRE 138config SPI_OMAP_UWIRE
139 tristate "OMAP1 MicroWire" 139 tristate "OMAP1 MicroWire"
140 depends on SPI_MASTER && ARCH_OMAP1 140 depends on ARCH_OMAP1
141 select SPI_BITBANG 141 select SPI_BITBANG
142 help 142 help
143 This hooks up to the MicroWire controller on OMAP1 chips. 143 This hooks up to the MicroWire controller on OMAP1 chips.
144 144
145config SPI_OMAP24XX 145config SPI_OMAP24XX
146 tristate "McSPI driver for OMAP24xx/OMAP34xx" 146 tristate "McSPI driver for OMAP24xx/OMAP34xx"
147 depends on SPI_MASTER && (ARCH_OMAP24XX || ARCH_OMAP34XX) 147 depends on ARCH_OMAP24XX || ARCH_OMAP34XX
148 help 148 help
149 SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI 149 SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
150 (McSPI) modules. 150 (McSPI) modules.
151 151
152config SPI_PXA2XX 152config SPI_PXA2XX
153 tristate "PXA2xx SSP SPI master" 153 tristate "PXA2xx SSP SPI master"
154 depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL 154 depends on ARCH_PXA && EXPERIMENTAL
155 select PXA_SSP 155 select PXA_SSP
156 help 156 help
157 This enables using a PXA2xx SSP port as a SPI master controller. 157 This enables using a PXA2xx SSP port as a SPI master controller.
@@ -160,14 +160,14 @@ config SPI_PXA2XX
160 160
161config SPI_S3C24XX 161config SPI_S3C24XX
162 tristate "Samsung S3C24XX series SPI" 162 tristate "Samsung S3C24XX series SPI"
163 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL 163 depends on ARCH_S3C2410 && EXPERIMENTAL
164 select SPI_BITBANG 164 select SPI_BITBANG
165 help 165 help
166 SPI driver for Samsung S3C24XX series ARM SoCs 166 SPI driver for Samsung S3C24XX series ARM SoCs
167 167
168config SPI_S3C24XX_GPIO 168config SPI_S3C24XX_GPIO
169 tristate "Samsung S3C24XX series SPI by GPIO" 169 tristate "Samsung S3C24XX series SPI by GPIO"
170 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL 170 depends on ARCH_S3C2410 && EXPERIMENTAL
171 select SPI_BITBANG 171 select SPI_BITBANG
172 help 172 help
173 SPI driver for Samsung S3C24XX series ARM SoCs using 173 SPI driver for Samsung S3C24XX series ARM SoCs using
@@ -177,20 +177,20 @@ config SPI_S3C24XX_GPIO
177 177
178config SPI_SH_SCI 178config SPI_SH_SCI
179 tristate "SuperH SCI SPI controller" 179 tristate "SuperH SCI SPI controller"
180 depends on SPI_MASTER && SUPERH 180 depends on SUPERH
181 select SPI_BITBANG 181 select SPI_BITBANG
182 help 182 help
183 SPI driver for SuperH SCI blocks. 183 SPI driver for SuperH SCI blocks.
184 184
185config SPI_TXX9 185config SPI_TXX9
186 tristate "Toshiba TXx9 SPI controller" 186 tristate "Toshiba TXx9 SPI controller"
187 depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX 187 depends on GENERIC_GPIO && CPU_TX49XX
188 help 188 help
189 SPI driver for Toshiba TXx9 MIPS SoCs 189 SPI driver for Toshiba TXx9 MIPS SoCs
190 190
191config SPI_XILINX 191config SPI_XILINX
192 tristate "Xilinx SPI controller" 192 tristate "Xilinx SPI controller"
193 depends on SPI_MASTER && XILINX_VIRTEX && EXPERIMENTAL 193 depends on XILINX_VIRTEX && EXPERIMENTAL
194 select SPI_BITBANG 194 select SPI_BITBANG
195 help 195 help
196 This exposes the SPI controller IP from the Xilinx EDK. 196 This exposes the SPI controller IP from the Xilinx EDK.
@@ -207,11 +207,10 @@ config SPI_XILINX
207# being probably the most widely used ones. 207# being probably the most widely used ones.
208# 208#
209comment "SPI Protocol Masters" 209comment "SPI Protocol Masters"
210 depends on SPI_MASTER
211 210
212config SPI_AT25 211config SPI_AT25
213 tristate "SPI EEPROMs from most vendors" 212 tristate "SPI EEPROMs from most vendors"
214 depends on SPI_MASTER && SYSFS 213 depends on SYSFS
215 help 214 help
216 Enable this driver to get read/write support to most SPI EEPROMs, 215 Enable this driver to get read/write support to most SPI EEPROMs,
217 after you configure the board init code to know about each eeprom 216 after you configure the board init code to know about each eeprom
@@ -222,7 +221,7 @@ config SPI_AT25
222 221
223config SPI_SPIDEV 222config SPI_SPIDEV
224 tristate "User mode SPI device driver support" 223 tristate "User mode SPI device driver support"
225 depends on SPI_MASTER && EXPERIMENTAL 224 depends on EXPERIMENTAL
226 help 225 help
227 This supports user mode SPI protocol drivers. 226 This supports user mode SPI protocol drivers.
228 227
@@ -231,7 +230,7 @@ config SPI_SPIDEV
231 230
232config SPI_TLE62X0 231config SPI_TLE62X0
233 tristate "Infineon TLE62X0 (for power switching)" 232 tristate "Infineon TLE62X0 (for power switching)"
234 depends on SPI_MASTER && SYSFS 233 depends on SYSFS
235 help 234 help
236 SPI driver for Infineon TLE62X0 series line driver chips, 235 SPI driver for Infineon TLE62X0 series line driver chips,
237 such as the TLE6220, TLE6230 and TLE6240. This provides a 236 such as the TLE6220, TLE6230 and TLE6240. This provides a
@@ -242,6 +241,8 @@ config SPI_TLE62X0
242# Add new SPI protocol masters in alphabetical order above this line 241# Add new SPI protocol masters in alphabetical order above this line
243# 242#
244 243
244endif # SPI_MASTER
245
245# (slave support would go here) 246# (slave support would go here)
246 247
247endif # SPI 248endif # SPI
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 072c4a595334..9149689c79d9 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -26,6 +26,7 @@
26#include <linux/errno.h> 26#include <linux/errno.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/resource.h>
29#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
30#include <linux/spi/spi_bitbang.h> 31#include <linux/spi/spi_bitbang.h>
31#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
@@ -81,6 +82,7 @@ struct au1550_spi {
81 struct spi_master *master; 82 struct spi_master *master;
82 struct device *dev; 83 struct device *dev;
83 struct au1550_spi_info *pdata; 84 struct au1550_spi_info *pdata;
85 struct resource *ioarea;
84}; 86};
85 87
86 88
@@ -96,6 +98,8 @@ static dbdev_tab_t au1550_spi_mem_dbdev =
96 .dev_intpolarity = 0 98 .dev_intpolarity = 0
97}; 99};
98 100
101static int ddma_memid; /* id to above mem dma device */
102
99static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw); 103static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);
100 104
101 105
@@ -480,9 +484,13 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
480 au1xxx_dbdma_reset(hw->dma_tx_ch); 484 au1xxx_dbdma_reset(hw->dma_tx_ch);
481 au1550_spi_reset_fifos(hw); 485 au1550_spi_reset_fifos(hw);
482 486
483 dev_err(hw->dev, 487 if (evnt == PSC_SPIEVNT_RO)
484 "Unexpected SPI error: event=0x%x stat=0x%x!\n", 488 dev_err(hw->dev,
485 evnt, stat); 489 "dma transfer: receive FIFO overflow!\n");
490 else
491 dev_err(hw->dev,
492 "dma transfer: unexpected SPI error "
493 "(event=0x%x stat=0x%x)!\n", evnt, stat);
486 494
487 complete(&hw->master_done); 495 complete(&hw->master_done);
488 return IRQ_HANDLED; 496 return IRQ_HANDLED;
@@ -592,17 +600,17 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
592 600
593 if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO 601 if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
594 | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO 602 | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
595 | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD)) 603 | PSC_SPIEVNT_SD))
596 != 0) { 604 != 0) {
597 dev_err(hw->dev,
598 "Unexpected SPI error: event=0x%x stat=0x%x!\n",
599 evnt, stat);
600 /* 605 /*
601 * due to an error we consider transfer as done, 606 * due to an error we consider transfer as done,
602 * so mask all events until before next transfer start 607 * so mask all events until before next transfer start
603 */ 608 */
604 au1550_spi_mask_ack_all(hw); 609 au1550_spi_mask_ack_all(hw);
605 au1550_spi_reset_fifos(hw); 610 au1550_spi_reset_fifos(hw);
611 dev_err(hw->dev,
612 "pio transfer: unexpected SPI error "
613 "(event=0x%x stat=0x%x)!\n", evnt, stat);
606 complete(&hw->master_done); 614 complete(&hw->master_done);
607 return IRQ_HANDLED; 615 return IRQ_HANDLED;
608 } 616 }
@@ -616,27 +624,50 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
616 stat = hw->regs->psc_spistat; 624 stat = hw->regs->psc_spistat;
617 au_sync(); 625 au_sync();
618 626
619 if ((stat & PSC_SPISTAT_RE) == 0 && hw->rx_count < hw->len) { 627 /*
628 * Take care to not let the Rx FIFO overflow.
629 *
630 * We only write a byte if we have read one at least. Initially,
631 * the write fifo is full, so we should read from the read fifo
632 * first.
633 * In case we miss a word from the read fifo, we should get a
634 * RO event and should back out.
635 */
636 if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) {
620 hw->rx_word(hw); 637 hw->rx_word(hw);
621 /* ack the receive request event */
622 hw->regs->psc_spievent = PSC_SPIEVNT_RR;
623 au_sync();
624 busy = 1; 638 busy = 1;
625 }
626 639
627 if ((stat & PSC_SPISTAT_TF) == 0 && hw->tx_count < hw->len) { 640 if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len)
628 hw->tx_word(hw); 641 hw->tx_word(hw);
629 /* ack the transmit request event */
630 hw->regs->psc_spievent = PSC_SPIEVNT_TR;
631 au_sync();
632 busy = 1;
633 } 642 }
634 } while (busy); 643 } while (busy);
635 644
636 evnt = hw->regs->psc_spievent; 645 hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
637 au_sync(); 646 au_sync();
638 647
639 if (hw->rx_count >= hw->len || (evnt & PSC_SPIEVNT_MD) != 0) { 648 /*
649 * Restart the SPI transmission in case of a transmit underflow.
650 * This seems to work despite the notes in the Au1550 data book
651 * of Figure 8-4 with flowchart for SPI master operation:
652 *
653 * """Note 1: An XFR Error Interrupt occurs, unless masked,
654 * for any of the following events: Tx FIFO Underflow,
655 * Rx FIFO Overflow, or Multiple-master Error
656 * Note 2: In case of a Tx Underflow Error, all zeroes are
657 * transmitted."""
658 *
659 * By simply restarting the spi transfer on Tx Underflow Error,
 660 * we assume that the SPI transfer was paused instead of zeroes being
 661 * transmitted, as mentioned in Note 2 of the Au1550 data book.
662 */
663 if (evnt & PSC_SPIEVNT_TU) {
664 hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
665 au_sync();
666 hw->regs->psc_spipcr = PSC_SPIPCR_MS;
667 au_sync();
668 }
669
670 if (hw->rx_count >= hw->len) {
640 /* transfer completed successfully */ 671 /* transfer completed successfully */
641 au1550_spi_mask_ack_all(hw); 672 au1550_spi_mask_ack_all(hw);
642 complete(&hw->master_done); 673 complete(&hw->master_done);
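
The reworked PIO loop above reads the Rx FIFO before feeding the Tx FIFO, acknowledges RR/TR once per pass, and restarts the transfer on a Tx underflow instead of aborting; reading first matters because the FIFOs are only a few words deep, and writing ahead of the receiver is what produced the RO overflows. A toy model of the read-before-write pumping order (standalone C with a simulated loopback FIFO; LEN, DEPTH and the helpers are invented for the illustration):

#include <stdio.h>

#define LEN   8		/* words to transfer */
#define DEPTH 4		/* FIFO depth */

static int fifo_level;		/* loopback: every word sent comes back */
static int rx_count, tx_count;

static void rx_word(void) { fifo_level--; rx_count++; }
static void tx_word(void) { fifo_level++; tx_count++; }

int main(void)
{
	int busy;

	/* The transfer is primed by filling the Tx FIFO up front. */
	while (fifo_level < DEPTH && tx_count < LEN)
		tx_word();

	do {
		busy = 0;
		if (fifo_level > 0 && rx_count < LEN) {
			rx_word();			/* read first ... */
			busy = 1;
			if (fifo_level < DEPTH && tx_count < LEN)
				tx_word();		/* ... then send one more */
		}
	} while (busy);

	printf("tx=%d rx=%d\n", tx_count, rx_count);
	return 0;
}
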
@@ -725,6 +756,8 @@ static void __init au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
725 stat = hw->regs->psc_spistat; 756 stat = hw->regs->psc_spistat;
726 au_sync(); 757 au_sync();
727 } while ((stat & PSC_SPISTAT_DR) == 0); 758 } while ((stat & PSC_SPISTAT_DR) == 0);
759
760 au1550_spi_reset_fifos(hw);
728} 761}
729 762
730 763
@@ -732,6 +765,7 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
732{ 765{
733 struct au1550_spi *hw; 766 struct au1550_spi *hw;
734 struct spi_master *master; 767 struct spi_master *master;
768 struct resource *r;
735 int err = 0; 769 int err = 0;
736 770
737 master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi)); 771 master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi));
@@ -753,76 +787,64 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
753 goto err_no_pdata; 787 goto err_no_pdata;
754 } 788 }
755 789
756 platform_set_drvdata(pdev, hw); 790 r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
757 791 if (!r) {
758 init_completion(&hw->master_done); 792 dev_err(&pdev->dev, "no IRQ\n");
759 793 err = -ENODEV;
760 hw->bitbang.master = hw->master; 794 goto err_no_iores;
761 hw->bitbang.setup_transfer = au1550_spi_setupxfer; 795 }
762 hw->bitbang.chipselect = au1550_spi_chipsel; 796 hw->irq = r->start;
763 hw->bitbang.master->setup = au1550_spi_setup; 797
764 hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs; 798 hw->usedma = 0;
799 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
800 if (r) {
801 hw->dma_tx_id = r->start;
802 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
803 if (r) {
804 hw->dma_rx_id = r->start;
805 if (usedma && ddma_memid) {
806 if (pdev->dev.dma_mask == NULL)
807 dev_warn(&pdev->dev, "no dma mask\n");
808 else
809 hw->usedma = 1;
810 }
811 }
812 }
765 813
766 switch (hw->pdata->bus_num) { 814 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
767 case 0: 815 if (!r) {
768 hw->irq = AU1550_PSC0_INT; 816 dev_err(&pdev->dev, "no mmio resource\n");
769 hw->regs = (volatile psc_spi_t *)PSC0_BASE_ADDR; 817 err = -ENODEV;
770 hw->dma_rx_id = DSCR_CMD0_PSC0_RX; 818 goto err_no_iores;
771 hw->dma_tx_id = DSCR_CMD0_PSC0_TX;
772 break;
773 case 1:
774 hw->irq = AU1550_PSC1_INT;
775 hw->regs = (volatile psc_spi_t *)PSC1_BASE_ADDR;
776 hw->dma_rx_id = DSCR_CMD0_PSC1_RX;
777 hw->dma_tx_id = DSCR_CMD0_PSC1_TX;
778 break;
779 case 2:
780 hw->irq = AU1550_PSC2_INT;
781 hw->regs = (volatile psc_spi_t *)PSC2_BASE_ADDR;
782 hw->dma_rx_id = DSCR_CMD0_PSC2_RX;
783 hw->dma_tx_id = DSCR_CMD0_PSC2_TX;
784 break;
785 case 3:
786 hw->irq = AU1550_PSC3_INT;
787 hw->regs = (volatile psc_spi_t *)PSC3_BASE_ADDR;
788 hw->dma_rx_id = DSCR_CMD0_PSC3_RX;
789 hw->dma_tx_id = DSCR_CMD0_PSC3_TX;
790 break;
791 default:
792 dev_err(&pdev->dev, "Wrong bus_num of SPI\n");
793 err = -ENOENT;
794 goto err_no_pdata;
795 } 819 }
796 820
797 if (request_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t), 821 hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t),
798 pdev->name) == NULL) { 822 pdev->name);
823 if (!hw->ioarea) {
799 dev_err(&pdev->dev, "Cannot reserve iomem region\n"); 824 dev_err(&pdev->dev, "Cannot reserve iomem region\n");
800 err = -ENXIO; 825 err = -ENXIO;
801 goto err_no_iores; 826 goto err_no_iores;
802 } 827 }
803 828
804 829 hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t));
805 if (usedma) { 830 if (!hw->regs) {
806 if (pdev->dev.dma_mask == NULL) 831 dev_err(&pdev->dev, "cannot ioremap\n");
807 dev_warn(&pdev->dev, "no dma mask\n"); 832 err = -ENXIO;
808 else 833 goto err_ioremap;
809 hw->usedma = 1;
810 } 834 }
811 835
812 if (hw->usedma) { 836 platform_set_drvdata(pdev, hw);
813 /* 837
814 * create memory device with 8 bits dev_devwidth 838 init_completion(&hw->master_done);
815 * needed for proper byte ordering to spi fifo 839
816 */ 840 hw->bitbang.master = hw->master;
817 int memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev); 841 hw->bitbang.setup_transfer = au1550_spi_setupxfer;
818 if (!memid) { 842 hw->bitbang.chipselect = au1550_spi_chipsel;
819 dev_err(&pdev->dev, 843 hw->bitbang.master->setup = au1550_spi_setup;
820 "Cannot create dma 8 bit mem device\n"); 844 hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
821 err = -ENXIO;
822 goto err_dma_add_dev;
823 }
824 845
825 hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(memid, 846 if (hw->usedma) {
847 hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid,
826 hw->dma_tx_id, NULL, (void *)hw); 848 hw->dma_tx_id, NULL, (void *)hw);
827 if (hw->dma_tx_ch == 0) { 849 if (hw->dma_tx_ch == 0) {
828 dev_err(&pdev->dev, 850 dev_err(&pdev->dev,
@@ -841,7 +863,7 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
841 863
842 864
843 hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id, 865 hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id,
844 memid, NULL, (void *)hw); 866 ddma_memid, NULL, (void *)hw);
845 if (hw->dma_rx_ch == 0) { 867 if (hw->dma_rx_ch == 0) {
846 dev_err(&pdev->dev, 868 dev_err(&pdev->dev,
847 "Cannot allocate rx dma channel\n"); 869 "Cannot allocate rx dma channel\n");
@@ -874,7 +896,7 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
874 goto err_no_irq; 896 goto err_no_irq;
875 } 897 }
876 898
877 master->bus_num = hw->pdata->bus_num; 899 master->bus_num = pdev->id;
878 master->num_chipselect = hw->pdata->num_chipselect; 900 master->num_chipselect = hw->pdata->num_chipselect;
879 901
880 /* 902 /*
@@ -924,8 +946,11 @@ err_no_txdma_descr:
924 au1xxx_dbdma_chan_free(hw->dma_tx_ch); 946 au1xxx_dbdma_chan_free(hw->dma_tx_ch);
925 947
926err_no_txdma: 948err_no_txdma:
927err_dma_add_dev: 949 iounmap((void __iomem *)hw->regs);
928 release_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t)); 950
951err_ioremap:
952 release_resource(hw->ioarea);
953 kfree(hw->ioarea);
929 954
930err_no_iores: 955err_no_iores:
931err_no_pdata: 956err_no_pdata:
@@ -944,7 +969,9 @@ static int __exit au1550_spi_remove(struct platform_device *pdev)
944 969
945 spi_bitbang_stop(&hw->bitbang); 970 spi_bitbang_stop(&hw->bitbang);
946 free_irq(hw->irq, hw); 971 free_irq(hw->irq, hw);
947 release_mem_region((unsigned long)hw->regs, sizeof(psc_spi_t)); 972 iounmap((void __iomem *)hw->regs);
973 release_resource(hw->ioarea);
974 kfree(hw->ioarea);
948 975
949 if (hw->usedma) { 976 if (hw->usedma) {
950 au1550_spi_dma_rxtmp_free(hw); 977 au1550_spi_dma_rxtmp_free(hw);
@@ -971,12 +998,24 @@ static struct platform_driver au1550_spi_drv = {
971 998
972static int __init au1550_spi_init(void) 999static int __init au1550_spi_init(void)
973{ 1000{
1001 /*
1002 * create memory device with 8 bits dev_devwidth
1003 * needed for proper byte ordering to spi fifo
1004 */
1005 if (usedma) {
1006 ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
1007 if (!ddma_memid)
 1008 printk(KERN_ERR "au1550-spi: cannot add memory "
1009 "dbdma device\n");
1010 }
974 return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe); 1011 return platform_driver_probe(&au1550_spi_drv, au1550_spi_probe);
975} 1012}
976module_init(au1550_spi_init); 1013module_init(au1550_spi_init);
977 1014
978static void __exit au1550_spi_exit(void) 1015static void __exit au1550_spi_exit(void)
979{ 1016{
1017 if (usedma && ddma_memid)
1018 au1xxx_ddma_del_device(ddma_memid);
980 platform_driver_unregister(&au1550_spi_drv); 1019 platform_driver_unregister(&au1550_spi_drv);
981} 1020}
982module_exit(au1550_spi_exit); 1021module_exit(au1550_spi_exit);
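
Taken together, the au1550_spi probe changes swap the hard-coded per-bus PSC addresses and IRQs for platform resources, ioremap the register window, and register the 8-bit DBDMA memory device once at module init (ddma_memid) instead of per probe. A hedged sketch of the resource-driven part of such a probe path (struct my_hw and my_probe_resources are invented names; the calls are the usual platform_get_resource / request_mem_region / ioremap sequence):

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_hw {
	void __iomem	*regs;
	int		 irq;
	struct resource	*ioarea;
};

static int my_probe_resources(struct platform_device *pdev, struct my_hw *hw,
			      resource_size_t len)
{
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!r)
		return -ENODEV;
	hw->irq = r->start;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;

	hw->ioarea = request_mem_region(r->start, len, pdev->name);
	if (!hw->ioarea)
		return -ENXIO;

	hw->regs = ioremap(r->start, len);
	if (!hw->regs) {
		/* mirror the driver's cleanup of the reserved region */
		release_resource(hw->ioarea);
		kfree(hw->ioarea);
		return -ENXIO;
	}
	return 0;
}
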
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 1771b2456bfa..ecca4a6a6f94 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -218,6 +218,8 @@ struct spi_device *spi_new_device(struct spi_master *master,
218 if (!spi_master_get(master)) 218 if (!spi_master_get(master))
219 return NULL; 219 return NULL;
220 220
221 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
222
221 proxy = kzalloc(sizeof *proxy, GFP_KERNEL); 223 proxy = kzalloc(sizeof *proxy, GFP_KERNEL);
222 if (!proxy) { 224 if (!proxy) {
223 dev_err(dev, "can't alloc dev for cs%d\n", 225 dev_err(dev, "can't alloc dev for cs%d\n",
@@ -229,7 +231,7 @@ struct spi_device *spi_new_device(struct spi_master *master,
229 proxy->max_speed_hz = chip->max_speed_hz; 231 proxy->max_speed_hz = chip->max_speed_hz;
230 proxy->mode = chip->mode; 232 proxy->mode = chip->mode;
231 proxy->irq = chip->irq; 233 proxy->irq = chip->irq;
232 proxy->modalias = chip->modalias; 234 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
233 235
234 snprintf(proxy->dev.bus_id, sizeof proxy->dev.bus_id, 236 snprintf(proxy->dev.bus_id, sizeof proxy->dev.bus_id,
235 "%s.%u", master->dev.bus_id, 237 "%s.%u", master->dev.bus_id,
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index 6832da6f7109..070c6219e2d6 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -266,21 +266,24 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
266 266
267 cs->hw_mode |= SPMODE_LEN(bits_per_word); 267 cs->hw_mode |= SPMODE_LEN(bits_per_word);
268 268
269 if ((mpc83xx_spi->spibrg / hz) >= 64) { 269 if ((mpc83xx_spi->spibrg / hz) > 64) {
270 pm = mpc83xx_spi->spibrg / (hz * 64) - 1; 270 pm = mpc83xx_spi->spibrg / (hz * 64);
271 if (pm > 0x0f) { 271 if (pm > 16) {
272 dev_err(&spi->dev, "Requested speed is too " 272 cs->hw_mode |= SPMODE_DIV16;
273 "low: %d Hz. Will use %d Hz instead.\n", 273 pm /= 16;
274 hz, mpc83xx_spi->spibrg / 1024); 274 if (pm > 16) {
275 pm = 0x0f; 275 dev_err(&spi->dev, "Requested speed is too "
276 "low: %d Hz. Will use %d Hz instead.\n",
277 hz, mpc83xx_spi->spibrg / 1024);
278 pm = 16;
279 }
276 } 280 }
277 cs->hw_mode |= SPMODE_PM(pm) | SPMODE_DIV16; 281 } else
278 } else {
279 pm = mpc83xx_spi->spibrg / (hz * 4); 282 pm = mpc83xx_spi->spibrg / (hz * 4);
280 if (pm) 283 if (pm)
281 pm--; 284 pm--;
282 cs->hw_mode |= SPMODE_PM(pm); 285
283 } 286 cs->hw_mode |= SPMODE_PM(pm);
284 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); 287 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
285 if (cs->hw_mode != regval) { 288 if (cs->hw_mode != regval) {
286 unsigned long flags; 289 unsigned long flags;
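
The spi_mpc83xx hunk reworks how the prescaler and the extra /16 stage are chosen for a requested rate, clamping with a dev_err warning when even the largest divider cannot reach it. Without asserting the controller's exact clocking formula, the sketch below shows the general two-stage idea: pick the smallest divider of the form 4*(pm+1), falling back to an additional /16 stage when that range runs out (standalone C; pick_divider and its constants are illustrative only):

#include <stdio.h>

/*
 * Generic illustration (not the driver's exact math): choose the
 * smallest divider of the form 4 * (pm + 1) * (div16 ? 16 : 1),
 * with pm limited to 0..15, that does not exceed the requested rate.
 */
static unsigned int pick_divider(unsigned int busclk, unsigned int hz,
				 unsigned int *pm, int *div16)
{
	unsigned int need = (busclk + hz - 1) / hz;	/* round up */
	unsigned int p;

	*div16 = 0;
	if (need > 4 * 16) {		/* beyond the plain prescaler range */
		*div16 = 1;
		need = (need + 15) / 16;
	}
	p = (need + 3) / 4;		/* 4 * p >= need, p = pm + 1 */
	if (p > 16)
		p = 16;			/* clamp: real rate ends up above the request */
	*pm = p - 1;
	return 4 * p * (*div16 ? 16 : 1);
}

int main(void)
{
	unsigned int pm, div;
	int div16;

	div = pick_divider(133333333, 1000000, &pm, &div16);
	printf("pm=%u div16=%d -> %u Hz\n", pm, div16, 133333333 / div);
	return 0;
}
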
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2833fd772a24..e5e0cfed5e3b 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -228,7 +228,6 @@ static int spidev_message(struct spidev_data *spidev,
228 * We walk the array of user-provided transfers, using each one 228 * We walk the array of user-provided transfers, using each one
229 * to initialize a kernel version of the same transfer. 229 * to initialize a kernel version of the same transfer.
230 */ 230 */
231 mutex_lock(&spidev->buf_lock);
232 buf = spidev->buffer; 231 buf = spidev->buffer;
233 total = 0; 232 total = 0;
234 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; 233 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
@@ -296,14 +295,12 @@ static int spidev_message(struct spidev_data *spidev,
296 status = total; 295 status = total;
297 296
298done: 297done:
299 mutex_unlock(&spidev->buf_lock);
300 kfree(k_xfers); 298 kfree(k_xfers);
301 return status; 299 return status;
302} 300}
303 301
304static int 302static long
305spidev_ioctl(struct inode *inode, struct file *filp, 303spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
306 unsigned int cmd, unsigned long arg)
307{ 304{
308 int err = 0; 305 int err = 0;
309 int retval = 0; 306 int retval = 0;
@@ -341,6 +338,14 @@ spidev_ioctl(struct inode *inode, struct file *filp,
341 if (spi == NULL) 338 if (spi == NULL)
342 return -ESHUTDOWN; 339 return -ESHUTDOWN;
343 340
341 /* use the buffer lock here for triple duty:
342 * - prevent I/O (from us) so calling spi_setup() is safe;
343 * - prevent concurrent SPI_IOC_WR_* from morphing
344 * data fields while SPI_IOC_RD_* reads them;
345 * - SPI_IOC_MESSAGE needs the buffer locked "normally".
346 */
347 mutex_lock(&spidev->buf_lock);
348
344 switch (cmd) { 349 switch (cmd) {
345 /* read requests */ 350 /* read requests */
346 case SPI_IOC_RD_MODE: 351 case SPI_IOC_RD_MODE:
@@ -456,6 +461,8 @@ spidev_ioctl(struct inode *inode, struct file *filp,
456 kfree(ioc); 461 kfree(ioc);
457 break; 462 break;
458 } 463 }
464
465 mutex_unlock(&spidev->buf_lock);
459 spi_dev_put(spi); 466 spi_dev_put(spi);
460 return retval; 467 return retval;
461} 468}
@@ -533,7 +540,7 @@ static struct file_operations spidev_fops = {
533 */ 540 */
534 .write = spidev_write, 541 .write = spidev_write,
535 .read = spidev_read, 542 .read = spidev_read,
536 .ioctl = spidev_ioctl, 543 .unlocked_ioctl = spidev_ioctl,
537 .open = spidev_open, 544 .open = spidev_open,
538 .release = spidev_release, 545 .release = spidev_release,
539}; 546};
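
spidev now registers an unlocked_ioctl handler, so the big kernel lock no longer serialises it; instead the handler takes spidev->buf_lock for the whole call, which (as the added comment says) also keeps SPI_IOC_WR_* from racing SPI_IOC_RD_* and protects the transfer buffer. A hedged sketch of that shape for a made-up character device (demo_data, DEMO_IOC_* and demo_fops are hypothetical):

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_data {
	struct mutex	buf_lock;	/* mutex_init() done at probe/open */
	u32		speed_hz;
};

#define DEMO_IOC_RD_SPEED	_IOR('k', 0, u32)
#define DEMO_IOC_WR_SPEED	_IOW('k', 0, u32)

static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct demo_data *d = filp->private_data;
	long ret = 0;

	mutex_lock(&d->buf_lock);	/* replaces the implicit BKL */
	switch (cmd) {
	case DEMO_IOC_RD_SPEED:
		ret = put_user(d->speed_hz, (u32 __user *)arg);
		break;
	case DEMO_IOC_WR_SPEED:
		ret = get_user(d->speed_hz, (u32 __user *)arg);
		break;
	default:
		ret = -ENOTTY;
	}
	mutex_unlock(&d->buf_lock);
	return ret;
}

static const struct file_operations demo_fops = {
	.unlocked_ioctl	= demo_ioctl,
};
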
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 113a0468ffcb..68d6f4988fb5 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -353,11 +353,12 @@ static int __init xilinx_spi_probe(struct platform_device *dev)
353 goto put_master; 353 goto put_master;
354 } 354 }
355 355
356 xspi->irq = platform_get_irq(dev, 0); 356 ret = platform_get_irq(dev, 0);
357 if (xspi->irq < 0) { 357 if (ret < 0) {
358 ret = -ENXIO; 358 ret = -ENXIO;
359 goto unmap_io; 359 goto unmap_io;
360 } 360 }
361 xspi->irq = ret;
361 362
362 master->bus_num = pdata->bus_num; 363 master->bus_num = pdata->bus_num;
363 master->num_chipselect = pdata->num_chipselect; 364 master->num_chipselect = pdata->num_chipselect;
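
The xilinx_spi fix tests platform_get_irq()'s signed return value before storing it; the usual reason for this pattern is that the destination field is unsigned, so a negative errno turns into a huge "valid-looking" IRQ number and a later < 0 check can never fire. A standalone demonstration (fake_get_irq stands in for platform_get_irq):

#include <stdio.h>

struct dev {
	unsigned int irq;	/* unsigned: cannot represent -ENXIO */
};

/* Stand-in for platform_get_irq(): negative errno on failure. */
static int fake_get_irq(int fail)
{
	return fail ? -6 : 42;
}

int main(void)
{
	struct dev d = { 0 };
	int ret;

	/* Broken pattern: assign first, then look at the unsigned field. */
	d.irq = fake_get_irq(1);
	printf("unsigned field now holds %u - the error is gone\n", d.irq);

	/* Fixed pattern: keep the signed return value until it is checked. */
	ret = fake_get_irq(1);
	if (ret < 0)
		printf("caught error %d before storing it\n", ret);
	else
		d.irq = ret;

	return 0;
}
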
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index fbd6289977c8..8fb0066609bb 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -152,9 +152,10 @@ static int is_vbus_present(void)
152static void pullup_off(void) 152static void pullup_off(void)
153{ 153{
154 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 154 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
155 int off_level = mach->gpio_pullup_inverted;
155 156
156 if (mach->gpio_pullup) 157 if (mach->gpio_pullup)
157 gpio_set_value(mach->gpio_pullup, 0); 158 gpio_set_value(mach->gpio_pullup, off_level);
158 else if (mach->udc_command) 159 else if (mach->udc_command)
159 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT); 160 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
160} 161}
@@ -162,9 +163,10 @@ static void pullup_off(void)
162static void pullup_on(void) 163static void pullup_on(void)
163{ 164{
164 struct pxa2xx_udc_mach_info *mach = the_controller->mach; 165 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
166 int on_level = !mach->gpio_pullup_inverted;
165 167
166 if (mach->gpio_pullup) 168 if (mach->gpio_pullup)
167 gpio_set_value(mach->gpio_pullup, 1); 169 gpio_set_value(mach->gpio_pullup, on_level);
168 else if (mach->udc_command) 170 else if (mach->udc_command)
169 mach->udc_command(PXA2XX_UDC_CMD_CONNECT); 171 mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
170} 172}
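
The pxa25x_udc change lets boards with an active-low D+ pull-up GPIO say so via gpio_pullup_inverted, and the connect/disconnect paths derive the level instead of hard-coding 1 and 0. The computation is just an XOR of "want it on" with "line is inverted"; a tiny standalone illustration:

#include <stdio.h>

static int level_for(int want_on, int inverted)
{
	return want_on ^ inverted;	/* active-low lines flip the level */
}

int main(void)
{
	printf("on,  normal   -> drive %d\n", level_for(1, 0));
	printf("on,  inverted -> drive %d\n", level_for(1, 1));
	printf("off, normal   -> drive %d\n", level_for(0, 0));
	printf("off, inverted -> drive %d\n", level_for(0, 1));
	return 0;
}
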
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 9b887ef64ff1..70d135e0cc47 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1658,6 +1658,32 @@ config FB_PM3
1658 similar boards, 3DLabs Permedia3 Create!, Appian Jeronimo 2000 1658 similar boards, 3DLabs Permedia3 Create!, Appian Jeronimo 2000
1659 and maybe other boards. 1659 and maybe other boards.
1660 1660
1661config FB_CARMINE
1662 tristate "Fujitsu carmine frame buffer support"
1663 depends on FB && PCI
1664 select FB_CFB_FILLRECT
1665 select FB_CFB_COPYAREA
1666 select FB_CFB_IMAGEBLIT
1667 help
1668 This is the frame buffer device driver for the Fujitsu Carmine chip.
1669 The driver provides two independent frame buffer devices.
1670
1671choice
1672 depends on FB_CARMINE
1673 prompt "DRAM timing"
1674 default FB_CARMINE_DRAM_EVAL
1675
1676config FB_CARMINE_DRAM_EVAL
1677 bool "Eval board timings"
1678 help
1679 Use timings which work on the eval card.
1680
1681config CARMINE_DRAM_CUSTOM
1682 bool "Custom board timings"
1683 help
1684 Use custom board timings.
1685endchoice
1686
1661config FB_AU1100 1687config FB_AU1100
1662 bool "Au1100 LCD Driver" 1688 bool "Au1100 LCD Driver"
1663 depends on (FB = y) && MIPS && SOC_AU1100 1689 depends on (FB = y) && MIPS && SOC_AU1100
@@ -1840,6 +1866,16 @@ config FB_W100
1840 1866
1841 If unsure, say N. 1867 If unsure, say N.
1842 1868
1869config FB_SH_MOBILE_LCDC
1870 tristate "SuperH Mobile LCDC framebuffer support"
1871 depends on FB && SUPERH
1872 select FB_CFB_FILLRECT
1873 select FB_CFB_COPYAREA
1874 select FB_CFB_IMAGEBLIT
1875 default m
1876 ---help---
1877 Frame buffer driver for the on-chip SH-Mobile LCD controller.
1878
1843config FB_S3C2410 1879config FB_S3C2410
1844 tristate "S3C2410 LCD framebuffer support" 1880 tristate "S3C2410 LCD framebuffer support"
1845 depends on FB && ARCH_S3C2410 1881 depends on FB && ARCH_S3C2410
@@ -1951,6 +1987,23 @@ config FB_AM200EPD
1951 This enables support for the Metronome display controller used on 1987 This enables support for the Metronome display controller used on
1952 the E-Ink AM-200 EPD devkit. 1988 the E-Ink AM-200 EPD devkit.
1953 1989
1990config FB_COBALT
1991 tristate "Cobalt server LCD frame buffer support"
1992 depends on FB && MIPS_COBALT
1993
1994config FB_SH7760
1995 bool "SH7760/SH7763 LCDC support"
1996 depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763)
1997 select FB_CFB_FILLRECT
1998 select FB_CFB_COPYAREA
1999 select FB_CFB_IMAGEBLIT
2000 help
2001 Support for the SH7760/SH7763 integrated (D)STN/TFT LCD Controller.
2002 Supports display resolutions up to 1024x1024 pixel, grayscale and
2003 color operation, with depths ranging from 1 bpp to 8 bpp monochrome
2004 and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for
2005 panels <= 320 pixel horizontal resolution.
2006
1954config FB_VIRTUAL 2007config FB_VIRTUAL
1955 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" 2008 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
1956 depends on FB 2009 depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 04bca35403ff..0ebc1bfd2514 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -106,17 +106,22 @@ obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
106obj-$(CONFIG_FB_MAXINE) += maxinefb.o 106obj-$(CONFIG_FB_MAXINE) += maxinefb.o
107obj-$(CONFIG_FB_METRONOME) += metronomefb.o 107obj-$(CONFIG_FB_METRONOME) += metronomefb.o
108obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o 108obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
109obj-$(CONFIG_FB_SH7760) += sh7760fb.o
109obj-$(CONFIG_FB_IMX) += imxfb.o 110obj-$(CONFIG_FB_IMX) += imxfb.o
110obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o 111obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
111obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o 112obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o
113obj-$(CONFIG_FB_COBALT) += cobalt_lcdfb.o
112obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/ 114obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/
113obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/ 115obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/
114obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o 116obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
115obj-$(CONFIG_FB_PS3) += ps3fb.o 117obj-$(CONFIG_FB_PS3) += ps3fb.o
116obj-$(CONFIG_FB_SM501) += sm501fb.o 118obj-$(CONFIG_FB_SM501) += sm501fb.o
117obj-$(CONFIG_FB_XILINX) += xilinxfb.o 119obj-$(CONFIG_FB_XILINX) += xilinxfb.o
120obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
121obj-$(CONFIG_FB_SH7343VOU) += sh7343_voufb.o
118obj-$(CONFIG_FB_OMAP) += omap/ 122obj-$(CONFIG_FB_OMAP) += omap/
119obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o 123obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
124obj-$(CONFIG_FB_CARMINE) += carminefb.o
120 125
121# Platform or fallback drivers go here 126# Platform or fallback drivers go here
122obj-$(CONFIG_FB_UVESA) += uvesafb.o 127obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index eedb8285e32f..017233d0c481 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -23,6 +23,7 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/ctype.h> 24#include <linux/ctype.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/mm.h>
26#include <linux/init.h> 27#include <linux/init.h>
27#include <linux/fb.h> 28#include <linux/fb.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 45c154ade9ca..b8e9a8682f2d 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -1136,7 +1136,6 @@ static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg
1136 * Interface to the low level console driver 1136 * Interface to the low level console driver
1137 */ 1137 */
1138 1138
1139int amifb_init(void);
1140static void amifb_deinit(void); 1139static void amifb_deinit(void);
1141 1140
1142 /* 1141 /*
@@ -2048,13 +2047,16 @@ static void amifb_copyarea(struct fb_info *info,
2048 width = x2 - dx; 2047 width = x2 - dx;
2049 height = y2 - dy; 2048 height = y2 - dy;
2050 2049
2050 if (area->sx + dx < area->dx || area->sy + dy < area->dy)
2051 return;
2052
2051 /* update sx,sy */ 2053 /* update sx,sy */
2052 sx = area->sx + (dx - area->dx); 2054 sx = area->sx + (dx - area->dx);
2053 sy = area->sy + (dy - area->dy); 2055 sy = area->sy + (dy - area->dy);
2054 2056
2055 /* the source must be completely inside the virtual screen */ 2057 /* the source must be completely inside the virtual screen */
2056 if (sx < 0 || sy < 0 || (sx + width) > info->var.xres_virtual || 2058 if (sx + width > info->var.xres_virtual ||
2057 (sy + height) > info->var.yres_virtual) 2059 sy + height > info->var.yres_virtual)
2058 return; 2060 return;
2059 2061
2060 if (dy > sy || (dy == sy && dx > sx)) { 2062 if (dy > sy || (dy == sy && dx > sx)) {
@@ -2245,7 +2247,7 @@ static inline void chipfree(void)
2245 * Initialisation 2247 * Initialisation
2246 */ 2248 */
2247 2249
2248int __init amifb_init(void) 2250static int __init amifb_init(void)
2249{ 2251{
2250 int tag, i, err = 0; 2252 int tag, i, err = 0;
2251 u_long chipptr; 2253 u_long chipptr;
@@ -3790,16 +3792,14 @@ static void ami_rebuild_copper(void)
3790 } 3792 }
3791} 3793}
3792 3794
3793 3795static void __exit amifb_exit(void)
3794module_init(amifb_init);
3795
3796#ifdef MODULE
3797MODULE_LICENSE("GPL");
3798
3799void cleanup_module(void)
3800{ 3796{
3801 unregister_framebuffer(&fb_info); 3797 unregister_framebuffer(&fb_info);
3802 amifb_deinit(); 3798 amifb_deinit();
3803 amifb_video_off(); 3799 amifb_video_off();
3804} 3800}
3805#endif /* MODULE */ 3801
3802module_init(amifb_init);
3803module_exit(amifb_exit);
3804
3805MODULE_LICENSE("GPL");
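
Besides converting amifb to module_init/module_exit, the copyarea hunk drops the sx < 0 test (presumably unusable because the locals are unsigned) and instead rejects the copy before the recomputed source coordinate area->sx + (dx - area->dx) can go negative and wrap; atafb below gets the same treatment. A standalone illustration of why testing area->sx + dx < area->dx catches exactly that case:

#include <stdio.h>

/*
 * After the destination corner has been clipped from area_dx to dx,
 * the matching source coordinate is area_sx + (dx - area_dx).  With
 * unsigned arithmetic a negative result wraps to a huge value, so the
 * safe test is done before the subtraction ever happens.
 */
static int source_in_range(unsigned int area_sx, unsigned int area_dx,
			   unsigned int dx)
{
	return area_sx + dx >= area_dx;
}

int main(void)
{
	/* source stays at 2 + (10 - 10) = 2: fine */
	printf("ok:   %d\n", source_in_range(2, 10, 10));

	/* source would be 2 + (4 - 10) = -4: reject instead of wrapping */
	printf("wrap: %d\n", source_in_range(2, 10, 4));

	return 0;
}
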
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index fa55d356b535..77eb8b34fbfa 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2593,13 +2593,16 @@ static void atafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
2593 width = x2 - dx; 2593 width = x2 - dx;
2594 height = y2 - dy; 2594 height = y2 - dy;
2595 2595
2596 if (area->sx + dx < area->dx || area->sy + dy < area->dy)
2597 return;
2598
2596 /* update sx,sy */ 2599 /* update sx,sy */
2597 sx = area->sx + (dx - area->dx); 2600 sx = area->sx + (dx - area->dx);
2598 sy = area->sy + (dy - area->dy); 2601 sy = area->sy + (dy - area->dy);
2599 2602
2600 /* the source must be completely inside the virtual screen */ 2603 /* the source must be completely inside the virtual screen */
2601 if (sx < 0 || sy < 0 || (sx + width) > info->var.xres_virtual || 2604 if (sx + width > info->var.xres_virtual ||
2602 (sy + height) > info->var.yres_virtual) 2605 sy + height > info->var.yres_virtual)
2603 return; 2606 return;
2604 2607
2605 if (dy > sy || (dy == sy && dx > sx)) { 2608 if (dy > sy || (dy == sy && dx > sx)) {
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index b004036d4087..5b3a15dffb5f 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -256,6 +256,20 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
256 return 0; 256 return 0;
257} 257}
258 258
259static const struct fb_videomode *atmel_lcdfb_choose_mode(struct fb_var_screeninfo *var,
260 struct fb_info *info)
261{
262 struct fb_videomode varfbmode;
263 const struct fb_videomode *fbmode = NULL;
264
265 fb_var_to_videomode(&varfbmode, var);
266 fbmode = fb_find_nearest_mode(&varfbmode, &info->modelist);
267 if (fbmode)
268 fb_videomode_to_var(var, fbmode);
269 return fbmode;
270}
271
272
259/** 273/**
260 * atmel_lcdfb_check_var - Validates a var passed in. 274 * atmel_lcdfb_check_var - Validates a var passed in.
261 * @var: frame buffer variable screen structure 275 * @var: frame buffer variable screen structure
@@ -289,6 +303,15 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
289 clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000; 303 clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
290 304
291 dev_dbg(dev, "%s:\n", __func__); 305 dev_dbg(dev, "%s:\n", __func__);
306
307 if (!(var->pixclock && var->bits_per_pixel)) {
308 /* choose a suitable mode if possible */
309 if (!atmel_lcdfb_choose_mode(var, info)) {
310 dev_err(dev, "needed value not specified\n");
311 return -EINVAL;
312 }
313 }
314
292 dev_dbg(dev, " resolution: %ux%u\n", var->xres, var->yres); 315 dev_dbg(dev, " resolution: %ux%u\n", var->xres, var->yres);
293 dev_dbg(dev, " pixclk: %lu KHz\n", PICOS2KHZ(var->pixclock)); 316 dev_dbg(dev, " pixclk: %lu KHz\n", PICOS2KHZ(var->pixclock));
294 dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel); 317 dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel);
@@ -299,6 +322,13 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
299 return -EINVAL; 322 return -EINVAL;
300 } 323 }
301 324
 325 /* Do not allow the real resolution to be larger than the virtual one */
326 if (var->xres > var->xres_virtual)
327 var->xres_virtual = var->xres;
328
329 if (var->yres > var->yres_virtual)
330 var->yres_virtual = var->yres;
331
302 /* Force same alignment for each line */ 332 /* Force same alignment for each line */
303 var->xres = (var->xres + 3) & ~3UL; 333 var->xres = (var->xres + 3) & ~3UL;
304 var->xres_virtual = (var->xres_virtual + 3) & ~3UL; 334 var->xres_virtual = (var->xres_virtual + 3) & ~3UL;
@@ -379,6 +409,35 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
379 return 0; 409 return 0;
380} 410}
381 411
412/*
413 * LCD reset sequence
414 */
415static void atmel_lcdfb_reset(struct atmel_lcdfb_info *sinfo)
416{
417 might_sleep();
418
419 /* LCD power off */
420 lcdc_writel(sinfo, ATMEL_LCDC_PWRCON, sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET);
421
422 /* wait for the LCDC core to become idle */
423 while (lcdc_readl(sinfo, ATMEL_LCDC_PWRCON) & ATMEL_LCDC_BUSY)
424 msleep(10);
425
426 /* DMA disable */
427 lcdc_writel(sinfo, ATMEL_LCDC_DMACON, 0);
428
429 /* wait for DMA engine to become idle */
430 while (lcdc_readl(sinfo, ATMEL_LCDC_DMACON) & ATMEL_LCDC_DMABUSY)
431 msleep(10);
432
433 /* LCD power on */
434 lcdc_writel(sinfo, ATMEL_LCDC_PWRCON,
435 (sinfo->guard_time << ATMEL_LCDC_GUARDT_OFFSET) | ATMEL_LCDC_PWR);
436
437 /* DMA enable */
438 lcdc_writel(sinfo, ATMEL_LCDC_DMACON, sinfo->default_dmacon);
439}
440
382/** 441/**
383 * atmel_lcdfb_set_par - Alters the hardware state. 442 * atmel_lcdfb_set_par - Alters the hardware state.
384 * @info: frame buffer structure that represents a single frame buffer 443 * @info: frame buffer structure that represents a single frame buffer
@@ -401,6 +460,8 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
401 unsigned long clk_value_khz; 460 unsigned long clk_value_khz;
402 unsigned long bits_per_line; 461 unsigned long bits_per_line;
403 462
463 might_sleep();
464
404 dev_dbg(info->device, "%s:\n", __func__); 465 dev_dbg(info->device, "%s:\n", __func__);
405 dev_dbg(info->device, " * resolution: %ux%u (%ux%u virtual)\n", 466 dev_dbg(info->device, " * resolution: %ux%u (%ux%u virtual)\n",
406 info->var.xres, info->var.yres, 467 info->var.xres, info->var.yres,
@@ -511,6 +572,8 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
511 572
512 /* Disable all interrupts */ 573 /* Disable all interrupts */
513 lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL); 574 lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
575 /* Enable FIFO & DMA errors */
576 lcdc_writel(sinfo, ATMEL_LCDC_IER, ATMEL_LCDC_UFLWI | ATMEL_LCDC_OWRI | ATMEL_LCDC_MERI);
514 577
515 /* ...wait for DMA engine to become idle... */ 578 /* ...wait for DMA engine to become idle... */
516 while (lcdc_readl(sinfo, ATMEL_LCDC_DMACON) & ATMEL_LCDC_DMABUSY) 579 while (lcdc_readl(sinfo, ATMEL_LCDC_DMACON) & ATMEL_LCDC_DMABUSY)
@@ -645,10 +708,26 @@ static irqreturn_t atmel_lcdfb_interrupt(int irq, void *dev_id)
645 u32 status; 708 u32 status;
646 709
647 status = lcdc_readl(sinfo, ATMEL_LCDC_ISR); 710 status = lcdc_readl(sinfo, ATMEL_LCDC_ISR);
648 lcdc_writel(sinfo, ATMEL_LCDC_IDR, status); 711 if (status & ATMEL_LCDC_UFLWI) {
712 dev_warn(info->device, "FIFO underflow %#x\n", status);
713 /* reset DMA and FIFO to avoid screen shifting */
714 schedule_work(&sinfo->task);
715 }
716 lcdc_writel(sinfo, ATMEL_LCDC_ICR, status);
649 return IRQ_HANDLED; 717 return IRQ_HANDLED;
650} 718}
651 719
720/*
721 * LCD controller task (to reset the LCD)
722 */
723static void atmel_lcdfb_task(struct work_struct *work)
724{
725 struct atmel_lcdfb_info *sinfo =
726 container_of(work, struct atmel_lcdfb_info, task);
727
728 atmel_lcdfb_reset(sinfo);
729}
730
652static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo) 731static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo)
653{ 732{
654 struct fb_info *info = sinfo->info; 733 struct fb_info *info = sinfo->info;
@@ -691,6 +770,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
691 struct fb_info *info; 770 struct fb_info *info;
692 struct atmel_lcdfb_info *sinfo; 771 struct atmel_lcdfb_info *sinfo;
693 struct atmel_lcdfb_info *pdata_sinfo; 772 struct atmel_lcdfb_info *pdata_sinfo;
773 struct fb_videomode fbmode;
694 struct resource *regs = NULL; 774 struct resource *regs = NULL;
695 struct resource *map = NULL; 775 struct resource *map = NULL;
696 int ret; 776 int ret;
@@ -824,6 +904,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
824 goto unmap_mmio; 904 goto unmap_mmio;
825 } 905 }
826 906
907 /* Some operations on the LCDC might sleep and
908 * require a preemptible task context */
909 INIT_WORK(&sinfo->task, atmel_lcdfb_task);
910
827 ret = atmel_lcdfb_init_fbinfo(sinfo); 911 ret = atmel_lcdfb_init_fbinfo(sinfo);
828 if (ret < 0) { 912 if (ret < 0) {
829 dev_err(dev, "init fbinfo failed: %d\n", ret); 913 dev_err(dev, "init fbinfo failed: %d\n", ret);
@@ -853,6 +937,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
853 goto free_cmap; 937 goto free_cmap;
854 } 938 }
855 939
940 /* add selected videomode to modelist */
941 fb_var_to_videomode(&fbmode, &info->var);
942 fb_add_videomode(&fbmode, &info->modelist);
943
856 /* Power up the LCDC screen */ 944 /* Power up the LCDC screen */
857 if (sinfo->atmel_lcdfb_power_control) 945 if (sinfo->atmel_lcdfb_power_control)
858 sinfo->atmel_lcdfb_power_control(1); 946 sinfo->atmel_lcdfb_power_control(1);
@@ -866,6 +954,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
866free_cmap: 954free_cmap:
867 fb_dealloc_cmap(&info->cmap); 955 fb_dealloc_cmap(&info->cmap);
868unregister_irqs: 956unregister_irqs:
957 cancel_work_sync(&sinfo->task);
869 free_irq(sinfo->irq_base, info); 958 free_irq(sinfo->irq_base, info);
870unmap_mmio: 959unmap_mmio:
871 exit_backlight(sinfo); 960 exit_backlight(sinfo);
@@ -903,6 +992,7 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
903 if (!sinfo) 992 if (!sinfo)
904 return 0; 993 return 0;
905 994
995 cancel_work_sync(&sinfo->task);
906 exit_backlight(sinfo); 996 exit_backlight(sinfo);
907 if (sinfo->atmel_lcdfb_power_control) 997 if (sinfo->atmel_lcdfb_power_control)
908 sinfo->atmel_lcdfb_power_control(0); 998 sinfo->atmel_lcdfb_power_control(0);
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 07b6addbb3c1..243ea4ab20c8 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1339,10 +1339,8 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
1339 if (vclk * 12 < c.ppll_min) 1339 if (vclk * 12 < c.ppll_min)
1340 vclk = c.ppll_min/12; 1340 vclk = c.ppll_min/12;
1341 1341
1342 pll->post_divider = -1;
1343
1344 /* now, find an acceptable divider */ 1342 /* now, find an acceptable divider */
1345 for (i = 0; i < sizeof(post_dividers); i++) { 1343 for (i = 0; i < ARRAY_SIZE(post_dividers); i++) {
1346 output_freq = post_dividers[i] * vclk; 1344 output_freq = post_dividers[i] * vclk;
1347 if (output_freq >= c.ppll_min && output_freq <= c.ppll_max) { 1345 if (output_freq >= c.ppll_min && output_freq <= c.ppll_max) {
1348 pll->post_divider = post_dividers[i]; 1346 pll->post_divider = post_dividers[i];
@@ -1350,7 +1348,7 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
1350 } 1348 }
1351 } 1349 }
1352 1350
1353 if (pll->post_divider < 0) 1351 if (i == ARRAY_SIZE(post_dividers))
1354 return -EINVAL; 1352 return -EINVAL;
1355 1353
1356 /* calculate feedback divider */ 1354 /* calculate feedback divider */
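
The hunk above removes two fragile constructs at once: the loop bound sizeof(post_dividers), which counts bytes rather than elements, and the post_divider = -1 sentinel, which the loop index now replaces. A small self-contained illustration (the divider table below is made up, not the driver's):

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Hypothetical divider table, for illustration only. */
static const int post_dividers[] = { 1, 2, 4, 8, 3, 6, 12 };

static int find_post_divider(int wanted)
{
	size_t i;

	/* "i < sizeof(post_dividers)" would compare against the byte count
	 * (28 here), not the 7 entries; ARRAY_SIZE() is always the element
	 * count, whatever the element type. */
	for (i = 0; i < ARRAY_SIZE(post_dividers); i++)
		if (post_dividers[i] == wanted)
			break;

	/* "not found" now falls out of the loop index itself, replacing
	 * the old post_divider = -1 sentinel. */
	if (i == ARRAY_SIZE(post_dividers))
		return -1;

	return post_dividers[i];
}

int main(void)
{
	printf("%d %d\n", find_post_divider(4), find_post_divider(5));	/* 4 -1 */
	return 0;
}
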
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index bd4ac0bafecb..620ba8120368 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -424,7 +424,6 @@ static struct {
424#endif /* CONFIG_FB_ATY_CT */ 424#endif /* CONFIG_FB_ATY_CT */
425}; 425};
426 426
427/* can not fail */
428static int __devinit correct_chipset(struct atyfb_par *par) 427static int __devinit correct_chipset(struct atyfb_par *par)
429{ 428{
430 u8 rev; 429 u8 rev;
@@ -437,6 +436,9 @@ static int __devinit correct_chipset(struct atyfb_par *par)
437 if (par->pci_id == aty_chips[i].pci_id) 436 if (par->pci_id == aty_chips[i].pci_id)
438 break; 437 break;
439 438
439 if (i < 0)
440 return -ENODEV;
441
440 name = aty_chips[i].name; 442 name = aty_chips[i].name;
441 par->pll_limits.pll_max = aty_chips[i].pll; 443 par->pll_limits.pll_max = aty_chips[i].pll;
442 par->pll_limits.mclk = aty_chips[i].mclk; 444 par->pll_limits.mclk = aty_chips[i].mclk;
@@ -2229,6 +2231,7 @@ static int __devinit aty_init(struct fb_info *info)
2229 const char *ramname = NULL, *xtal; 2231 const char *ramname = NULL, *xtal;
2230 int gtb_memsize, has_var = 0; 2232 int gtb_memsize, has_var = 0;
2231 struct fb_var_screeninfo var; 2233 struct fb_var_screeninfo var;
2234 int ret;
2232 2235
2233 init_waitqueue_head(&par->vblank.wait); 2236 init_waitqueue_head(&par->vblank.wait);
2234 spin_lock_init(&par->int_lock); 2237 spin_lock_init(&par->int_lock);
@@ -2610,7 +2613,8 @@ static int __devinit aty_init(struct fb_info *info)
2610 var.yres_virtual = var.yres; 2613 var.yres_virtual = var.yres;
2611 } 2614 }
2612 2615
2613 if (atyfb_check_var(&var, info)) { 2616 ret = atyfb_check_var(&var, info);
2617 if (ret) {
2614 PRINTKE("can't set default video mode\n"); 2618 PRINTKE("can't set default video mode\n");
2615 goto aty_init_exit; 2619 goto aty_init_exit;
2616 } 2620 }
@@ -2621,10 +2625,12 @@ static int __devinit aty_init(struct fb_info *info)
2621#endif /* CONFIG_FB_ATY_CT */ 2625#endif /* CONFIG_FB_ATY_CT */
2622 info->var = var; 2626 info->var = var;
2623 2627
2624 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 2628 ret = fb_alloc_cmap(&info->cmap, 256, 0);
2629 if (ret < 0)
2625 goto aty_init_exit; 2630 goto aty_init_exit;
2626 2631
2627 if (register_framebuffer(info) < 0) { 2632 ret = register_framebuffer(info);
2633 if (ret < 0) {
2628 fb_dealloc_cmap(&info->cmap); 2634 fb_dealloc_cmap(&info->cmap);
2629 goto aty_init_exit; 2635 goto aty_init_exit;
2630 } 2636 }
@@ -2650,7 +2656,7 @@ aty_init_exit:
2650 par->mtrr_aper = -1; 2656 par->mtrr_aper = -1;
2651 } 2657 }
2652#endif 2658#endif
2653 return -1; 2659 return ret;
2654} 2660}
2655 2661
2656static void aty_resume_chip(struct fb_info *info) 2662static void aty_resume_chip(struct fb_info *info)
@@ -2709,8 +2715,7 @@ static int atyfb_blank(int blank, struct fb_info *info)
2709 if (par->lock_blank || par->asleep) 2715 if (par->lock_blank || par->asleep)
2710 return 0; 2716 return 0;
2711 2717
2712#ifdef CONFIG_FB_ATY_BACKLIGHT 2718#ifdef CONFIG_FB_ATY_GENERIC_LCD
2713#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
2714 if (par->lcd_table && blank > FB_BLANK_NORMAL && 2719 if (par->lcd_table && blank > FB_BLANK_NORMAL &&
2715 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2720 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
2716 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par); 2721 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
@@ -2739,8 +2744,7 @@ static int atyfb_blank(int blank, struct fb_info *info)
2739 } 2744 }
2740 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par); 2745 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
2741 2746
2742#ifdef CONFIG_FB_ATY_BACKLIGHT 2747#ifdef CONFIG_FB_ATY_GENERIC_LCD
2743#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
2744 if (par->lcd_table && blank <= FB_BLANK_NORMAL && 2748 if (par->lcd_table && blank <= FB_BLANK_NORMAL &&
2745 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2749 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
2746 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par); 2750 u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
@@ -3331,7 +3335,7 @@ static int __devinit init_from_bios(struct atyfb_par *par)
3331 PRINTKE("no BIOS frequency table found, use parameters\n"); 3335 PRINTKE("no BIOS frequency table found, use parameters\n");
3332 ret = -ENXIO; 3336 ret = -ENXIO;
3333 } 3337 }
3334 iounmap((void* __iomem )bios_base); 3338 iounmap((void __iomem *)bios_base);
3335 3339
3336 return ret; 3340 return ret;
3337} 3341}
@@ -3418,14 +3422,7 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3418 struct fb_info *info; 3422 struct fb_info *info;
3419 struct resource *rp; 3423 struct resource *rp;
3420 struct atyfb_par *par; 3424 struct atyfb_par *par;
3421 int i, rc = -ENOMEM; 3425 int rc = -ENOMEM;
3422
3423 for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
3424 if (pdev->device == aty_chips[i].pci_id)
3425 break;
3426
3427 if (i < 0)
3428 return -ENODEV;
3429 3426
3430 /* Enable device in PCI config */ 3427 /* Enable device in PCI config */
3431 if (pci_enable_device(pdev)) { 3428 if (pci_enable_device(pdev)) {
@@ -3456,7 +3453,7 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3456 par = info->par; 3453 par = info->par;
3457 info->fix = atyfb_fix; 3454 info->fix = atyfb_fix;
3458 info->device = &pdev->dev; 3455 info->device = &pdev->dev;
3459 par->pci_id = aty_chips[i].pci_id; 3456 par->pci_id = pdev->device;
3460 par->res_start = res_start; 3457 par->res_start = res_start;
3461 par->res_size = res_size; 3458 par->res_size = res_size;
3462 par->irq = pdev->irq; 3459 par->irq = pdev->irq;
@@ -3474,7 +3471,8 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3474 pci_set_drvdata(pdev, info); 3471 pci_set_drvdata(pdev, info);
3475 3472
3476 /* Init chip & register framebuffer */ 3473 /* Init chip & register framebuffer */
3477 if (aty_init(info)) 3474 rc = aty_init(info);
3475 if (rc)
3478 goto err_release_io; 3476 goto err_release_io;
3479 3477
3480#ifdef __sparc__ 3478#ifdef __sparc__
@@ -3655,18 +3653,62 @@ static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
3655 atyfb_remove(info); 3653 atyfb_remove(info);
3656} 3654}
3657 3655
3658/*
3659 * This driver uses its own matching table. That will be more difficult
3660 * to fix, so for now, we just match against any ATI ID and let the
3661 * probe() function find out what's up. That also mean we don't have
3662 * a module ID table though.
3663 */
3664static struct pci_device_id atyfb_pci_tbl[] = { 3656static struct pci_device_id atyfb_pci_tbl[] = {
3665 { PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 3657#ifdef CONFIG_FB_ATY_GX
3666 PCI_BASE_CLASS_DISPLAY << 16, 0xff0000, 0 }, 3658 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GX) },
3667 { 0, } 3659 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CX) },
3660#endif /* CONFIG_FB_ATY_GX */
3661
3662#ifdef CONFIG_FB_ATY_CT
3663 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CT) },
3664 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64ET) },
3665
3666 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LT) },
3667
3668 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VT) },
3669 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GT) },
3670
3671 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VU) },
3672 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GU) },
3673
3674 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LG) },
3675
3676 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VV) },
3677
3678 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GV) },
3679 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GW) },
3680 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GY) },
3681 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GZ) },
3682
3683 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GB) },
3684 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GD) },
3685 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GI) },
3686 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GP) },
3687 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GQ) },
3688
3689 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LB) },
3690 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LD) },
3691 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LI) },
3692 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LP) },
3693 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LQ) },
3694
3695 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GM) },
3696 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GN) },
3697 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GO) },
3698 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GL) },
3699 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GR) },
3700 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GS) },
3701
3702 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LM) },
3703 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LN) },
3704 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LR) },
3705 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LS) },
3706#endif /* CONFIG_FB_ATY_CT */
3707 { }
3668}; 3708};
3669 3709
3710MODULE_DEVICE_TABLE(pci, atyfb_pci_tbl);
3711
3670static struct pci_driver atyfb_driver = { 3712static struct pci_driver atyfb_driver = {
3671 .name = "atyfb", 3713 .name = "atyfb",
3672 .id_table = atyfb_pci_tbl, 3714 .id_table = atyfb_pci_tbl,
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 400e9264e456..652273e9f5f9 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2098,15 +2098,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
2098 2098
2099static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u8 *edid) 2099static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u8 *edid)
2100{ 2100{
2101 if (off > EDID_LENGTH) 2101 return memory_read_from_buffer(buf, count, &off, edid, EDID_LENGTH);
2102 return 0;
2103
2104 if (off + count > EDID_LENGTH)
2105 count = EDID_LENGTH - off;
2106
2107 memcpy(buf, edid + off, count);
2108
2109 return count;
2110} 2102}
2111 2103
2112 2104
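
The open-coded bounds handling removed above is what the memory_read_from_buffer() helper provides. A rough userspace sketch of its behaviour, reconstructed from the removed code (unlike that code, the helper also advances the caller's offset through its pointer argument):

#include <errno.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_from_buffer_sketch(void *to, size_t count, off_t *ppos,
				       const void *from, size_t available)
{
	off_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if ((size_t)pos >= available || !count)
		return 0;			/* nothing left to copy */
	if (count > available - pos)
		count = available - pos;	/* clamp to the buffer end */

	memcpy(to, (const char *)from + pos, count);
	*ppos = pos + count;

	return count;
}

For the EDID attributes here, available is EDID_LENGTH, so reads beyond the 128-byte EDID block return 0 much as before (the removed code used > rather than >= when comparing the offset).
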
@@ -2161,6 +2153,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
2161 struct radeonfb_info *rinfo; 2153 struct radeonfb_info *rinfo;
2162 int ret; 2154 int ret;
2163 unsigned char c1, c2; 2155 unsigned char c1, c2;
2156 int err = 0;
2164 2157
2165 pr_debug("radeonfb_pci_register BEGIN\n"); 2158 pr_debug("radeonfb_pci_register BEGIN\n");
2166 2159
@@ -2340,9 +2333,14 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
2340 2333
2341 /* Register some sysfs stuff (should be done better) */ 2334 /* Register some sysfs stuff (should be done better) */
2342 if (rinfo->mon1_EDID) 2335 if (rinfo->mon1_EDID)
2343 sysfs_create_bin_file(&rinfo->pdev->dev.kobj, &edid1_attr); 2336 err |= sysfs_create_bin_file(&rinfo->pdev->dev.kobj,
2337 &edid1_attr);
2344 if (rinfo->mon2_EDID) 2338 if (rinfo->mon2_EDID)
2345 sysfs_create_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr); 2339 err |= sysfs_create_bin_file(&rinfo->pdev->dev.kobj,
2340 &edid2_attr);
2341 if (err)
2342 pr_warning("%s() Creating sysfs files failed, continuing\n",
2343 __func__);
2346 2344
2347 /* save current mode regs before we switch into the new one 2345 /* save current mode regs before we switch into the new one
2348 * so we can restore this upon __exit 2346 * so we can restore this upon __exit
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 62547bd2ea49..452b770d8cc9 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -36,6 +36,30 @@ config LCD_LTV350QV
36 36
37 The LTV350QV panel is present on all ATSTK1000 boards. 37 The LTV350QV panel is present on all ATSTK1000 boards.
38 38
39config LCD_ILI9320
40 tristate
41 depends on LCD_CLASS_DEVICE && BACKLIGHT_LCD_SUPPORT
42 default n
43 help
44 If you have a panel based on the ILI9320 controller chip
45 then say y to include a power driver for it.
46
47config LCD_VGG2432A4
48 tristate "VGG2432A4 LCM device support"
49 depends on BACKLIGHT_LCD_SUPPORT && LCD_CLASS_DEVICE && SPI_MASTER
50 select LCD_ILI9320
51 default n
52 help
53 If you have a VGG2432A4 panel based on the ILI9320 controller chip
54 then say y to include a power driver for it.
55
56config LCD_PLATFORM
57 tristate "Platform LCD controls"
58 depends on LCD_CLASS_DEVICE
59 help
60 This driver provides a platform-device registered LCD power
61 control interface.
62
39# 63#
40# Backlight 64# Backlight
41# 65#
@@ -63,6 +87,18 @@ config BACKLIGHT_ATMEL_LCDC
63 If in doubt, it's safe to enable this option; it doesn't kick 87 If in doubt, it's safe to enable this option; it doesn't kick
64 in unless the board's description says it's wired that way. 88 in unless the board's description says it's wired that way.
65 89
90config BACKLIGHT_ATMEL_PWM
91 tristate "Atmel PWM backlight control"
92 depends on BACKLIGHT_CLASS_DEVICE && ATMEL_PWM
93 default n
94 help
95 Say Y here if you want to use the PWM peripheral in Atmel AT91 and
96 AVR32 devices. This driver will need additional platform data to know
97 which PWM instance to use and how to configure it.
98
99 To compile this driver as a module, choose M here: the module will be
100 called atmel-pwm-bl.
101
66config BACKLIGHT_CORGI 102config BACKLIGHT_CORGI
67 tristate "Generic (aka Sharp Corgi) Backlight Driver" 103 tristate "Generic (aka Sharp Corgi) Backlight Driver"
68 depends on BACKLIGHT_CLASS_DEVICE 104 depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index c7c4d95fdc1c..b405aace803f 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -1,9 +1,13 @@
1# Backlight & LCD drivers 1# Backlight & LCD drivers
2 2
3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o 3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o 4obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
5obj-$(CONFIG_LCD_ILI9320) += ili9320.o
6obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
7obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
5 8
6obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 9obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
10obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
7obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o 11obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o
8obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o 12obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
9obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o 13obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
new file mode 100644
index 000000000000..505c0823a105
--- /dev/null
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright (C) 2008 Atmel Corporation
3 *
4 * Backlight driver using Atmel PWM peripheral.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 */
10#include <linux/init.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/fb.h>
15#include <linux/clk.h>
16#include <linux/gpio.h>
17#include <linux/backlight.h>
18#include <linux/atmel_pwm.h>
19#include <linux/atmel-pwm-bl.h>
20
21struct atmel_pwm_bl {
22 const struct atmel_pwm_bl_platform_data *pdata;
23 struct backlight_device *bldev;
24 struct platform_device *pdev;
25 struct pwm_channel pwmc;
26 int gpio_on;
27};
28
29static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
30{
31 struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
32 int intensity = bd->props.brightness;
33 int pwm_duty;
34
35 if (bd->props.power != FB_BLANK_UNBLANK)
36 intensity = 0;
37 if (bd->props.fb_blank != FB_BLANK_UNBLANK)
38 intensity = 0;
39
40 if (pwmbl->pdata->pwm_active_low)
41 pwm_duty = pwmbl->pdata->pwm_duty_min + intensity;
42 else
43 pwm_duty = pwmbl->pdata->pwm_duty_max - intensity;
44
45 if (pwm_duty > pwmbl->pdata->pwm_duty_max)
46 pwm_duty = pwmbl->pdata->pwm_duty_max;
47 if (pwm_duty < pwmbl->pdata->pwm_duty_min)
48 pwm_duty = pwmbl->pdata->pwm_duty_min;
49
50 if (!intensity) {
51 if (pwmbl->gpio_on != -1) {
52 gpio_set_value(pwmbl->gpio_on,
53 0 ^ pwmbl->pdata->on_active_low);
54 }
55 pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
56 pwm_channel_disable(&pwmbl->pwmc);
57 } else {
58 pwm_channel_enable(&pwmbl->pwmc);
59 pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
60 if (pwmbl->gpio_on != -1) {
61 gpio_set_value(pwmbl->gpio_on,
62 1 ^ pwmbl->pdata->on_active_low);
63 }
64 }
65
66 return 0;
67}
68
69static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
70{
71 struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
72 u8 intensity;
73
74 if (pwmbl->pdata->pwm_active_low) {
75 intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
76 pwmbl->pdata->pwm_duty_min;
77 } else {
78 intensity = pwmbl->pdata->pwm_duty_max -
79 pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
80 }
81
82 return intensity;
83}
84
85static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
86{
87 unsigned long pwm_rate = pwmbl->pwmc.mck;
88 unsigned long prescale = DIV_ROUND_UP(pwm_rate,
89 (pwmbl->pdata->pwm_frequency *
90 pwmbl->pdata->pwm_compare_max)) - 1;
91
92 /*
 93 * Prescale must be a power of two and at most 0xf because of a
94 * hardware limit. PWM speed will be:
95 * PWM module clock speed / (2 ^ prescale).
96 */
97 prescale = fls(prescale);
98 if (prescale > 0xf)
99 prescale = 0xf;
100
101 pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale);
102 pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY,
103 pwmbl->pdata->pwm_duty_min +
104 pwmbl->bldev->props.brightness);
105 pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
106 pwmbl->pdata->pwm_compare_max);
107
108 dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver "
109 "(%lu Hz)\n", pwmbl->pwmc.mck /
110 pwmbl->pdata->pwm_compare_max /
111 (1 << prescale));
112
113 return pwm_channel_enable(&pwmbl->pwmc);
114}
115
116static struct backlight_ops atmel_pwm_bl_ops = {
117 .get_brightness = atmel_pwm_bl_get_intensity,
118 .update_status = atmel_pwm_bl_set_intensity,
119};
120
121static int atmel_pwm_bl_probe(struct platform_device *pdev)
122{
123 const struct atmel_pwm_bl_platform_data *pdata;
124 struct backlight_device *bldev;
125 struct atmel_pwm_bl *pwmbl;
126 int retval;
127
128 pwmbl = kzalloc(sizeof(struct atmel_pwm_bl), GFP_KERNEL);
129 if (!pwmbl)
130 return -ENOMEM;
131
132 pwmbl->pdev = pdev;
133
134 pdata = pdev->dev.platform_data;
135 if (!pdata) {
136 retval = -ENODEV;
137 goto err_free_mem;
138 }
139
140 if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
141 pdata->pwm_duty_min > pdata->pwm_duty_max ||
142 pdata->pwm_frequency == 0) {
143 retval = -EINVAL;
144 goto err_free_mem;
145 }
146
147 pwmbl->pdata = pdata;
148 pwmbl->gpio_on = pdata->gpio_on;
149
150 retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
151 if (retval)
152 goto err_free_mem;
153
154 if (pwmbl->gpio_on != -1) {
155 retval = gpio_request(pwmbl->gpio_on, "gpio_atmel_pwm_bl");
156 if (retval) {
157 pwmbl->gpio_on = -1;
158 goto err_free_pwm;
159 }
160
 161 /* Turn the display off by default. */
162 retval = gpio_direction_output(pwmbl->gpio_on,
163 0 ^ pdata->on_active_low);
164 if (retval)
165 goto err_free_gpio;
166 }
167
168 bldev = backlight_device_register("atmel-pwm-bl",
169 &pdev->dev, pwmbl, &atmel_pwm_bl_ops);
170 if (IS_ERR(bldev)) {
171 retval = PTR_ERR(bldev);
172 goto err_free_gpio;
173 }
174
175 pwmbl->bldev = bldev;
176
177 platform_set_drvdata(pdev, pwmbl);
178
 179 /* Power up the backlight by default at middle intensity. */
180 bldev->props.power = FB_BLANK_UNBLANK;
181 bldev->props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
182 bldev->props.brightness = bldev->props.max_brightness / 2;
183
184 retval = atmel_pwm_bl_init_pwm(pwmbl);
185 if (retval)
186 goto err_free_bl_dev;
187
188 atmel_pwm_bl_set_intensity(bldev);
189
190 return 0;
191
192err_free_bl_dev:
193 platform_set_drvdata(pdev, NULL);
194 backlight_device_unregister(bldev);
195err_free_gpio:
196 if (pwmbl->gpio_on != -1)
197 gpio_free(pwmbl->gpio_on);
198err_free_pwm:
199 pwm_channel_free(&pwmbl->pwmc);
200err_free_mem:
201 kfree(pwmbl);
202 return retval;
203}
204
205static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
206{
207 struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
208
209 if (pwmbl->gpio_on != -1) {
210 gpio_set_value(pwmbl->gpio_on, 0);
211 gpio_free(pwmbl->gpio_on);
212 }
213 pwm_channel_disable(&pwmbl->pwmc);
214 pwm_channel_free(&pwmbl->pwmc);
215 backlight_device_unregister(pwmbl->bldev);
216 platform_set_drvdata(pdev, NULL);
217 kfree(pwmbl);
218
219 return 0;
220}
221
222static struct platform_driver atmel_pwm_bl_driver = {
223 .driver = {
224 .name = "atmel-pwm-bl",
225 },
226 /* REVISIT add suspend() and resume() */
227 .remove = __exit_p(atmel_pwm_bl_remove),
228};
229
230static int __init atmel_pwm_bl_init(void)
231{
232 return platform_driver_probe(&atmel_pwm_bl_driver, atmel_pwm_bl_probe);
233}
234module_init(atmel_pwm_bl_init);
235
236static void __exit atmel_pwm_bl_exit(void)
237{
238 platform_driver_unregister(&atmel_pwm_bl_driver);
239}
240module_exit(atmel_pwm_bl_exit);
241
242MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
243MODULE_DESCRIPTION("Atmel PWM backlight driver");
244MODULE_LICENSE("GPL");
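
The prescale computation in atmel_pwm_bl_init_pwm() above rounds the clock divisor up to a power of two and caps the exponent at 0xf, so the PWM frequency ends up at or slightly below the requested rate. A worked example with made-up numbers (the real values come from pwmbl->pwmc.mck and the board's platform data):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Userspace stand-in for the kernel's fls(): index of the highest set bit. */
static unsigned int fls_sketch(unsigned long x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long mck = 66000000;		/* assumed module clock, Hz */
	unsigned long pwm_frequency = 200;	/* assumed target PWM rate, Hz */
	unsigned long pwm_compare_max = 256;	/* assumed counter period */
	unsigned long prescale;

	prescale = DIV_ROUND_UP(mck, pwm_frequency * pwm_compare_max) - 1;
	prescale = fls_sketch(prescale);	/* round divisor up to 2^n */
	if (prescale > 0xf)
		prescale = 0xf;

	/* prints prescale=11, ~125 Hz for these numbers */
	printf("prescale=%lu, PWM freq ~%lu Hz\n",
	       prescale, mck / pwm_compare_max / (1UL << prescale));
	return 0;
}
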
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
new file mode 100644
index 000000000000..ba89b41b639c
--- /dev/null
+++ b/drivers/video/backlight/ili9320.c
@@ -0,0 +1,330 @@
1/* drivers/video/backlight/ili9320.c
2 *
3 * ILI9320 LCD controller driver core.
4 *
5 * Copyright 2007 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 * Ben Dooks <ben@simtec.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/fb.h>
17#include <linux/init.h>
18#include <linux/lcd.h>
19#include <linux/module.h>
20
21#include <linux/spi/spi.h>
22
23#include <video/ili9320.h>
24
25#include "ili9320.h"
26
27
28static inline int ili9320_write_spi(struct ili9320 *ili,
29 unsigned int reg,
30 unsigned int value)
31{
32 struct ili9320_spi *spi = &ili->access.spi;
33 unsigned char *addr = spi->buffer_addr;
34 unsigned char *data = spi->buffer_data;
35
 36 /* spi message consists of:
37 * first byte: ID and operation
38 */
39
40 addr[0] = spi->id | ILI9320_SPI_INDEX | ILI9320_SPI_WRITE;
41 addr[1] = reg >> 8;
42 addr[2] = reg;
43
44 /* second message is the data to transfer */
45
46 data[0] = spi->id | ILI9320_SPI_DATA | ILI9320_SPI_WRITE;
47 data[1] = value >> 8;
48 data[2] = value;
49
50 return spi_sync(spi->dev, &spi->message);
51}
52
53int ili9320_write(struct ili9320 *ili, unsigned int reg, unsigned int value)
54{
55 dev_dbg(ili->dev, "write: reg=%02x, val=%04x\n", reg, value);
56 return ili->write(ili, reg, value);
57}
58
59EXPORT_SYMBOL_GPL(ili9320_write);
60
61int ili9320_write_regs(struct ili9320 *ili,
62 struct ili9320_reg *values,
63 int nr_values)
64{
65 int index;
66 int ret;
67
68 for (index = 0; index < nr_values; index++, values++) {
69 ret = ili9320_write(ili, values->address, values->value);
70 if (ret != 0)
71 return ret;
72 }
73
74 return 0;
75}
76
77EXPORT_SYMBOL_GPL(ili9320_write_regs);
78
79static void ili9320_reset(struct ili9320 *lcd)
80{
81 struct ili9320_platdata *cfg = lcd->platdata;
82
83 cfg->reset(1);
84 mdelay(50);
85
86 cfg->reset(0);
87 mdelay(50);
88
89 cfg->reset(1);
90 mdelay(100);
91}
92
93static inline int ili9320_init_chip(struct ili9320 *lcd)
94{
95 int ret;
96
97 ili9320_reset(lcd);
98
99 ret = lcd->client->init(lcd, lcd->platdata);
100 if (ret != 0) {
101 dev_err(lcd->dev, "failed to initialise display\n");
102 return ret;
103 }
104
105 lcd->initialised = 1;
106 return 0;
107}
108
109static inline int ili9320_power_on(struct ili9320 *lcd)
110{
111 if (!lcd->initialised)
112 ili9320_init_chip(lcd);
113
114 lcd->display1 |= (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_BASEE);
115 ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);
116
117 return 0;
118}
119
120static inline int ili9320_power_off(struct ili9320 *lcd)
121{
122 lcd->display1 &= ~(ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_BASEE);
123 ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);
124
125 return 0;
126}
127
128#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
129
130static int ili9320_power(struct ili9320 *lcd, int power)
131{
132 int ret = 0;
133
134 dev_dbg(lcd->dev, "power %d => %d\n", lcd->power, power);
135
136 if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
137 ret = ili9320_power_on(lcd);
138 else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
139 ret = ili9320_power_off(lcd);
140
141 if (ret == 0)
142 lcd->power = power;
143 else
144 dev_warn(lcd->dev, "failed to set power mode %d\n", power);
145
146 return ret;
147}
148
149static inline struct ili9320 *to_our_lcd(struct lcd_device *lcd)
150{
151 return lcd_get_data(lcd);
152}
153
154static int ili9320_set_power(struct lcd_device *ld, int power)
155{
156 struct ili9320 *lcd = to_our_lcd(ld);
157
158 return ili9320_power(lcd, power);
159}
160
161static int ili9320_get_power(struct lcd_device *ld)
162{
163 struct ili9320 *lcd = to_our_lcd(ld);
164
165 return lcd->power;
166}
167
168static struct lcd_ops ili9320_ops = {
169 .get_power = ili9320_get_power,
170 .set_power = ili9320_set_power,
171};
172
173static void __devinit ili9320_setup_spi(struct ili9320 *ili,
174 struct spi_device *dev)
175{
176 struct ili9320_spi *spi = &ili->access.spi;
177
178 ili->write = ili9320_write_spi;
179 spi->dev = dev;
180
 181 /* Fill in the two transfers used to send the data: first the
 182 * register address, then the data itself. The datasheet
183 * says they should be done as two distinct cycles of the SPI CS line.
184 */
185
186 spi->xfer[0].tx_buf = spi->buffer_addr;
187 spi->xfer[1].tx_buf = spi->buffer_data;
188 spi->xfer[0].len = 3;
189 spi->xfer[1].len = 3;
190 spi->xfer[0].bits_per_word = 8;
191 spi->xfer[1].bits_per_word = 8;
192 spi->xfer[0].cs_change = 1;
193
194 spi_message_init(&spi->message);
195 spi_message_add_tail(&spi->xfer[0], &spi->message);
196 spi_message_add_tail(&spi->xfer[1], &spi->message);
197}
198
199int __devinit ili9320_probe_spi(struct spi_device *spi,
200 struct ili9320_client *client)
201{
202 struct ili9320_platdata *cfg = spi->dev.platform_data;
203 struct device *dev = &spi->dev;
204 struct ili9320 *ili;
205 struct lcd_device *lcd;
206 int ret = 0;
207
 208 /* verify we were given some information */
209
210 if (cfg == NULL) {
211 dev_err(dev, "no platform data supplied\n");
212 return -EINVAL;
213 }
214
215 if (cfg->hsize <= 0 || cfg->vsize <= 0 || cfg->reset == NULL) {
216 dev_err(dev, "invalid platform data supplied\n");
217 return -EINVAL;
218 }
219
 220 /* allocate and initialise our state */
221
222 ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL);
223 if (ili == NULL) {
224 dev_err(dev, "no memory for device\n");
225 return -ENOMEM;
226 }
227
228 ili->access.spi.id = ILI9320_SPI_IDCODE | ILI9320_SPI_ID(1);
229
230 ili->dev = dev;
231 ili->client = client;
232 ili->power = FB_BLANK_POWERDOWN;
233 ili->platdata = cfg;
234
235 dev_set_drvdata(&spi->dev, ili);
236
237 ili9320_setup_spi(ili, spi);
238
239 lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
240 if (IS_ERR(lcd)) {
241 dev_err(dev, "failed to register lcd device\n");
242 ret = PTR_ERR(lcd);
243 goto err_free;
244 }
245
246 ili->lcd = lcd;
247
248 dev_info(dev, "initialising %s\n", client->name);
249
250 ret = ili9320_power(ili, FB_BLANK_UNBLANK);
251 if (ret != 0) {
252 dev_err(dev, "failed to set lcd power state\n");
253 goto err_unregister;
254 }
255
256 return 0;
257
258 err_unregister:
259 lcd_device_unregister(lcd);
260
261 err_free:
262 kfree(ili);
263
264 return ret;
265}
266
267EXPORT_SYMBOL_GPL(ili9320_probe_spi);
268
269int __devexit ili9320_remove(struct ili9320 *ili)
270{
271 ili9320_power(ili, FB_BLANK_POWERDOWN);
272
273 lcd_device_unregister(ili->lcd);
274 kfree(ili);
275
276 return 0;
277}
278
279EXPORT_SYMBOL_GPL(ili9320_remove);
280
281#ifdef CONFIG_PM
282int ili9320_suspend(struct ili9320 *lcd, pm_message_t state)
283{
284 int ret;
285
286 dev_dbg(lcd->dev, "%s: event %d\n", __func__, state.event);
287
288 if (state.event == PM_EVENT_SUSPEND) {
289 ret = ili9320_power(lcd, FB_BLANK_POWERDOWN);
290
291 if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) {
292 ili9320_write(lcd, ILI9320_POWER1, lcd->power1 |
293 ILI9320_POWER1_SLP |
294 ILI9320_POWER1_DSTB);
295 lcd->initialised = 0;
296 }
297
298 return ret;
299 }
300
301 return 0;
302}
303
304EXPORT_SYMBOL_GPL(ili9320_suspend);
305
306int ili9320_resume(struct ili9320 *lcd)
307{
308 dev_info(lcd->dev, "resuming from power state %d\n", lcd->power);
309
310 if (lcd->platdata->suspend == ILI9320_SUSPEND_DEEP) {
311 ili9320_write(lcd, ILI9320_POWER1, 0x00);
312 }
313
314 return ili9320_power(lcd, FB_BLANK_UNBLANK);
315}
316
317EXPORT_SYMBOL_GPL(ili9320_resume);
318#endif
319
320/* Power down all displays on reboot, poweroff or halt */
321void ili9320_shutdown(struct ili9320 *lcd)
322{
323 ili9320_power(lcd, FB_BLANK_POWERDOWN);
324}
325
326EXPORT_SYMBOL_GPL(ili9320_shutdown);
327
328MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
329MODULE_DESCRIPTION("ILI9320 LCD Driver");
330MODULE_LICENSE("GPL v2");
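
ili9320_write_spi() above splits every register write into two three-byte SPI cycles: a start byte plus the 16-bit register index, then a start byte plus the 16-bit value, with a chip-select toggle in between. A stand-alone sketch of that byte layout; the start-byte bit values below are placeholders, the real ones come from the ILI9320_SPI_* definitions in <video/ili9320.h>:

#include <stdio.h>

#define SPI_ID		0x70	/* assumed device-ID pattern */
#define SPI_INDEX	0x00	/* assumed: register-index cycle */
#define SPI_DATA	0x02	/* assumed: data cycle */
#define SPI_WRITE	0x00	/* assumed: write access */

int main(void)
{
	unsigned int reg = 0x0007, value = 0x0133;	/* example write */
	unsigned char addr[3], data[3];

	/* first CS cycle: select the register index */
	addr[0] = SPI_ID | SPI_INDEX | SPI_WRITE;
	addr[1] = reg >> 8;
	addr[2] = reg;

	/* second CS cycle: the 16-bit value itself */
	data[0] = SPI_ID | SPI_DATA | SPI_WRITE;
	data[1] = value >> 8;
	data[2] = value;

	printf("index cycle: %02x %02x %02x  data cycle: %02x %02x %02x\n",
	       addr[0], addr[1], addr[2], data[0], data[1], data[2]);
	return 0;
}
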
diff --git a/drivers/video/backlight/ili9320.h b/drivers/video/backlight/ili9320.h
new file mode 100644
index 000000000000..e388eca7cac5
--- /dev/null
+++ b/drivers/video/backlight/ili9320.h
@@ -0,0 +1,80 @@
1/* drivers/video/backlight/ili9320.h
2 *
3 * ILI9320 LCD controller driver core.
4 *
5 * Copyright 2007 Simtec Electronics
6 * Ben Dooks <ben@simtec.co.uk>
7 *
8 * http://armlinux.simtec.co.uk/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15/* Holder for register and value pairs. */
16struct ili9320_reg {
17 unsigned short address;
18 unsigned short value;
19};
20
21struct ili9320;
22
23struct ili9320_client {
24 const char *name;
25 int (*init)(struct ili9320 *ili, struct ili9320_platdata *cfg);
26
27};
28/* Device attached via an SPI bus. */
29struct ili9320_spi {
30 struct spi_device *dev;
31 struct spi_message message;
32 struct spi_transfer xfer[2];
33
34 unsigned char id;
35 unsigned char buffer_addr[4];
36 unsigned char buffer_data[4];
37};
38
39/* ILI9320 device state. */
40struct ili9320 {
41 union {
 42 struct ili9320_spi spi; /* SPI attached device. */
43 } access; /* Register access method. */
44
45 struct device *dev;
46 struct lcd_device *lcd; /* LCD device we created. */
47 struct ili9320_client *client;
48 struct ili9320_platdata *platdata;
49
50 int power; /* current power state. */
51 int initialised;
52
53 unsigned short display1;
54 unsigned short power1;
55
56 int (*write)(struct ili9320 *ili, unsigned int reg, unsigned int val);
57};
58
59
60/* ILI9320 register access routines */
61
62extern int ili9320_write(struct ili9320 *ili,
63 unsigned int reg, unsigned int value);
64
65extern int ili9320_write_regs(struct ili9320 *ili,
66 struct ili9320_reg *values,
67 int nr_values);
68
69/* Device probe */
70
71extern int ili9320_probe_spi(struct spi_device *spi,
72 struct ili9320_client *cli);
73
74extern int ili9320_remove(struct ili9320 *lcd);
75extern void ili9320_shutdown(struct ili9320 *lcd);
76
77/* PM */
78
79extern int ili9320_suspend(struct ili9320 *lcd, pm_message_t state);
80extern int ili9320_resume(struct ili9320 *lcd);
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 299fd318dd45..b15b2b84a6f7 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -33,7 +33,7 @@ static int fb_notifier_callback(struct notifier_block *self,
33 ld = container_of(self, struct lcd_device, fb_notif); 33 ld = container_of(self, struct lcd_device, fb_notif);
34 mutex_lock(&ld->ops_lock); 34 mutex_lock(&ld->ops_lock);
35 if (ld->ops) 35 if (ld->ops)
36 if (!ld->ops->check_fb || ld->ops->check_fb(evdata->info)) 36 if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info))
37 ld->ops->set_power(ld, *(int *)evdata->data); 37 ld->ops->set_power(ld, *(int *)evdata->data);
38 mutex_unlock(&ld->ops_lock); 38 mutex_unlock(&ld->ops_lock);
39 return 0; 39 return 0;
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
new file mode 100644
index 000000000000..72d44dbfce82
--- /dev/null
+++ b/drivers/video/backlight/platform_lcd.c
@@ -0,0 +1,172 @@
1/* drivers/video/backlight/platform_lcd.c
2 *
3 * Copyright 2008 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Generic platform-device LCD power control interface.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12*/
13
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/fb.h>
17#include <linux/backlight.h>
18#include <linux/lcd.h>
19
20#include <video/platform_lcd.h>
21
22struct platform_lcd {
23 struct device *us;
24 struct lcd_device *lcd;
25 struct plat_lcd_data *pdata;
26
27 unsigned int power;
28 unsigned int suspended : 1;
29};
30
31static inline struct platform_lcd *to_our_lcd(struct lcd_device *lcd)
32{
33 return lcd_get_data(lcd);
34}
35
36static int platform_lcd_get_power(struct lcd_device *lcd)
37{
38 struct platform_lcd *plcd = to_our_lcd(lcd);
39
40 return plcd->power;
41}
42
43static int platform_lcd_set_power(struct lcd_device *lcd, int power)
44{
45 struct platform_lcd *plcd = to_our_lcd(lcd);
46 int lcd_power = 1;
47
48 if (power == FB_BLANK_POWERDOWN || plcd->suspended)
49 lcd_power = 0;
50
51 plcd->pdata->set_power(plcd->pdata, lcd_power);
52 plcd->power = power;
53
54 return 0;
55}
56
57static int platform_lcd_match(struct lcd_device *lcd, struct fb_info *info)
58{
59 struct platform_lcd *plcd = to_our_lcd(lcd);
60 struct plat_lcd_data *pdata = plcd->pdata;
61
62 if (pdata->match_fb)
63 return pdata->match_fb(pdata, info);
64
65 return plcd->us->parent == info->device;
66}
67
68static struct lcd_ops platform_lcd_ops = {
69 .get_power = platform_lcd_get_power,
70 .set_power = platform_lcd_set_power,
71 .check_fb = platform_lcd_match,
72};
73
74static int __devinit platform_lcd_probe(struct platform_device *pdev)
75{
76 struct plat_lcd_data *pdata;
77 struct platform_lcd *plcd;
78 struct device *dev = &pdev->dev;
79 int err;
80
81 pdata = pdev->dev.platform_data;
82 if (!pdata) {
83 dev_err(dev, "no platform data supplied\n");
84 return -EINVAL;
85 }
86
87 plcd = kzalloc(sizeof(struct platform_lcd), GFP_KERNEL);
88 if (!plcd) {
89 dev_err(dev, "no memory for state\n");
90 return -ENOMEM;
91 }
92
93 plcd->us = dev;
94 plcd->pdata = pdata;
95 plcd->lcd = lcd_device_register("platform-lcd", dev,
96 plcd, &platform_lcd_ops);
97 if (IS_ERR(plcd->lcd)) {
98 dev_err(dev, "cannot register lcd device\n");
99 err = PTR_ERR(plcd->lcd);
100 goto err_mem;
101 }
102
103 platform_set_drvdata(pdev, plcd);
104 return 0;
105
106 err_mem:
107 kfree(plcd);
108 return err;
109}
110
111static int __devexit platform_lcd_remove(struct platform_device *pdev)
112{
113 struct platform_lcd *plcd = platform_get_drvdata(pdev);
114
115 lcd_device_unregister(plcd->lcd);
116 kfree(plcd);
117
118 return 0;
119}
120
121#ifdef CONFIG_PM
122static int platform_lcd_suspend(struct platform_device *pdev, pm_message_t st)
123{
124 struct platform_lcd *plcd = platform_get_drvdata(pdev);
125
126 plcd->suspended = 1;
127 platform_lcd_set_power(plcd->lcd, plcd->power);
128
129 return 0;
130}
131
132static int platform_lcd_resume(struct platform_device *pdev)
133{
134 struct platform_lcd *plcd = platform_get_drvdata(pdev);
135
136 plcd->suspended = 0;
137 platform_lcd_set_power(plcd->lcd, plcd->power);
138
139 return 0;
140}
141#else
142#define platform_lcd_suspend NULL
143#define platform_lcd_resume NULL
144#endif
145
146static struct platform_driver platform_lcd_driver = {
147 .driver = {
148 .name = "platform-lcd",
149 .owner = THIS_MODULE,
150 },
151 .probe = platform_lcd_probe,
152 .remove = __devexit_p(platform_lcd_remove),
153 .suspend = platform_lcd_suspend,
154 .resume = platform_lcd_resume,
155};
156
157static int __init platform_lcd_init(void)
158{
159 return platform_driver_register(&platform_lcd_driver);
160}
161
162static void __exit platform_lcd_cleanup(void)
163{
164 platform_driver_unregister(&platform_lcd_driver);
165}
166
167module_init(platform_lcd_init);
168module_exit(platform_lcd_cleanup);
169
170MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
171MODULE_LICENSE("GPL v2");
172MODULE_ALIAS("platform:platform-lcd");
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c
new file mode 100644
index 000000000000..593c7687d54a
--- /dev/null
+++ b/drivers/video/backlight/vgg2432a4.c
@@ -0,0 +1,284 @@
1/* drivers/video/backlight/vgg2432a4.c
2 *
3 * VGG2432A4 (ILI9320) LCD controller driver.
4 *
5 * Copyright 2007 Simtec Electronics
6 * http://armlinux.simtec.co.uk/
7 * Ben Dooks <ben@simtec.co.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/fb.h>
17#include <linux/init.h>
18#include <linux/lcd.h>
19#include <linux/module.h>
20
21#include <linux/spi/spi.h>
22
23#include <video/ili9320.h>
24
25#include "ili9320.h"
26
27/* Device initialisation sequences */
28
29static struct ili9320_reg vgg_init1[] = {
30 {
31 .address = ILI9320_POWER1,
32 .value = ILI9320_POWER1_AP(0) | ILI9320_POWER1_BT(0),
33 }, {
34 .address = ILI9320_POWER2,
35 .value = (ILI9320_POWER2_VC(7) |
36 ILI9320_POWER2_DC0(0) | ILI9320_POWER2_DC1(0)),
37 }, {
38 .address = ILI9320_POWER3,
39 .value = ILI9320_POWER3_VRH(0),
40 }, {
41 .address = ILI9320_POWER4,
42 .value = ILI9320_POWER4_VREOUT(0),
43 },
44};
45
46static struct ili9320_reg vgg_init2[] = {
47 {
48 .address = ILI9320_POWER1,
49 .value = (ILI9320_POWER1_AP(3) | ILI9320_POWER1_APE |
50 ILI9320_POWER1_BT(7) | ILI9320_POWER1_SAP),
51 }, {
52 .address = ILI9320_POWER2,
53 .value = ILI9320_POWER2_VC(7) | ILI9320_POWER2_DC0(3),
54 }
55};
56
57static struct ili9320_reg vgg_gamma[] = {
58 {
59 .address = ILI9320_GAMMA1,
60 .value = 0x0000,
61 }, {
62 .address = ILI9320_GAMMA2,
63 .value = 0x0505,
64 }, {
65 .address = ILI9320_GAMMA3,
66 .value = 0x0004,
67 }, {
68 .address = ILI9320_GAMMA4,
69 .value = 0x0006,
70 }, {
71 .address = ILI9320_GAMMA5,
72 .value = 0x0707,
73 }, {
74 .address = ILI9320_GAMMA6,
75 .value = 0x0105,
76 }, {
77 .address = ILI9320_GAMMA7,
78 .value = 0x0002,
79 }, {
80 .address = ILI9320_GAMMA8,
81 .value = 0x0707,
82 }, {
83 .address = ILI9320_GAMMA9,
84 .value = 0x0704,
85 }, {
86 .address = ILI9320_GAMMA10,
87 .value = 0x807,
88 }
89
90};
91
92static struct ili9320_reg vgg_init0[] = {
93 [0] = {
94 /* set direction and scan mode gate */
95 .address = ILI9320_DRIVER,
96 .value = ILI9320_DRIVER_SS,
97 }, {
98 .address = ILI9320_DRIVEWAVE,
99 .value = (ILI9320_DRIVEWAVE_MUSTSET |
100 ILI9320_DRIVEWAVE_EOR | ILI9320_DRIVEWAVE_BC),
101 }, {
102 .address = ILI9320_ENTRYMODE,
103 .value = ILI9320_ENTRYMODE_ID(3) | ILI9320_ENTRYMODE_BGR,
104 }, {
105 .address = ILI9320_RESIZING,
106 .value = 0x0,
107 },
108};
109
110
111static int vgg2432a4_lcd_init(struct ili9320 *lcd,
112 struct ili9320_platdata *cfg)
113{
114 unsigned int addr;
115 int ret;
116
117 /* Set VCore before anything else (VGG243237-6UFLWA) */
118 ret = ili9320_write(lcd, 0x00e5, 0x8000);
119 if (ret)
120 goto err_initial;
121
122 /* Start the oscillator up before we can do anything else. */
123 ret = ili9320_write(lcd, ILI9320_OSCILATION, ILI9320_OSCILATION_OSC);
124 if (ret)
125 goto err_initial;
126
 127 /* must wait at least 10 ms after starting */
128 mdelay(15);
129
130 ret = ili9320_write_regs(lcd, vgg_init0, ARRAY_SIZE(vgg_init0));
131 if (ret != 0)
132 goto err_initial;
133
134 ili9320_write(lcd, ILI9320_DISPLAY2, cfg->display2);
135 ili9320_write(lcd, ILI9320_DISPLAY3, cfg->display3);
136 ili9320_write(lcd, ILI9320_DISPLAY4, cfg->display4);
137
138 ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1);
139 ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0);
140 ili9320_write(lcd, ILI9320_RGB_IF2, ILI9320_RGBIF2_DPL);
141
142 ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1));
143 if (ret != 0)
144 goto err_vgg;
145
146 mdelay(300);
147
148 ret = ili9320_write_regs(lcd, vgg_init2, ARRAY_SIZE(vgg_init2));
149 if (ret != 0)
150 goto err_vgg2;
151
152 mdelay(100);
153
154 ili9320_write(lcd, ILI9320_POWER3, 0x13c);
155
156 mdelay(100);
157
158 ili9320_write(lcd, ILI9320_POWER4, 0x1c00);
159 ili9320_write(lcd, ILI9320_POWER7, 0x000e);
160
161 mdelay(100);
162
163 ili9320_write(lcd, ILI9320_GRAM_HORIZ_ADDR, 0x00);
164 ili9320_write(lcd, ILI9320_GRAM_VERT_ADD, 0x00);
165
166 ret = ili9320_write_regs(lcd, vgg_gamma, ARRAY_SIZE(vgg_gamma));
167 if (ret != 0)
168 goto err_vgg3;
169
170 ili9320_write(lcd, ILI9320_HORIZ_START, 0x0);
171 ili9320_write(lcd, ILI9320_HORIZ_END, cfg->hsize - 1);
172 ili9320_write(lcd, ILI9320_VERT_START, 0x0);
173 ili9320_write(lcd, ILI9320_VERT_END, cfg->vsize - 1);
174
175 ili9320_write(lcd, ILI9320_DRIVER2,
176 ILI9320_DRIVER2_NL(((cfg->vsize - 240) / 8) + 0x1D));
177
178 ili9320_write(lcd, ILI9320_BASE_IMAGE, 0x1);
179 ili9320_write(lcd, ILI9320_VERT_SCROLL, 0x00);
180
181 for (addr = ILI9320_PARTIAL1_POSITION; addr <= ILI9320_PARTIAL2_END;
182 addr++) {
183 ili9320_write(lcd, addr, 0x0);
184 }
185
186 ili9320_write(lcd, ILI9320_INTERFACE1, 0x10);
187 ili9320_write(lcd, ILI9320_INTERFACE2, cfg->interface2);
188 ili9320_write(lcd, ILI9320_INTERFACE3, cfg->interface3);
189 ili9320_write(lcd, ILI9320_INTERFACE4, cfg->interface4);
190 ili9320_write(lcd, ILI9320_INTERFACE5, cfg->interface5);
191 ili9320_write(lcd, ILI9320_INTERFACE6, cfg->interface6);
192
193 lcd->display1 = (ILI9320_DISPLAY1_D(3) | ILI9320_DISPLAY1_DTE |
194 ILI9320_DISPLAY1_GON | ILI9320_DISPLAY1_BASEE |
195 0x40);
196
197 ili9320_write(lcd, ILI9320_DISPLAY1, lcd->display1);
198
199 return 0;
200
201 err_vgg3:
202 err_vgg2:
203 err_vgg:
204 err_initial:
205 return ret;
206}
207
208#ifdef CONFIG_PM
209static int vgg2432a4_suspend(struct spi_device *spi, pm_message_t state)
210{
211 return ili9320_suspend(dev_get_drvdata(&spi->dev), state);
212}
213
214static int vgg2432a4_resume(struct spi_device *spi)
215{
216 return ili9320_resume(dev_get_drvdata(&spi->dev));
217}
218#else
219#define vgg2432a4_suspend NULL
220#define vgg2432a4_resume NULL
221#endif
222
223static struct ili9320_client vgg2432a4_client = {
224 .name = "VGG2432A4",
225 .init = vgg2432a4_lcd_init,
226};
227
228/* Device probe */
229
230static int __devinit vgg2432a4_probe(struct spi_device *spi)
231{
232 int ret;
233
234 ret = ili9320_probe_spi(spi, &vgg2432a4_client);
235 if (ret != 0) {
236 dev_err(&spi->dev, "failed to initialise ili9320\n");
237 return ret;
238 }
239
240 return 0;
241}
242
243static int __devexit vgg2432a4_remove(struct spi_device *spi)
244{
245 return ili9320_remove(dev_get_drvdata(&spi->dev));
246}
247
248static void vgg2432a4_shutdown(struct spi_device *spi)
249{
250 ili9320_shutdown(dev_get_drvdata(&spi->dev));
251}
252
253static struct spi_driver vgg2432a4_driver = {
254 .driver = {
255 .name = "VGG2432A4",
256 .owner = THIS_MODULE,
257 },
258 .probe = vgg2432a4_probe,
259 .remove = __devexit_p(vgg2432a4_remove),
260 .shutdown = vgg2432a4_shutdown,
261 .suspend = vgg2432a4_suspend,
262 .resume = vgg2432a4_resume,
263};
264
265/* Device driver initialisation */
266
267static int __init vgg2432a4_init(void)
268{
269 return spi_register_driver(&vgg2432a4_driver);
270}
271
272static void __exit vgg2432a4_exit(void)
273{
274 spi_unregister_driver(&vgg2432a4_driver);
275}
276
277module_init(vgg2432a4_init);
278module_exit(vgg2432a4_exit);
279
280MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
281MODULE_DESCRIPTION("VGG2432A4 LCD Driver");
282MODULE_LICENSE("GPL v2");
283
284
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 49834a67a623..940467aed13f 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -478,7 +478,7 @@ static int bfin_lcd_set_contrast(struct lcd_device *dev, int contrast)
478 return 0; 478 return 0;
479} 479}
480 480
481static int bfin_lcd_check_fb(struct fb_info *fi) 481static int bfin_lcd_check_fb(struct lcd_device *dev, struct fb_info *fi)
482{ 482{
483 if (!fi || (fi == &bfin_bf54x_fb)) 483 if (!fi || (fi == &bfin_bf54x_fb))
484 return 1; 484 return 1;
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 135d6dd7e672..7d1b819e501c 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -396,7 +396,7 @@ static int bfin_lcd_set_contrast(struct lcd_device *dev, int contrast)
396 return 0; 396 return 0;
397} 397}
398 398
399static int bfin_lcd_check_fb(struct fb_info *fi) 399static int bfin_lcd_check_fb(struct lcd_device *dev, struct fb_info *fi)
400{ 400{
401 if (!fi || (fi == &bfin_t350mcqb_fb)) 401 if (!fi || (fi == &bfin_t350mcqb_fb))
402 return 1; 402 return 1;
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
new file mode 100644
index 000000000000..e15bb447440a
--- /dev/null
+++ b/drivers/video/carminefb.c
@@ -0,0 +1,790 @@
1/*
2 * Frame buffer driver for the Carmine GPU.
3 *
4 * The driver configures the GPU as follows
5 * - FB0 is display 0 with unique memory area
6 * - FB1 is display 1 with unique memory area
7 * - both display use 32 bit colors
8 */
9#include <linux/delay.h>
10#include <linux/errno.h>
11#include <linux/fb.h>
12#include <linux/interrupt.h>
13#include <linux/pci.h>
14
15#include "carminefb.h"
16#include "carminefb_regs.h"
17
18#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
19#error "The endianness of the target host has not been defined."
20#endif
21
22/*
 23 * The initial video mode can be supplied in one of two ways:
24 * - as a string that is passed to fb_find_mode() (module option fb_mode_str)
25 * - as an integer that picks the video mode from carmine_modedb[] (module
26 * option fb_mode)
27 *
 28 * If neither is given, the initial video mode will be the
29 * CARMINEFB_DEFAULT_VIDEO_MODE member of the carmine_modedb[].
30 */
31#define CARMINEFB_DEFAULT_VIDEO_MODE 1
32
33static unsigned int fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
 34module_param(fb_mode, uint, 0444);
35MODULE_PARM_DESC(fb_mode, "Initial video mode as integer.");
36
37static char *fb_mode_str;
 38module_param(fb_mode_str, charp, 0444);
39MODULE_PARM_DESC(fb_mode_str, "Initial video mode in characters.");
40
41/*
42 * Carminefb displays:
43 * 0b000 None
44 * 0b001 Display 0
45 * 0b010 Display 1
46 */
47static int fb_displays = CARMINE_USE_DISPLAY0 | CARMINE_USE_DISPLAY1;
 48module_param(fb_displays, int, 0444);
 49MODULE_PARM_DESC(fb_displays, "Bitmask of which displays are used");
50
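
The block above documents the module options: fb_mode picks an entry from carmine_modedb[], fb_mode_str is an ordinary fb_find_mode() mode string (for this driver's table that means 640x480 or 800x600), and fb_displays is a bitmask over the two heads. A tiny illustration of how such a bitmask is read; the bit values are assumed from the 0b001/0b010 comment, the driver's real CARMINE_USE_DISPLAY* macros live in carminefb.h:

#include <stdio.h>

#define USE_DISPLAY0	0x1	/* assumed value of CARMINE_USE_DISPLAY0 */
#define USE_DISPLAY1	0x2	/* assumed value of CARMINE_USE_DISPLAY1 */

int main(void)
{
	int fb_displays = USE_DISPLAY0 | USE_DISPLAY1;	/* the module default */

	printf("display 0: %s, display 1: %s\n",
	       (fb_displays & USE_DISPLAY0) ? "enabled" : "disabled",
	       (fb_displays & USE_DISPLAY1) ? "enabled" : "disabled");
	return 0;
}
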
51struct carmine_hw {
52 void __iomem *v_regs;
53 void __iomem *screen_mem;
54 struct fb_info *fb[MAX_DISPLAY];
55};
56
57struct carmine_resolution {
58 u32 htp;
59 u32 hsp;
60 u32 hsw;
61 u32 hdp;
62 u32 vtr;
63 u32 vsp;
64 u32 vsw;
65 u32 vdp;
66 u32 disp_mode;
67};
68
69struct carmine_fb {
70 void __iomem *display_reg;
71 void __iomem *screen_base;
72 u32 smem_offset;
73 u32 cur_mode;
74 u32 new_mode;
75 struct carmine_resolution *res;
76 u32 pseudo_palette[16];
77};
78
79static struct fb_fix_screeninfo carminefb_fix __devinitdata = {
80 .id = "Carmine",
81 .type = FB_TYPE_PACKED_PIXELS,
82 .visual = FB_VISUAL_TRUECOLOR,
83 .accel = FB_ACCEL_NONE,
84};
85
86static const struct fb_videomode carmine_modedb[] = {
87 {
88 .name = "640x480",
89 .xres = 640,
90 .yres = 480,
91 }, {
92 .name = "800x600",
93 .xres = 800,
94 .yres = 600,
95 },
96};
97
98static struct carmine_resolution car_modes[] = {
99 {
100 /* 640x480 */
101 .htp = 800,
102 .hsp = 672,
103 .hsw = 96,
104 .hdp = 640,
105 .vtr = 525,
106 .vsp = 490,
107 .vsw = 2,
108 .vdp = 480,
109 .disp_mode = 0x1400,
110 },
111 {
112 /* 800x600 */
113 .htp = 1060,
114 .hsp = 864,
115 .hsw = 72,
116 .hdp = 800,
117 .vtr = 628,
118 .vsp = 601,
119 .vsw = 2,
120 .vdp = 600,
121 .disp_mode = 0x0d00,
122 }
123};
124
125static int carmine_find_mode(const struct fb_var_screeninfo *var)
126{
127 int i;
128
129 for (i = 0; i < ARRAY_SIZE(car_modes); i++)
130 if (car_modes[i].hdp == var->xres &&
131 car_modes[i].vdp == var->yres)
132 return i;
133 return -EINVAL;
134}
135
136static void c_set_disp_reg(const struct carmine_fb *par,
137 u32 offset, u32 val)
138{
139 writel(val, par->display_reg + offset);
140}
141
142static u32 c_get_disp_reg(const struct carmine_fb *par,
143 u32 offset)
144{
145 return readl(par->display_reg + offset);
146}
147
148static void c_set_hw_reg(const struct carmine_hw *hw,
149 u32 offset, u32 val)
150{
151 writel(val, hw->v_regs + offset);
152}
153
154static u32 c_get_hw_reg(const struct carmine_hw *hw,
155 u32 offset)
156{
157 return readl(hw->v_regs + offset);
158}
159
160static int carmine_setcolreg(unsigned regno, unsigned red, unsigned green,
161 unsigned blue, unsigned transp, struct fb_info *info)
162{
163 if (regno >= 16)
164 return 1;
165
166 red >>= 8;
167 green >>= 8;
168 blue >>= 8;
169 transp >>= 8;
170
171 ((u32 *)info->pseudo_palette)[regno] = be32_to_cpu(transp << 24 |
172 red << 0 | green << 8 | blue << 16);
173 return 0;
174}
175
176static int carmine_check_var(struct fb_var_screeninfo *var,
177 struct fb_info *info)
178{
179 int ret;
180
181 ret = carmine_find_mode(var);
182 if (ret < 0)
183 return ret;
184
185 if (var->grayscale || var->rotate || var->nonstd)
186 return -EINVAL;
187
188 var->xres_virtual = var->xres;
189 var->yres_virtual = var->yres;
190
191 var->bits_per_pixel = 32;
192
193#ifdef __BIG_ENDIAN
194 var->transp.offset = 24;
195 var->red.offset = 0;
196 var->green.offset = 8;
197 var->blue.offset = 16;
198#else
199 var->transp.offset = 24;
200 var->red.offset = 16;
201 var->green.offset = 8;
202 var->blue.offset = 0;
203#endif
204
205 var->red.length = 8;
206 var->green.length = 8;
207 var->blue.length = 8;
208 var->transp.length = 8;
209
210 var->red.msb_right = 0;
211 var->green.msb_right = 0;
212 var->blue.msb_right = 0;
213 var->transp.msb_right = 0;
214 return 0;
215}
216
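For illustration, a minimal sketch (not part of the driver; the helper name is hypothetical) of what the channel offsets chosen in carmine_check_var() mean for a raw 32-bit pixel value:

/* Build an opaque red pixel from the offsets set up in carmine_check_var().
 * On little-endian hosts: transp=24, red=16, green=8, blue=0 -> 0xFFFF0000.
 * On big-endian hosts:    transp=24, red=0,  green=8, blue=16 -> 0xFF0000FF.
 */
static u32 carmine_example_opaque_red(const struct fb_var_screeninfo *var)
{
        return (0xFFu << var->transp.offset) | (0xFFu << var->red.offset);
}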
217static void carmine_init_display_param(struct carmine_fb *par)
218{
219 u32 width;
220 u32 height;
221 u32 param;
222 u32 window_size;
223 u32 soffset = par->smem_offset;
224
225 c_set_disp_reg(par, CARMINE_DISP_REG_C_TRANS, 0);
226 c_set_disp_reg(par, CARMINE_DISP_REG_MLMR_TRANS, 0);
227 c_set_disp_reg(par, CARMINE_DISP_REG_CURSOR_MODE,
228 CARMINE_CURSOR0_PRIORITY_MASK |
229 CARMINE_CURSOR1_PRIORITY_MASK |
230 CARMINE_CURSOR_CUTZ_MASK);
231
232 /* Set default cursor position */
233 c_set_disp_reg(par, CARMINE_DISP_REG_CUR1_POS, 0 << 16 | 0);
234 c_set_disp_reg(par, CARMINE_DISP_REG_CUR2_POS, 0 << 16 | 0);
235
236 /* Set default display mode */
237 c_set_disp_reg(par, CARMINE_DISP_REG_L0_EXT_MODE, CARMINE_WINDOW_MODE |
238 CARMINE_EXT_CMODE_DIRECT24_RGBA);
239 c_set_disp_reg(par, CARMINE_DISP_REG_L1_EXT_MODE,
240 CARMINE_EXT_CMODE_DIRECT24_RGBA);
241 c_set_disp_reg(par, CARMINE_DISP_REG_L2_EXT_MODE, CARMINE_EXTEND_MODE |
242 CARMINE_EXT_CMODE_DIRECT24_RGBA);
243 c_set_disp_reg(par, CARMINE_DISP_REG_L3_EXT_MODE, CARMINE_EXTEND_MODE |
244 CARMINE_EXT_CMODE_DIRECT24_RGBA);
245 c_set_disp_reg(par, CARMINE_DISP_REG_L4_EXT_MODE, CARMINE_EXTEND_MODE |
246 CARMINE_EXT_CMODE_DIRECT24_RGBA);
247 c_set_disp_reg(par, CARMINE_DISP_REG_L5_EXT_MODE, CARMINE_EXTEND_MODE |
248 CARMINE_EXT_CMODE_DIRECT24_RGBA);
249 c_set_disp_reg(par, CARMINE_DISP_REG_L6_EXT_MODE, CARMINE_EXTEND_MODE |
250 CARMINE_EXT_CMODE_DIRECT24_RGBA);
251 c_set_disp_reg(par, CARMINE_DISP_REG_L7_EXT_MODE, CARMINE_EXTEND_MODE |
252 CARMINE_EXT_CMODE_DIRECT24_RGBA);
253
254 /* Set default frame size to layer mode register */
255 width = par->res->hdp * 4 / CARMINE_DISP_WIDTH_UNIT;
256 width = width << CARMINE_DISP_WIDTH_SHIFT;
257
258 height = par->res->vdp - 1;
259 param = width | height;
260
261 c_set_disp_reg(par, CARMINE_DISP_REG_L0_MODE_W_H, param);
262 c_set_disp_reg(par, CARMINE_DISP_REG_L1_WIDTH, width);
263 c_set_disp_reg(par, CARMINE_DISP_REG_L2_MODE_W_H, param);
264 c_set_disp_reg(par, CARMINE_DISP_REG_L3_MODE_W_H, param);
265 c_set_disp_reg(par, CARMINE_DISP_REG_L4_MODE_W_H, param);
266 c_set_disp_reg(par, CARMINE_DISP_REG_L5_MODE_W_H, param);
267 c_set_disp_reg(par, CARMINE_DISP_REG_L6_MODE_W_H, param);
268 c_set_disp_reg(par, CARMINE_DISP_REG_L7_MODE_W_H, param);
269
270 /* Set default pos and size */
271 window_size = (par->res->vdp - 1) << CARMINE_DISP_WIN_H_SHIFT;
272 window_size |= par->res->hdp;
273
274 c_set_disp_reg(par, CARMINE_DISP_REG_L0_WIN_POS, 0);
275 c_set_disp_reg(par, CARMINE_DISP_REG_L0_WIN_SIZE, window_size);
276 c_set_disp_reg(par, CARMINE_DISP_REG_L1_WIN_POS, 0);
277 c_set_disp_reg(par, CARMINE_DISP_REG_L1_WIN_SIZE, window_size);
278 c_set_disp_reg(par, CARMINE_DISP_REG_L2_WIN_POS, 0);
279 c_set_disp_reg(par, CARMINE_DISP_REG_L2_WIN_SIZE, window_size);
280 c_set_disp_reg(par, CARMINE_DISP_REG_L3_WIN_POS, 0);
281 c_set_disp_reg(par, CARMINE_DISP_REG_L3_WIN_SIZE, window_size);
282 c_set_disp_reg(par, CARMINE_DISP_REG_L4_WIN_POS, 0);
283 c_set_disp_reg(par, CARMINE_DISP_REG_L4_WIN_SIZE, window_size);
284 c_set_disp_reg(par, CARMINE_DISP_REG_L5_WIN_POS, 0);
285 c_set_disp_reg(par, CARMINE_DISP_REG_L5_WIN_SIZE, window_size);
286 c_set_disp_reg(par, CARMINE_DISP_REG_L6_WIN_POS, 0);
287 c_set_disp_reg(par, CARMINE_DISP_REG_L6_WIN_SIZE, window_size);
288 c_set_disp_reg(par, CARMINE_DISP_REG_L7_WIN_POS, 0);
289 c_set_disp_reg(par, CARMINE_DISP_REG_L7_WIN_SIZE, window_size);
290
291 /* Set default origin address */
292 c_set_disp_reg(par, CARMINE_DISP_REG_L0_ORG_ADR, soffset);
293 c_set_disp_reg(par, CARMINE_DISP_REG_L1_ORG_ADR, soffset);
294 c_set_disp_reg(par, CARMINE_DISP_REG_L2_ORG_ADR1, soffset);
295 c_set_disp_reg(par, CARMINE_DISP_REG_L3_ORG_ADR1, soffset);
296 c_set_disp_reg(par, CARMINE_DISP_REG_L4_ORG_ADR1, soffset);
297 c_set_disp_reg(par, CARMINE_DISP_REG_L5_ORG_ADR1, soffset);
298 c_set_disp_reg(par, CARMINE_DISP_REG_L6_ORG_ADR1, soffset);
299 c_set_disp_reg(par, CARMINE_DISP_REG_L7_ORG_ADR1, soffset);
300
301 /* Set default display address */
302 c_set_disp_reg(par, CARMINE_DISP_REG_L0_DISP_ADR, soffset);
303 c_set_disp_reg(par, CARMINE_DISP_REG_L2_DISP_ADR1, soffset);
304 c_set_disp_reg(par, CARMINE_DISP_REG_L3_DISP_ADR1, soffset);
305 c_set_disp_reg(par, CARMINE_DISP_REG_L4_DISP_ADR1, soffset);
306 c_set_disp_reg(par, CARMINE_DISP_REG_L5_DISP_ADR1, soffset);
307 c_set_disp_reg(par, CARMINE_DISP_REG_L6_DISP_ADR0, soffset);
308 c_set_disp_reg(par, CARMINE_DISP_REG_L7_DISP_ADR0, soffset);
309
310 /* Set default display position */
311 c_set_disp_reg(par, CARMINE_DISP_REG_L0_DISP_POS, 0);
312 c_set_disp_reg(par, CARMINE_DISP_REG_L2_DISP_POS, 0);
313 c_set_disp_reg(par, CARMINE_DISP_REG_L3_DISP_POS, 0);
314 c_set_disp_reg(par, CARMINE_DISP_REG_L4_DISP_POS, 0);
315 c_set_disp_reg(par, CARMINE_DISP_REG_L5_DISP_POS, 0);
316 c_set_disp_reg(par, CARMINE_DISP_REG_L6_DISP_POS, 0);
317 c_set_disp_reg(par, CARMINE_DISP_REG_L7_DISP_POS, 0);
318
319 /* Set default blend mode */
320 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L0, 0);
321 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L1, 0);
322 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L2, 0);
323 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L3, 0);
324 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L4, 0);
325 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L5, 0);
326 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L6, 0);
327 c_set_disp_reg(par, CARMINE_DISP_REG_BLEND_MODE_L7, 0);
328
329 /* default transparency mode */
330 c_set_disp_reg(par, CARMINE_DISP_REG_L0_TRANS, 0);
331 c_set_disp_reg(par, CARMINE_DISP_REG_L1_TRANS, 0);
332 c_set_disp_reg(par, CARMINE_DISP_REG_L2_TRANS, 0);
333 c_set_disp_reg(par, CARMINE_DISP_REG_L3_TRANS, 0);
334 c_set_disp_reg(par, CARMINE_DISP_REG_L4_TRANS, 0);
335 c_set_disp_reg(par, CARMINE_DISP_REG_L5_TRANS, 0);
336 c_set_disp_reg(par, CARMINE_DISP_REG_L6_TRANS, 0);
337 c_set_disp_reg(par, CARMINE_DISP_REG_L7_TRANS, 0);
338
339 /* Set default read skip parameter */
340 c_set_disp_reg(par, CARMINE_DISP_REG_L0RM, 0);
341 c_set_disp_reg(par, CARMINE_DISP_REG_L2RM, 0);
342 c_set_disp_reg(par, CARMINE_DISP_REG_L3RM, 0);
343 c_set_disp_reg(par, CARMINE_DISP_REG_L4RM, 0);
344 c_set_disp_reg(par, CARMINE_DISP_REG_L5RM, 0);
345 c_set_disp_reg(par, CARMINE_DISP_REG_L6RM, 0);
346 c_set_disp_reg(par, CARMINE_DISP_REG_L7RM, 0);
347
348 c_set_disp_reg(par, CARMINE_DISP_REG_L0PX, 0);
349 c_set_disp_reg(par, CARMINE_DISP_REG_L2PX, 0);
350 c_set_disp_reg(par, CARMINE_DISP_REG_L3PX, 0);
351 c_set_disp_reg(par, CARMINE_DISP_REG_L4PX, 0);
352 c_set_disp_reg(par, CARMINE_DISP_REG_L5PX, 0);
353 c_set_disp_reg(par, CARMINE_DISP_REG_L6PX, 0);
354 c_set_disp_reg(par, CARMINE_DISP_REG_L7PX, 0);
355
356 c_set_disp_reg(par, CARMINE_DISP_REG_L0PY, 0);
357 c_set_disp_reg(par, CARMINE_DISP_REG_L2PY, 0);
358 c_set_disp_reg(par, CARMINE_DISP_REG_L3PY, 0);
359 c_set_disp_reg(par, CARMINE_DISP_REG_L4PY, 0);
360 c_set_disp_reg(par, CARMINE_DISP_REG_L5PY, 0);
361 c_set_disp_reg(par, CARMINE_DISP_REG_L6PY, 0);
362 c_set_disp_reg(par, CARMINE_DISP_REG_L7PY, 0);
363}
364
365static void set_display_parameters(struct carmine_fb *par)
366{
367 u32 mode;
368 u32 hdp, vdp, htp, hsp, hsw, vtr, vsp, vsw;
369
370 /*
371 * Display timing. Each parameter is decreased by one because the
372 * hardware counts from 0 to (n - 1).
373 */
374 hdp = par->res->hdp - 1;
375 vdp = par->res->vdp - 1;
376 htp = par->res->htp - 1;
377 hsp = par->res->hsp - 1;
378 hsw = par->res->hsw - 1;
379 vtr = par->res->vtr - 1;
380 vsp = par->res->vsp - 1;
381 vsw = par->res->vsw - 1;
382
383 c_set_disp_reg(par, CARMINE_DISP_REG_H_TOTAL,
384 htp << CARMINE_DISP_HTP_SHIFT);
385 c_set_disp_reg(par, CARMINE_DISP_REG_H_PERIOD,
386 (hdp << CARMINE_DISP_HDB_SHIFT) | hdp);
387 c_set_disp_reg(par, CARMINE_DISP_REG_V_H_W_H_POS,
388 (vsw << CARMINE_DISP_VSW_SHIFT) |
389 (hsw << CARMINE_DISP_HSW_SHIFT) |
390 (hsp));
391 c_set_disp_reg(par, CARMINE_DISP_REG_V_TOTAL,
392 vtr << CARMINE_DISP_VTR_SHIFT);
393 c_set_disp_reg(par, CARMINE_DISP_REG_V_PERIOD_POS,
394 (vdp << CARMINE_DISP_VDP_SHIFT) | vsp);
395
396 /* clock */
397 mode = c_get_disp_reg(par, CARMINE_DISP_REG_DCM1);
398 mode = (mode & ~CARMINE_DISP_DCM_MASK) |
399 (par->res->disp_mode & CARMINE_DISP_DCM_MASK);
400 /* enable video output and layer 0 */
401 mode |= CARMINE_DEN | CARMINE_L0E;
402 c_set_disp_reg(par, CARMINE_DISP_REG_DCM1, mode);
403}
404
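As a worked example, using the 640x480 entry of car_modes[] and the shift constants from carminefb_regs.h, the register writes above reduce to the following values:

/* 640x480: htp=800, hsp=672, hsw=96, hdp=640, vtr=525, vsp=490, vsw=2, vdp=480.
 * After the "minus one" adjustment this yields:
 *   H_TOTAL      = 799 << 16                     = 0x031F0000
 *   H_PERIOD     = (639 << 16) | 639             = 0x027F027F
 *   V_H_W_H_POS  = (1 << 24) | (95 << 16) | 671  = 0x015F029F
 *   V_TOTAL      = 524 << 16                     = 0x020C0000
 *   V_PERIOD_POS = (479 << 16) | 489             = 0x01DF01E9
 */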
405static int carmine_set_par(struct fb_info *info)
406{
407 struct carmine_fb *par = info->par;
408 int ret;
409
410 ret = carmine_find_mode(&info->var);
411 if (ret < 0)
412 return ret;
413
414 par->new_mode = ret;
415 if (par->cur_mode != par->new_mode) {
416
417 par->cur_mode = par->new_mode;
418 par->res = &car_modes[par->new_mode];
419
420 carmine_init_display_param(par);
421 set_display_parameters(par);
422 }
423
424 info->fix.line_length = info->var.xres * info->var.bits_per_pixel / 8;
425 return 0;
426}
427
428static int init_hardware(struct carmine_hw *hw)
429{
430 u32 flags;
431 u32 loops;
432 u32 ret;
433
434 /* Initialize Carmine */
435 /* Set the internal clock */
436 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_CLOCK_ENABLE,
437 CARMINE_DFLT_IP_CLOCK_ENABLE);
438
439 /* Video signal output is turned off */
440 c_set_hw_reg(hw, CARMINE_DISP0_REG + CARMINE_DISP_REG_DCM1, 0);
441 c_set_hw_reg(hw, CARMINE_DISP1_REG + CARMINE_DISP_REG_DCM1, 0);
442
443 /* Software reset */
444 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_SOFTWARE_RESET, 1);
445 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_SOFTWARE_RESET, 0);
446
447 /* I/O mode settings */
448 flags = CARMINE_DFLT_IP_DCTL_IO_CONT1 << 16 |
449 CARMINE_DFLT_IP_DCTL_IO_CONT0;
450 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_IOCONT1_IOCONT0,
451 flags);
452
453 /* DRAM initial sequence */
454 flags = CARMINE_DFLT_IP_DCTL_MODE << 16 | CARMINE_DFLT_IP_DCTL_ADD;
455 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_MODE_ADD,
456 flags);
457
458 flags = CARMINE_DFLT_IP_DCTL_SET_TIME1 << 16 |
459 CARMINE_DFLT_IP_DCTL_EMODE;
460 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_SETTIME1_EMODE,
461 flags);
462
463 flags = CARMINE_DFLT_IP_DCTL_REFRESH << 16 |
464 CARMINE_DFLT_IP_DCTL_SET_TIME2;
465 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_REFRESH_SETTIME2,
466 flags);
467
468 flags = CARMINE_DFLT_IP_DCTL_RESERVE2 << 16 |
469 CARMINE_DFLT_IP_DCTL_FIFO_DEPTH;
470 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_RSV2_RSV1, flags);
471
472 flags = CARMINE_DFLT_IP_DCTL_DDRIF2 << 16 | CARMINE_DFLT_IP_DCTL_DDRIF1;
473 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_DDRIF2_DDRIF1,
474 flags);
475
476 flags = CARMINE_DFLT_IP_DCTL_RESERVE0 << 16 |
477 CARMINE_DFLT_IP_DCTL_STATES;
478 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_RSV0_STATES,
479 flags);
480
481 /* Executes DLL reset */
482 if (CARMINE_DCTL_DLL_RESET) {
483 for (loops = 0; loops < CARMINE_DCTL_INIT_WAIT_LIMIT; loops++) {
484
485 ret = c_get_hw_reg(hw, CARMINE_DCTL_REG +
486 CARMINE_DCTL_REG_RSV0_STATES);
487 ret &= CARMINE_DCTL_REG_STATES_MASK;
488 if (!ret)
489 break;
490
491 mdelay(CARMINE_DCTL_INIT_WAIT_INTERVAL);
492 }
493
494 if (loops >= CARMINE_DCTL_INIT_WAIT_LIMIT) {
495 printk(KERN_ERR "DRAM init failed\n");
496 return -EIO;
497 }
498 }
499
500 flags = CARMINE_DFLT_IP_DCTL_MODE_AFT_RST << 16 |
501 CARMINE_DFLT_IP_DCTL_ADD;
502 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_MODE_ADD, flags);
503
504 flags = CARMINE_DFLT_IP_DCTL_RESERVE0 << 16 |
505 CARMINE_DFLT_IP_DCTL_STATES_AFT_RST;
506 c_set_hw_reg(hw, CARMINE_DCTL_REG + CARMINE_DCTL_REG_RSV0_STATES,
507 flags);
508
509 /* Initialize the write back register */
510 c_set_hw_reg(hw, CARMINE_WB_REG + CARMINE_WB_REG_WBM,
511 CARMINE_WB_REG_WBM_DEFAULT);
512
513 /* Initialize the Kottos registers */
514 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_VRINTM, 0);
515 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_VRERRM, 0);
516
517 /* Set DC offsets */
518 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_PX, 0);
519 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_PY, 0);
520 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_LX, 0);
521 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_LY, 0);
522 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_TX, 0);
523 c_set_hw_reg(hw, CARMINE_GRAPH_REG + CARMINE_GRAPH_REG_DC_OFFSET_TY, 0);
524 return 0;
525}
526
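The DLL-reset wait above is an instance of the usual poll-until-clear-or-timeout pattern; a minimal generic sketch of that pattern (the function name is hypothetical, not driver code):

/* Poll *reg until none of the bits in mask are set, or give up after
 * limit iterations of interval_ms milliseconds each. */
static int example_wait_bits_clear(void __iomem *reg, u32 mask,
                                   unsigned int interval_ms,
                                   unsigned int limit)
{
        unsigned int i;

        for (i = 0; i < limit; i++) {
                if (!(readl(reg) & mask))
                        return 0;
                mdelay(interval_ms);
        }
        return -EIO;
}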
527static struct fb_ops carminefb_ops = {
528 .owner = THIS_MODULE,
529 .fb_fillrect = cfb_fillrect,
530 .fb_copyarea = cfb_copyarea,
531 .fb_imageblit = cfb_imageblit,
532
533 .fb_check_var = carmine_check_var,
534 .fb_set_par = carmine_set_par,
535 .fb_setcolreg = carmine_setcolreg,
536};
537
538static int alloc_carmine_fb(void __iomem *regs, void __iomem *smem_base,
539 int smem_offset, struct device *device, struct fb_info **rinfo)
540{
541 int ret;
542 struct fb_info *info;
543 struct carmine_fb *par;
544
545 info = framebuffer_alloc(sizeof *par, device);
546 if (!info)
547 return -ENOMEM;
548
549 par = info->par;
550 par->display_reg = regs;
551 par->smem_offset = smem_offset;
552
553 info->screen_base = smem_base + smem_offset;
554 info->screen_size = CARMINE_DISPLAY_MEM;
555 info->fbops = &carminefb_ops;
556
557 info->fix = carminefb_fix;
558 info->pseudo_palette = par->pseudo_palette;
559 info->flags = FBINFO_DEFAULT;
560
561 ret = fb_alloc_cmap(&info->cmap, 256, 1);
562 if (ret < 0)
563 goto err_free_fb;
564
565 if (fb_mode >= ARRAY_SIZE(carmine_modedb))
566 fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
567
568 par->cur_mode = par->new_mode = ~0;
569
570 ret = fb_find_mode(&info->var, info, fb_mode_str, carmine_modedb,
571 ARRAY_SIZE(carmine_modedb),
572 &carmine_modedb[fb_mode], 32);
573 if (!ret || ret == 4) {
574 ret = -EINVAL;
575 goto err_dealloc_cmap;
576 }
577
578 fb_videomode_to_modelist(carmine_modedb, ARRAY_SIZE(carmine_modedb),
579 &info->modelist);
580
581 ret = register_framebuffer(info);
582 if (ret < 0)
583 goto err_dealloc_cmap;
584
585 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
586 info->fix.id);
587
588 *rinfo = info;
589 return 0;
590
591err_dealloc_cmap:
592 fb_dealloc_cmap(&info->cmap);
593err_free_fb:
594 framebuffer_release(info);
595 return ret;
596}
597
598static void cleanup_fb_device(struct fb_info *info)
599{
600 if (info) {
601 unregister_framebuffer(info);
602 fb_dealloc_cmap(&info->cmap);
603 framebuffer_release(info);
604 }
605}
606
607static int __devinit carminefb_probe(struct pci_dev *dev,
608 const struct pci_device_id *ent)
609{
610 struct carmine_hw *hw;
611 struct device *device = &dev->dev;
612 struct fb_info *info;
613 int ret;
614
615 ret = pci_enable_device(dev);
616 if (ret)
617 return ret;
618
619 ret = -ENOMEM;
620 hw = kzalloc(sizeof *hw, GFP_KERNEL);
621 if (!hw)
622 goto err_enable_pci;
623
624 carminefb_fix.mmio_start = pci_resource_start(dev, CARMINE_CONFIG_BAR);
625 carminefb_fix.mmio_len = pci_resource_len(dev, CARMINE_CONFIG_BAR);
626
627 if (!request_mem_region(carminefb_fix.mmio_start,
628 carminefb_fix.mmio_len,
629 "carminefb regbase")) {
630 printk(KERN_ERR "carminefb: Can't reserve regbase.\n");
631 ret = -EBUSY;
632 goto err_free_hw;
633 }
634 hw->v_regs = ioremap_nocache(carminefb_fix.mmio_start,
635 carminefb_fix.mmio_len);
636 if (!hw->v_regs) {
637 printk(KERN_ERR "carminefb: Can't remap %s register.\n",
638 carminefb_fix.id);
639 goto err_free_reg_mmio;
640 }
641
642 carminefb_fix.smem_start = pci_resource_start(dev, CARMINE_MEMORY_BAR);
643 carminefb_fix.smem_len = pci_resource_len(dev, CARMINE_MEMORY_BAR);
644
645 /* The memory area tends to be very large (256 MiB). Remap only what
646 * is required for the largest supported resolution to avoid
647 * remapping at run time.
648 */
649 if (carminefb_fix.smem_len > CARMINE_TOTAL_DIPLAY_MEM)
650 carminefb_fix.smem_len = CARMINE_TOTAL_DIPLAY_MEM;
651
652 else if (carminefb_fix.smem_len < CARMINE_TOTAL_DIPLAY_MEM) {
653 printk(KERN_ERR "carminefb: Memory bar is only %d bytes, %d "
654 "are required.", carminefb_fix.smem_len,
655 CARMINE_TOTAL_DIPLAY_MEM);
656 goto err_free_reg_mmio;
657 }
658
659 if (!request_mem_region(carminefb_fix.smem_start,
660 carminefb_fix.smem_len, "carminefb smem")) {
661 printk(KERN_ERR "carminefb: Can't reserve smem.\n");
662 goto err_unmap_vregs;
663 }
664
665 hw->screen_mem = ioremap_nocache(carminefb_fix.smem_start,
666 carminefb_fix.smem_len);
667 if (!hw->screen_mem) {
668 printk(KERN_ERR "carmine: Can't ioremap smem area.\n");
669 release_mem_region(carminefb_fix.smem_start,
670 carminefb_fix.smem_len);
671 goto err_reg_smem;
672 }
673
674 ret = init_hardware(hw);
675 if (ret)
676 goto err_unmap_screen;
677
678 info = NULL;
679 if (fb_displays & CARMINE_USE_DISPLAY0) {
680 ret = alloc_carmine_fb(hw->v_regs + CARMINE_DISP0_REG,
681 hw->screen_mem, CARMINE_DISPLAY_MEM * 0,
682 device, &info);
683 if (ret)
684 goto err_deinit_hw;
685 }
686
687 hw->fb[0] = info;
688
689 info = NULL;
690 if (fb_displays & CARMINE_USE_DISPLAY1) {
691 ret = alloc_carmine_fb(hw->v_regs + CARMINE_DISP1_REG,
692 hw->screen_mem, CARMINE_DISPLAY_MEM * 1,
693 device, &info);
694 if (ret)
695 goto err_cleanup_fb0;
696 }
697
698 hw->fb[1] = info;
699 info = NULL;
700
701 pci_set_drvdata(dev, hw);
702 return 0;
703
704err_cleanup_fb0:
705 cleanup_fb_device(hw->fb[0]);
706err_deinit_hw:
707 /* disable clock, etc */
708 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_CLOCK_ENABLE, 0);
709err_unmap_screen:
710 iounmap(hw->screen_mem);
711err_reg_smem:
712 release_mem_region(carminefb_fix.smem_start, carminefb_fix.smem_len);
713err_unmap_vregs:
714 iounmap(hw->v_regs);
715err_free_reg_mmio:
716 release_mem_region(carminefb_fix.mmio_start, carminefb_fix.mmio_len);
717err_free_hw:
718 kfree(hw);
719err_enable_pci:
720 pci_disable_device(dev);
721 return ret;
722}
723
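The error labels in carminefb_probe() follow the standard goto-unwind idiom: each label releases exactly one resource and control falls through the remaining labels in reverse order of acquisition. A self-contained sketch of the idiom, with hypothetical step/undo helpers standing in for the request/ioremap/release pairs:

static int example_step_a(void) { return 0; }   /* hypothetical: acquire A */
static int example_step_b(void) { return 0; }   /* hypothetical: acquire B */
static int example_step_c(void) { return 0; }   /* hypothetical: acquire C */
static void example_undo_a(void) { }            /* hypothetical: release A */
static void example_undo_b(void) { }            /* hypothetical: release B */

static int example_probe(void)
{
        int ret;

        ret = example_step_a();
        if (ret)
                return ret;

        ret = example_step_b();
        if (ret)
                goto err_undo_a;

        ret = example_step_c();
        if (ret)
                goto err_undo_b;

        return 0;

err_undo_b:
        example_undo_b();
err_undo_a:
        example_undo_a();
        return ret;
}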
724static void __devexit carminefb_remove(struct pci_dev *dev)
725{
726 struct carmine_hw *hw = pci_get_drvdata(dev);
727 struct fb_fix_screeninfo fix;
728 int i;
729
730 /* in case we use only fb1 and not fb0 */
731 if (hw->fb[0])
732 fix = hw->fb[0]->fix;
733 else
734 fix = hw->fb[1]->fix;
735
736 /* deactivate display(s) and switch clocks */
737 c_set_hw_reg(hw, CARMINE_DISP0_REG + CARMINE_DISP_REG_DCM1, 0);
738 c_set_hw_reg(hw, CARMINE_DISP1_REG + CARMINE_DISP_REG_DCM1, 0);
739 c_set_hw_reg(hw, CARMINE_CTL_REG + CARMINE_CTL_REG_CLOCK_ENABLE, 0);
740
741 for (i = 0; i < MAX_DISPLAY; i++)
742 cleanup_fb_device(hw->fb[i]);
743
744 iounmap(hw->screen_mem);
745 release_mem_region(fix.smem_start, fix.smem_len);
746 iounmap(hw->v_regs);
747 release_mem_region(fix.mmio_start, fix.mmio_len);
748
749 pci_set_drvdata(dev, NULL);
750 pci_disable_device(dev);
751 kfree(hw);
752}
753
754#define PCI_VENDOR_ID_FUJITU_LIMITED 0x10cf
755static struct pci_device_id carmine_devices[] __devinitdata = {
756{
757 PCI_DEVICE(PCI_VENDOR_ID_FUJITU_LIMITED, 0x202b)},
758 {0, 0, 0, 0, 0, 0, 0}
759};
760
761MODULE_DEVICE_TABLE(pci, carmine_devices);
762
763static struct pci_driver carmine_pci_driver = {
764 .name = "carminefb",
765 .id_table = carmine_devices,
766 .probe = carminefb_probe,
767 .remove = __devexit_p(carminefb_remove),
768};
769
770static int __init carminefb_init(void)
771{
772 if (!(fb_displays &
773 (CARMINE_USE_DISPLAY0 | CARMINE_USE_DISPLAY1))) {
774 printk(KERN_ERR "If you disable both displays than you don't "
775 "need the driver at all\n");
776 return -EINVAL;
777 }
778 return pci_register_driver(&carmine_pci_driver);
779}
780module_init(carminefb_init);
781
782static void __exit carminefb_cleanup(void)
783{
784 pci_unregister_driver(&carmine_pci_driver);
785}
786module_exit(carminefb_cleanup);
787
788MODULE_AUTHOR("Sebastian Siewior <bigeasy@linutronix.de>");
789MODULE_DESCRIPTION("Framebuffer driver for Fujitsu Carmine based devices");
790MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/carminefb.h b/drivers/video/carminefb.h
new file mode 100644
index 000000000000..05306de0c6b6
--- /dev/null
+++ b/drivers/video/carminefb.h
@@ -0,0 +1,64 @@
1#ifndef CARMINE_CARMINE_H
2#define CARMINE_CARMINE_H
3
4#define CARMINE_MEMORY_BAR 2
5#define CARMINE_CONFIG_BAR 3
6
7#define MAX_DISPLAY 2
8#define CARMINE_DISPLAY_MEM (800 * 600 * 4)
9#define CARMINE_TOTAL_DIPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY)
10
11#define CARMINE_USE_DISPLAY0 (1 << 0)
12#define CARMINE_USE_DISPLAY1 (1 << 1)
13
14/*
15 * These values work on the eval card. Custom boards may use different
16 * timings; the CONFIG_CARMINE_DRAM_CUSTOM block below is one example.
17 */
18
19/* DRAM initialization values */
20#ifdef CONFIG_FB_CARMINE_DRAM_EVAL
21
22#define CARMINE_DFLT_IP_CLOCK_ENABLE (0x03ff)
23#define CARMINE_DFLT_IP_DCTL_ADD (0x05c3)
24#define CARMINE_DFLT_IP_DCTL_MODE (0x0121)
25#define CARMINE_DFLT_IP_DCTL_EMODE (0x8000)
26#define CARMINE_DFLT_IP_DCTL_SET_TIME1 (0x4749)
27#define CARMINE_DFLT_IP_DCTL_SET_TIME2 (0x2a22)
28#define CARMINE_DFLT_IP_DCTL_REFRESH (0x0042)
29#define CARMINE_DFLT_IP_DCTL_STATES (0x0003)
30#define CARMINE_DFLT_IP_DCTL_RESERVE0 (0x0020)
31#define CARMINE_DFLT_IP_DCTL_FIFO_DEPTH (0x000f)
32#define CARMINE_DFLT_IP_DCTL_RESERVE2 (0x0000)
33#define CARMINE_DFLT_IP_DCTL_DDRIF1 (0x6646)
34#define CARMINE_DFLT_IP_DCTL_DDRIF2 (0x0055)
35#define CARMINE_DFLT_IP_DCTL_MODE_AFT_RST (0x0021)
36#define CARMINE_DFLT_IP_DCTL_STATES_AFT_RST (0x0002)
37#define CARMINE_DFLT_IP_DCTL_IO_CONT0 (0x0555)
38#define CARMINE_DFLT_IP_DCTL_IO_CONT1 (0x0555)
39#define CARMINE_DCTL_DLL_RESET (1)
40#endif
41
42#ifdef CONFIG_CARMINE_DRAM_CUSTOM
43
44#define CARMINE_DFLT_IP_CLOCK_ENABLE (0x03ff)
45#define CARMINE_DFLT_IP_DCTL_ADD (0x03b2)
46#define CARMINE_DFLT_IP_DCTL_MODE (0x0161)
47#define CARMINE_DFLT_IP_DCTL_EMODE (0x8000)
48#define CARMINE_DFLT_IP_DCTL_SET_TIME1 (0x2628)
49#define CARMINE_DFLT_IP_DCTL_SET_TIME2 (0x1a09)
50#define CARMINE_DFLT_IP_DCTL_REFRESH (0x00fe)
51#define CARMINE_DFLT_IP_DCTL_STATES (0x0003)
52#define CARMINE_DFLT_IP_DCTL_RESERVE0 (0x0020)
53#define CARMINE_DFLT_IP_DCTL_FIFO_DEPTH (0x000f)
54#define CARMINE_DFLT_IP_DCTL_RESERVE2 (0x0000)
55#define CARMINE_DFLT_IP_DCTL_DDRIF1 (0x0646)
56#define CARMINE_DFLT_IP_DCTL_DDRIF2 (0x55aa)
57#define CARMINE_DFLT_IP_DCTL_MODE_AFT_RST (0x0061)
58#define CARMINE_DFLT_IP_DCTL_STATES_AFT_RST (0x0002)
59#define CARMINE_DFLT_IP_DCTL_IO_CONT0 (0x0555)
60#define CARMINE_DFLT_IP_DCTL_IO_CONT1 (0x0555)
61#define CARMINE_DCTL_DLL_RESET (1)
62#endif
63
64#endif
diff --git a/drivers/video/carminefb_regs.h b/drivers/video/carminefb_regs.h
new file mode 100644
index 000000000000..045215600b73
--- /dev/null
+++ b/drivers/video/carminefb_regs.h
@@ -0,0 +1,159 @@
1#ifndef _CARMINEFB_REGS_H
2#define _CARMINEFB_REGS_H
3
4#define CARMINE_OVERLAY_EXT_MODE (0x00000002)
5#define CARMINE_GRAPH_REG (0x00000000)
6#define CARMINE_DISP0_REG (0x00100000)
7#define CARMINE_DISP1_REG (0x00140000)
8#define CARMINE_WB_REG (0x00180000)
9#define CARMINE_DCTL_REG (0x00300000)
10#define CARMINE_CTL_REG (0x00400000)
11#define CARMINE_WINDOW_MODE (0x00000001)
12#define CARMINE_EXTEND_MODE (CARMINE_WINDOW_MODE | \
13 CARMINE_OVERLAY_EXT_MODE)
14#define CARMINE_L0E (1 << 16)
15#define CARMINE_L2E (1 << 18)
16#define CARMINE_DEN (1 << 31)
17
18#define CARMINE_EXT_CMODE_DIRECT24_RGBA (0xC0000000)
19#define CARMINE_DCTL_REG_MODE_ADD (0x00)
20#define CARMINE_DCTL_REG_SETTIME1_EMODE (0x04)
21#define CARMINE_DCTL_REG_REFRESH_SETTIME2 (0x08)
22#define CARMINE_DCTL_REG_RSV0_STATES (0x0C)
23#define CARMINE_DCTL_REG_RSV2_RSV1 (0x10)
24#define CARMINE_DCTL_REG_DDRIF2_DDRIF1 (0x14)
25#define CARMINE_DCTL_REG_IOCONT1_IOCONT0 (0x24)
26#define CARMINE_DCTL_REG_STATES_MASK (0x000F)
27#define CARMINE_DCTL_INIT_WAIT_INTERVAL (1)
28#define CARMINE_DCTL_INIT_WAIT_LIMIT (5000)
29#define CARMINE_WB_REG_WBM_DEFAULT (0x0001c020)
30#define CARMINE_DISP_REG_L0RM (0x1880)
31#define CARMINE_DISP_REG_L0PX (0x1884)
32#define CARMINE_DISP_REG_L0PY (0x1888)
33#define CARMINE_DISP_REG_L2RM (0x18A0)
34#define CARMINE_DISP_REG_L2PX (0x18A4)
35#define CARMINE_DISP_REG_L2PY (0x18A8)
36#define CARMINE_DISP_REG_L3RM (0x18B0)
37#define CARMINE_DISP_REG_L3PX (0x18B4)
38#define CARMINE_DISP_REG_L3PY (0x18B8)
39#define CARMINE_DISP_REG_L4RM (0x18C0)
40#define CARMINE_DISP_REG_L4PX (0x18C4)
41#define CARMINE_DISP_REG_L4PY (0x18C8)
42#define CARMINE_DISP_REG_L5RM (0x18D0)
43#define CARMINE_DISP_REG_L5PX (0x18D4)
44#define CARMINE_DISP_REG_L5PY (0x18D8)
45#define CARMINE_DISP_REG_L6RM (0x1924)
46#define CARMINE_DISP_REG_L6PX (0x1928)
47#define CARMINE_DISP_REG_L6PY (0x192C)
48#define CARMINE_DISP_REG_L7RM (0x1964)
49#define CARMINE_DISP_REG_L7PX (0x1968)
50#define CARMINE_DISP_REG_L7PY (0x196C)
51#define CARMINE_WB_REG_WBM (0x0004)
52#define CARMINE_DISP_HTP_SHIFT (16)
53#define CARMINE_DISP_HDB_SHIFT (16)
54#define CARMINE_DISP_HSW_SHIFT (16)
55#define CARMINE_DISP_VSW_SHIFT (24)
56#define CARMINE_DISP_VTR_SHIFT (16)
57#define CARMINE_DISP_VDP_SHIFT (16)
58#define CARMINE_CURSOR_CUTZ_MASK (0x00000100)
59#define CARMINE_CURSOR0_PRIORITY_MASK (0x00010000)
60#define CARMINE_CURSOR1_PRIORITY_MASK (0x00020000)
61#define CARMINE_DISP_WIDTH_SHIFT (16)
62#define CARMINE_DISP_WIN_H_SHIFT (16)
63#define CARMINE_DISP_REG_H_TOTAL (0x0004)
64#define CARMINE_DISP_REG_H_PERIOD (0x0008)
65#define CARMINE_DISP_REG_V_H_W_H_POS (0x000C)
66#define CARMINE_DISP_REG_V_TOTAL (0x0010)
67#define CARMINE_DISP_REG_V_PERIOD_POS (0x0014)
68#define CARMINE_DISP_REG_L0_MODE_W_H (0x0020)
69#define CARMINE_DISP_REG_L0_ORG_ADR (0x0024)
70#define CARMINE_DISP_REG_L0_DISP_ADR (0x0028)
71#define CARMINE_DISP_REG_L0_DISP_POS (0x002C)
72#define CARMINE_DISP_REG_L1_WIDTH (0x0030)
73#define CARMINE_DISP_REG_L1_ORG_ADR (0x0034)
74#define CARMINE_DISP_REG_L2_MODE_W_H (0x0040)
75#define CARMINE_DISP_REG_L2_ORG_ADR1 (0x0044)
76#define CARMINE_DISP_REG_L2_DISP_ADR1 (0x0048)
77#define CARMINE_DISP_REG_L2_DISP_POS (0x0054)
78#define CARMINE_DISP_REG_L3_MODE_W_H (0x0058)
79#define CARMINE_DISP_REG_L3_ORG_ADR1 (0x005C)
80#define CARMINE_DISP_REG_L3_DISP_ADR1 (0x0060)
81#define CARMINE_DISP_REG_L3_DISP_POS (0x006C)
82#define CARMINE_DISP_REG_L4_MODE_W_H (0x0070)
83#define CARMINE_DISP_REG_L4_ORG_ADR1 (0x0074)
84#define CARMINE_DISP_REG_L4_DISP_ADR1 (0x0078)
85#define CARMINE_DISP_REG_L4_DISP_POS (0x0084)
86#define CARMINE_DISP_REG_L5_MODE_W_H (0x0088)
87#define CARMINE_DISP_REG_L5_ORG_ADR1 (0x008C)
88#define CARMINE_DISP_REG_L5_DISP_ADR1 (0x0090)
89#define CARMINE_DISP_REG_L5_DISP_POS (0x009C)
90#define CARMINE_DISP_REG_CURSOR_MODE (0x00A0)
91#define CARMINE_DISP_REG_CUR1_POS (0x00A8)
92#define CARMINE_DISP_REG_CUR2_POS (0x00B0)
93#define CARMINE_DISP_REG_C_TRANS (0x00BC)
94#define CARMINE_DISP_REG_MLMR_TRANS (0x00C0)
95#define CARMINE_DISP_REG_L0_EXT_MODE (0x0110)
96#define CARMINE_DISP_REG_L0_WIN_POS (0x0114)
97#define CARMINE_DISP_REG_L0_WIN_SIZE (0x0118)
98#define CARMINE_DISP_REG_L1_EXT_MODE (0x0120)
99#define CARMINE_DISP_REG_L1_WIN_POS (0x0124)
100#define CARMINE_DISP_REG_L1_WIN_SIZE (0x0128)
101#define CARMINE_DISP_REG_L2_EXT_MODE (0x0130)
102#define CARMINE_DISP_REG_L2_WIN_POS (0x0134)
103#define CARMINE_DISP_REG_L2_WIN_SIZE (0x0138)
104#define CARMINE_DISP_REG_L3_EXT_MODE (0x0140)
105#define CARMINE_DISP_REG_L3_WIN_POS (0x0144)
106#define CARMINE_DISP_REG_L3_WIN_SIZE (0x0148)
107#define CARMINE_DISP_REG_L4_EXT_MODE (0x0150)
108#define CARMINE_DISP_REG_L4_WIN_POS (0x0154)
109#define CARMINE_DISP_REG_L4_WIN_SIZE (0x0158)
110#define CARMINE_DISP_REG_L5_EXT_MODE (0x0160)
111#define CARMINE_DISP_REG_L5_WIN_POS (0x0164)
112#define CARMINE_DISP_REG_L5_WIN_SIZE (0x0168)
113#define CARMINE_DISP_REG_L6_EXT_MODE (0x1918)
114#define CARMINE_DISP_REG_L6_WIN_POS (0x191c)
115#define CARMINE_DISP_REG_L6_WIN_SIZE (0x1920)
116#define CARMINE_DISP_REG_L7_EXT_MODE (0x1958)
117#define CARMINE_DISP_REG_L7_WIN_POS (0x195c)
118#define CARMINE_DISP_REG_L7_WIN_SIZE (0x1960)
119#define CARMINE_DISP_REG_BLEND_MODE_L0 (0x00B4)
120#define CARMINE_DISP_REG_BLEND_MODE_L1 (0x0188)
121#define CARMINE_DISP_REG_BLEND_MODE_L2 (0x018C)
122#define CARMINE_DISP_REG_BLEND_MODE_L3 (0x0190)
123#define CARMINE_DISP_REG_BLEND_MODE_L4 (0x0194)
124#define CARMINE_DISP_REG_BLEND_MODE_L5 (0x0198)
125#define CARMINE_DISP_REG_BLEND_MODE_L6 (0x1990)
126#define CARMINE_DISP_REG_BLEND_MODE_L7 (0x1994)
127#define CARMINE_DISP_REG_L0_TRANS (0x01A0)
128#define CARMINE_DISP_REG_L1_TRANS (0x01A4)
129#define CARMINE_DISP_REG_L2_TRANS (0x01A8)
130#define CARMINE_DISP_REG_L3_TRANS (0x01AC)
131#define CARMINE_DISP_REG_L4_TRANS (0x01B0)
132#define CARMINE_DISP_REG_L5_TRANS (0x01B4)
133#define CARMINE_DISP_REG_L6_TRANS (0x1998)
134#define CARMINE_DISP_REG_L7_TRANS (0x199c)
135#define CARMINE_EXTEND_MODE_MASK (0x00000003)
136#define CARMINE_DISP_DCM_MASK (0x0000FFFF)
137#define CARMINE_DISP_REG_DCM1 (0x0100)
138#define CARMINE_DISP_WIDTH_UNIT (64)
139#define CARMINE_DISP_REG_L6_MODE_W_H (0x1900)
140#define CARMINE_DISP_REG_L6_ORG_ADR1 (0x1904)
141#define CARMINE_DISP_REG_L6_DISP_ADR0 (0x1908)
142#define CARMINE_DISP_REG_L6_DISP_POS (0x1914)
143#define CARMINE_DISP_REG_L7_MODE_W_H (0x1940)
144#define CARMINE_DISP_REG_L7_ORG_ADR1 (0x1944)
145#define CARMINE_DISP_REG_L7_DISP_ADR0 (0x1948)
146#define CARMINE_DISP_REG_L7_DISP_POS (0x1954)
147#define CARMINE_CTL_REG_CLOCK_ENABLE (0x000C)
148#define CARMINE_CTL_REG_SOFTWARE_RESET (0x0010)
149#define CARMINE_CTL_REG_IST_MASK_ALL (0x07FFFFFF)
150#define CARMINE_GRAPH_REG_VRINTM (0x00028064)
151#define CARMINE_GRAPH_REG_VRERRM (0x0002806C)
152#define CARMINE_GRAPH_REG_DC_OFFSET_PX (0x0004005C)
153#define CARMINE_GRAPH_REG_DC_OFFSET_PY (0x00040060)
154#define CARMINE_GRAPH_REG_DC_OFFSET_LX (0x00040064)
155#define CARMINE_GRAPH_REG_DC_OFFSET_LY (0x00040068)
156#define CARMINE_GRAPH_REG_DC_OFFSET_TX (0x0004006C)
157#define CARMINE_GRAPH_REG_DC_OFFSET_TY (0x00040070)
158
159#endif
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
new file mode 100644
index 000000000000..7bad24ed04ef
--- /dev/null
+++ b/drivers/video/cobalt_lcdfb.c
@@ -0,0 +1,371 @@
1/*
2 * Cobalt server LCD frame buffer driver.
3 *
4 * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include <linux/delay.h>
21#include <linux/fb.h>
22#include <linux/init.h>
23#include <linux/io.h>
24#include <linux/ioport.h>
25#include <linux/uaccess.h>
26#include <linux/platform_device.h>
27
28/*
29 * Cursor position address
30 * \X 0 1 2 ... 14 15
31 * Y+----+----+----+---+----+----+
32 * 0|0x00|0x01|0x02|...|0x0e|0x0f|
33 * +----+----+----+---+----+----+
34 * 1|0x40|0x41|0x42|...|0x4e|0x4f|
35 * +----+----+----+---+----+----+
36 */
37#define LCD_DATA_REG_OFFSET 0x10
38#define LCD_XRES_MAX 16
39#define LCD_YRES_MAX 2
40#define LCD_CHARS_MAX 32
41
42#define LCD_CLEAR 0x01
43#define LCD_CURSOR_MOVE_HOME 0x02
44#define LCD_RESET 0x06
45#define LCD_OFF 0x08
46#define LCD_CURSOR_OFF 0x0c
47#define LCD_CURSOR_BLINK_OFF 0x0e
48#define LCD_CURSOR_ON 0x0f
49#define LCD_ON LCD_CURSOR_ON
50#define LCD_CURSOR_MOVE_LEFT 0x10
51#define LCD_CURSOR_MOVE_RIGHT 0x14
52#define LCD_DISPLAY_LEFT 0x18
53#define LCD_DISPLAY_RIGHT 0x1c
54#define LCD_PRERESET 0x3f /* execute 4 times continuously */
55#define LCD_BUSY 0x80
56
57#define LCD_GRAPHIC_MODE 0x40
58#define LCD_TEXT_MODE 0x80
59#define LCD_CUR_POS_MASK 0x7f
60
61#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
62#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
63
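A minimal sketch (not driver code; the helper name is hypothetical) of how the address map in the comment above combines with LCD_TEXT_POS() to address a character cell:

/* Row 0 occupies addresses 0x00-0x0f, row 1 occupies 0x40-0x4f, so the
 * controller address of column x in row y is (y ? 0x40 : 0x00) + x. */
static u8 example_text_pos(unsigned int x, unsigned int y)
{
        return LCD_TEXT_POS((y ? 0x40 : 0x00) + x);
}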
64static inline void lcd_write_control(struct fb_info *info, u8 control)
65{
66 writel((u32)control << 24, info->screen_base);
67}
68
69static inline u8 lcd_read_control(struct fb_info *info)
70{
71 return readl(info->screen_base) >> 24;
72}
73
74static inline void lcd_write_data(struct fb_info *info, u8 data)
75{
76 writel((u32)data << 24, info->screen_base + LCD_DATA_REG_OFFSET);
77}
78
79static inline u8 lcd_read_data(struct fb_info *info)
80{
81 return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
82}
83
84static int lcd_busy_wait(struct fb_info *info)
85{
86 u8 val = 0;
87 int timeout = 10, retval = 0;
88
89 do {
90 val = lcd_read_control(info);
91 val &= LCD_BUSY;
92 if (val != LCD_BUSY)
93 break;
94
95 if (msleep_interruptible(1))
96 return -EINTR;
97
98 timeout--;
99 } while (timeout);
100
101 if (val == LCD_BUSY)
102 retval = -EBUSY;
103
104 return retval;
105}
106
107static void lcd_clear(struct fb_info *info)
108{
109 int i;
110
111 for (i = 0; i < 4; i++) {
112 udelay(150);
113
114 lcd_write_control(info, LCD_PRERESET);
115 }
116
117 udelay(150);
118
119 lcd_write_control(info, LCD_CLEAR);
120
121 udelay(150);
122
123 lcd_write_control(info, LCD_RESET);
124}
125
126static struct fb_fix_screeninfo cobalt_lcdfb_fix __initdata = {
127 .id = "cobalt-lcd",
128 .type = FB_TYPE_TEXT,
129 .type_aux = FB_AUX_TEXT_MDA,
130 .visual = FB_VISUAL_MONO01,
131 .line_length = LCD_XRES_MAX,
132 .accel = FB_ACCEL_NONE,
133};
134
135static ssize_t cobalt_lcdfb_read(struct fb_info *info, char __user *buf,
136 size_t count, loff_t *ppos)
137{
138 char src[LCD_CHARS_MAX];
139 unsigned long pos;
140 int len, retval = 0;
141
142 pos = *ppos;
143 if (pos >= LCD_CHARS_MAX || count == 0)
144 return 0;
145
146 if (count > LCD_CHARS_MAX)
147 count = LCD_CHARS_MAX;
148
149 if (pos + count > LCD_CHARS_MAX)
150 count = LCD_CHARS_MAX - pos;
151
152 for (len = 0; len < count; len++) {
153 retval = lcd_busy_wait(info);
154 if (retval < 0)
155 break;
156
157 lcd_write_control(info, LCD_TEXT_POS(pos));
158
159 retval = lcd_busy_wait(info);
160 if (retval < 0)
161 break;
162
163 src[len] = lcd_read_data(info);
164 if (pos == 0x0f)
165 pos = 0x40;
166 else
167 pos++;
168 }
169
170 if (retval < 0 && signal_pending(current))
171 return -ERESTARTSYS;
172
173 if (copy_to_user(buf, src, len))
174 return -EFAULT;
175
176 *ppos += len;
177
178 return len;
179}
180
181static ssize_t cobalt_lcdfb_write(struct fb_info *info, const char __user *buf,
182 size_t count, loff_t *ppos)
183{
184 char dst[LCD_CHARS_MAX];
185 unsigned long pos;
186 int len, retval = 0;
187
188 pos = *ppos;
189 if (pos >= LCD_CHARS_MAX || count == 0)
190 return 0;
191
192 if (count > LCD_CHARS_MAX)
193 count = LCD_CHARS_MAX;
194
195 if (pos + count > LCD_CHARS_MAX)
196 count = LCD_CHARS_MAX - pos;
197
198 if (copy_from_user(dst, buf, count))
199 return -EFAULT;
200
201 for (len = 0; len < count; len++) {
202 retval = lcd_busy_wait(info);
203 if (retval < 0)
204 break;
205
206 lcd_write_control(info, LCD_TEXT_POS(pos));
207
208 retval = lcd_busy_wait(info);
209 if (retval < 0)
210 break;
211
212 lcd_write_data(info, dst[len]);
213 if (pos == 0x0f)
214 pos = 0x40;
215 else
216 pos++;
217 }
218
219 if (retval < 0 && signal_pending(current))
220 return -ERESTARTSYS;
221
222 *ppos += len;
223
224 return len;
225}
226
227static int cobalt_lcdfb_blank(int blank_mode, struct fb_info *info)
228{
229 int retval;
230
231 retval = lcd_busy_wait(info);
232 if (retval < 0)
233 return retval;
234
235 switch (blank_mode) {
236 case FB_BLANK_UNBLANK:
237 lcd_write_control(info, LCD_ON);
238 break;
239 default:
240 lcd_write_control(info, LCD_OFF);
241 break;
242 }
243
244 return 0;
245}
246
247static int cobalt_lcdfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
248{
249 u32 x, y;
250 int retval;
251
252 switch (cursor->set) {
253 case FB_CUR_SETPOS:
254 x = cursor->image.dx;
255 y = cursor->image.dy;
256 if (x >= LCD_XRES_MAX || y >= LCD_YRES_MAX)
257 return -EINVAL;
258
259 retval = lcd_busy_wait(info);
260 if (retval < 0)
261 return retval;
262
263 lcd_write_control(info,
264 LCD_TEXT_POS(info->fix.line_length * y + x));
265 break;
266 default:
267 return -EINVAL;
268 }
269
270 retval = lcd_busy_wait(info);
271 if (retval < 0)
272 return retval;
273
274 if (cursor->enable)
275 lcd_write_control(info, LCD_CURSOR_ON);
276 else
277 lcd_write_control(info, LCD_CURSOR_OFF);
278
279 return 0;
280}
281
282static struct fb_ops cobalt_lcd_fbops = {
283 .owner = THIS_MODULE,
284 .fb_read = cobalt_lcdfb_read,
285 .fb_write = cobalt_lcdfb_write,
286 .fb_blank = cobalt_lcdfb_blank,
287 .fb_cursor = cobalt_lcdfb_cursor,
288};
289
290static int __init cobalt_lcdfb_probe(struct platform_device *dev)
291{
292 struct fb_info *info;
293 struct resource *res;
294 int retval;
295
296 info = framebuffer_alloc(0, &dev->dev);
297 if (!info)
298 return -ENOMEM;
299
300 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
301 if (!res) {
302 framebuffer_release(info);
303 return -EBUSY;
304 }
305
306 info->screen_size = res->end - res->start + 1;
307 info->screen_base = ioremap(res->start, info->screen_size);
308 info->fbops = &cobalt_lcd_fbops;
309 info->fix = cobalt_lcdfb_fix;
310 info->fix.smem_start = res->start;
311 info->fix.smem_len = info->screen_size;
312 info->pseudo_palette = NULL;
313 info->par = NULL;
314 info->flags = FBINFO_DEFAULT;
315
316 retval = register_framebuffer(info);
317 if (retval < 0) {
318 iounmap(info->screen_base);
319 framebuffer_release(info);
320 return retval;
321 }
322
323 platform_set_drvdata(dev, info);
324
325 lcd_clear(info);
326
327 printk(KERN_INFO "fb%d: Cobalt server LCD frame buffer device\n",
328 info->node);
329
330 return 0;
331}
332
333static int __devexit cobalt_lcdfb_remove(struct platform_device *dev)
334{
335 struct fb_info *info;
336
337 info = platform_get_drvdata(dev);
338 if (info) {
339 iounmap(info->screen_base);
340 unregister_framebuffer(info);
341 framebuffer_release(info);
342 }
343
344 return 0;
345}
346
347static struct platform_driver cobalt_lcdfb_driver = {
348 .probe = cobalt_lcdfb_probe,
349 .remove = __devexit_p(cobalt_lcdfb_remove),
350 .driver = {
351 .name = "cobalt-lcd",
352 .owner = THIS_MODULE,
353 },
354};
355
356static int __init cobalt_lcdfb_init(void)
357{
358 return platform_driver_register(&cobalt_lcdfb_driver);
359}
360
361static void __exit cobalt_lcdfb_exit(void)
362{
363 platform_driver_unregister(&cobalt_lcdfb_driver);
364}
365
366module_init(cobalt_lcdfb_init);
367module_exit(cobalt_lcdfb_exit);
368
369MODULE_LICENSE("GPL v2");
370MODULE_AUTHOR("Yoichi Yuasa");
371MODULE_DESCRIPTION("Cobalt server LCD frame buffer driver");
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 4be3b46c069b..3ccfa76d9b2a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -107,9 +107,7 @@ static struct display fb_display[MAX_NR_CONSOLES];
107 107
108static signed char con2fb_map[MAX_NR_CONSOLES]; 108static signed char con2fb_map[MAX_NR_CONSOLES];
109static signed char con2fb_map_boot[MAX_NR_CONSOLES]; 109static signed char con2fb_map_boot[MAX_NR_CONSOLES];
110#ifndef MODULE 110
111static int logo_height;
112#endif
113static int logo_lines; 111static int logo_lines;
114/* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO 112/* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
115 enums. */ 113 enums. */
@@ -607,6 +605,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
607 struct fbcon_ops *ops = info->fbcon_par; 605 struct fbcon_ops *ops = info->fbcon_par;
608 int cnt, erase = vc->vc_video_erase_char, step; 606 int cnt, erase = vc->vc_video_erase_char, step;
609 unsigned short *save = NULL, *r, *q; 607 unsigned short *save = NULL, *r, *q;
608 int logo_height;
610 609
611 if (info->flags & FBINFO_MODULE) { 610 if (info->flags & FBINFO_MODULE) {
612 logo_shown = FBCON_LOGO_DONTSHOW; 611 logo_shown = FBCON_LOGO_DONTSHOW;
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 0135e0395456..de1b1365279b 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -92,7 +92,7 @@ struct fbcon_ops {
92#define attr_fgcol(fgshift,s) \ 92#define attr_fgcol(fgshift,s) \
93 (((s) >> (fgshift)) & 0x0f) 93 (((s) >> (fgshift)) & 0x0f)
94#define attr_bgcol(bgshift,s) \ 94#define attr_bgcol(bgshift,s) \
95 (((s) >> (bgshift)) & 0x0f) 95 (((s) >> (bgshift)) & 0x07)
96 96
97/* Monochrome */ 97/* Monochrome */
98#define attr_bold(s) \ 98#define attr_bold(s) \
@@ -146,10 +146,8 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
146 return is_fg ? fg : bg; 146 return is_fg ? fg : bg;
147} 147}
148 148
149#define attr_bgcol_ec(bgshift,vc,info) \ 149#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
150 attr_col_ec(bgshift,vc,info,0); 150#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
151#define attr_fgcol_ec(fgshift,vc,info) \
152 attr_col_ec(fgshift,vc,info,1);
153 151
154/* Font */ 152/* Font */
155#define REFCOUNT(fd) (((int *)(fd))[-1]) 153#define REFCOUNT(fd) (((int *)(fd))[-1])
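The hunk above drops the trailing semicolons from attr_bgcol_ec()/attr_fgcol_ec(); with the semicolon inside the macro body, any use of the macro in the middle of an expression expands into a syntax error. A contrived, self-contained illustration (not kernel code):

#define BAD_INC(x)      ((x) + 1);      /* the ';' becomes part of every expansion */
#define GOOD_INC(x)     ((x) + 1)

static int demo(int v)
{
        /* int a = BAD_INC(v) + 2;      expands to ((v) + 1); + 2;  -> does not compile */
        int b = GOOD_INC(v) + 2;        /* expands to ((v) + 1) + 2 */

        return b;
}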
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index 38a296bbdfc9..9901064199bd 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -71,13 +71,15 @@ static char *mda_type_name;
71 71
72/* console information */ 72/* console information */
73 73
74static int mda_first_vc = 1; 74static int mda_first_vc = 13;
75static int mda_last_vc = 16; 75static int mda_last_vc = 16;
76 76
77static struct vc_data *mda_display_fg = NULL; 77static struct vc_data *mda_display_fg = NULL;
78 78
79module_param(mda_first_vc, int, 0); 79module_param(mda_first_vc, int, 0);
80MODULE_PARM_DESC(mda_first_vc, "First virtual console. Default: 13");
80module_param(mda_last_vc, int, 0); 81module_param(mda_last_vc, int, 0);
82MODULE_PARM_DESC(mda_last_vc, "Last virtual console. Default: 16");
81 83
82/* MDA register values 84/* MDA register values
83 */ 85 */
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 1cd5071e5362..5d84b3431098 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -35,6 +35,7 @@
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/efi.h> 36#include <linux/efi.h>
37#include <linux/fb.h> 37#include <linux/fb.h>
38#include <linux/major.h>
38 39
39#include <asm/fb.h> 40#include <asm/fb.h>
40 41
@@ -848,9 +849,8 @@ int
848fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var) 849fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
849{ 850{
850 struct fb_fix_screeninfo *fix = &info->fix; 851 struct fb_fix_screeninfo *fix = &info->fix;
851 int xoffset = var->xoffset; 852 unsigned int yres = info->var.yres;
852 int yoffset = var->yoffset; 853 int err = 0;
853 int err = 0, yres = info->var.yres;
854 854
855 if (var->yoffset > 0) { 855 if (var->yoffset > 0) {
856 if (var->vmode & FB_VMODE_YWRAP) { 856 if (var->vmode & FB_VMODE_YWRAP) {
@@ -866,8 +866,8 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
866 (var->xoffset % fix->xpanstep))) 866 (var->xoffset % fix->xpanstep)))
867 err = -EINVAL; 867 err = -EINVAL;
868 868
869 if (err || !info->fbops->fb_pan_display || xoffset < 0 || 869 if (err || !info->fbops->fb_pan_display ||
870 yoffset < 0 || var->yoffset + yres > info->var.yres_virtual || 870 var->yoffset + yres > info->var.yres_virtual ||
871 var->xoffset + info->var.xres > info->var.xres_virtual) 871 var->xoffset + info->var.xres > info->var.xres_virtual)
872 return -EINVAL; 872 return -EINVAL;
873 873
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 052e18058498..6a0aa180c266 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -879,7 +879,7 @@ int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
879 if (edid_is_timing_block(block)) { 879 if (edid_is_timing_block(block)) {
880 var->xres = var->xres_virtual = H_ACTIVE; 880 var->xres = var->xres_virtual = H_ACTIVE;
881 var->yres = var->yres_virtual = V_ACTIVE; 881 var->yres = var->yres_virtual = V_ACTIVE;
882 var->height = var->width = -1; 882 var->height = var->width = 0;
883 var->right_margin = H_SYNC_OFFSET; 883 var->right_margin = H_SYNC_OFFSET;
884 var->left_margin = (H_ACTIVE + H_BLANKING) - 884 var->left_margin = (H_ACTIVE + H_BLANKING) -
885 (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH); 885 (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 09d7e22c6fef..9cd36c223d33 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -279,58 +279,42 @@ static struct diu_hw dr = {
279 279
280static struct diu_pool pool; 280static struct diu_pool pool;
281 281
282/* To allocate memory for framebuffer. First try __get_free_pages(). If it 282/**
283 * fails, try rh_alloc. The reason is __get_free_pages() cannot allocate 283 * fsl_diu_alloc - allocate memory for the DIU
284 * very large memory (more than 4MB). We don't want to allocate all memory 284 * @size: number of bytes to allocate
285 * in rheap since small memory allocation/deallocation will fragment the 285 * @param: returned physical address of memory
286 * rheap and make the furture large allocation fail. 286 *
287 * This function allocates a physically-contiguous block of memory.
287 */ 288 */
288 289static void *fsl_diu_alloc(size_t size, phys_addr_t *phys)
289static void *fsl_diu_alloc(unsigned long size, phys_addr_t *phys)
290{ 290{
291 void *virt; 291 void *virt;
292 292
293 pr_debug("size=%lu\n", size); 293 pr_debug("size=%zu\n", size);
294 294
295 virt = (void *)__get_free_pages(GFP_DMA | __GFP_ZERO, get_order(size)); 295 virt = alloc_pages_exact(size, GFP_DMA | __GFP_ZERO);
296 if (virt) { 296 if (virt) {
297 *phys = virt_to_phys(virt); 297 *phys = virt_to_phys(virt);
298 pr_debug("virt %p, phys=%llx\n", virt, (uint64_t) *phys); 298 pr_debug("virt=%p phys=%llx\n", virt,
299 return virt; 299 (unsigned long long)*phys);
300 }
301 if (!diu_ops.diu_mem) {
302 printk(KERN_INFO "%s: no diu_mem."
303 " To reserve more memory, put 'diufb=15M' "
304 "in the command line\n", __func__);
305 return NULL;
306 }
307
308 virt = (void *)rh_alloc(&diu_ops.diu_rh_info, size, "DIU");
309 if (virt) {
310 *phys = virt_to_bus(virt);
311 memset(virt, 0, size);
312 } 300 }
313 301
314 pr_debug("rh virt=%p phys=%llx\n", virt, (unsigned long long)*phys);
315
316 return virt; 302 return virt;
317} 303}
318 304
319static void fsl_diu_free(void *p, unsigned long size) 305/**
306 * fsl_diu_free - release DIU memory
307 * @virt: pointer returned by fsl_diu_alloc()
308 * @size: number of bytes allocated by fsl_diu_alloc()
309 *
310 * This function releases memory allocated by fsl_diu_alloc().
311 */
312static void fsl_diu_free(void *virt, size_t size)
320{ 313{
321 pr_debug("p=%p size=%lu\n", p, size); 314 pr_debug("virt=%p size=%zu\n", virt, size);
322 315
323 if (!p) 316 if (virt && size)
324 return; 317 free_pages_exact(virt, size);
325
326 if ((p >= diu_ops.diu_mem) &&
327 (p < (diu_ops.diu_mem + diu_ops.diu_size))) {
328 pr_debug("rh\n");
329 rh_free(&diu_ops.diu_rh_info, (unsigned long) p);
330 } else {
331 pr_debug("dma\n");
332 free_pages((unsigned long)p, get_order(size));
333 }
334} 318}
335 319
336static int fsl_diu_enable_panel(struct fb_info *info) 320static int fsl_diu_enable_panel(struct fb_info *info)
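The replacement of __get_free_pages()/rh_alloc() with alloc_pages_exact() above avoids rounding large framebuffer allocations up to a power-of-two number of pages. A minimal sketch of the resulting allocate/free pairing (the example_* names are hypothetical, not driver code):

#include <linux/io.h>
#include <linux/mm.h>

/* Allocate a zeroed, physically contiguous buffer suitable for DMA and
 * report its physical address; release it with the matching size. */
static void *example_alloc(size_t size, phys_addr_t *phys)
{
        void *virt = alloc_pages_exact(size, GFP_DMA | __GFP_ZERO);

        if (virt)
                *phys = virt_to_phys(virt);
        return virt;
}

static void example_free(void *virt, size_t size)
{
        if (virt && size)
                free_pages_exact(virt, size);
}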
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index 3b9416f4ee20..6a51448fd3f7 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -51,8 +51,6 @@ static inline unsigned int lx_get_pitch(unsigned int xres, int bpp)
51} 51}
52 52
53void lx_set_mode(struct fb_info *); 53void lx_set_mode(struct fb_info *);
54void lx_get_gamma(struct fb_info *, unsigned int *, int);
55void lx_set_gamma(struct fb_info *, unsigned int *, int);
56unsigned int lx_framebuffer_size(void); 54unsigned int lx_framebuffer_size(void);
57int lx_blank_display(struct fb_info *, int); 55int lx_blank_display(struct fb_info *, int);
58void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int, 56void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int,
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c
index aaef9165ec9b..b1cd49c99356 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/geode/lxfb_ops.c
@@ -517,25 +517,25 @@ void lx_set_palette_reg(struct fb_info *info, unsigned regno,
517int lx_blank_display(struct fb_info *info, int blank_mode) 517int lx_blank_display(struct fb_info *info, int blank_mode)
518{ 518{
519 struct lxfb_par *par = info->par; 519 struct lxfb_par *par = info->par;
520 u32 dcfg, fp_pm; 520 u32 dcfg, misc, fp_pm;
521 int blank, hsync, vsync, crt; 521 int blank, hsync, vsync;
522 522
523 /* CRT power saving modes. */ 523 /* CRT power saving modes. */
524 switch (blank_mode) { 524 switch (blank_mode) {
525 case FB_BLANK_UNBLANK: 525 case FB_BLANK_UNBLANK:
526 blank = 0; hsync = 1; vsync = 1; crt = 1; 526 blank = 0; hsync = 1; vsync = 1;
527 break; 527 break;
528 case FB_BLANK_NORMAL: 528 case FB_BLANK_NORMAL:
529 blank = 1; hsync = 1; vsync = 1; crt = 1; 529 blank = 1; hsync = 1; vsync = 1;
530 break; 530 break;
531 case FB_BLANK_VSYNC_SUSPEND: 531 case FB_BLANK_VSYNC_SUSPEND:
532 blank = 1; hsync = 1; vsync = 0; crt = 1; 532 blank = 1; hsync = 1; vsync = 0;
533 break; 533 break;
534 case FB_BLANK_HSYNC_SUSPEND: 534 case FB_BLANK_HSYNC_SUSPEND:
535 blank = 1; hsync = 0; vsync = 1; crt = 1; 535 blank = 1; hsync = 0; vsync = 1;
536 break; 536 break;
537 case FB_BLANK_POWERDOWN: 537 case FB_BLANK_POWERDOWN:
538 blank = 1; hsync = 0; vsync = 0; crt = 0; 538 blank = 1; hsync = 0; vsync = 0;
539 break; 539 break;
540 default: 540 default:
541 return -EINVAL; 541 return -EINVAL;
@@ -545,15 +545,23 @@ int lx_blank_display(struct fb_info *info, int blank_mode)
545 dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN | 545 dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
546 VP_DCFG_CRT_EN); 546 VP_DCFG_CRT_EN);
547 if (!blank) 547 if (!blank)
548 dcfg |= VP_DCFG_DAC_BL_EN; 548 dcfg |= VP_DCFG_DAC_BL_EN | VP_DCFG_CRT_EN;
549 if (hsync) 549 if (hsync)
550 dcfg |= VP_DCFG_HSYNC_EN; 550 dcfg |= VP_DCFG_HSYNC_EN;
551 if (vsync) 551 if (vsync)
552 dcfg |= VP_DCFG_VSYNC_EN; 552 dcfg |= VP_DCFG_VSYNC_EN;
553 if (crt) 553
554 dcfg |= VP_DCFG_CRT_EN;
555 write_vp(par, VP_DCFG, dcfg); 554 write_vp(par, VP_DCFG, dcfg);
556 555
556 misc = read_vp(par, VP_MISC);
557
558 if (vsync && hsync)
559 misc &= ~VP_MISC_DACPWRDN;
560 else
561 misc |= VP_MISC_DACPWRDN;
562
563 write_vp(par, VP_MISC, misc);
564
557 /* Power on/off flat panel */ 565 /* Power on/off flat panel */
558 566
559 if (par->output & OUTPUT_PANEL) { 567 if (par->output & OUTPUT_PANEL) {
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index c18880d9db1f..0129c044f6d6 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -551,7 +551,7 @@ static struct fb_ops hgafb_ops = {
551 * Initialization 551 * Initialization
552 */ 552 */
553 553
554static int __init hgafb_probe(struct device *device) 554static int __init hgafb_probe(struct platform_device *pdev)
555{ 555{
556 struct fb_info *info; 556 struct fb_info *info;
557 557
@@ -565,7 +565,7 @@ static int __init hgafb_probe(struct device *device)
565 printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n", 565 printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
566 hga_type_name, hga_vram_len/1024); 566 hga_type_name, hga_vram_len/1024);
567 567
568 info = framebuffer_alloc(0, NULL); 568 info = framebuffer_alloc(0, &pdev->dev);
569 if (!info) { 569 if (!info) {
570 iounmap(hga_vram); 570 iounmap(hga_vram);
571 return -ENOMEM; 571 return -ENOMEM;
@@ -593,13 +593,13 @@ static int __init hgafb_probe(struct device *device)
593 593
594 printk(KERN_INFO "fb%d: %s frame buffer device\n", 594 printk(KERN_INFO "fb%d: %s frame buffer device\n",
595 info->node, info->fix.id); 595 info->node, info->fix.id);
596 dev_set_drvdata(device, info); 596 platform_set_drvdata(pdev, info);
597 return 0; 597 return 0;
598} 598}
599 599
600static int hgafb_remove(struct device *device) 600static int hgafb_remove(struct platform_device *pdev)
601{ 601{
602 struct fb_info *info = dev_get_drvdata(device); 602 struct fb_info *info = platform_get_drvdata(pdev);
603 603
604 hga_txt_mode(); 604 hga_txt_mode();
605 hga_clear_screen(); 605 hga_clear_screen();
@@ -620,16 +620,15 @@ static int hgafb_remove(struct device *device)
620 return 0; 620 return 0;
621} 621}
622 622
623static struct device_driver hgafb_driver = { 623static struct platform_driver hgafb_driver = {
624 .name = "hgafb",
625 .bus = &platform_bus_type,
626 .probe = hgafb_probe, 624 .probe = hgafb_probe,
627 .remove = hgafb_remove, 625 .remove = hgafb_remove,
626 .driver = {
627 .name = "hgafb",
628 },
628}; 629};
629 630
630static struct platform_device hgafb_device = { 631static struct platform_device *hgafb_device;
631 .name = "hgafb",
632};
633 632
634static int __init hgafb_init(void) 633static int __init hgafb_init(void)
635{ 634{
@@ -638,12 +637,15 @@ static int __init hgafb_init(void)
638 if (fb_get_options("hgafb", NULL)) 637 if (fb_get_options("hgafb", NULL))
639 return -ENODEV; 638 return -ENODEV;
640 639
641 ret = driver_register(&hgafb_driver); 640 ret = platform_driver_register(&hgafb_driver);
642 641
643 if (!ret) { 642 if (!ret) {
644 ret = platform_device_register(&hgafb_device); 643 hgafb_device = platform_device_register_simple("hgafb", 0, NULL, 0);
645 if (ret) 644
646 driver_unregister(&hgafb_driver); 645 if (IS_ERR(hgafb_device)) {
646 platform_driver_unregister(&hgafb_driver);
647 ret = PTR_ERR(hgafb_device);
648 }
647 } 649 }
648 650
649 return ret; 651 return ret;
@@ -651,8 +653,8 @@ static int __init hgafb_init(void)
651 653
652static void __exit hgafb_exit(void) 654static void __exit hgafb_exit(void)
653{ 655{
654 platform_device_unregister(&hgafb_device); 656 platform_device_unregister(hgafb_device);
655 driver_unregister(&hgafb_driver); 657 platform_driver_unregister(&hgafb_driver);
656} 658}
657 659
658/* ------------------------------------------------------------------------- 660/* -------------------------------------------------------------------------
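
[Editor's note] The hgafb hunks above convert the driver from a raw struct device_driver bound to platform_bus_type into a struct platform_driver, and replace the static platform_device with one allocated by platform_device_register_simple(). A minimal sketch of that registration pattern follows; "foofb" is a made-up name, not anything from the patch.

    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foofb_probe(struct platform_device *pdev)
    {
            /* bind per-device state with platform_set_drvdata(pdev, ...) */
            return 0;
    }

    static int foofb_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver foofb_driver = {
            .probe  = foofb_probe,
            .remove = foofb_remove,
            .driver = {
                    .name = "foofb",
            },
    };

    static struct platform_device *foofb_device;

    static int __init foofb_init(void)
    {
            int ret = platform_driver_register(&foofb_driver);

            if (!ret) {
                    /* id 0, no resources, no platform data */
                    foofb_device = platform_device_register_simple("foofb",
                                                                   0, NULL, 0);
                    if (IS_ERR(foofb_device)) {
                            platform_driver_unregister(&foofb_driver);
                            ret = PTR_ERR(foofb_device);
                    }
            }
            return ret;
    }

    static void __exit foofb_exit(void)
    {
            platform_device_unregister(foofb_device);
            platform_driver_unregister(&foofb_driver);
    }

    module_init(foofb_init);
    module_exit(foofb_exit);
    MODULE_LICENSE("GPL");

Registering the device from init code (rather than declaring it statically) lets the error path unwind cleanly with IS_ERR()/PTR_ERR(), exactly as the hgafb_init() hunk does.
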
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 94e4d3ac1a05..0c5a475c1cae 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -24,6 +24,7 @@
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/mm.h>
27#include <linux/fb.h> 28#include <linux/fb.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
29#include <linux/init.h> 30#include <linux/init.h>
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 5246b0402d76..25172b2a2a94 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -201,7 +201,6 @@ static int neoFindMode(int xres, int yres, int depth)
201 * 201 *
202 * Determine the closest clock frequency to the one requested. 202 * Determine the closest clock frequency to the one requested.
203 */ 203 */
204#define REF_FREQ 0xe517 /* 14.31818 in 20.12 fixed point */
205#define MAX_N 127 204#define MAX_N 127
206#define MAX_D 31 205#define MAX_D 31
207#define MAX_F 1 206#define MAX_F 1
@@ -211,27 +210,24 @@ static void neoCalcVCLK(const struct fb_info *info,
211{ 210{
212 int n, d, f; 211 int n, d, f;
213 int n_best = 0, d_best = 0, f_best = 0; 212 int n_best = 0, d_best = 0, f_best = 0;
214 long f_best_diff = (0x7ffff << 12); /* 20.12 */ 213 long f_best_diff = 0x7ffff;
215 long f_target = (freq << 12) / 1000; /* 20.12 */
216 214
217 for (f = 0; f <= MAX_F; f++) 215 for (f = 0; f <= MAX_F; f++)
218 for (n = 0; n <= MAX_N; n++) 216 for (d = 0; d <= MAX_D; d++)
219 for (d = 0; d <= MAX_D; d++) { 217 for (n = 0; n <= MAX_N; n++) {
220 long f_out; /* 20.12 */ 218 long f_out;
221 long f_diff; /* 20.12 */ 219 long f_diff;
222 220
223 f_out = 221 f_out = ((14318 * (n + 1)) / (d + 1)) >> f;
224 ((((n + 1) << 12) / ((d + 222 f_diff = abs(f_out - freq);
225 1) * 223 if (f_diff <= f_best_diff) {
226 (1 << f))) >> 12)
227 * REF_FREQ;
228 f_diff = abs(f_out - f_target);
229 if (f_diff < f_best_diff) {
230 f_best_diff = f_diff; 224 f_best_diff = f_diff;
231 n_best = n; 225 n_best = n;
232 d_best = d; 226 d_best = d;
233 f_best = f; 227 f_best = f;
234 } 228 }
229 if (f_out > freq)
230 break;
235 } 231 }
236 232
237 if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 || 233 if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
@@ -248,11 +244,11 @@ static void neoCalcVCLK(const struct fb_info *info,
248 par->VCLK3Denominator = d_best; 244 par->VCLK3Denominator = d_best;
249 245
250#ifdef NEOFB_DEBUG 246#ifdef NEOFB_DEBUG
251 printk("neoVCLK: f:%d NumLow=%d NumHi=%d Den=%d Df=%d\n", 247 printk(KERN_DEBUG "neoVCLK: f:%ld NumLow=%d NumHi=%d Den=%d Df=%ld\n",
252 f_target >> 12, 248 freq,
253 par->VCLK3NumeratorLow, 249 par->VCLK3NumeratorLow,
254 par->VCLK3NumeratorHigh, 250 par->VCLK3NumeratorHigh,
255 par->VCLK3Denominator, f_best_diff >> 12); 251 par->VCLK3Denominator, f_best_diff);
256#endif 252#endif
257} 253}
258 254
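
[Editor's note] The neoCalcVCLK() rewrite above drops the 20.12 fixed-point arithmetic and searches for PLL dividers directly in kHz, comparing f_out = 14318 * (n + 1) / ((d + 1) << f) against the requested frequency. A standalone sketch of that brute-force search is below; the 14318 kHz reference clock, divider limits, loop order and early break are taken from the patch, while the main() test value is only illustrative.

    #include <stdio.h>
    #include <stdlib.h>

    #define REF_KHZ 14318   /* 14.31818 MHz reference, in kHz */
    #define MAX_N   127
    #define MAX_D   31
    #define MAX_F   1

    /* Find n/d/f so that REF_KHZ * (n+1) / ((d+1) << f) is closest to freq. */
    static void best_dividers(long freq, int *n_best, int *d_best, int *f_best)
    {
            long best_diff = 0x7ffff;
            int n, d, f;

            for (f = 0; f <= MAX_F; f++)
                    for (d = 0; d <= MAX_D; d++)
                            for (n = 0; n <= MAX_N; n++) {
                                    long f_out = (REF_KHZ * (n + 1) / (d + 1)) >> f;
                                    long diff = labs(f_out - freq);

                                    if (diff <= best_diff) {
                                            best_diff = diff;
                                            *n_best = n;
                                            *d_best = d;
                                            *f_best = f;
                                    }
                                    /* f_out only grows with n, so stop early */
                                    if (f_out > freq)
                                            break;
                            }
    }

    int main(void)
    {
            int n = 0, d = 0, f = 0;

            best_dividers(65000, &n, &d, &f);   /* 65 MHz pixel clock */
            printf("n=%d d=%d f=%d\n", n, d, f);
            return 0;
    }
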
@@ -263,15 +259,20 @@ static void neoCalcVCLK(const struct fb_info *info,
263 */ 259 */
264 260
265static int vgaHWInit(const struct fb_var_screeninfo *var, 261static int vgaHWInit(const struct fb_var_screeninfo *var,
266 const struct fb_info *info, 262 struct neofb_par *par)
267 struct neofb_par *par, struct xtimings *timings)
268{ 263{
264 int hsync_end = var->xres + var->right_margin + var->hsync_len;
265 int htotal = (hsync_end + var->left_margin) >> 3;
266 int vsync_start = var->yres + var->lower_margin;
267 int vsync_end = vsync_start + var->vsync_len;
268 int vtotal = vsync_end + var->upper_margin;
269
269 par->MiscOutReg = 0x23; 270 par->MiscOutReg = 0x23;
270 271
271 if (!(timings->sync & FB_SYNC_HOR_HIGH_ACT)) 272 if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
272 par->MiscOutReg |= 0x40; 273 par->MiscOutReg |= 0x40;
273 274
274 if (!(timings->sync & FB_SYNC_VERT_HIGH_ACT)) 275 if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
275 par->MiscOutReg |= 0x80; 276 par->MiscOutReg |= 0x80;
276 277
277 /* 278 /*
@@ -286,25 +287,25 @@ static int vgaHWInit(const struct fb_var_screeninfo *var,
286 /* 287 /*
287 * CRTC Controller 288 * CRTC Controller
288 */ 289 */
289 par->CRTC[0] = (timings->HTotal >> 3) - 5; 290 par->CRTC[0] = htotal - 5;
290 par->CRTC[1] = (timings->HDisplay >> 3) - 1; 291 par->CRTC[1] = (var->xres >> 3) - 1;
291 par->CRTC[2] = (timings->HDisplay >> 3) - 1; 292 par->CRTC[2] = (var->xres >> 3) - 1;
292 par->CRTC[3] = (((timings->HTotal >> 3) - 1) & 0x1F) | 0x80; 293 par->CRTC[3] = ((htotal - 1) & 0x1F) | 0x80;
293 par->CRTC[4] = (timings->HSyncStart >> 3); 294 par->CRTC[4] = ((var->xres + var->right_margin) >> 3);
294 par->CRTC[5] = ((((timings->HTotal >> 3) - 1) & 0x20) << 2) 295 par->CRTC[5] = (((htotal - 1) & 0x20) << 2)
295 | (((timings->HSyncEnd >> 3)) & 0x1F); 296 | (((hsync_end >> 3)) & 0x1F);
296 par->CRTC[6] = (timings->VTotal - 2) & 0xFF; 297 par->CRTC[6] = (vtotal - 2) & 0xFF;
297 par->CRTC[7] = (((timings->VTotal - 2) & 0x100) >> 8) 298 par->CRTC[7] = (((vtotal - 2) & 0x100) >> 8)
298 | (((timings->VDisplay - 1) & 0x100) >> 7) 299 | (((var->yres - 1) & 0x100) >> 7)
299 | ((timings->VSyncStart & 0x100) >> 6) 300 | ((vsync_start & 0x100) >> 6)
300 | (((timings->VDisplay - 1) & 0x100) >> 5) 301 | (((var->yres - 1) & 0x100) >> 5)
301 | 0x10 | (((timings->VTotal - 2) & 0x200) >> 4) 302 | 0x10 | (((vtotal - 2) & 0x200) >> 4)
302 | (((timings->VDisplay - 1) & 0x200) >> 3) 303 | (((var->yres - 1) & 0x200) >> 3)
303 | ((timings->VSyncStart & 0x200) >> 2); 304 | ((vsync_start & 0x200) >> 2);
304 par->CRTC[8] = 0x00; 305 par->CRTC[8] = 0x00;
305 par->CRTC[9] = (((timings->VDisplay - 1) & 0x200) >> 4) | 0x40; 306 par->CRTC[9] = (((var->yres - 1) & 0x200) >> 4) | 0x40;
306 307
307 if (timings->dblscan) 308 if (var->vmode & FB_VMODE_DOUBLE)
308 par->CRTC[9] |= 0x80; 309 par->CRTC[9] |= 0x80;
309 310
310 par->CRTC[10] = 0x00; 311 par->CRTC[10] = 0x00;
@@ -313,13 +314,13 @@ static int vgaHWInit(const struct fb_var_screeninfo *var,
313 par->CRTC[13] = 0x00; 314 par->CRTC[13] = 0x00;
314 par->CRTC[14] = 0x00; 315 par->CRTC[14] = 0x00;
315 par->CRTC[15] = 0x00; 316 par->CRTC[15] = 0x00;
316 par->CRTC[16] = timings->VSyncStart & 0xFF; 317 par->CRTC[16] = vsync_start & 0xFF;
317 par->CRTC[17] = (timings->VSyncEnd & 0x0F) | 0x20; 318 par->CRTC[17] = (vsync_end & 0x0F) | 0x20;
318 par->CRTC[18] = (timings->VDisplay - 1) & 0xFF; 319 par->CRTC[18] = (var->yres - 1) & 0xFF;
319 par->CRTC[19] = var->xres_virtual >> 4; 320 par->CRTC[19] = var->xres_virtual >> 4;
320 par->CRTC[20] = 0x00; 321 par->CRTC[20] = 0x00;
321 par->CRTC[21] = (timings->VDisplay - 1) & 0xFF; 322 par->CRTC[21] = (var->yres - 1) & 0xFF;
322 par->CRTC[22] = (timings->VTotal - 1) & 0xFF; 323 par->CRTC[22] = (vtotal - 1) & 0xFF;
323 par->CRTC[23] = 0xC3; 324 par->CRTC[23] = 0xC3;
324 par->CRTC[24] = 0xFF; 325 par->CRTC[24] = 0xFF;
325 326
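
[Editor's note] vgaHWInit() now takes its timings straight from struct fb_var_screeninfo instead of a separate struct xtimings, deriving the sync and total values from the blanking margins. A compile-only sketch of those relationships, using the fbdev field names the patch relies on:

    #include <linux/fb.h>

    struct crt_timings {
            int hsync_start, hsync_end, htotal;     /* in pixels */
            int vsync_start, vsync_end, vtotal;     /* in lines */
    };

    /* Horizontal order: xres | right_margin | hsync_len | left_margin
     * Vertical order:   yres | lower_margin | vsync_len | upper_margin */
    static void var_to_timings(const struct fb_var_screeninfo *var,
                               struct crt_timings *t)
    {
            t->hsync_start = var->xres + var->right_margin;
            t->hsync_end   = t->hsync_start + var->hsync_len;
            t->htotal      = t->hsync_end + var->left_margin;

            t->vsync_start = var->yres + var->lower_margin;
            t->vsync_end   = t->vsync_start + var->vsync_len;
            t->vtotal      = t->vsync_end + var->upper_margin;
    }

In the patch the horizontal values are additionally shifted right by 3 before being written into the CRTC registers, because those registers count 8-pixel character clocks rather than pixels.
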
@@ -483,7 +484,8 @@ static inline int neo2200_sync(struct fb_info *info)
483{ 484{
484 struct neofb_par *par = info->par; 485 struct neofb_par *par = info->par;
485 486
486 while (readl(&par->neo2200->bltStat) & 1); 487 while (readl(&par->neo2200->bltStat) & 1)
488 cpu_relax();
487 return 0; 489 return 0;
488} 490}
489 491
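
[Editor's note] neo2200_sync() keeps its busy-wait on the blitter status bit but now calls cpu_relax() in the loop body. A hedged sketch of the same idea is below; the iteration bound and timeout return are illustrative additions, not something the patch introduces.

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <asm/processor.h>      /* cpu_relax() */

    /* Poll a busy bit until it clears, easing CPU/bus pressure each pass. */
    static int wait_engine_idle(void __iomem *stat_reg)
    {
            unsigned int tries = 1000000;   /* illustrative upper bound */

            while (readl(stat_reg) & 1) {
                    if (!--tries)
                            return -ETIMEDOUT;
                    cpu_relax();
            }
            return 0;
    }
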
@@ -591,34 +593,14 @@ static int
591neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 593neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
592{ 594{
593 struct neofb_par *par = info->par; 595 struct neofb_par *par = info->par;
594 unsigned int pixclock = var->pixclock;
595 struct xtimings timings;
596 int memlen, vramlen; 596 int memlen, vramlen;
597 int mode_ok = 0; 597 int mode_ok = 0;
598 598
599 DBG("neofb_check_var"); 599 DBG("neofb_check_var");
600 600
601 if (!pixclock) 601 if (PICOS2KHZ(var->pixclock) > par->maxClock)
602 pixclock = 10000; /* 10ns = 100MHz */
603 timings.pixclock = 1000000000 / pixclock;
604 if (timings.pixclock < 1)
605 timings.pixclock = 1;
606
607 if (timings.pixclock > par->maxClock)
608 return -EINVAL; 602 return -EINVAL;
609 603
610 timings.dblscan = var->vmode & FB_VMODE_DOUBLE;
611 timings.interlaced = var->vmode & FB_VMODE_INTERLACED;
612 timings.HDisplay = var->xres;
613 timings.HSyncStart = timings.HDisplay + var->right_margin;
614 timings.HSyncEnd = timings.HSyncStart + var->hsync_len;
615 timings.HTotal = timings.HSyncEnd + var->left_margin;
616 timings.VDisplay = var->yres;
617 timings.VSyncStart = timings.VDisplay + var->lower_margin;
618 timings.VSyncEnd = timings.VSyncStart + var->vsync_len;
619 timings.VTotal = timings.VSyncEnd + var->upper_margin;
620 timings.sync = var->sync;
621
622 /* Is the mode larger than the LCD panel? */ 604 /* Is the mode larger than the LCD panel? */
623 if (par->internal_display && 605 if (par->internal_display &&
624 ((var->xres > par->NeoPanelWidth) || 606 ((var->xres > par->NeoPanelWidth) ||
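
[Editor's note] With the xtimings structure gone, neofb_check_var() rejects a mode purely on its dot clock: the fbdev pixclock field is a pixel period in picoseconds, and PICOS2KHZ() turns it into a frequency in kHz for comparison with the per-chip maximum. A tiny standalone illustration of that conversion (the sample period and limit are illustrative; 110000 kHz is the NM2200 figure from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long pixclock = 15385;   /* pixel period in ps, ~65 MHz */
            unsigned long max_khz  = 110000;  /* e.g. the NM2200 limit */
            unsigned long khz = 1000000000UL / pixclock;  /* what PICOS2KHZ() computes */

            printf("%lu kHz (limit %lu kHz) -> %s\n", khz, max_khz,
                   khz > max_khz ? "reject mode" : "mode ok");
            return 0;
    }
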
@@ -759,11 +741,11 @@ neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
759static int neofb_set_par(struct fb_info *info) 741static int neofb_set_par(struct fb_info *info)
760{ 742{
761 struct neofb_par *par = info->par; 743 struct neofb_par *par = info->par;
762 struct xtimings timings;
763 unsigned char temp; 744 unsigned char temp;
764 int i, clock_hi = 0; 745 int i, clock_hi = 0;
765 int lcd_stretch; 746 int lcd_stretch;
766 int hoffset, voffset; 747 int hoffset, voffset;
748 int vsync_start, vtotal;
767 749
768 DBG("neofb_set_par"); 750 DBG("neofb_set_par");
769 751
@@ -771,28 +753,15 @@ static int neofb_set_par(struct fb_info *info)
771 753
772 vgaHWProtect(1); /* Blank the screen */ 754 vgaHWProtect(1); /* Blank the screen */
773 755
774 timings.dblscan = info->var.vmode & FB_VMODE_DOUBLE; 756 vsync_start = info->var.yres + info->var.lower_margin;
775 timings.interlaced = info->var.vmode & FB_VMODE_INTERLACED; 757 vtotal = vsync_start + info->var.vsync_len + info->var.upper_margin;
776 timings.HDisplay = info->var.xres;
777 timings.HSyncStart = timings.HDisplay + info->var.right_margin;
778 timings.HSyncEnd = timings.HSyncStart + info->var.hsync_len;
779 timings.HTotal = timings.HSyncEnd + info->var.left_margin;
780 timings.VDisplay = info->var.yres;
781 timings.VSyncStart = timings.VDisplay + info->var.lower_margin;
782 timings.VSyncEnd = timings.VSyncStart + info->var.vsync_len;
783 timings.VTotal = timings.VSyncEnd + info->var.upper_margin;
784 timings.sync = info->var.sync;
785 timings.pixclock = PICOS2KHZ(info->var.pixclock);
786
787 if (timings.pixclock < 1)
788 timings.pixclock = 1;
789 758
790 /* 759 /*
791 * This will allocate the datastructure and initialize all of the 760 * This will allocate the datastructure and initialize all of the
792 * generic VGA registers. 761 * generic VGA registers.
793 */ 762 */
794 763
795 if (vgaHWInit(&info->var, info, par, &timings)) 764 if (vgaHWInit(&info->var, par))
796 return -EINVAL; 765 return -EINVAL;
797 766
798 /* 767 /*
@@ -831,10 +800,10 @@ static int neofb_set_par(struct fb_info *info)
831 par->ExtCRTDispAddr = 0x10; 800 par->ExtCRTDispAddr = 0x10;
832 801
833 /* Vertical Extension */ 802 /* Vertical Extension */
834 par->VerticalExt = (((timings.VTotal - 2) & 0x400) >> 10) 803 par->VerticalExt = (((vtotal - 2) & 0x400) >> 10)
835 | (((timings.VDisplay - 1) & 0x400) >> 9) 804 | (((info->var.yres - 1) & 0x400) >> 9)
836 | (((timings.VSyncStart) & 0x400) >> 8) 805 | (((vsync_start) & 0x400) >> 8)
837 | (((timings.VSyncStart) & 0x400) >> 7); 806 | (((vsync_start) & 0x400) >> 7);
838 807
839 /* Fast write bursts on unless disabled. */ 808 /* Fast write bursts on unless disabled. */
840 if (par->pci_burst) 809 if (par->pci_burst)
@@ -995,7 +964,7 @@ static int neofb_set_par(struct fb_info *info)
995 * Calculate the VCLK that most closely matches the requested dot 964 * Calculate the VCLK that most closely matches the requested dot
996 * clock. 965 * clock.
997 */ 966 */
998 neoCalcVCLK(info, par, timings.pixclock); 967 neoCalcVCLK(info, par, PICOS2KHZ(info->var.pixclock));
999 968
1000 /* Since we program the clocks ourselves, always use VCLK3. */ 969 /* Since we program the clocks ourselves, always use VCLK3. */
1001 par->MiscOutReg |= 0x0C; 970 par->MiscOutReg |= 0x0C;
@@ -1927,9 +1896,6 @@ static int __devinit neo_init_hw(struct fb_info *info)
1927 int maxClock = 65000; 1896 int maxClock = 65000;
1928 int CursorMem = 1024; 1897 int CursorMem = 1024;
1929 int CursorOff = 0x100; 1898 int CursorOff = 0x100;
1930 int linearSize = 1024;
1931 int maxWidth = 1024;
1932 int maxHeight = 1024;
1933 1899
1934 DBG("neo_init_hw"); 1900 DBG("neo_init_hw");
1935 1901
@@ -1948,81 +1914,52 @@ static int __devinit neo_init_hw(struct fb_info *info)
1948 case FB_ACCEL_NEOMAGIC_NM2070: 1914 case FB_ACCEL_NEOMAGIC_NM2070:
1949 videoRam = 896; 1915 videoRam = 896;
1950 maxClock = 65000; 1916 maxClock = 65000;
1951 CursorMem = 2048;
1952 CursorOff = 0x100;
1953 linearSize = 1024;
1954 maxWidth = 1024;
1955 maxHeight = 1024;
1956 break; 1917 break;
1957 case FB_ACCEL_NEOMAGIC_NM2090: 1918 case FB_ACCEL_NEOMAGIC_NM2090:
1958 case FB_ACCEL_NEOMAGIC_NM2093: 1919 case FB_ACCEL_NEOMAGIC_NM2093:
1959 videoRam = 1152;
1960 maxClock = 80000;
1961 CursorMem = 2048;
1962 CursorOff = 0x100;
1963 linearSize = 2048;
1964 maxWidth = 1024;
1965 maxHeight = 1024;
1966 break;
1967 case FB_ACCEL_NEOMAGIC_NM2097: 1920 case FB_ACCEL_NEOMAGIC_NM2097:
1968 videoRam = 1152; 1921 videoRam = 1152;
1969 maxClock = 80000; 1922 maxClock = 80000;
1970 CursorMem = 1024;
1971 CursorOff = 0x100;
1972 linearSize = 2048;
1973 maxWidth = 1024;
1974 maxHeight = 1024;
1975 break; 1923 break;
1976 case FB_ACCEL_NEOMAGIC_NM2160: 1924 case FB_ACCEL_NEOMAGIC_NM2160:
1977 videoRam = 2048; 1925 videoRam = 2048;
1978 maxClock = 90000; 1926 maxClock = 90000;
1979 CursorMem = 1024;
1980 CursorOff = 0x100;
1981 linearSize = 2048;
1982 maxWidth = 1024;
1983 maxHeight = 1024;
1984 break; 1927 break;
1985 case FB_ACCEL_NEOMAGIC_NM2200: 1928 case FB_ACCEL_NEOMAGIC_NM2200:
1986 videoRam = 2560; 1929 videoRam = 2560;
1987 maxClock = 110000; 1930 maxClock = 110000;
1988 CursorMem = 1024;
1989 CursorOff = 0x1000;
1990 linearSize = 4096;
1991 maxWidth = 1280;
1992 maxHeight = 1024; /* ???? */
1993
1994 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
1995 break; 1931 break;
1996 case FB_ACCEL_NEOMAGIC_NM2230: 1932 case FB_ACCEL_NEOMAGIC_NM2230:
1997 videoRam = 3008; 1933 videoRam = 3008;
1998 maxClock = 110000; 1934 maxClock = 110000;
1999 CursorMem = 1024;
2000 CursorOff = 0x1000;
2001 linearSize = 4096;
2002 maxWidth = 1280;
2003 maxHeight = 1024; /* ???? */
2004
2005 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
2006 break; 1935 break;
2007 case FB_ACCEL_NEOMAGIC_NM2360: 1936 case FB_ACCEL_NEOMAGIC_NM2360:
2008 videoRam = 4096; 1937 videoRam = 4096;
2009 maxClock = 110000; 1938 maxClock = 110000;
2010 CursorMem = 1024;
2011 CursorOff = 0x1000;
2012 linearSize = 4096;
2013 maxWidth = 1280;
2014 maxHeight = 1024; /* ???? */
2015
2016 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
2017 break; 1939 break;
2018 case FB_ACCEL_NEOMAGIC_NM2380: 1940 case FB_ACCEL_NEOMAGIC_NM2380:
2019 videoRam = 6144; 1941 videoRam = 6144;
2020 maxClock = 110000; 1942 maxClock = 110000;
1943 break;
1944 }
1945 switch (info->fix.accel) {
1946 case FB_ACCEL_NEOMAGIC_NM2070:
1947 case FB_ACCEL_NEOMAGIC_NM2090:
1948 case FB_ACCEL_NEOMAGIC_NM2093:
1949 CursorMem = 2048;
1950 CursorOff = 0x100;
1951 break;
1952 case FB_ACCEL_NEOMAGIC_NM2097:
1953 case FB_ACCEL_NEOMAGIC_NM2160:
1954 CursorMem = 1024;
1955 CursorOff = 0x100;
1956 break;
1957 case FB_ACCEL_NEOMAGIC_NM2200:
1958 case FB_ACCEL_NEOMAGIC_NM2230:
1959 case FB_ACCEL_NEOMAGIC_NM2360:
1960 case FB_ACCEL_NEOMAGIC_NM2380:
2021 CursorMem = 1024; 1961 CursorMem = 1024;
2022 CursorOff = 0x1000; 1962 CursorOff = 0x1000;
2023 linearSize = 8192;
2024 maxWidth = 1280;
2025 maxHeight = 1024; /* ???? */
2026 1963
2027 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase; 1964 par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
2028 break; 1965 break;
@@ -2036,7 +1973,7 @@ static int __devinit neo_init_hw(struct fb_info *info)
2036*/ 1973*/
2037 par->maxClock = maxClock; 1974 par->maxClock = maxClock;
2038 par->cursorOff = CursorOff; 1975 par->cursorOff = CursorOff;
2039 return ((videoRam * 1024)); 1976 return videoRam * 1024;
2040} 1977}
2041 1978
2042 1979
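
[Editor's note] neo_init_hw() used to set videoRam, maxClock, CursorMem, CursorOff, linearSize, maxWidth and maxHeight in one large per-chip switch; the patch drops the unused values and splits what remains into two switches, one for RAM size/clock limit and one for the cursor layout shared across chip families. An equally common way to express such per-chip constants is a small lookup table; a hedged sketch follows, with only a subset of chips and values copied from the patch.

    #include <linux/fb.h>   /* FB_ACCEL_NEOMAGIC_* ids */

    struct neo_chip_info {
            int accel;              /* FB_ACCEL_NEOMAGIC_* id */
            int video_ram;          /* KiB */
            int max_clock;          /* kHz */
            int cursor_mem;         /* bytes */
            int cursor_off;         /* cursor offset */
    };

    static const struct neo_chip_info neo_chips[] = {
            { FB_ACCEL_NEOMAGIC_NM2070,  896,  65000, 2048, 0x100  },
            { FB_ACCEL_NEOMAGIC_NM2160, 2048,  90000, 1024, 0x100  },
            { FB_ACCEL_NEOMAGIC_NM2200, 2560, 110000, 1024, 0x1000 },
            /* ... remaining chips elided ... */
    };

    static const struct neo_chip_info *neo_lookup(int accel)
    {
            unsigned int i;

            for (i = 0; i < sizeof(neo_chips) / sizeof(neo_chips[0]); i++)
                    if (neo_chips[i].accel == accel)
                            return &neo_chips[i];
            return NULL;
    }
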
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index ab32ceb06178..ab77c51fe9d6 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -20,6 +20,7 @@
20 */ 20 */
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/mm.h>
23#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
24#include <linux/clk.h> 25#include <linux/clk.h>
25#include <linux/io.h> 26#include <linux/io.h>
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 14d0f7a11145..f85af5c4fa68 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -25,6 +25,7 @@
25 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 25 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */ 26 */
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/mm.h>
28#include <linux/uaccess.h> 29#include <linux/uaccess.h>
29 30
30#include <asm/mach-types.h> 31#include <asm/mach-types.h>
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index d0746261c957..2b707a8ce5de 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -30,6 +30,7 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/mm.h>
33#include <linux/fb.h> 34#include <linux/fb.h>
34#include <linux/delay.h> 35#include <linux/delay.h>
35#include <linux/init.h> 36#include <linux/init.h>
@@ -40,6 +41,7 @@
40#include <linux/clk.h> 41#include <linux/clk.h>
41#include <linux/err.h> 42#include <linux/err.h>
42#include <linux/completion.h> 43#include <linux/completion.h>
44#include <linux/mutex.h>
43#include <linux/kthread.h> 45#include <linux/kthread.h>
44#include <linux/freezer.h> 46#include <linux/freezer.h>
45 47
@@ -227,6 +229,22 @@ static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
227 case 4: ret = LCCR3_4BPP; break; 229 case 4: ret = LCCR3_4BPP; break;
228 case 8: ret = LCCR3_8BPP; break; 230 case 8: ret = LCCR3_8BPP; break;
229 case 16: ret = LCCR3_16BPP; break; 231 case 16: ret = LCCR3_16BPP; break;
232 case 24:
233 switch (var->red.length + var->green.length +
234 var->blue.length + var->transp.length) {
235 case 18: ret = LCCR3_18BPP_P | LCCR3_PDFOR_3; break;
236 case 19: ret = LCCR3_19BPP_P; break;
237 }
238 break;
239 case 32:
240 switch (var->red.length + var->green.length +
241 var->blue.length + var->transp.length) {
242 case 18: ret = LCCR3_18BPP | LCCR3_PDFOR_3; break;
243 case 19: ret = LCCR3_19BPP; break;
244 case 24: ret = LCCR3_24BPP | LCCR3_PDFOR_3; break;
245 case 25: ret = LCCR3_25BPP; break;
246 }
247 break;
230 } 248 }
231 return ret; 249 return ret;
232} 250}
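
[Editor's note] For depths above 16 bpp the PXA LCD controller distinguishes several packings (RGB666, RGBT666, RGB888, RGBT888), and pxafb_bpp_to_lccr3() now tells them apart by summing the red/green/blue/transparency bit lengths: 18, 19, 24 or 25. A small standalone illustration of that sum, using the component lengths the later pxafb_check_var() hunk assigns:

    #include <stdio.h>

    struct fmt { const char *name; int r, g, b, t; };

    int main(void)
    {
            /* component bit lengths as set up in pxafb_check_var() */
            static const struct fmt fmts[] = {
                    { "RGB666",  6, 6, 6, 0 },      /* depth 18 */
                    { "RGBT666", 6, 6, 6, 1 },      /* depth 19 */
                    { "RGB888",  8, 8, 8, 0 },      /* depth 24 */
                    { "RGBT888", 8, 8, 8, 1 },      /* depth 25 */
            };
            unsigned int i;

            for (i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
                    printf("%-8s depth %d\n", fmts[i].name,
                           fmts[i].r + fmts[i].g + fmts[i].b + fmts[i].t);
            return 0;
    }
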
@@ -345,6 +363,41 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
345 var->green.offset = 5; var->green.length = 6; 363 var->green.offset = 5; var->green.length = 6;
346 var->blue.offset = 0; var->blue.length = 5; 364 var->blue.offset = 0; var->blue.length = 5;
347 var->transp.offset = var->transp.length = 0; 365 var->transp.offset = var->transp.length = 0;
366 } else if (var->bits_per_pixel > 16) {
367 struct pxafb_mode_info *mode;
368
369 mode = pxafb_getmode(inf, var);
370 if (!mode)
371 return -EINVAL;
372
373 switch (mode->depth) {
374 case 18: /* RGB666 */
375 var->transp.offset = var->transp.length = 0;
376 var->red.offset = 12; var->red.length = 6;
377 var->green.offset = 6; var->green.length = 6;
378 var->blue.offset = 0; var->blue.length = 6;
379 break;
380 case 19: /* RGBT666 */
381 var->transp.offset = 18; var->transp.length = 1;
382 var->red.offset = 12; var->red.length = 6;
383 var->green.offset = 6; var->green.length = 6;
384 var->blue.offset = 0; var->blue.length = 6;
385 break;
386 case 24: /* RGB888 */
387 var->transp.offset = var->transp.length = 0;
388 var->red.offset = 16; var->red.length = 8;
389 var->green.offset = 8; var->green.length = 8;
390 var->blue.offset = 0; var->blue.length = 8;
391 break;
392 case 25: /* RGBT888 */
393 var->transp.offset = 24; var->transp.length = 1;
394 var->red.offset = 16; var->red.length = 8;
395 var->green.offset = 8; var->green.length = 8;
396 var->blue.offset = 0; var->blue.length = 8;
397 break;
398 default:
399 return -EINVAL;
400 }
348 } else { 401 } else {
349 var->red.offset = var->green.offset = 0; 402 var->red.offset = var->green.offset = 0;
350 var->blue.offset = var->transp.offset = 0; 403 var->blue.offset = var->transp.offset = 0;
@@ -376,7 +429,7 @@ static int pxafb_set_par(struct fb_info *info)
376 struct pxafb_info *fbi = (struct pxafb_info *)info; 429 struct pxafb_info *fbi = (struct pxafb_info *)info;
377 struct fb_var_screeninfo *var = &info->var; 430 struct fb_var_screeninfo *var = &info->var;
378 431
379 if (var->bits_per_pixel == 16) 432 if (var->bits_per_pixel >= 16)
380 fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; 433 fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
381 else if (!fbi->cmap_static) 434 else if (!fbi->cmap_static)
382 fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR; 435 fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
@@ -391,7 +444,7 @@ static int pxafb_set_par(struct fb_info *info)
391 444
392 fbi->fb.fix.line_length = var->xres_virtual * 445 fbi->fb.fix.line_length = var->xres_virtual *
393 var->bits_per_pixel / 8; 446 var->bits_per_pixel / 8;
394 if (var->bits_per_pixel == 16) 447 if (var->bits_per_pixel >= 16)
395 fbi->palette_size = 0; 448 fbi->palette_size = 0;
396 else 449 else
397 fbi->palette_size = var->bits_per_pixel == 1 ? 450 fbi->palette_size = var->bits_per_pixel == 1 ?
@@ -404,7 +457,7 @@ static int pxafb_set_par(struct fb_info *info)
404 */ 457 */
405 pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR); 458 pxafb_set_truecolor(fbi->fb.fix.visual == FB_VISUAL_TRUECOLOR);
406 459
407 if (fbi->fb.var.bits_per_pixel == 16) 460 if (fbi->fb.var.bits_per_pixel >= 16)
408 fb_dealloc_cmap(&fbi->fb.cmap); 461 fb_dealloc_cmap(&fbi->fb.cmap);
409 else 462 else
410 fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0); 463 fb_alloc_cmap(&fbi->fb.cmap, 1<<fbi->fb.var.bits_per_pixel, 0);
@@ -831,6 +884,8 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var,
831 case 4: 884 case 4:
832 case 8: 885 case 8:
833 case 16: 886 case 16:
887 case 24:
888 case 32:
834 break; 889 break;
835 default: 890 default:
836 printk(KERN_ERR "%s: invalid bit depth %d\n", 891 printk(KERN_ERR "%s: invalid bit depth %d\n",
@@ -968,6 +1023,11 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
968 1023
969 for (gpio = 58; ldd_bits; gpio++, ldd_bits--) 1024 for (gpio = 58; ldd_bits; gpio++, ldd_bits--)
970 pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT); 1025 pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT);
1026 /* 18 bit interface */
1027 if (fbi->fb.var.bits_per_pixel > 16) {
1028 pxa_gpio_mode(86 | GPIO_ALT_FN_2_OUT);
1029 pxa_gpio_mode(87 | GPIO_ALT_FN_2_OUT);
1030 }
971 pxa_gpio_mode(GPIO74_LCD_FCLK_MD); 1031 pxa_gpio_mode(GPIO74_LCD_FCLK_MD);
972 pxa_gpio_mode(GPIO75_LCD_LCLK_MD); 1032 pxa_gpio_mode(GPIO75_LCD_LCLK_MD);
973 pxa_gpio_mode(GPIO76_LCD_PCLK_MD); 1033 pxa_gpio_mode(GPIO76_LCD_PCLK_MD);
@@ -1058,7 +1118,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
1058{ 1118{
1059 u_int old_state; 1119 u_int old_state;
1060 1120
1061 down(&fbi->ctrlr_sem); 1121 mutex_lock(&fbi->ctrlr_lock);
1062 1122
1063 old_state = fbi->state; 1123 old_state = fbi->state;
1064 1124
@@ -1146,7 +1206,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
1146 } 1206 }
1147 break; 1207 break;
1148 } 1208 }
1149 up(&fbi->ctrlr_sem); 1209 mutex_unlock(&fbi->ctrlr_lock);
1150} 1210}
1151 1211
1152/* 1212/*
@@ -1399,7 +1459,7 @@ static struct pxafb_info * __devinit pxafb_init_fbinfo(struct device *dev)
1399 1459
1400 init_waitqueue_head(&fbi->ctrlr_wait); 1460 init_waitqueue_head(&fbi->ctrlr_wait);
1401 INIT_WORK(&fbi->task, pxafb_task); 1461 INIT_WORK(&fbi->task, pxafb_task);
1402 init_MUTEX(&fbi->ctrlr_sem); 1462 mutex_init(&fbi->ctrlr_lock);
1403 init_completion(&fbi->disable_done); 1463 init_completion(&fbi->disable_done);
1404#ifdef CONFIG_FB_PXA_SMARTPANEL 1464#ifdef CONFIG_FB_PXA_SMARTPANEL
1405 init_completion(&fbi->command_done); 1465 init_completion(&fbi->command_done);
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h
index 8238dc826429..31541b86f13d 100644
--- a/drivers/video/pxafb.h
+++ b/drivers/video/pxafb.h
@@ -106,7 +106,7 @@ struct pxafb_info {
106 106
107 volatile u_char state; 107 volatile u_char state;
108 volatile u_char task_state; 108 volatile u_char task_state;
109 struct semaphore ctrlr_sem; 109 struct mutex ctrlr_lock;
110 wait_queue_head_t ctrlr_wait; 110 wait_queue_head_t ctrlr_wait;
111 struct work_struct task; 111 struct work_struct task;
112 112
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index ab2b2110478b..78bcdbc3f484 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -167,6 +167,7 @@
167#include <linux/string.h> 167#include <linux/string.h>
168#include <linux/interrupt.h> 168#include <linux/interrupt.h>
169#include <linux/slab.h> 169#include <linux/slab.h>
170#include <linux/mm.h>
170#include <linux/fb.h> 171#include <linux/fb.h>
171#include <linux/delay.h> 172#include <linux/delay.h>
172#include <linux/init.h> 173#include <linux/init.h>
@@ -174,6 +175,7 @@
174#include <linux/cpufreq.h> 175#include <linux/cpufreq.h>
175#include <linux/platform_device.h> 176#include <linux/platform_device.h>
176#include <linux/dma-mapping.h> 177#include <linux/dma-mapping.h>
178#include <linux/mutex.h>
177 179
178#include <asm/hardware.h> 180#include <asm/hardware.h>
179#include <asm/io.h> 181#include <asm/io.h>
@@ -1107,7 +1109,7 @@ static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state)
1107{ 1109{
1108 u_int old_state; 1110 u_int old_state;
1109 1111
1110 down(&fbi->ctrlr_sem); 1112 mutex_lock(&fbi->ctrlr_lock);
1111 1113
1112 old_state = fbi->state; 1114 old_state = fbi->state;
1113 1115
@@ -1192,7 +1194,7 @@ static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state)
1192 } 1194 }
1193 break; 1195 break;
1194 } 1196 }
1195 up(&fbi->ctrlr_sem); 1197 mutex_unlock(&fbi->ctrlr_lock);
1196} 1198}
1197 1199
1198/* 1200/*
@@ -1444,7 +1446,7 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
1444 1446
1445 init_waitqueue_head(&fbi->ctrlr_wait); 1447 init_waitqueue_head(&fbi->ctrlr_wait);
1446 INIT_WORK(&fbi->task, sa1100fb_task); 1448 INIT_WORK(&fbi->task, sa1100fb_task);
1447 init_MUTEX(&fbi->ctrlr_sem); 1449 mutex_init(&fbi->ctrlr_lock);
1448 1450
1449 return fbi; 1451 return fbi;
1450} 1452}
diff --git a/drivers/video/sa1100fb.h b/drivers/video/sa1100fb.h
index f465b27ed860..86831db9a042 100644
--- a/drivers/video/sa1100fb.h
+++ b/drivers/video/sa1100fb.h
@@ -100,7 +100,7 @@ struct sa1100fb_info {
100 100
101 volatile u_char state; 101 volatile u_char state;
102 volatile u_char task_state; 102 volatile u_char task_state;
103 struct semaphore ctrlr_sem; 103 struct mutex ctrlr_lock;
104 wait_queue_head_t ctrlr_wait; 104 wait_queue_head_t ctrlr_wait;
105 struct work_struct task; 105 struct work_struct task;
106 106
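
[Editor's note] Both pxafb and sa1100fb replace the init_MUTEX()/down()/up() semaphore protecting the controller state machine with a proper struct mutex. The conversion is mechanical; a minimal sketch with generic names:

    #include <linux/mutex.h>

    struct foo_info {
            struct mutex ctrlr_lock;        /* was: struct semaphore ctrlr_sem */
            unsigned int state;
    };

    static void foo_init(struct foo_info *fi)
    {
            mutex_init(&fi->ctrlr_lock);    /* was: init_MUTEX(&fi->ctrlr_sem) */
    }

    static void foo_set_state(struct foo_info *fi, unsigned int state)
    {
            mutex_lock(&fi->ctrlr_lock);    /* was: down(&fi->ctrlr_sem) */
            fi->state = state;
            mutex_unlock(&fi->ctrlr_lock);  /* was: up(&fi->ctrlr_sem) */
    }

Beyond being the preferred sleeping lock, a mutex carries ownership semantics and lockdep coverage that a counting semaphore used as a mutex does not.
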
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
new file mode 100644
index 000000000000..4d0e28c5790b
--- /dev/null
+++ b/drivers/video/sh7760fb.c
@@ -0,0 +1,658 @@
1/*
2 * SH7760/SH7763 LCDC Framebuffer driver.
3 *
4 * (c) 2006-2008 MSC Vertriebsges.m.b.H.,
5 * Manuel Lauss <mano@roarinelk.homelinux.net>
6 * (c) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details.
11 *
12 * PLEASE HAVE A LOOK AT Documentation/fb/sh7760fb.txt!
13 *
14 * Thanks to Siegfried Schaefer <s.schaefer at schaefer-edv.de>
15 * for his original source and testing!
16 */
17
18#include <linux/completion.h>
19#include <linux/delay.h>
20#include <linux/dma-mapping.h>
21#include <linux/fb.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27
28#include <asm/sh7760fb.h>
29
30struct sh7760fb_par {
31 void __iomem *base;
32 int irq;
33
34 struct sh7760fb_platdata *pd; /* display information */
35
36 dma_addr_t fbdma; /* physical address */
37
38 int rot; /* rotation enabled? */
39
40 u32 pseudo_palette[16];
41
42 struct platform_device *dev;
43 struct resource *ioarea;
44 struct completion vsync; /* vsync irq event */
45};
46
47static irqreturn_t sh7760fb_irq(int irq, void *data)
48{
49 struct completion *c = data;
50
51 complete(c);
52
53 return IRQ_HANDLED;
54}
55
56static void sh7760fb_wait_vsync(struct fb_info *info)
57{
58 struct sh7760fb_par *par = info->par;
59
60 if (par->pd->novsync)
61 return;
62
63 iowrite16(ioread16(par->base + LDINTR) & ~VINT_CHECK,
64 par->base + LDINTR);
65
66 if (par->irq < 0) {
67 /* poll for vert. retrace: status bit is sticky */
68 while (!(ioread16(par->base + LDINTR) & VINT_CHECK))
69 cpu_relax();
70 } else {
71 /* a "wait_for_irq_event(par->irq)" would be extremely nice */
72 init_completion(&par->vsync);
73 enable_irq(par->irq);
74 wait_for_completion(&par->vsync);
75 disable_irq_nosync(par->irq);
76 }
77}
78
79/* wait_for_lps - wait until power supply has reached a certain state. */
80static int wait_for_lps(struct sh7760fb_par *par, int val)
81{
82 int i = 100;
83 while (--i && ((ioread16(par->base + LDPMMR) & 3) != val))
84 msleep(1);
85
86 if (i <= 0)
87 return -ETIMEDOUT;
88
89 return 0;
90}
91
92/* en/disable the LCDC */
93static int sh7760fb_blank(int blank, struct fb_info *info)
94{
95 struct sh7760fb_par *par = info->par;
96 struct sh7760fb_platdata *pd = par->pd;
97 unsigned short cntr = ioread16(par->base + LDCNTR);
98 unsigned short intr = ioread16(par->base + LDINTR);
99 int lps;
100
101 if (blank == FB_BLANK_UNBLANK) {
102 intr |= VINT_START;
103 cntr = LDCNTR_DON2 | LDCNTR_DON;
104 lps = 3;
105 } else {
106 intr &= ~VINT_START;
107 cntr = LDCNTR_DON2;
108 lps = 0;
109 }
110
111 if (pd->blank)
112 pd->blank(blank);
113
114 iowrite16(intr, par->base + LDINTR);
115 iowrite16(cntr, par->base + LDCNTR);
116
117 return wait_for_lps(par, lps);
118}
119
120/* set color registers */
121static int sh7760fb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
122{
123 struct sh7760fb_par *par = info->par;
124 u32 s = cmap->start;
125 u32 l = cmap->len;
126 u16 *r = cmap->red;
127 u16 *g = cmap->green;
128 u16 *b = cmap->blue;
129 u32 col, tmo;
130 int ret;
131
132 ret = 0;
133
134 sh7760fb_wait_vsync(info);
135
136 /* request palette access */
137 iowrite16(LDPALCR_PALEN, par->base + LDPALCR);
138
139 /* poll for access grant */
140 tmo = 100;
141 while (!(ioread16(par->base + LDPALCR) & LDPALCR_PALS) && (--tmo))
142 cpu_relax();
143
144 if (!tmo) {
145 ret = 1;
146 dev_dbg(info->dev, "no palette access!\n");
147 goto out;
148 }
149
150 while (l && (s < 256)) {
151 col = ((*r) & 0xff) << 16;
152 col |= ((*g) & 0xff) << 8;
153 col |= ((*b) & 0xff);
154 col &= SH7760FB_PALETTE_MASK;
155
156 if (s < 16)
157 ((u32 *) (info->pseudo_palette))[s] = s;
158
159 s++;
160 l--;
161 r++;
162 g++;
163 b++;
164 }
165out:
166 iowrite16(0, par->base + LDPALCR);
167 return ret;
168}
169
170static void encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info,
171 unsigned long stride)
172{
173 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
174 strcpy(fix->id, "sh7760-lcdc");
175
176 fix->smem_start = (unsigned long)info->screen_base;
177 fix->smem_len = info->screen_size;
178
179 fix->line_length = stride;
180}
181
182static int sh7760fb_get_color_info(struct device *dev,
183 u16 lddfr, int *bpp, int *gray)
184{
185 int lbpp, lgray;
186
187 lgray = lbpp = 0;
188
189 switch (lddfr & LDDFR_COLOR_MASK) {
190 case LDDFR_1BPP_MONO:
191 lgray = 1;
192 lbpp = 1;
193 break;
194 case LDDFR_2BPP_MONO:
195 lgray = 1;
196 lbpp = 2;
197 break;
198 case LDDFR_4BPP_MONO:
199 lgray = 1;
200 case LDDFR_4BPP:
201 lbpp = 4;
202 break;
203 case LDDFR_6BPP_MONO:
204 lgray = 1;
205 case LDDFR_8BPP:
206 lbpp = 8;
207 break;
208 case LDDFR_16BPP_RGB555:
209 case LDDFR_16BPP_RGB565:
210 lbpp = 16;
211 lgray = 0;
212 break;
213 default:
214 dev_dbg(dev, "unsupported LDDFR bit depth.\n");
215 return -EINVAL;
216 }
217
218 if (bpp)
219 *bpp = lbpp;
220 if (gray)
221 *gray = lgray;
222
223 return 0;
224}
225
226static int sh7760fb_check_var(struct fb_var_screeninfo *var,
227 struct fb_info *info)
228{
229 struct fb_fix_screeninfo *fix = &info->fix;
230 struct sh7760fb_par *par = info->par;
231 int ret, bpp;
232
233 /* get color info from register value */
234 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, NULL);
235 if (ret)
236 return ret;
237
238 var->bits_per_pixel = bpp;
239
240 if ((var->grayscale) && (var->bits_per_pixel == 1))
241 fix->visual = FB_VISUAL_MONO10;
242 else if (var->bits_per_pixel >= 15)
243 fix->visual = FB_VISUAL_TRUECOLOR;
244 else
245 fix->visual = FB_VISUAL_PSEUDOCOLOR;
246
247 /* TODO: add some more validation here */
248 return 0;
249}
250
251/*
252 * sh7760fb_set_par - set videomode.
253 *
254 * NOTE: The rotation, grayscale and DSTN codepaths are
255 * totally untested!
256 */
257static int sh7760fb_set_par(struct fb_info *info)
258{
259 struct sh7760fb_par *par = info->par;
260 struct fb_videomode *vm = par->pd->def_mode;
261 unsigned long sbase, dstn_off, ldsarl, stride;
262 unsigned short hsynp, hsynw, htcn, hdcn;
263 unsigned short vsynp, vsynw, vtln, vdln;
264 unsigned short lddfr, ldmtr;
265 int ret, bpp, gray;
266
267 par->rot = par->pd->rotate;
268
269 /* rotate only works with xres <= 320 */
270 if (par->rot && (vm->xres > 320)) {
271 dev_dbg(info->dev, "rotation disabled due to display size\n");
272 par->rot = 0;
273 }
274
275 /* calculate LCDC reg vals from display parameters */
276 hsynp = vm->right_margin + vm->xres;
277 hsynw = vm->hsync_len;
278 htcn = vm->left_margin + hsynp + hsynw;
279 hdcn = vm->xres;
280 vsynp = vm->lower_margin + vm->yres;
281 vsynw = vm->vsync_len;
282 vtln = vm->upper_margin + vsynp + vsynw;
283 vdln = vm->yres;
284
285 /* get color info from register value */
286 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, &gray);
287 if (ret)
288 return ret;
289
290 dev_dbg(info->dev, "%dx%d %dbpp %s (orientation %s)\n", hdcn,
291 vdln, bpp, gray ? "grayscale" : "color",
292 par->rot ? "rotated" : "normal");
293
294#ifdef CONFIG_CPU_LITTLE_ENDIAN
295 lddfr = par->pd->lddfr | (1 << 8);
296#else
297 lddfr = par->pd->lddfr & ~(1 << 8);
298#endif
299
300 ldmtr = par->pd->ldmtr;
301
302 if (!(vm->sync & FB_SYNC_HOR_HIGH_ACT))
303 ldmtr |= LDMTR_CL1POL;
304 if (!(vm->sync & FB_SYNC_VERT_HIGH_ACT))
305 ldmtr |= LDMTR_FLMPOL;
306
307 /* shut down LCDC before changing display parameters */
308 sh7760fb_blank(FB_BLANK_POWERDOWN, info);
309
310 iowrite16(par->pd->ldickr, par->base + LDICKR); /* pixclock */
311 iowrite16(ldmtr, par->base + LDMTR); /* polarities */
312 iowrite16(lddfr, par->base + LDDFR); /* color/depth */
313 iowrite16((par->rot ? 1 << 13 : 0), par->base + LDSMR); /* rotate */
314 iowrite16(par->pd->ldpmmr, par->base + LDPMMR); /* Power Management */
315 iowrite16(par->pd->ldpspr, par->base + LDPSPR); /* Power Supply Ctrl */
316
317 /* display resolution */
318 iowrite16(((htcn >> 3) - 1) | (((hdcn >> 3) - 1) << 8),
319 par->base + LDHCNR);
320 iowrite16(vdln - 1, par->base + LDVDLNR);
321 iowrite16(vtln - 1, par->base + LDVTLNR);
322 /* h/v sync signals */
323 iowrite16((vsynp - 1) | ((vsynw - 1) << 12), par->base + LDVSYNR);
324 iowrite16(((hsynp >> 3) - 1) | (((hsynw >> 3) - 1) << 12),
325 par->base + LDHSYNR);
326 /* AC modulation sig */
327 iowrite16(par->pd->ldaclnr, par->base + LDACLNR);
328
329 stride = (par->rot) ? vtln : hdcn;
330 if (!gray)
331 stride *= (bpp + 7) >> 3;
332 else {
333 if (bpp == 1)
334 stride >>= 3;
335 else if (bpp == 2)
336 stride >>= 2;
337 else if (bpp == 4)
338 stride >>= 1;
339 /* 6 bpp == 8 bpp */
340 }
341
342 /* if rotated, stride must be power of 2 */
343 if (par->rot) {
344 unsigned long bit = 1 << 31;
345 while (bit) {
346 if (stride & bit)
347 break;
348 bit >>= 1;
349 }
350 if (stride & ~bit)
351 stride = bit << 1; /* not P-o-2, round up */
352 }
353 iowrite16(stride, par->base + LDLAOR);
354
355 /* set display mem start address */
356 sbase = (unsigned long)par->fbdma;
357 if (par->rot)
358 sbase += (hdcn - 1) * stride;
359
360 iowrite32(sbase, par->base + LDSARU);
361
362 /*
363 * for DSTN need to set address for lower half.
364 * I (mlau) don't know which address to set it to,
365 * so I guessed at (stride * yres/2).
366 */
367 if (((ldmtr & 0x003f) >= LDMTR_DSTN_MONO_8) &&
368 ((ldmtr & 0x003f) <= LDMTR_DSTN_COLOR_16)) {
369
370 dev_dbg(info->dev, " ***** DSTN untested! *****\n");
371
372 dstn_off = stride;
373 if (par->rot)
374 dstn_off *= hdcn >> 1;
375 else
376 dstn_off *= vdln >> 1;
377
378 ldsarl = sbase + dstn_off;
379 } else
380 ldsarl = 0;
381
382 iowrite32(ldsarl, par->base + LDSARL); /* mem for lower half of DSTN */
383
384 encode_fix(&info->fix, info, stride);
385 sh7760fb_check_var(&info->var, info);
386
387 sh7760fb_blank(FB_BLANK_UNBLANK, info); /* panel on! */
388
389 dev_dbg(info->dev, "hdcn : %6d htcn : %6d\n", hdcn, htcn);
390 dev_dbg(info->dev, "hsynw : %6d hsynp : %6d\n", hsynw, hsynp);
391 dev_dbg(info->dev, "vdln : %6d vtln : %6d\n", vdln, vtln);
392 dev_dbg(info->dev, "vsynw : %6d vsynp : %6d\n", vsynw, vsynp);
393 dev_dbg(info->dev, "clksrc: %6d clkdiv: %6d\n",
394 (par->pd->ldickr >> 12) & 3, par->pd->ldickr & 0x1f);
395 dev_dbg(info->dev, "ldpmmr: 0x%04x ldpspr: 0x%04x\n", par->pd->ldpmmr,
396 par->pd->ldpspr);
397 dev_dbg(info->dev, "ldmtr : 0x%04x lddfr : 0x%04x\n", ldmtr, lddfr);
398 dev_dbg(info->dev, "ldlaor: %ld\n", stride);
399 dev_dbg(info->dev, "ldsaru: 0x%08lx ldsarl: 0x%08lx\n", sbase, ldsarl);
400
401 return 0;
402}
403
404static struct fb_ops sh7760fb_ops = {
405 .owner = THIS_MODULE,
406 .fb_blank = sh7760fb_blank,
407 .fb_check_var = sh7760fb_check_var,
408 .fb_setcmap = sh7760fb_setcmap,
409 .fb_set_par = sh7760fb_set_par,
410 .fb_fillrect = cfb_fillrect,
411 .fb_copyarea = cfb_copyarea,
412 .fb_imageblit = cfb_imageblit,
413};
414
415static void sh7760fb_free_mem(struct fb_info *info)
416{
417 struct sh7760fb_par *par = info->par;
418
419 if (!info->screen_base)
420 return;
421
422 dma_free_coherent(info->dev, info->screen_size,
423 info->screen_base, par->fbdma);
424
425 par->fbdma = 0;
426 info->screen_base = NULL;
427 info->screen_size = 0;
428}
429
430/* allocate the framebuffer memory. This memory must be in Area3,
431 * (dictated by the DMA engine) and contiguous, at a 512 byte boundary.
432 */
433static int sh7760fb_alloc_mem(struct fb_info *info)
434{
435 struct sh7760fb_par *par = info->par;
436 void *fbmem;
437 unsigned long vram;
438 int ret, bpp;
439
440 if (info->screen_base)
441 return 0;
442
443 /* get color info from register value */
444 ret = sh7760fb_get_color_info(info->dev, par->pd->lddfr, &bpp, NULL);
445 if (ret) {
446 printk(KERN_ERR "colinfo\n");
447 return ret;
448 }
449
450 /* min VRAM: xres_min = 16, yres_min = 1, bpp = 1: 2byte -> 1 page
451 max VRAM: xres_max = 1024, yres_max = 1024, bpp = 16: 2MB */
452
453 vram = info->var.xres * info->var.yres;
454 if (info->var.grayscale) {
455 if (bpp == 1)
456 vram >>= 3;
457 else if (bpp == 2)
458 vram >>= 2;
459 else if (bpp == 4)
460 vram >>= 1;
461 } else if (bpp > 8)
462 vram *= 2;
463 if ((vram < 1) || (vram > 1024 * 2048)) {
464 dev_dbg(info->dev, "too much VRAM required. Check settings\n");
465 return -ENODEV;
466 }
467
468 if (vram < PAGE_SIZE)
469 vram = PAGE_SIZE;
470
471 fbmem = dma_alloc_coherent(info->dev, vram, &par->fbdma, GFP_KERNEL);
472
473 if (!fbmem)
474 return -ENOMEM;
475
476 if ((par->fbdma & SH7760FB_DMA_MASK) != SH7760FB_DMA_MASK) {
477 sh7760fb_free_mem(info);
478 dev_err(info->dev, "kernel gave me memory at 0x%08lx, which is"
479 "unusable for the LCDC\n", (unsigned long)par->fbdma);
480 return -ENOMEM;
481 }
482
483 info->screen_base = fbmem;
484 info->screen_size = vram;
485
486 return 0;
487}
488
489static int __devinit sh7760fb_probe(struct platform_device *pdev)
490{
491 struct fb_info *info;
492 struct resource *res;
493 struct sh7760fb_par *par;
494 int ret;
495
496 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
497 if (unlikely(res == NULL)) {
498 dev_err(&pdev->dev, "invalid resource\n");
499 return -EINVAL;
500 }
501
502 info = framebuffer_alloc(sizeof(struct sh7760fb_par), &pdev->dev);
503 if (!info)
504 return -ENOMEM;
505
506 par = info->par;
507 par->dev = pdev;
508
509 par->pd = pdev->dev.platform_data;
510 if (!par->pd) {
511 dev_dbg(info->dev, "no display setup data!\n");
512 ret = -ENODEV;
513 goto out_fb;
514 }
515
516 par->ioarea = request_mem_region(res->start,
517 (res->end - res->start), pdev->name);
518 if (!par->ioarea) {
519 dev_err(&pdev->dev, "mmio area busy\n");
520 ret = -EBUSY;
521 goto out_fb;
522 }
523
524 par->base = ioremap_nocache(res->start, res->end - res->start + 1);
525 if (!par->base) {
526 dev_err(&pdev->dev, "cannot remap\n");
527 ret = -ENODEV;
528 goto out_res;
529 }
530
531 iowrite16(0, par->base + LDINTR); /* disable vsync irq */
532 par->irq = platform_get_irq(pdev, 0);
533 if (par->irq >= 0) {
534 ret = request_irq(par->irq, sh7760fb_irq, 0,
535 "sh7760-lcdc", &par->vsync);
536 if (ret) {
537 dev_err(&pdev->dev, "cannot grab IRQ\n");
538 par->irq = -ENXIO;
539 } else
540 disable_irq_nosync(par->irq);
541 }
542
543 fb_videomode_to_var(&info->var, par->pd->def_mode);
544
545 ret = sh7760fb_alloc_mem(info);
546 if (ret) {
547 dev_dbg(info->dev, "framebuffer memory allocation failed!\n");
548 goto out_unmap;
549 }
550
551 info->pseudo_palette = par->pseudo_palette;
552
553 /* fixup color register bitpositions. These are fixed by hardware */
554 info->var.red.offset = 11;
555 info->var.red.length = 5;
556 info->var.red.msb_right = 0;
557
558 info->var.green.offset = 5;
559 info->var.green.length = 6;
560 info->var.green.msb_right = 0;
561
562 info->var.blue.offset = 0;
563 info->var.blue.length = 5;
564 info->var.blue.msb_right = 0;
565
566 info->var.transp.offset = 0;
567 info->var.transp.length = 0;
568 info->var.transp.msb_right = 0;
569
570 /* set the DON2 bit now, before cmap allocation, as it will randomize
571 * palette memory.
572 */
573 iowrite16(LDCNTR_DON2, par->base + LDCNTR);
574 info->fbops = &sh7760fb_ops;
575
576 ret = fb_alloc_cmap(&info->cmap, 256, 0);
577 if (ret) {
578 dev_dbg(info->dev, "Unable to allocate cmap memory\n");
579 goto out_mem;
580 }
581
582 ret = register_framebuffer(info);
583 if (ret < 0) {
584 dev_dbg(info->dev, "cannot register fb!\n");
585 goto out_cmap;
586 }
587 platform_set_drvdata(pdev, info);
588
589 printk(KERN_INFO "%s: memory at phys 0x%08lx-0x%08lx, size %ld KiB\n",
590 pdev->name,
591 (unsigned long)par->fbdma,
592 (unsigned long)(par->fbdma + info->screen_size - 1),
593 info->screen_size >> 10);
594
595 return 0;
596
597out_cmap:
598 sh7760fb_blank(FB_BLANK_POWERDOWN, info);
599 fb_dealloc_cmap(&info->cmap);
600out_mem:
601 sh7760fb_free_mem(info);
602out_unmap:
603 if (par->irq >= 0)
604 free_irq(par->irq, &par->vsync);
605 iounmap(par->base);
606out_res:
607 release_resource(par->ioarea);
608 kfree(par->ioarea);
609out_fb:
610 framebuffer_release(info);
611 return ret;
612}
613
614static int __devexit sh7760fb_remove(struct platform_device *dev)
615{
616 struct fb_info *info = platform_get_drvdata(dev);
617 struct sh7760fb_par *par = info->par;
618
619 sh7760fb_blank(FB_BLANK_POWERDOWN, info);
620 unregister_framebuffer(info);
621 fb_dealloc_cmap(&info->cmap);
622 sh7760fb_free_mem(info);
623 if (par->irq >= 0)
624 free_irq(par->irq, par);
625 iounmap(par->base);
626 release_resource(par->ioarea);
627 kfree(par->ioarea);
628 framebuffer_release(info);
629 platform_set_drvdata(dev, NULL);
630
631 return 0;
632}
633
634static struct platform_driver sh7760_lcdc_driver = {
635 .driver = {
636 .name = "sh7760-lcdc",
637 .owner = THIS_MODULE,
638 },
639 .probe = sh7760fb_probe,
640 .remove = __devexit_p(sh7760fb_remove),
641};
642
643static int __init sh7760fb_init(void)
644{
645 return platform_driver_register(&sh7760_lcdc_driver);
646}
647
648static void __exit sh7760fb_exit(void)
649{
650 platform_driver_unregister(&sh7760_lcdc_driver);
651}
652
653module_init(sh7760fb_init);
654module_exit(sh7760fb_exit);
655
656MODULE_AUTHOR("Nobuhiro Iwamatsu, Manuel Lauss");
657MODULE_DESCRIPTION("FBdev for SH7760/63 integrated LCD Controller");
658MODULE_LICENSE("GPL");
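
[Editor's note] One detail worth calling out in the new sh7760fb driver: when rotation is enabled the line stride written to LDLAOR must be a power of two, so sh7760fb_set_par() scans for the highest set bit and rounds up if any lower bits are set. A standalone sketch of that rounding is below; the helper name is made up, and kernels of this era could likely use roundup_pow_of_two() from <linux/log2.h> instead.

    #include <stdio.h>

    /* Round v up to the next power of two (v == 0 returns 0). */
    static unsigned long round_up_pow2(unsigned long v)
    {
            unsigned long bit = 1UL << (sizeof(v) * 8 - 1);

            while (bit && !(v & bit))       /* find the highest set bit */
                    bit >>= 1;
            if (v & ~bit)                   /* lower bits set: not a power of two */
                    bit <<= 1;
            return bit;
    }

    int main(void)
    {
            /* e.g. a 320-pixel, 16 bpp rotated panel needs a 1024-byte stride */
            printf("%lu %lu %lu\n",
                   round_up_pow2(320), round_up_pow2(512), round_up_pow2(640));
            return 0;
    }
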
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
new file mode 100644
index 000000000000..f6ef6cca73cd
--- /dev/null
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -0,0 +1,725 @@
1/*
2 * SuperH Mobile LCDC Framebuffer
3 *
4 * Copyright (c) 2008 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/delay.h>
14#include <linux/mm.h>
15#include <linux/fb.h>
16#include <linux/clk.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <asm/sh_mobile_lcdc.h>
20
21#define PALETTE_NR 16
22
23struct sh_mobile_lcdc_priv;
24struct sh_mobile_lcdc_chan {
25 struct sh_mobile_lcdc_priv *lcdc;
26 unsigned long *reg_offs;
27 unsigned long ldmt1r_value;
28 unsigned long enabled; /* ME and SE in LDCNT2R */
29 struct sh_mobile_lcdc_chan_cfg cfg;
30 u32 pseudo_palette[PALETTE_NR];
31 struct fb_info info;
32 dma_addr_t dma_handle;
33};
34
35struct sh_mobile_lcdc_priv {
36 void __iomem *base;
37 struct clk *clk;
38 unsigned long lddckr;
39 struct sh_mobile_lcdc_chan ch[2];
40};
41
42/* shared registers */
43#define _LDDCKR 0x410
44#define _LDDCKSTPR 0x414
45#define _LDINTR 0x468
46#define _LDSR 0x46c
47#define _LDCNT1R 0x470
48#define _LDCNT2R 0x474
49#define _LDDDSR 0x47c
50#define _LDDWD0R 0x800
51#define _LDDRDR 0x840
52#define _LDDWAR 0x900
53#define _LDDRAR 0x904
54
55/* per-channel registers */
56enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R,
57 LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR };
58
59static unsigned long lcdc_offs_mainlcd[] = {
60 [LDDCKPAT1R] = 0x400,
61 [LDDCKPAT2R] = 0x404,
62 [LDMT1R] = 0x418,
63 [LDMT2R] = 0x41c,
64 [LDMT3R] = 0x420,
65 [LDDFR] = 0x424,
66 [LDSM1R] = 0x428,
67 [LDSA1R] = 0x430,
68 [LDMLSR] = 0x438,
69 [LDHCNR] = 0x448,
70 [LDHSYNR] = 0x44c,
71 [LDVLNR] = 0x450,
72 [LDVSYNR] = 0x454,
73 [LDPMR] = 0x460,
74};
75
76static unsigned long lcdc_offs_sublcd[] = {
77 [LDDCKPAT1R] = 0x408,
78 [LDDCKPAT2R] = 0x40c,
79 [LDMT1R] = 0x600,
80 [LDMT2R] = 0x604,
81 [LDMT3R] = 0x608,
82 [LDDFR] = 0x60c,
83 [LDSM1R] = 0x610,
84 [LDSA1R] = 0x618,
85 [LDMLSR] = 0x620,
86 [LDHCNR] = 0x624,
87 [LDHSYNR] = 0x628,
88 [LDVLNR] = 0x62c,
89 [LDVSYNR] = 0x630,
90 [LDPMR] = 0x63c,
91};
92
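
[Editor's note] sh_mobile_lcdcfb drives the main and sub LCD channels with the same code by indexing per-channel register offsets through an enum: each channel holds a pointer to either lcdc_offs_mainlcd[] or lcdc_offs_sublcd[], and the accessors add the selected offset to the shared MMIO base. A compact sketch of the pattern, with generic names and only a few registers:

    #include <linux/io.h>
    #include <linux/types.h>

    enum { REG_MODE, REG_START_ADDR, REG_STRIDE, NR_REGS };

    static const unsigned long chan_a_offs[NR_REGS] = {
            [REG_MODE]       = 0x418,
            [REG_START_ADDR] = 0x430,
            [REG_STRIDE]     = 0x438,
    };

    static const unsigned long chan_b_offs[NR_REGS] = {
            [REG_MODE]       = 0x600,
            [REG_START_ADDR] = 0x618,
            [REG_STRIDE]     = 0x620,
    };

    struct chan {
            void __iomem *base;             /* shared controller MMIO base */
            const unsigned long *offs;      /* chan_a_offs or chan_b_offs */
    };

    static void chan_write(struct chan *ch, int reg, u32 val)
    {
            iowrite32(val, ch->base + ch->offs[reg]);
    }

    static u32 chan_read(struct chan *ch, int reg)
    {
            return ioread32(ch->base + ch->offs[reg]);
    }
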
93#define START_LCDC 0x00000001
94#define LCDC_RESET 0x00000100
95#define DISPLAY_BEU 0x00000008
96#define LCDC_ENABLE 0x00000001
97
98static void lcdc_write_chan(struct sh_mobile_lcdc_chan *chan,
99 int reg_nr, unsigned long data)
100{
101 iowrite32(data, chan->lcdc->base + chan->reg_offs[reg_nr]);
102}
103
104static unsigned long lcdc_read_chan(struct sh_mobile_lcdc_chan *chan,
105 int reg_nr)
106{
107 return ioread32(chan->lcdc->base + chan->reg_offs[reg_nr]);
108}
109
110static void lcdc_write(struct sh_mobile_lcdc_priv *priv,
111 unsigned long reg_offs, unsigned long data)
112{
113 iowrite32(data, priv->base + reg_offs);
114}
115
116static unsigned long lcdc_read(struct sh_mobile_lcdc_priv *priv,
117 unsigned long reg_offs)
118{
119 return ioread32(priv->base + reg_offs);
120}
121
122static void lcdc_wait_bit(struct sh_mobile_lcdc_priv *priv,
123 unsigned long reg_offs,
124 unsigned long mask, unsigned long until)
125{
126 while ((lcdc_read(priv, reg_offs) & mask) != until)
127 cpu_relax();
128}
129
130static int lcdc_chan_is_sublcd(struct sh_mobile_lcdc_chan *chan)
131{
132 return chan->cfg.chan == LCDC_CHAN_SUBLCD;
133}
134
135static void lcdc_sys_write_index(void *handle, unsigned long data)
136{
137 struct sh_mobile_lcdc_chan *ch = handle;
138
139 lcdc_write(ch->lcdc, _LDDWD0R, data | 0x10000000);
140 lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
141 lcdc_write(ch->lcdc, _LDDWAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
142}
143
144static void lcdc_sys_write_data(void *handle, unsigned long data)
145{
146 struct sh_mobile_lcdc_chan *ch = handle;
147
148 lcdc_write(ch->lcdc, _LDDWD0R, data | 0x11000000);
149 lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
150 lcdc_write(ch->lcdc, _LDDWAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
151}
152
153static unsigned long lcdc_sys_read_data(void *handle)
154{
155 struct sh_mobile_lcdc_chan *ch = handle;
156
157 lcdc_write(ch->lcdc, _LDDRDR, 0x01000000);
158 lcdc_wait_bit(ch->lcdc, _LDSR, 2, 0);
159 lcdc_write(ch->lcdc, _LDDRAR, 1 | (lcdc_chan_is_sublcd(ch) ? 2 : 0));
160 udelay(1);
161
162 return lcdc_read(ch->lcdc, _LDDRDR) & 0xffff;
163}
164
165struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
166 lcdc_sys_write_index,
167 lcdc_sys_write_data,
168 lcdc_sys_read_data,
169};
170
171static void sh_mobile_lcdc_start_stop(struct sh_mobile_lcdc_priv *priv,
172 int start)
173{
174 unsigned long tmp = lcdc_read(priv, _LDCNT2R);
175 int k;
176
177 /* start or stop the lcdc */
178 if (start)
179 lcdc_write(priv, _LDCNT2R, tmp | START_LCDC);
180 else
181 lcdc_write(priv, _LDCNT2R, tmp & ~START_LCDC);
182
183 /* wait until power is applied/stopped on all channels */
184 for (k = 0; k < ARRAY_SIZE(priv->ch); k++)
185 if (lcdc_read(priv, _LDCNT2R) & priv->ch[k].enabled)
186 while (1) {
187 tmp = lcdc_read_chan(&priv->ch[k], LDPMR) & 3;
188 if (start && tmp == 3)
189 break;
190 if (!start && tmp == 0)
191 break;
192 cpu_relax();
193 }
194
195 if (!start)
196 lcdc_write(priv, _LDDCKSTPR, 1); /* stop dotclock */
197}
198
199static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
200{
201 struct sh_mobile_lcdc_chan *ch;
202 struct fb_videomode *lcd_cfg;
203 struct sh_mobile_lcdc_board_cfg *board_cfg;
204 unsigned long tmp;
205 int k, m;
206 int ret = 0;
207
208 /* reset */
209 lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) | LCDC_RESET);
210 lcdc_wait_bit(priv, _LDCNT2R, LCDC_RESET, 0);
211
212 /* enable LCDC channels */
213 tmp = lcdc_read(priv, _LDCNT2R);
214 tmp |= priv->ch[0].enabled;
215 tmp |= priv->ch[1].enabled;
216 lcdc_write(priv, _LDCNT2R, tmp);
217
218 /* read data from external memory, avoid using the BEU for now */
219 lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) & ~DISPLAY_BEU);
220
221 /* stop the lcdc first */
222 sh_mobile_lcdc_start_stop(priv, 0);
223
224 /* configure clocks */
225 tmp = priv->lddckr;
226 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
227 ch = &priv->ch[k];
228
229 if (!priv->ch[k].enabled)
230 continue;
231
232 m = ch->cfg.clock_divider;
233 if (!m)
234 continue;
235
236 if (m == 1)
237 m = 1 << 6;
238 tmp |= m << (lcdc_chan_is_sublcd(ch) ? 8 : 0);
239
240 lcdc_write_chan(ch, LDDCKPAT1R, 0x00000000);
241 lcdc_write_chan(ch, LDDCKPAT2R, (1 << (m/2)) - 1);
242 }
243
244 lcdc_write(priv, _LDDCKR, tmp);
245
246 /* start dotclock again */
247 lcdc_write(priv, _LDDCKSTPR, 0);
248 lcdc_wait_bit(priv, _LDDCKSTPR, ~0, 0);
249
250 /* interrupts are disabled */
251 lcdc_write(priv, _LDINTR, 0);
252
253 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
254 ch = &priv->ch[k];
255 lcd_cfg = &ch->cfg.lcd_cfg;
256
257 if (!ch->enabled)
258 continue;
259
260 tmp = ch->ldmt1r_value;
261 tmp |= (lcd_cfg->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1 << 28;
262 tmp |= (lcd_cfg->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1 << 27;
263 lcdc_write_chan(ch, LDMT1R, tmp);
264
265 /* setup SYS bus */
266 lcdc_write_chan(ch, LDMT2R, ch->cfg.sys_bus_cfg.ldmt2r);
267 lcdc_write_chan(ch, LDMT3R, ch->cfg.sys_bus_cfg.ldmt3r);
268
269 /* horizontal configuration */
270 tmp = lcd_cfg->xres + lcd_cfg->hsync_len;
271 tmp += lcd_cfg->left_margin;
272 tmp += lcd_cfg->right_margin;
273 tmp /= 8; /* HTCN */
274 tmp |= (lcd_cfg->xres / 8) << 16; /* HDCN */
275 lcdc_write_chan(ch, LDHCNR, tmp);
276
277 tmp = lcd_cfg->xres;
278 tmp += lcd_cfg->right_margin;
279 tmp /= 8; /* HSYNP */
280 tmp |= (lcd_cfg->hsync_len / 8) << 16; /* HSYNW */
281 lcdc_write_chan(ch, LDHSYNR, tmp);
282
283 /* power supply */
284 lcdc_write_chan(ch, LDPMR, 0);
285
286 /* vertical configuration */
287 tmp = lcd_cfg->yres + lcd_cfg->vsync_len;
288 tmp += lcd_cfg->upper_margin;
289 tmp += lcd_cfg->lower_margin; /* VTLN */
290 tmp |= lcd_cfg->yres << 16; /* VDLN */
291 lcdc_write_chan(ch, LDVLNR, tmp);
292
293 tmp = lcd_cfg->yres;
294 tmp += lcd_cfg->lower_margin; /* VSYNP */
295 tmp |= lcd_cfg->vsync_len << 16; /* VSYNW */
296 lcdc_write_chan(ch, LDVSYNR, tmp);
297
298 board_cfg = &ch->cfg.board_cfg;
299 if (board_cfg->setup_sys)
300 ret = board_cfg->setup_sys(board_cfg->board_data, ch,
301 &sh_mobile_lcdc_sys_bus_ops);
302 if (ret)
303 return ret;
304 }
305
306 /* --- display_lcdc_data() --- */
307 lcdc_write(priv, _LDINTR, 0x00000f00);
308
309 /* word and long word swap */
310 lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 6);
311
312 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
313 ch = &priv->ch[k];
314
315 if (!priv->ch[k].enabled)
316 continue;
317
318 /* set bpp format in PKF[4:0] */
319 tmp = lcdc_read_chan(ch, LDDFR);
320 tmp &= ~(0x0001001f);
321 tmp |= (priv->ch[k].info.var.bits_per_pixel == 16) ? 3 : 0;
322 lcdc_write_chan(ch, LDDFR, tmp);
323
324 /* point out our frame buffer */
325 lcdc_write_chan(ch, LDSA1R, ch->info.fix.smem_start);
326
327 /* set line size */
328 lcdc_write_chan(ch, LDMLSR, ch->info.fix.line_length);
329
330 /* continuous read mode */
331 lcdc_write_chan(ch, LDSM1R, 0);
332 }
333
334 /* display output */
335 lcdc_write(priv, _LDCNT1R, LCDC_ENABLE);
336
337 /* start the lcdc */
338 sh_mobile_lcdc_start_stop(priv, 1);
339
340 /* tell the board code to enable the panel */
341 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
342 ch = &priv->ch[k];
343 board_cfg = &ch->cfg.board_cfg;
344 if (board_cfg->display_on)
345 board_cfg->display_on(board_cfg->board_data);
346 }
347
348 return 0;
349}
350
351static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
352{
353 struct sh_mobile_lcdc_chan *ch;
354 struct sh_mobile_lcdc_board_cfg *board_cfg;
355 int k;
356
357 /* tell the board code to disable the panel */
358 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
359 ch = &priv->ch[k];
360 board_cfg = &ch->cfg.board_cfg;
361 if (board_cfg->display_off)
362 board_cfg->display_off(board_cfg->board_data);
363 }
364
365 /* stop the lcdc */
366 sh_mobile_lcdc_start_stop(priv, 0);
367}
368
369static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch)
370{
371 int ifm, miftyp;
372
373 switch (ch->cfg.interface_type) {
374 case RGB8: ifm = 0; miftyp = 0; break;
375 case RGB9: ifm = 0; miftyp = 4; break;
376 case RGB12A: ifm = 0; miftyp = 5; break;
377 case RGB12B: ifm = 0; miftyp = 6; break;
378 case RGB16: ifm = 0; miftyp = 7; break;
379 case RGB18: ifm = 0; miftyp = 10; break;
380 case RGB24: ifm = 0; miftyp = 11; break;
381 case SYS8A: ifm = 1; miftyp = 0; break;
382 case SYS8B: ifm = 1; miftyp = 1; break;
383 case SYS8C: ifm = 1; miftyp = 2; break;
384 case SYS8D: ifm = 1; miftyp = 3; break;
385 case SYS9: ifm = 1; miftyp = 4; break;
386 case SYS12: ifm = 1; miftyp = 5; break;
387 case SYS16A: ifm = 1; miftyp = 7; break;
388 case SYS16B: ifm = 1; miftyp = 8; break;
389 case SYS16C: ifm = 1; miftyp = 9; break;
390 case SYS18: ifm = 1; miftyp = 10; break;
391 case SYS24: ifm = 1; miftyp = 11; break;
392 default: goto bad;
393 }
394
395 /* SUBLCD only supports SYS interface */
396 if (lcdc_chan_is_sublcd(ch)) {
397 if (ifm == 0)
398 goto bad;
399 else
400 ifm = 0;
401 }
402
403 ch->ldmt1r_value = (ifm << 12) | miftyp;
404 return 0;
405 bad:
406 return -EINVAL;
407}
408
409static int sh_mobile_lcdc_setup_clocks(struct device *dev, int clock_source,
410 struct sh_mobile_lcdc_priv *priv)
411{
412 char *str;
413 int icksel;
414
415 switch (clock_source) {
416 case LCDC_CLK_BUS: str = "bus_clk"; icksel = 0; break;
417 case LCDC_CLK_PERIPHERAL: str = "peripheral_clk"; icksel = 1; break;
418 case LCDC_CLK_EXTERNAL: str = NULL; icksel = 2; break;
419 default:
420 return -EINVAL;
421 }
422
423 priv->lddckr = icksel << 16;
424
425 if (str) {
426 priv->clk = clk_get(dev, str);
427 if (IS_ERR(priv->clk)) {
428 dev_err(dev, "cannot get clock %s\n", str);
429 return PTR_ERR(priv->clk);
430 }
431
432 clk_enable(priv->clk);
433 }
434
435 return 0;
436}
437
438static int sh_mobile_lcdc_setcolreg(u_int regno,
439 u_int red, u_int green, u_int blue,
440 u_int transp, struct fb_info *info)
441{
442 u32 *palette = info->pseudo_palette;
443
444 if (regno >= PALETTE_NR)
445 return -EINVAL;
446
447 /* only FB_VISUAL_TRUECOLOR supported */
448
449 red >>= 16 - info->var.red.length;
450 green >>= 16 - info->var.green.length;
451 blue >>= 16 - info->var.blue.length;
452 transp >>= 16 - info->var.transp.length;
453
454 palette[regno] = (red << info->var.red.offset) |
455 (green << info->var.green.offset) |
456 (blue << info->var.blue.offset) |
457 (transp << info->var.transp.offset);
458
459 return 0;
460}
461
462static struct fb_fix_screeninfo sh_mobile_lcdc_fix = {
463 .id = "SH Mobile LCDC",
464 .type = FB_TYPE_PACKED_PIXELS,
465 .visual = FB_VISUAL_TRUECOLOR,
466 .accel = FB_ACCEL_NONE,
467};
468
469static struct fb_ops sh_mobile_lcdc_ops = {
470 .fb_setcolreg = sh_mobile_lcdc_setcolreg,
471 .fb_fillrect = cfb_fillrect,
472 .fb_copyarea = cfb_copyarea,
473 .fb_imageblit = cfb_imageblit,
474};
475
476static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp)
477{
478 switch (bpp) {
479 case 16: /* PKF[4:0] = 00011 - RGB 565 */
480 var->red.offset = 11;
481 var->red.length = 5;
482 var->green.offset = 5;
483 var->green.length = 6;
484 var->blue.offset = 0;
485 var->blue.length = 5;
486 var->transp.offset = 0;
487 var->transp.length = 0;
488 break;
489
490	case 32: /* PKF[4:0] = 00000 - RGB 888
491		  * the sh7722 datasheet says 00RRGGBB, but the observed
492		  * layout is GGBB00RR; likely because LDDDSR word swap is on.
493		  */
494 var->red.offset = 0;
495 var->red.length = 8;
496 var->green.offset = 24;
497 var->green.length = 8;
498 var->blue.offset = 16;
499 var->blue.length = 8;
500 var->transp.offset = 0;
501 var->transp.length = 0;
502 break;
503 default:
504 return -EINVAL;
505 }
506 var->bits_per_pixel = bpp;
507 var->red.msb_right = 0;
508 var->green.msb_right = 0;
509 var->blue.msb_right = 0;
510 var->transp.msb_right = 0;
511 return 0;
512}
513
514static int sh_mobile_lcdc_remove(struct platform_device *pdev);
515
516static int __init sh_mobile_lcdc_probe(struct platform_device *pdev)
517{
518 struct fb_info *info;
519 struct sh_mobile_lcdc_priv *priv;
520 struct sh_mobile_lcdc_info *pdata;
521 struct sh_mobile_lcdc_chan_cfg *cfg;
522 struct resource *res;
523 int error;
524 void *buf;
525 int i, j;
526
527 if (!pdev->dev.platform_data) {
528 dev_err(&pdev->dev, "no platform data defined\n");
529 error = -EINVAL;
530 goto err0;
531 }
532
533 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
534 if (res == NULL) {
535 dev_err(&pdev->dev, "cannot find IO resource\n");
536 error = -ENOENT;
537 goto err0;
538 }
539
540 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
541 if (!priv) {
542 dev_err(&pdev->dev, "cannot allocate device data\n");
543 error = -ENOMEM;
544 goto err0;
545 }
546
547 platform_set_drvdata(pdev, priv);
548 pdata = pdev->dev.platform_data;
549
550 j = 0;
551 for (i = 0; i < ARRAY_SIZE(pdata->ch); i++) {
552 priv->ch[j].lcdc = priv;
553 memcpy(&priv->ch[j].cfg, &pdata->ch[i], sizeof(pdata->ch[i]));
554
555		error = sh_mobile_lcdc_check_interface(&priv->ch[j]);
556 if (error) {
557 dev_err(&pdev->dev, "unsupported interface type\n");
558 goto err1;
559 }
560
561 switch (pdata->ch[i].chan) {
562 case LCDC_CHAN_MAINLCD:
563 priv->ch[j].enabled = 1 << 1;
564 priv->ch[j].reg_offs = lcdc_offs_mainlcd;
565 j++;
566 break;
567 case LCDC_CHAN_SUBLCD:
568 priv->ch[j].enabled = 1 << 2;
569 priv->ch[j].reg_offs = lcdc_offs_sublcd;
570 j++;
571 break;
572 }
573 }
574
575 if (!j) {
576 dev_err(&pdev->dev, "no channels defined\n");
577 error = -EINVAL;
578 goto err1;
579 }
580
581 error = sh_mobile_lcdc_setup_clocks(&pdev->dev,
582 pdata->clock_source, priv);
583 if (error) {
584 dev_err(&pdev->dev, "unable to setup clocks\n");
585 goto err1;
586 }
587
588 priv->lddckr = pdata->lddckr;
589 priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1);
590
591 for (i = 0; i < j; i++) {
592 info = &priv->ch[i].info;
593 cfg = &priv->ch[i].cfg;
594
595 info->fbops = &sh_mobile_lcdc_ops;
596 info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres;
597 info->var.yres = info->var.yres_virtual = cfg->lcd_cfg.yres;
598 info->var.activate = FB_ACTIVATE_NOW;
599 error = sh_mobile_lcdc_set_bpp(&info->var, cfg->bpp);
600 if (error)
601 break;
602
603 info->fix = sh_mobile_lcdc_fix;
604 info->fix.line_length = cfg->lcd_cfg.xres * (cfg->bpp / 8);
605 info->fix.smem_len = info->fix.line_length * cfg->lcd_cfg.yres;
606
607 buf = dma_alloc_coherent(&pdev->dev, info->fix.smem_len,
608 &priv->ch[i].dma_handle, GFP_KERNEL);
609 if (!buf) {
610 dev_err(&pdev->dev, "unable to allocate buffer\n");
611 error = -ENOMEM;
612 break;
613 }
614
615 info->pseudo_palette = &priv->ch[i].pseudo_palette;
616 info->flags = FBINFO_FLAG_DEFAULT;
617
618 error = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
619 if (error < 0) {
620 dev_err(&pdev->dev, "unable to allocate cmap\n");
621 dma_free_coherent(&pdev->dev, info->fix.smem_len,
622 buf, priv->ch[i].dma_handle);
623 break;
624 }
625
626 memset(buf, 0, info->fix.smem_len);
627 info->fix.smem_start = priv->ch[i].dma_handle;
628 info->screen_base = buf;
629 info->device = &pdev->dev;
630 }
631
632 if (error)
633 goto err1;
634
635 error = sh_mobile_lcdc_start(priv);
636 if (error) {
637 dev_err(&pdev->dev, "unable to start hardware\n");
638 goto err1;
639 }
640
641 for (i = 0; i < j; i++) {
642 error = register_framebuffer(&priv->ch[i].info);
643 if (error < 0)
644 goto err1;
645 }
646
647 for (i = 0; i < j; i++) {
648 info = &priv->ch[i].info;
649 dev_info(info->dev,
650 "registered %s/%s as %dx%d %dbpp.\n",
651 pdev->name,
652 (priv->ch[i].cfg.chan == LCDC_CHAN_MAINLCD) ?
653 "mainlcd" : "sublcd",
654 (int) priv->ch[i].cfg.lcd_cfg.xres,
655 (int) priv->ch[i].cfg.lcd_cfg.yres,
656 priv->ch[i].cfg.bpp);
657 }
658
659 return 0;
660 err1:
661 sh_mobile_lcdc_remove(pdev);
662 err0:
663 return error;
664}
665
666static int sh_mobile_lcdc_remove(struct platform_device *pdev)
667{
668 struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
669 struct fb_info *info;
670 int i;
671
672 for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
673 if (priv->ch[i].info.dev)
674 unregister_framebuffer(&priv->ch[i].info);
675
676 sh_mobile_lcdc_stop(priv);
677
678 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
679 info = &priv->ch[i].info;
680
681 if (!info->device)
682 continue;
683
684 dma_free_coherent(&pdev->dev, info->fix.smem_len,
685 info->screen_base, priv->ch[i].dma_handle);
686 fb_dealloc_cmap(&info->cmap);
687 }
688
689 if (priv->clk) {
690 clk_disable(priv->clk);
691 clk_put(priv->clk);
692 }
693
694 if (priv->base)
695 iounmap(priv->base);
696
697 kfree(priv);
698 return 0;
699}
700
701static struct platform_driver sh_mobile_lcdc_driver = {
702 .driver = {
703 .name = "sh_mobile_lcdc_fb",
704 .owner = THIS_MODULE,
705 },
706 .probe = sh_mobile_lcdc_probe,
707 .remove = sh_mobile_lcdc_remove,
708};
709
710static int __init sh_mobile_lcdc_init(void)
711{
712 return platform_driver_register(&sh_mobile_lcdc_driver);
713}
714
715static void __exit sh_mobile_lcdc_exit(void)
716{
717 platform_driver_unregister(&sh_mobile_lcdc_driver);
718}
719
720module_init(sh_mobile_lcdc_init);
721module_exit(sh_mobile_lcdc_exit);
722
723MODULE_DESCRIPTION("SuperH Mobile LCDC Framebuffer driver");
724MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
725MODULE_LICENSE("GPL v2");
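For context, sh_mobile_lcdc_probe() above is driven entirely by board-supplied platform data. A minimal, illustrative sketch of what a board file might pass follows; the timing values are invented for illustration, only the field names and the "sh_mobile_lcdc_fb" device name come from the driver above, and a real board would also supply an IORESOURCE_MEM entry for the LCDC registers (the probe returns -ENOENT without one).

static struct sh_mobile_lcdc_info lcdc_info = {
	.clock_source = LCDC_CLK_BUS,
	.ch[0] = {
		.chan = LCDC_CHAN_MAINLCD,
		.bpp = 16,
		.interface_type = RGB16,
		.lcd_cfg = {
			.xres = 640,	/* panel timings are board specific */
			.yres = 480,
		},
	},
};

static struct platform_device lcdc_device = {
	.name = "sh_mobile_lcdc_fb",	/* matches the driver name above */
	.dev = {
		.platform_data = &lcdc_info,
	},
};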
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h
index f40a680df86f..b96005c39c67 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/sis/init.h
@@ -73,7 +73,6 @@
73#ifdef SIS_CP 73#ifdef SIS_CP
74#undef SIS_CP 74#undef SIS_CP
75#endif 75#endif
76#include <linux/version.h>
77#include <linux/types.h> 76#include <linux/types.h>
78#include <asm/io.h> 77#include <asm/io.h>
79#include <linux/fb.h> 78#include <linux/fb.h>
diff --git a/drivers/video/sis/init301.h b/drivers/video/sis/init301.h
index 7708e1e1d99e..51d99222375d 100644
--- a/drivers/video/sis/init301.h
+++ b/drivers/video/sis/init301.h
@@ -67,7 +67,6 @@
67#ifdef SIS_CP 67#ifdef SIS_CP
68#undef SIS_CP 68#undef SIS_CP
69#endif 69#endif
70#include <linux/version.h>
71#include <linux/types.h> 70#include <linux/types.h>
72#include <asm/io.h> 71#include <asm/io.h>
73#include <linux/fb.h> 72#include <linux/fb.h>
diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/sis/initextlfb.c
index 47a33501549d..99c04a4855d1 100644
--- a/drivers/video/sis/initextlfb.c
+++ b/drivers/video/sis/initextlfb.c
@@ -30,7 +30,6 @@
30#include "vgatypes.h" 30#include "vgatypes.h"
31#include "vstruct.h" 31#include "vstruct.h"
32 32
33#include <linux/version.h>
34#include <linux/types.h> 33#include <linux/types.h>
35#include <linux/fb.h> 34#include <linux/fb.h>
36 35
diff --git a/drivers/video/sis/osdef.h b/drivers/video/sis/osdef.h
index c1492782cb18..6ff8f988a1a7 100644
--- a/drivers/video/sis/osdef.h
+++ b/drivers/video/sis/osdef.h
@@ -87,7 +87,6 @@
87/**********************************************************************/ 87/**********************************************************************/
88 88
89#ifdef SIS_LINUX_KERNEL 89#ifdef SIS_LINUX_KERNEL
90#include <linux/version.h>
91 90
92#ifdef CONFIG_FB_SIS_300 91#ifdef CONFIG_FB_SIS_300
93#define SIS300 92#define SIS300
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index a14e82211037..7c5710e3fb56 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -24,8 +24,6 @@
24#ifndef _SIS_H_ 24#ifndef _SIS_H_
25#define _SIS_H_ 25#define _SIS_H_
26 26
27#include <linux/version.h>
28
29#include "osdef.h" 27#include "osdef.h"
30#include <video/sisfb.h> 28#include <video/sisfb.h>
31 29
@@ -42,16 +40,6 @@
42#define SIS_NEW_CONFIG_COMPAT 40#define SIS_NEW_CONFIG_COMPAT
43#endif /* CONFIG_COMPAT */ 41#endif /* CONFIG_COMPAT */
44 42
45#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)
46#define SIS_IOTYPE1 void __iomem
47#define SIS_IOTYPE2 __iomem
48#define SISINITSTATIC static
49#else
50#define SIS_IOTYPE1 unsigned char
51#define SIS_IOTYPE2
52#define SISINITSTATIC
53#endif
54
55#undef SISFBDEBUG 43#undef SISFBDEBUG
56 44
57#ifdef SISFBDEBUG 45#ifdef SISFBDEBUG
@@ -505,8 +493,8 @@ struct sis_video_info {
505 493
506 unsigned long UMAsize, LFBsize; 494 unsigned long UMAsize, LFBsize;
507 495
508 SIS_IOTYPE1 *video_vbase; 496 void __iomem *video_vbase;
509 SIS_IOTYPE1 *mmio_vbase; 497 void __iomem *mmio_vbase;
510 498
511 unsigned char *bios_abase; 499 unsigned char *bios_abase;
512 500
@@ -533,8 +521,8 @@ struct sis_video_info {
533 int sisfb_nocrt2rate; 521 int sisfb_nocrt2rate;
534 522
535 u32 heapstart; /* offset */ 523 u32 heapstart; /* offset */
536 SIS_IOTYPE1 *sisfb_heap_start; /* address */ 524 void __iomem *sisfb_heap_start; /* address */
537 SIS_IOTYPE1 *sisfb_heap_end; /* address */ 525 void __iomem *sisfb_heap_end; /* address */
538 u32 sisfb_heap_size; 526 u32 sisfb_heap_size;
539 int havenoheap; 527 int havenoheap;
540 528
@@ -612,7 +600,7 @@ struct sis_video_info {
612 u8 detectedpdca; 600 u8 detectedpdca;
613 u8 detectedlcda; 601 u8 detectedlcda;
614 602
615 SIS_IOTYPE1 *hwcursor_vbase; 603 void __iomem *hwcursor_vbase;
616 604
617 int chronteltype; 605 int chronteltype;
618 int tvxpos, tvypos; 606 int tvxpos, tvypos;
diff --git a/drivers/video/sis/sis_accel.c b/drivers/video/sis/sis_accel.c
index 7addf91d2fea..ceb434c95c0d 100644
--- a/drivers/video/sis/sis_accel.c
+++ b/drivers/video/sis/sis_accel.c
@@ -28,7 +28,6 @@
28 * for more information and updates) 28 * for more information and updates)
29 */ 29 */
30 30
31#include <linux/version.h>
32#include <linux/module.h> 31#include <linux/module.h>
33#include <linux/kernel.h> 32#include <linux/kernel.h>
34#include <linux/fb.h> 33#include <linux/fb.h>
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index b9343844cd1f..346d6458cf76 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -33,7 +33,6 @@
33 * 33 *
34 */ 34 */
35 35
36#include <linux/version.h>
37#include <linux/module.h> 36#include <linux/module.h>
38#include <linux/moduleparam.h> 37#include <linux/moduleparam.h>
39#include <linux/kernel.h> 38#include <linux/kernel.h>
@@ -41,13 +40,7 @@
41#include <linux/errno.h> 40#include <linux/errno.h>
42#include <linux/string.h> 41#include <linux/string.h>
43#include <linux/mm.h> 42#include <linux/mm.h>
44
45#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
46#include <linux/tty.h>
47#else
48#include <linux/screen_info.h> 43#include <linux/screen_info.h>
49#endif
50
51#include <linux/slab.h> 44#include <linux/slab.h>
52#include <linux/fb.h> 45#include <linux/fb.h>
53#include <linux/selection.h> 46#include <linux/selection.h>
@@ -1167,11 +1160,7 @@ sisfb_set_mode(struct sis_video_info *ivideo, int clrscrn)
1167 unsigned short modeno = ivideo->mode_no; 1160 unsigned short modeno = ivideo->mode_no;
1168 1161
1169 /* >=2.6.12's fbcon clears the screen anyway */ 1162 /* >=2.6.12's fbcon clears the screen anyway */
1170#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
1171 if(!clrscrn) modeno |= 0x80;
1172#else
1173 modeno |= 0x80; 1163 modeno |= 0x80;
1174#endif
1175 1164
1176 outSISIDXREG(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD); 1165 outSISIDXREG(SISSR, IND_SIS_PASSWORD, SIS_PASSWORD);
1177 1166
@@ -1436,11 +1425,8 @@ sisfb_set_par(struct fb_info *info)
1436 if((err = sisfb_do_set_var(&info->var, 1, info))) 1425 if((err = sisfb_do_set_var(&info->var, 1, info)))
1437 return err; 1426 return err;
1438 1427
1439#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
1440 sisfb_get_fix(&info->fix, info->currcon, info);
1441#else
1442 sisfb_get_fix(&info->fix, -1, info); 1428 sisfb_get_fix(&info->fix, -1, info);
1443#endif 1429
1444 return 0; 1430 return 0;
1445} 1431}
1446 1432
@@ -1676,14 +1662,8 @@ sisfb_blank(int blank, struct fb_info *info)
1676 1662
1677/* ----------- FBDev related routines for all series ---------- */ 1663/* ----------- FBDev related routines for all series ---------- */
1678 1664
1679#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
1680static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, 1665static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
1681 unsigned long arg) 1666 unsigned long arg)
1682#else
1683static int sisfb_ioctl(struct inode *inode, struct file *file,
1684 unsigned int cmd, unsigned long arg,
1685 struct fb_info *info)
1686#endif
1687{ 1667{
1688 struct sis_video_info *ivideo = (struct sis_video_info *)info->par; 1668 struct sis_video_info *ivideo = (struct sis_video_info *)info->par;
1689 struct sis_memreq sismemreq; 1669 struct sis_memreq sismemreq;
@@ -3986,8 +3966,7 @@ sisfb_handle_command(struct sis_video_info *ivideo, struct sisfb_cmd *sisfb_comm
3986} 3966}
3987 3967
3988#ifndef MODULE 3968#ifndef MODULE
3989SISINITSTATIC int __init 3969static int __init sisfb_setup(char *options)
3990sisfb_setup(char *options)
3991{ 3970{
3992 char *this_opt; 3971 char *this_opt;
3993 3972
@@ -4086,9 +4065,9 @@ sisfb_setup(char *options)
4086#endif 4065#endif
4087 4066
4088static int __devinit 4067static int __devinit
4089sisfb_check_rom(SIS_IOTYPE1 *rom_base, struct sis_video_info *ivideo) 4068sisfb_check_rom(void __iomem *rom_base, struct sis_video_info *ivideo)
4090{ 4069{
4091 SIS_IOTYPE1 *rom; 4070 void __iomem *rom;
4092 int romptr; 4071 int romptr;
4093 4072
4094 if((readb(rom_base) != 0x55) || (readb(rom_base + 1) != 0xaa)) 4073 if((readb(rom_base) != 0x55) || (readb(rom_base + 1) != 0xaa))
@@ -4117,10 +4096,9 @@ static unsigned char * __devinit
4117sisfb_find_rom(struct pci_dev *pdev) 4096sisfb_find_rom(struct pci_dev *pdev)
4118{ 4097{
4119 struct sis_video_info *ivideo = pci_get_drvdata(pdev); 4098 struct sis_video_info *ivideo = pci_get_drvdata(pdev);
4120 SIS_IOTYPE1 *rom_base; 4099 void __iomem *rom_base;
4121 unsigned char *myrombase = NULL; 4100 unsigned char *myrombase = NULL;
4122 u32 temp; 4101 u32 temp;
4123#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
4124 size_t romsize; 4102 size_t romsize;
4125 4103
4126 /* First, try the official pci ROM functions (except 4104 /* First, try the official pci ROM functions (except
@@ -4151,7 +4129,6 @@ sisfb_find_rom(struct pci_dev *pdev)
4151 } 4129 }
4152 4130
4153 if(myrombase) return myrombase; 4131 if(myrombase) return myrombase;
4154#endif
4155 4132
4156 /* Otherwise do it the conventional way. */ 4133 /* Otherwise do it the conventional way. */
4157 4134
@@ -4225,7 +4202,7 @@ sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize,
4225static int __devinit 4202static int __devinit
4226sisfb_post_300_buswidth(struct sis_video_info *ivideo) 4203sisfb_post_300_buswidth(struct sis_video_info *ivideo)
4227{ 4204{
4228 SIS_IOTYPE1 *FBAddress = ivideo->video_vbase; 4205 void __iomem *FBAddress = ivideo->video_vbase;
4229 unsigned short temp; 4206 unsigned short temp;
4230 unsigned char reg; 4207 unsigned char reg;
4231 int i, j; 4208 int i, j;
@@ -4273,7 +4250,7 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
4273 int PseudoRankCapacity, int PseudoAdrPinCount, 4250 int PseudoRankCapacity, int PseudoAdrPinCount,
4274 unsigned int mapsize) 4251 unsigned int mapsize)
4275{ 4252{
4276 SIS_IOTYPE1 *FBAddr = ivideo->video_vbase; 4253 void __iomem *FBAddr = ivideo->video_vbase;
4277 unsigned short sr14; 4254 unsigned short sr14;
4278 unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid; 4255 unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
4279 unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage; 4256 unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
@@ -5829,7 +5806,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5829 ivideo->engineok = 0; 5806 ivideo->engineok = 0;
5830 5807
5831 ivideo->sisfb_was_boot_device = 0; 5808 ivideo->sisfb_was_boot_device = 0;
5832#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)) 5809
5833 if(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) { 5810 if(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) {
5834 if(ivideo->sisvga_enabled) 5811 if(ivideo->sisvga_enabled)
5835 ivideo->sisfb_was_boot_device = 1; 5812 ivideo->sisfb_was_boot_device = 1;
@@ -5840,7 +5817,6 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5840 "as the primary VGA device\n"); 5817 "as the primary VGA device\n");
5841 } 5818 }
5842 } 5819 }
5843#endif
5844 5820
5845 ivideo->sisfb_parm_mem = sisfb_parm_mem; 5821 ivideo->sisfb_parm_mem = sisfb_parm_mem;
5846 ivideo->sisfb_accel = sisfb_accel; 5822 ivideo->sisfb_accel = sisfb_accel;
@@ -6010,7 +5986,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6010 ivideo->modeprechange = reg & 0x7f; 5986 ivideo->modeprechange = reg & 0x7f;
6011 } else if(ivideo->sisvga_enabled) { 5987 } else if(ivideo->sisvga_enabled) {
6012#if defined(__i386__) || defined(__x86_64__) 5988#if defined(__i386__) || defined(__x86_64__)
6013 unsigned char SIS_IOTYPE2 *tt = ioremap(0x400, 0x100); 5989 unsigned char __iomem *tt = ioremap(0x400, 0x100);
6014 if(tt) { 5990 if(tt) {
6015 ivideo->modeprechange = readb(tt + 0x49); 5991 ivideo->modeprechange = readb(tt + 0x49);
6016 iounmap(tt); 5992 iounmap(tt);
@@ -6503,7 +6479,7 @@ static struct pci_driver sisfb_driver = {
6503 .remove = __devexit_p(sisfb_remove) 6479 .remove = __devexit_p(sisfb_remove)
6504}; 6480};
6505 6481
6506SISINITSTATIC int __init sisfb_init(void) 6482static int __init sisfb_init(void)
6507{ 6483{
6508#ifndef MODULE 6484#ifndef MODULE
6509 char *options = NULL; 6485 char *options = NULL;
diff --git a/drivers/video/sis/sis_main.h b/drivers/video/sis/sis_main.h
index 3e3b7fa05d6c..9540e977270e 100644
--- a/drivers/video/sis/sis_main.h
+++ b/drivers/video/sis/sis_main.h
@@ -665,11 +665,11 @@ static struct _customttable {
665 665
666/* Interface used by the world */ 666/* Interface used by the world */
667#ifndef MODULE 667#ifndef MODULE
668SISINITSTATIC int sisfb_setup(char *options); 668static int sisfb_setup(char *options);
669#endif 669#endif
670 670
671/* Interface to the low level console driver */ 671/* Interface to the low level console driver */
672SISINITSTATIC int sisfb_init(void); 672static int sisfb_init(void);
673 673
674/* fbdev routines */ 674/* fbdev routines */
675static int sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, 675static int sisfb_get_fix(struct fb_fix_screeninfo *fix, int con,
diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/sis/vgatypes.h
index b532fbd2b04c..81a22eaabfde 100644
--- a/drivers/video/sis/vgatypes.h
+++ b/drivers/video/sis/vgatypes.h
@@ -53,10 +53,6 @@
53#ifndef _VGATYPES_H_ 53#ifndef _VGATYPES_H_
54#define _VGATYPES_H_ 54#define _VGATYPES_H_
55 55
56#ifdef SIS_LINUX_KERNEL
57#include <linux/version.h>
58#endif
59
60#define SISIOMEMTYPE 56#define SISIOMEMTYPE
61 57
62#ifdef SIS_LINUX_KERNEL 58#ifdef SIS_LINUX_KERNEL
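The SIS_IOTYPE1/SIS_IOTYPE2 removals in the hunks above standardize the driver on the kernel's __iomem annotation, so every register access goes through the MMIO accessors instead of plain pointer dereferences. A minimal sketch of the idiom, assuming <linux/io.h> and with phys_base, len and offset as placeholder parameters rather than values taken from sisfb:

static int example_poke_mmio(unsigned long phys_base, unsigned long len,
			     unsigned long offset)
{
	void __iomem *regs = ioremap(phys_base, len);
	u8 val;

	if (!regs)
		return -ENOMEM;

	val = readb(regs + offset);		/* read via the accessor */
	writeb(val | 0x01, regs + offset);	/* never dereference directly */
	iounmap(regs);
	return 0;
}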
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 62321458f71a..df5336561d13 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -675,13 +675,13 @@ static struct fb_ops xxxfb_ops = {
675 * Initialization 675 * Initialization
676 */ 676 */
677 677
678/* static int __init xxxfb_probe (struct device *device) -- for platform devs */ 678/* static int __init xxxfb_probe (struct platform_device *pdev) -- for platform devs */
679static int __devinit xxxfb_probe(struct pci_dev *dev, 679static int __devinit xxxfb_probe(struct pci_dev *dev,
680 const struct pci_device_id *ent) 680 const struct pci_device_id *ent)
681{ 681{
682 struct fb_info *info; 682 struct fb_info *info;
683 struct xxx_par *par; 683 struct xxx_par *par;
684 struct device* device = &dev->dev; /* for pci drivers */ 684 struct device *device = &dev->dev; /* or &pdev->dev */
685 int cmap_len, retval; 685 int cmap_len, retval;
686 686
687 /* 687 /*
@@ -824,18 +824,18 @@ static int __devinit xxxfb_probe(struct pci_dev *dev,
824 return -EINVAL; 824 return -EINVAL;
825 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, 825 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
826 info->fix.id); 826 info->fix.id);
827 pci_set_drvdata(dev, info); /* or dev_set_drvdata(device, info) */ 827 pci_set_drvdata(dev, info); /* or platform_set_drvdata(pdev, info) */
828 return 0; 828 return 0;
829} 829}
830 830
831 /* 831 /*
832 * Cleanup 832 * Cleanup
833 */ 833 */
834/* static void __devexit xxxfb_remove(struct device *device) */ 834/* static void __devexit xxxfb_remove(struct platform_device *pdev) */
835static void __devexit xxxfb_remove(struct pci_dev *dev) 835static void __devexit xxxfb_remove(struct pci_dev *dev)
836{ 836{
837 struct fb_info *info = pci_get_drvdata(dev); 837 struct fb_info *info = pci_get_drvdata(dev);
838 /* or dev_get_drvdata(device); */ 838 /* or platform_get_drvdata(pdev); */
839 839
840 if (info) { 840 if (info) {
841 unregister_framebuffer(info); 841 unregister_framebuffer(info);
@@ -961,18 +961,17 @@ static int xxxfb_resume(struct platform_dev *dev)
961#define xxxfb_resume NULL 961#define xxxfb_resume NULL
962#endif /* CONFIG_PM */ 962#endif /* CONFIG_PM */
963 963
964static struct device_driver xxxfb_driver = { 964static struct platform_device_driver xxxfb_driver = {
965 .name = "xxxfb",
966 .bus = &platform_bus_type,
967 .probe = xxxfb_probe, 965 .probe = xxxfb_probe,
968 .remove = xxxfb_remove, 966 .remove = xxxfb_remove,
969 .suspend = xxxfb_suspend, /* optional but recommended */ 967 .suspend = xxxfb_suspend, /* optional but recommended */
970 .resume = xxxfb_resume, /* optional but recommended */ 968 .resume = xxxfb_resume, /* optional but recommended */
969 .driver = {
970 .name = "xxxfb",
971 },
971}; 972};
972 973
973static struct platform_device xxxfb_device = { 974static struct platform_device *xxxfb_device;
974 .name = "xxxfb",
975};
976 975
977#ifndef MODULE 976#ifndef MODULE
978 /* 977 /*
@@ -1002,12 +1001,16 @@ static int __init xxxfb_init(void)
1002 return -ENODEV; 1001 return -ENODEV;
1003 xxxfb_setup(option); 1002 xxxfb_setup(option);
1004#endif 1003#endif
1005 ret = driver_register(&xxxfb_driver); 1004 ret = platform_driver_register(&xxxfb_driver);
1006 1005
1007 if (!ret) { 1006 if (!ret) {
1008 ret = platform_device_register(&xxxfb_device); 1007 xxxfb_device = platform_device_register_simple("xxxfb", 0,
1009 if (ret) 1008 NULL, 0);
1010 driver_unregister(&xxxfb_driver); 1009
1010 if (IS_ERR(xxxfb_device)) {
1011 platform_driver_unregister(&xxxfb_driver);
1012 ret = PTR_ERR(xxxfb_device);
1013 }
1011 } 1014 }
1012 1015
1013 return ret; 1016 return ret;
@@ -1015,8 +1018,8 @@ static int __init xxxfb_init(void)
1015 1018
1016static void __exit xxxfb_exit(void) 1019static void __exit xxxfb_exit(void)
1017{ 1020{
1018 platform_device_unregister(&xxxfb_device); 1021 platform_device_unregister(xxxfb_device);
1019 driver_unregister(&xxxfb_driver); 1022 platform_driver_unregister(&xxxfb_driver);
1020} 1023}
1021#endif /* CONFIG_PCI */ 1024#endif /* CONFIG_PCI */
1022 1025
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 15d4a768b1f6..f94ae84a58cd 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -48,10 +48,15 @@ enum sm501_controller {
48 HEAD_PANEL = 1, 48 HEAD_PANEL = 1,
49}; 49};
50 50
51/* SM501 memory address */ 51/* SM501 memory address.
52 *
53 * This structure is used to track memory usage within the SM501 framebuffer
54 * allocation. The sm_addr field is stored as an offset as it is often used
55 * against both the physical and mapped addresses.
56 */
52struct sm501_mem { 57struct sm501_mem {
53 unsigned long size; 58 unsigned long size;
54 unsigned long sm_addr; 59 unsigned long sm_addr; /* offset from base of sm501 fb. */
55 void __iomem *k_addr; 60 void __iomem *k_addr;
56}; 61};
57 62
@@ -142,31 +147,68 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
142static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem, 147static int sm501_alloc_mem(struct sm501fb_info *inf, struct sm501_mem *mem,
143 unsigned int why, size_t size) 148 unsigned int why, size_t size)
144{ 149{
145 unsigned int ptr = 0; 150 struct sm501fb_par *par;
151 struct fb_info *fbi;
152 unsigned int ptr;
153 unsigned int end;
146 154
147 switch (why) { 155 switch (why) {
148 case SM501_MEMF_CURSOR: 156 case SM501_MEMF_CURSOR:
149 ptr = inf->fbmem_len - size; 157 ptr = inf->fbmem_len - size;
150 inf->fbmem_len = ptr; 158 inf->fbmem_len = ptr; /* adjust available memory. */
151 break; 159 break;
152 160
153 case SM501_MEMF_PANEL: 161 case SM501_MEMF_PANEL:
154 ptr = inf->fbmem_len - size; 162 ptr = inf->fbmem_len - size;
155 if (ptr < inf->fb[0]->fix.smem_len) 163 fbi = inf->fb[HEAD_CRT];
164
 165	/* round down: some programs, such as directfb, do not draw
 166	 * (0,0) correctly unless the start address is page aligned.
 167	 */
168
169 if (ptr > 0)
170 ptr &= ~(PAGE_SIZE - 1);
171
172 if (fbi && ptr < fbi->fix.smem_len)
173 return -ENOMEM;
174
175 if (ptr < 0)
156 return -ENOMEM; 176 return -ENOMEM;
157 177
158 break; 178 break;
159 179
160 case SM501_MEMF_CRT: 180 case SM501_MEMF_CRT:
161 ptr = 0; 181 ptr = 0;
182
183 /* check to see if we have panel memory allocated
 184	 * which would put a limit on available memory. */
185
186 fbi = inf->fb[HEAD_PANEL];
187 if (fbi) {
188 par = fbi->par;
189 end = par->screen.k_addr ? par->screen.sm_addr : inf->fbmem_len;
190 } else
191 end = inf->fbmem_len;
192
193 if ((ptr + size) > end)
194 return -ENOMEM;
195
162 break; 196 break;
163 197
164 case SM501_MEMF_ACCEL: 198 case SM501_MEMF_ACCEL:
165 ptr = inf->fb[0]->fix.smem_len; 199 fbi = inf->fb[HEAD_CRT];
200 ptr = fbi ? fbi->fix.smem_len : 0;
201
202 fbi = inf->fb[HEAD_PANEL];
203 if (fbi) {
204 par = fbi->par;
205 end = par->screen.sm_addr;
206 } else
207 end = inf->fbmem_len;
166 208
167 if ((ptr + size) > 209 if ((ptr + size) > end)
168 (inf->fb[1]->fix.smem_start - inf->fbmem_res->start))
169 return -ENOMEM; 210 return -ENOMEM;
211
170 break; 212 break;
171 213
172 default: 214 default:
@@ -663,15 +705,25 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
663 sm501fb_sync_regs(fbi); 705 sm501fb_sync_regs(fbi);
664 mdelay(10); 706 mdelay(10);
665 707
708 /* VBIASEN */
709
666 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) { 710 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) {
667 control |= SM501_DC_PANEL_CONTROL_BIAS; /* VBIASEN */ 711 if (pd->flags & SM501FB_FLAG_PANEL_INV_VBIASEN)
712 control &= ~SM501_DC_PANEL_CONTROL_BIAS;
713 else
714 control |= SM501_DC_PANEL_CONTROL_BIAS;
715
668 writel(control, ctrl_reg); 716 writel(control, ctrl_reg);
669 sm501fb_sync_regs(fbi); 717 sm501fb_sync_regs(fbi);
670 mdelay(10); 718 mdelay(10);
671 } 719 }
672 720
673 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) { 721 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) {
674 control |= SM501_DC_PANEL_CONTROL_FPEN; 722 if (pd->flags & SM501FB_FLAG_PANEL_INV_FPEN)
723 control &= ~SM501_DC_PANEL_CONTROL_FPEN;
724 else
725 control |= SM501_DC_PANEL_CONTROL_FPEN;
726
675 writel(control, ctrl_reg); 727 writel(control, ctrl_reg);
676 sm501fb_sync_regs(fbi); 728 sm501fb_sync_regs(fbi);
677 mdelay(10); 729 mdelay(10);
@@ -679,14 +731,22 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
679 } else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) { 731 } else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) {
680 /* disable panel power */ 732 /* disable panel power */
681 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) { 733 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_FPEN)) {
682 control &= ~SM501_DC_PANEL_CONTROL_FPEN; 734 if (pd->flags & SM501FB_FLAG_PANEL_INV_FPEN)
735 control |= SM501_DC_PANEL_CONTROL_FPEN;
736 else
737 control &= ~SM501_DC_PANEL_CONTROL_FPEN;
738
683 writel(control, ctrl_reg); 739 writel(control, ctrl_reg);
684 sm501fb_sync_regs(fbi); 740 sm501fb_sync_regs(fbi);
685 mdelay(10); 741 mdelay(10);
686 } 742 }
687 743
688 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) { 744 if (!(pd->flags & SM501FB_FLAG_PANEL_NO_VBIASEN)) {
689 control &= ~SM501_DC_PANEL_CONTROL_BIAS; 745 if (pd->flags & SM501FB_FLAG_PANEL_INV_VBIASEN)
746 control |= SM501_DC_PANEL_CONTROL_BIAS;
747 else
748 control &= ~SM501_DC_PANEL_CONTROL_BIAS;
749
690 writel(control, ctrl_reg); 750 writel(control, ctrl_reg);
691 sm501fb_sync_regs(fbi); 751 sm501fb_sync_regs(fbi);
692 mdelay(10); 752 mdelay(10);
@@ -1210,39 +1270,6 @@ static struct fb_ops sm501fb_ops_pnl = {
1210 .fb_imageblit = cfb_imageblit, 1270 .fb_imageblit = cfb_imageblit,
1211}; 1271};
1212 1272
1213/* sm501fb_info_alloc
1214 *
1215 * creates and initialises an sm501fb_info structure
1216*/
1217
1218static struct sm501fb_info *sm501fb_info_alloc(struct fb_info *fbinfo_crt,
1219 struct fb_info *fbinfo_pnl)
1220{
1221 struct sm501fb_info *info;
1222 struct sm501fb_par *par;
1223
1224 info = kzalloc(sizeof(struct sm501fb_info), GFP_KERNEL);
1225 if (info) {
1226 /* set the references back */
1227
1228 par = fbinfo_crt->par;
1229 par->info = info;
1230 par->head = HEAD_CRT;
1231 fbinfo_crt->pseudo_palette = &par->pseudo_palette;
1232
1233 par = fbinfo_pnl->par;
1234 par->info = info;
1235 par->head = HEAD_PANEL;
1236 fbinfo_pnl->pseudo_palette = &par->pseudo_palette;
1237
1238 /* store the two fbs into our info */
1239 info->fb[HEAD_CRT] = fbinfo_crt;
1240 info->fb[HEAD_PANEL] = fbinfo_pnl;
1241 }
1242
1243 return info;
1244}
1245
1246/* sm501_init_cursor 1273/* sm501_init_cursor
1247 * 1274 *
1248 * initialise hw cursor parameters 1275 * initialise hw cursor parameters
@@ -1250,10 +1277,16 @@ static struct sm501fb_info *sm501fb_info_alloc(struct fb_info *fbinfo_crt,
1250 1277
1251static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base) 1278static int sm501_init_cursor(struct fb_info *fbi, unsigned int reg_base)
1252{ 1279{
1253 struct sm501fb_par *par = fbi->par; 1280 struct sm501fb_par *par;
1254 struct sm501fb_info *info = par->info; 1281 struct sm501fb_info *info;
1255 int ret; 1282 int ret;
1256 1283
1284 if (fbi == NULL)
1285 return 0;
1286
1287 par = fbi->par;
1288 info = par->info;
1289
1257 par->cursor_regs = info->regs + reg_base; 1290 par->cursor_regs = info->regs + reg_base;
1258 1291
1259 ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024); 1292 ret = sm501_alloc_mem(info, &par->cursor, SM501_MEMF_CURSOR, 1024);
@@ -1281,13 +1314,10 @@ static int sm501fb_start(struct sm501fb_info *info,
1281 struct platform_device *pdev) 1314 struct platform_device *pdev)
1282{ 1315{
1283 struct resource *res; 1316 struct resource *res;
1284 struct device *dev; 1317 struct device *dev = &pdev->dev;
1285 int k; 1318 int k;
1286 int ret; 1319 int ret;
1287 1320
1288 info->dev = dev = &pdev->dev;
1289 platform_set_drvdata(pdev, info);
1290
1291 info->irq = ret = platform_get_irq(pdev, 0); 1321 info->irq = ret = platform_get_irq(pdev, 0);
1292 if (ret < 0) { 1322 if (ret < 0) {
1293 /* we currently do not use the IRQ */ 1323 /* we currently do not use the IRQ */
@@ -1390,11 +1420,6 @@ static void sm501fb_stop(struct sm501fb_info *info)
1390 kfree(info->regs_res); 1420 kfree(info->regs_res);
1391} 1421}
1392 1422
1393static void sm501fb_info_release(struct sm501fb_info *info)
1394{
1395 kfree(info);
1396}
1397
1398static int sm501fb_init_fb(struct fb_info *fb, 1423static int sm501fb_init_fb(struct fb_info *fb,
1399 enum sm501_controller head, 1424 enum sm501_controller head,
1400 const char *fbname) 1425 const char *fbname)
@@ -1539,36 +1564,93 @@ static struct sm501_platdata_fb sm501fb_def_pdata = {
1539static char driver_name_crt[] = "sm501fb-crt"; 1564static char driver_name_crt[] = "sm501fb-crt";
1540static char driver_name_pnl[] = "sm501fb-panel"; 1565static char driver_name_pnl[] = "sm501fb-panel";
1541 1566
1542static int __init sm501fb_probe(struct platform_device *pdev) 1567static int __devinit sm501fb_probe_one(struct sm501fb_info *info,
1568 enum sm501_controller head)
1543{ 1569{
1544 struct sm501fb_info *info; 1570 unsigned char *name = (head == HEAD_CRT) ? "crt" : "panel";
1545 struct device *dev = &pdev->dev; 1571 struct sm501_platdata_fbsub *pd;
1546 struct fb_info *fbinfo_crt; 1572 struct sm501fb_par *par;
1547 struct fb_info *fbinfo_pnl; 1573 struct fb_info *fbi;
1548 int ret;
1549 1574
1550 /* allocate our framebuffers */ 1575 pd = (head == HEAD_CRT) ? info->pdata->fb_crt : info->pdata->fb_pnl;
1576
1577 /* Do not initialise if we've not been given any platform data */
1578 if (pd == NULL) {
1579 dev_info(info->dev, "no data for fb %s (disabled)\n", name);
1580 return 0;
1581 }
1551 1582
1552 fbinfo_crt = framebuffer_alloc(sizeof(struct sm501fb_par), dev); 1583 fbi = framebuffer_alloc(sizeof(struct sm501fb_par), info->dev);
1553 if (fbinfo_crt == NULL) { 1584 if (fbi == NULL) {
1554 dev_err(dev, "cannot allocate crt framebuffer\n"); 1585 dev_err(info->dev, "cannot allocate %s framebuffer\n", name);
1555 return -ENOMEM; 1586 return -ENOMEM;
1556 } 1587 }
1557 1588
1558 fbinfo_pnl = framebuffer_alloc(sizeof(struct sm501fb_par), dev); 1589 par = fbi->par;
1559 if (fbinfo_pnl == NULL) { 1590 par->info = info;
1560 dev_err(dev, "cannot allocate panel framebuffer\n"); 1591 par->head = head;
1561 ret = -ENOMEM; 1592 fbi->pseudo_palette = &par->pseudo_palette;
1562 goto fbinfo_crt_alloc_fail; 1593
1594 info->fb[head] = fbi;
1595
1596 return 0;
1597}
1598
1599/* Free up anything allocated by sm501fb_init_fb */
1600
1601static void sm501_free_init_fb(struct sm501fb_info *info,
1602 enum sm501_controller head)
1603{
1604 struct fb_info *fbi = info->fb[head];
1605
1606 fb_dealloc_cmap(&fbi->cmap);
1607}
1608
1609static int __devinit sm501fb_start_one(struct sm501fb_info *info,
1610 enum sm501_controller head,
1611 const char *drvname)
1612{
1613 struct fb_info *fbi = info->fb[head];
1614 int ret;
1615
1616 if (!fbi)
1617 return 0;
1618
1619 ret = sm501fb_init_fb(info->fb[head], head, drvname);
1620 if (ret) {
1621 dev_err(info->dev, "cannot initialise fb %s\n", drvname);
1622 return ret;
1623 }
1624
1625 ret = register_framebuffer(info->fb[head]);
1626 if (ret) {
1627 dev_err(info->dev, "failed to register fb %s\n", drvname);
1628 sm501_free_init_fb(info, head);
1629 return ret;
1563 } 1630 }
1564 1631
1565 info = sm501fb_info_alloc(fbinfo_crt, fbinfo_pnl); 1632 dev_info(info->dev, "fb%d: %s frame buffer\n", fbi->node, fbi->fix.id);
1566 if (info == NULL) { 1633
1567 dev_err(dev, "cannot allocate par\n"); 1634 return 0;
1568 ret = -ENOMEM; 1635}
1569 goto sm501fb_alloc_fail; 1636
1637static int __devinit sm501fb_probe(struct platform_device *pdev)
1638{
1639 struct sm501fb_info *info;
1640 struct device *dev = &pdev->dev;
1641 int ret;
1642
1643 /* allocate our framebuffers */
1644
1645 info = kzalloc(sizeof(struct sm501fb_info), GFP_KERNEL);
1646 if (!info) {
1647 dev_err(dev, "failed to allocate state\n");
1648 return -ENOMEM;
1570 } 1649 }
1571 1650
1651 info->dev = dev = &pdev->dev;
1652 platform_set_drvdata(pdev, info);
1653
1572 if (dev->parent->platform_data) { 1654 if (dev->parent->platform_data) {
1573 struct sm501_platdata *pd = dev->parent->platform_data; 1655 struct sm501_platdata *pd = dev->parent->platform_data;
1574 info->pdata = pd->fb; 1656 info->pdata = pd->fb;
@@ -1579,90 +1661,88 @@ static int __init sm501fb_probe(struct platform_device *pdev)
1579 info->pdata = &sm501fb_def_pdata; 1661 info->pdata = &sm501fb_def_pdata;
1580 } 1662 }
1581 1663
1582 /* start the framebuffers */ 1664 /* probe for the presence of each panel */
1583 1665
1584 ret = sm501fb_start(info, pdev); 1666 ret = sm501fb_probe_one(info, HEAD_CRT);
1585 if (ret) { 1667 if (ret < 0) {
1586 dev_err(dev, "cannot initialise SM501\n"); 1668 dev_err(dev, "failed to probe CRT\n");
1587 goto sm501fb_start_fail; 1669 goto err_alloc;
1588 } 1670 }
1589 1671
1590 /* CRT framebuffer setup */ 1672 ret = sm501fb_probe_one(info, HEAD_PANEL);
1673 if (ret < 0) {
1674 dev_err(dev, "failed to probe PANEL\n");
1675 goto err_probed_crt;
1676 }
1591 1677
1592 ret = sm501fb_init_fb(fbinfo_crt, HEAD_CRT, driver_name_crt); 1678 if (info->fb[HEAD_PANEL] == NULL &&
1593 if (ret) { 1679 info->fb[HEAD_CRT] == NULL) {
1594 dev_err(dev, "cannot initialise CRT fb\n"); 1680 dev_err(dev, "no framebuffers found\n");
1595 goto sm501fb_start_fail; 1681 goto err_alloc;
1596 } 1682 }
1597 1683
1598 /* Panel framebuffer setup */ 1684 /* get the resources for both of the framebuffers */
1599 1685
1600 ret = sm501fb_init_fb(fbinfo_pnl, HEAD_PANEL, driver_name_pnl); 1686 ret = sm501fb_start(info, pdev);
1601 if (ret) { 1687 if (ret) {
1602 dev_err(dev, "cannot initialise Panel fb\n"); 1688 dev_err(dev, "cannot initialise SM501\n");
1603 goto sm501fb_start_fail; 1689 goto err_probed_panel;
1604 } 1690 }
1605 1691
1606 /* register framebuffers */ 1692 ret = sm501fb_start_one(info, HEAD_CRT, driver_name_crt);
1607 1693 if (ret) {
1608 ret = register_framebuffer(fbinfo_crt); 1694 dev_err(dev, "failed to start CRT\n");
1609 if (ret < 0) { 1695 goto err_started;
1610 dev_err(dev, "failed to register CRT fb (%d)\n", ret);
1611 goto register_crt_fail;
1612 } 1696 }
1613 1697
1614 ret = register_framebuffer(fbinfo_pnl); 1698 ret = sm501fb_start_one(info, HEAD_PANEL, driver_name_pnl);
1615 if (ret < 0) { 1699 if (ret) {
1616 dev_err(dev, "failed to register panel fb (%d)\n", ret); 1700 dev_err(dev, "failed to start Panel\n");
1617 goto register_pnl_fail; 1701 goto err_started_crt;
1618 } 1702 }
1619 1703
1620 dev_info(dev, "fb%d: %s frame buffer device\n",
1621 fbinfo_crt->node, fbinfo_crt->fix.id);
1622
1623 dev_info(dev, "fb%d: %s frame buffer device\n",
1624 fbinfo_pnl->node, fbinfo_pnl->fix.id);
1625
1626 /* create device files */ 1704 /* create device files */
1627 1705
1628 ret = device_create_file(dev, &dev_attr_crt_src); 1706 ret = device_create_file(dev, &dev_attr_crt_src);
1629 if (ret) 1707 if (ret)
1630 goto crtsrc_fail; 1708 goto err_started_panel;
1631 1709
1632 ret = device_create_file(dev, &dev_attr_fbregs_pnl); 1710 ret = device_create_file(dev, &dev_attr_fbregs_pnl);
1633 if (ret) 1711 if (ret)
1634 goto fbregs_pnl_fail; 1712 goto err_attached_crtsrc_file;
1635 1713
1636 ret = device_create_file(dev, &dev_attr_fbregs_crt); 1714 ret = device_create_file(dev, &dev_attr_fbregs_crt);
1637 if (ret) 1715 if (ret)
1638 goto fbregs_crt_fail; 1716 goto err_attached_pnlregs_file;
1639 1717
1640 /* we registered, return ok */ 1718 /* we registered, return ok */
1641 return 0; 1719 return 0;
1642 1720
1643 fbregs_crt_fail: 1721err_attached_pnlregs_file:
1644 device_remove_file(dev, &dev_attr_fbregs_pnl); 1722 device_remove_file(dev, &dev_attr_fbregs_pnl);
1645 1723
1646 fbregs_pnl_fail: 1724err_attached_crtsrc_file:
1647 device_remove_file(dev, &dev_attr_crt_src); 1725 device_remove_file(dev, &dev_attr_crt_src);
1648 1726
1649 crtsrc_fail: 1727err_started_panel:
1650 unregister_framebuffer(fbinfo_pnl); 1728 unregister_framebuffer(info->fb[HEAD_PANEL]);
1729 sm501_free_init_fb(info, HEAD_PANEL);
1651 1730
1652 register_pnl_fail: 1731err_started_crt:
1653 unregister_framebuffer(fbinfo_crt); 1732 unregister_framebuffer(info->fb[HEAD_CRT]);
1733 sm501_free_init_fb(info, HEAD_CRT);
1654 1734
1655 register_crt_fail: 1735err_started:
1656 sm501fb_stop(info); 1736 sm501fb_stop(info);
1657 1737
1658 sm501fb_start_fail: 1738err_probed_panel:
1659 sm501fb_info_release(info); 1739 framebuffer_release(info->fb[HEAD_PANEL]);
1660 1740
1661 sm501fb_alloc_fail: 1741err_probed_crt:
1662 framebuffer_release(fbinfo_pnl); 1742 framebuffer_release(info->fb[HEAD_CRT]);
1663 1743
1664 fbinfo_crt_alloc_fail: 1744err_alloc:
1665 framebuffer_release(fbinfo_crt); 1745 kfree(info);
1666 1746
1667 return ret; 1747 return ret;
1668} 1748}
@@ -1681,11 +1761,14 @@ static int sm501fb_remove(struct platform_device *pdev)
1681 device_remove_file(&pdev->dev, &dev_attr_fbregs_pnl); 1761 device_remove_file(&pdev->dev, &dev_attr_fbregs_pnl);
1682 device_remove_file(&pdev->dev, &dev_attr_crt_src); 1762 device_remove_file(&pdev->dev, &dev_attr_crt_src);
1683 1763
1764 sm501_free_init_fb(info, HEAD_CRT);
1765 sm501_free_init_fb(info, HEAD_PANEL);
1766
1684 unregister_framebuffer(fbinfo_crt); 1767 unregister_framebuffer(fbinfo_crt);
1685 unregister_framebuffer(fbinfo_pnl); 1768 unregister_framebuffer(fbinfo_pnl);
1686 1769
1687 sm501fb_stop(info); 1770 sm501fb_stop(info);
1688 sm501fb_info_release(info); 1771 kfree(info);
1689 1772
1690 framebuffer_release(fbinfo_pnl); 1773 framebuffer_release(fbinfo_pnl);
1691 framebuffer_release(fbinfo_crt); 1774 framebuffer_release(fbinfo_crt);
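The VBIASEN/FPEN changes to sm501fb_panel_power() above repeat the same rule four times: set the control bit when exactly one of "powering on" and "signal inverted" is true, and clear it otherwise. A condensed sketch of that rule; the helper name is made up and not part of the driver:

static u32 sm501fb_ctl_bit(u32 control, u32 bit, int enable, int inverted)
{
	/* boolean XOR: set the bit when enable and inverted differ */
	if (!enable != !inverted)
		return control | bit;
	return control & ~bit;
}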
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index ea9f19d25597..77aafcfae037 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -836,16 +836,12 @@ static int tdfxfb_pan_display(struct fb_var_screeninfo *var,
836 struct tdfx_par *par = info->par; 836 struct tdfx_par *par = info->par;
837 u32 addr = var->yoffset * info->fix.line_length; 837 u32 addr = var->yoffset * info->fix.line_length;
838 838
839 if (nopan || var->xoffset || (var->yoffset > var->yres_virtual)) 839 if (nopan || var->xoffset)
840 return -EINVAL;
841 if ((var->yoffset + var->yres > var->yres_virtual && nowrap))
842 return -EINVAL; 840 return -EINVAL;
843 841
844 banshee_make_room(par, 1); 842 banshee_make_room(par, 1);
845 tdfx_outl(par, VIDDESKSTART, addr); 843 tdfx_outl(par, VIDDESKSTART, addr);
846 844
847 info->var.xoffset = var->xoffset;
848 info->var.yoffset = var->yoffset;
849 return 0; 845 return 0;
850} 846}
851 847
@@ -1426,6 +1422,8 @@ MODULE_LICENSE("GPL");
1426module_param(hwcursor, int, 0644); 1422module_param(hwcursor, int, 0644);
1427MODULE_PARM_DESC(hwcursor, "Enable hardware cursor " 1423MODULE_PARM_DESC(hwcursor, "Enable hardware cursor "
1428 "(1=enable, 0=disable, default=1)"); 1424 "(1=enable, 0=disable, default=1)");
1425module_param(mode_option, charp, 0);
1426MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '640x480-8@60'");
1429#ifdef CONFIG_MTRR 1427#ifdef CONFIG_MTRR
1430module_param(nomtrr, bool, 0); 1428module_param(nomtrr, bool, 0);
1431MODULE_PARM_DESC(nomtrr, "Disable MTRR support (default: enabled)"); 1429MODULE_PARM_DESC(nomtrr, "Disable MTRR support (default: enabled)");
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index beefab2992c0..479b2e79ad68 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Frame buffer driver for Trident Blade and Image series 2 * Frame buffer driver for Trident TGUI, Blade and Image series
3 * 3 *
4 * Copyright 2001, 2002 - Jani Monoses <jani@iv.ro> 4 * Copyright 2001, 2002 - Jani Monoses <jani@iv.ro>
5 * 5 *
@@ -13,7 +13,6 @@
13 * code, suggestions 13 * code, suggestions
14 * TODO: 14 * TODO:
15 * timing value tweaking so it looks good on every monitor in every mode 15 * timing value tweaking so it looks good on every monitor in every mode
16 * TGUI acceleration
17 */ 16 */
18 17
19#include <linux/module.h> 18#include <linux/module.h>
@@ -22,25 +21,26 @@
22#include <linux/pci.h> 21#include <linux/pci.h>
23 22
24#include <linux/delay.h> 23#include <linux/delay.h>
24#include <video/vga.h>
25#include <video/trident.h> 25#include <video/trident.h>
26 26
27#define VERSION "0.7.8-NEWAPI"
28
29struct tridentfb_par { 27struct tridentfb_par {
30 void __iomem *io_virt; /* iospace virtual memory address */ 28 void __iomem *io_virt; /* iospace virtual memory address */
29 u32 pseudo_pal[16];
30 int chip_id;
31 int flatpanel;
32 void (*init_accel) (struct tridentfb_par *, int, int);
33 void (*wait_engine) (struct tridentfb_par *);
34 void (*fill_rect)
35 (struct tridentfb_par *par, u32, u32, u32, u32, u32, u32);
36 void (*copy_rect)
37 (struct tridentfb_par *par, u32, u32, u32, u32, u32, u32);
38 void (*image_blit)
39 (struct tridentfb_par *par, const char*,
40 u32, u32, u32, u32, u32, u32);
41 unsigned char eng_oper; /* engine operation... */
31}; 42};
32 43
33static unsigned char eng_oper; /* engine operation... */
34static struct fb_ops tridentfb_ops;
35
36static struct tridentfb_par default_par;
37
38/* FIXME:kmalloc these 3 instead */
39static struct fb_info fb_info;
40static u32 pseudo_pal[16];
41
42static struct fb_var_screeninfo default_var;
43
44static struct fb_fix_screeninfo tridentfb_fix = { 44static struct fb_fix_screeninfo tridentfb_fix = {
45 .id = "Trident", 45 .id = "Trident",
46 .type = FB_TYPE_PACKED_PIXELS, 46 .type = FB_TYPE_PACKED_PIXELS,
@@ -49,27 +49,22 @@ static struct fb_fix_screeninfo tridentfb_fix = {
49 .accel = FB_ACCEL_NONE, 49 .accel = FB_ACCEL_NONE,
50}; 50};
51 51
52static int chip_id;
53
54static int defaultaccel;
55static int displaytype;
56
57/* defaults which are normally overridden by user values */ 52/* defaults which are normally overridden by user values */
58 53
59/* video mode */ 54/* video mode */
60static char *mode_option __devinitdata = "640x480"; 55static char *mode_option __devinitdata = "640x480-8@60";
61static int bpp = 8; 56static int bpp __devinitdata = 8;
62 57
63static int noaccel; 58static int noaccel __devinitdata;
64 59
65static int center; 60static int center;
66static int stretch; 61static int stretch;
67 62
68static int fp; 63static int fp __devinitdata;
69static int crt; 64static int crt __devinitdata;
70 65
71static int memsize; 66static int memsize __devinitdata;
72static int memdiff; 67static int memdiff __devinitdata;
73static int nativex; 68static int nativex;
74 69
75module_param(mode_option, charp, 0); 70module_param(mode_option, charp, 0);
@@ -84,25 +79,53 @@ module_param(memsize, int, 0);
84module_param(memdiff, int, 0); 79module_param(memdiff, int, 0);
85module_param(nativex, int, 0); 80module_param(nativex, int, 0);
86module_param(fp, int, 0); 81module_param(fp, int, 0);
82MODULE_PARM_DESC(fp, "Define if a flat panel is connected");
87module_param(crt, int, 0); 83module_param(crt, int, 0);
84MODULE_PARM_DESC(crt, "Define if a CRT is connected");
85
86static inline int is_oldclock(int id)
87{
88 return (id == TGUI9440) ||
89 (id == TGUI9660) ||
90 (id == CYBER9320);
91}
92
93static inline int is_oldprotect(int id)
94{
95 return is_oldclock(id) ||
96 (id == PROVIDIA9685) ||
97 (id == CYBER9382) ||
98 (id == CYBER9385);
99}
100
101static inline int is_blade(int id)
102{
103 return (id == BLADE3D) ||
104 (id == CYBERBLADEE4) ||
105 (id == CYBERBLADEi7) ||
106 (id == CYBERBLADEi7D) ||
107 (id == CYBERBLADEi1) ||
108 (id == CYBERBLADEi1D) ||
109 (id == CYBERBLADEAi1) ||
110 (id == CYBERBLADEAi1D);
111}
88 112
89static int chip3D; 113static inline int is_xp(int id)
90static int chipcyber; 114{
115 return (id == CYBERBLADEXPAi1) ||
116 (id == CYBERBLADEXPm8) ||
117 (id == CYBERBLADEXPm16);
118}
91 119
92static int is3Dchip(int id) 120static inline int is3Dchip(int id)
93{ 121{
94 return ((id == BLADE3D) || (id == CYBERBLADEE4) || 122 return is_blade(id) || is_xp(id) ||
95 (id == CYBERBLADEi7) || (id == CYBERBLADEi7D) ||
96 (id == CYBER9397) || (id == CYBER9397DVD) || 123 (id == CYBER9397) || (id == CYBER9397DVD) ||
97 (id == CYBER9520) || (id == CYBER9525DVD) || 124 (id == CYBER9520) || (id == CYBER9525DVD) ||
98 (id == IMAGE975) || (id == IMAGE985) || 125 (id == IMAGE975) || (id == IMAGE985);
99 (id == CYBERBLADEi1) || (id == CYBERBLADEi1D) ||
100 (id == CYBERBLADEAi1) || (id == CYBERBLADEAi1D) ||
101 (id == CYBERBLADEXPm8) || (id == CYBERBLADEXPm16) ||
102 (id == CYBERBLADEXPAi1));
103} 126}
104 127
105static int iscyber(int id) 128static inline int iscyber(int id)
106{ 129{
107 switch (id) { 130 switch (id) {
108 case CYBER9388: 131 case CYBER9388:
@@ -122,12 +145,7 @@ static int iscyber(int id)
122 return 1; 145 return 1;
123 146
124 case CYBER9320: 147 case CYBER9320:
125 case TGUI9660:
126 case IMAGE975:
127 case IMAGE985:
128 case BLADE3D:
129 case CYBERBLADEi7: /* VIA MPV4 integrated version */ 148 case CYBERBLADEi7: /* VIA MPV4 integrated version */
130
131 default: 149 default:
132 /* case CYBERBLDAEXPm8: Strange */ 150 /* case CYBERBLDAEXPm8: Strange */
133 /* case CYBERBLDAEXPm16: Strange */ 151 /* case CYBERBLDAEXPm16: Strange */
@@ -135,147 +153,110 @@ static int iscyber(int id)
135 } 153 }
136} 154}
137 155
138#define CRT 0x3D0 /* CRTC registers offset for color display */ 156static inline void t_outb(struct tridentfb_par *p, u8 val, u16 reg)
139 157{
140#ifndef TRIDENT_MMIO 158 fb_writeb(val, p->io_virt + reg);
141 #define TRIDENT_MMIO 1 159}
142#endif
143
144#if TRIDENT_MMIO
145 #define t_outb(val, reg) writeb(val,((struct tridentfb_par *)(fb_info.par))->io_virt + reg)
146 #define t_inb(reg) readb(((struct tridentfb_par*)(fb_info.par))->io_virt + reg)
147#else
148 #define t_outb(val, reg) outb(val, reg)
149 #define t_inb(reg) inb(reg)
150#endif
151 160
161static inline u8 t_inb(struct tridentfb_par *p, u16 reg)
162{
163 return fb_readb(p->io_virt + reg);
164}
152 165
153static struct accel_switch { 166static inline void writemmr(struct tridentfb_par *par, u16 r, u32 v)
154 void (*init_accel) (int, int); 167{
155 void (*wait_engine) (void); 168 fb_writel(v, par->io_virt + r);
156 void (*fill_rect) (u32, u32, u32, u32, u32, u32); 169}
157 void (*copy_rect) (u32, u32, u32, u32, u32, u32);
158} *acc;
159 170
160#define writemmr(r, v) writel(v, ((struct tridentfb_par *)fb_info.par)->io_virt + r) 171static inline u32 readmmr(struct tridentfb_par *par, u16 r)
161#define readmmr(r) readl(((struct tridentfb_par *)fb_info.par)->io_virt + r) 172{
173 return fb_readl(par->io_virt + r);
174}
162 175
163/* 176/*
164 * Blade specific acceleration. 177 * Blade specific acceleration.
165 */ 178 */
166 179
167#define point(x, y) ((y) << 16 | (x)) 180#define point(x, y) ((y) << 16 | (x))
168#define STA 0x2120 181
169#define CMD 0x2144 182static void blade_init_accel(struct tridentfb_par *par, int pitch, int bpp)
170#define ROP 0x2148
171#define CLR 0x2160
172#define SR1 0x2100
173#define SR2 0x2104
174#define DR1 0x2108
175#define DR2 0x210C
176
177#define ROP_S 0xCC
178
179static void blade_init_accel(int pitch, int bpp)
180{ 183{
181 int v1 = (pitch >> 3) << 20; 184 int v1 = (pitch >> 3) << 20;
182 int tmp = 0, v2; 185 int tmp = bpp == 24 ? 2 : (bpp >> 4);
183 switch (bpp) { 186 int v2 = v1 | (tmp << 29);
184 case 8: 187
185 tmp = 0; 188 writemmr(par, 0x21C0, v2);
186 break; 189 writemmr(par, 0x21C4, v2);
187 case 15: 190 writemmr(par, 0x21B8, v2);
188 tmp = 5; 191 writemmr(par, 0x21BC, v2);
189 break; 192 writemmr(par, 0x21D0, v1);
190 case 16: 193 writemmr(par, 0x21D4, v1);
191 tmp = 1; 194 writemmr(par, 0x21C8, v1);
192 break; 195 writemmr(par, 0x21CC, v1);
193 case 24: 196 writemmr(par, 0x216C, 0);
194 case 32:
195 tmp = 2;
196 break;
197 }
198 v2 = v1 | (tmp << 29);
199 writemmr(0x21C0, v2);
200 writemmr(0x21C4, v2);
201 writemmr(0x21B8, v2);
202 writemmr(0x21BC, v2);
203 writemmr(0x21D0, v1);
204 writemmr(0x21D4, v1);
205 writemmr(0x21C8, v1);
206 writemmr(0x21CC, v1);
207 writemmr(0x216C, 0);
208} 197}
209 198
210static void blade_wait_engine(void) 199static void blade_wait_engine(struct tridentfb_par *par)
211{ 200{
212 while (readmmr(STA) & 0xFA800000) ; 201 while (readmmr(par, STATUS) & 0xFA800000)
202 cpu_relax();
213} 203}
214 204
215static void blade_fill_rect(u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) 205static void blade_fill_rect(struct tridentfb_par *par,
206 u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop)
216{ 207{
217 writemmr(CLR, c); 208 writemmr(par, COLOR, c);
218 writemmr(ROP, rop ? 0x66 : ROP_S); 209 writemmr(par, ROP, rop ? ROP_X : ROP_S);
219 writemmr(CMD, 0x20000000 | 1 << 19 | 1 << 4 | 2 << 2); 210 writemmr(par, CMD, 0x20000000 | 1 << 19 | 1 << 4 | 2 << 2);
220 211
221 writemmr(DR1, point(x, y)); 212 writemmr(par, DST1, point(x, y));
222 writemmr(DR2, point(x + w - 1, y + h - 1)); 213 writemmr(par, DST2, point(x + w - 1, y + h - 1));
223} 214}
224 215
225static void blade_copy_rect(u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) 216static void blade_image_blit(struct tridentfb_par *par, const char *data,
217 u32 x, u32 y, u32 w, u32 h, u32 c, u32 b)
218{
219 unsigned size = ((w + 31) >> 5) * h;
220
221 writemmr(par, COLOR, c);
222 writemmr(par, BGCOLOR, b);
223 writemmr(par, CMD, 0xa0000000 | 3 << 19);
224
225 writemmr(par, DST1, point(x, y));
226 writemmr(par, DST2, point(x + w - 1, y + h - 1));
227
228 memcpy(par->io_virt + 0x10000, data, 4 * size);
229}
230
231static void blade_copy_rect(struct tridentfb_par *par,
232 u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
226{ 233{
227 u32 s1, s2, d1, d2;
228 int direction = 2; 234 int direction = 2;
229 s1 = point(x1, y1); 235 u32 s1 = point(x1, y1);
230 s2 = point(x1 + w - 1, y1 + h - 1); 236 u32 s2 = point(x1 + w - 1, y1 + h - 1);
231 d1 = point(x2, y2); 237 u32 d1 = point(x2, y2);
232 d2 = point(x2 + w - 1, y2 + h - 1); 238 u32 d2 = point(x2 + w - 1, y2 + h - 1);
233 239
234 if ((y1 > y2) || ((y1 == y2) && (x1 > x2))) 240 if ((y1 > y2) || ((y1 == y2) && (x1 > x2)))
235 direction = 0; 241 direction = 0;
236 242
237 writemmr(ROP, ROP_S); 243 writemmr(par, ROP, ROP_S);
238 writemmr(CMD, 0xE0000000 | 1 << 19 | 1 << 4 | 1 << 2 | direction); 244 writemmr(par, CMD, 0xE0000000 | 1 << 19 | 1 << 4 | 1 << 2 | direction);
239 245
240 writemmr(SR1, direction ? s2 : s1); 246 writemmr(par, SRC1, direction ? s2 : s1);
241 writemmr(SR2, direction ? s1 : s2); 247 writemmr(par, SRC2, direction ? s1 : s2);
242 writemmr(DR1, direction ? d2 : d1); 248 writemmr(par, DST1, direction ? d2 : d1);
243 writemmr(DR2, direction ? d1 : d2); 249 writemmr(par, DST2, direction ? d1 : d2);
244} 250}
245 251
246static struct accel_switch accel_blade = {
247 blade_init_accel,
248 blade_wait_engine,
249 blade_fill_rect,
250 blade_copy_rect,
251};
252
253/* 252/*
254 * BladeXP specific acceleration functions 253 * BladeXP specific acceleration functions
255 */ 254 */
256 255
257#define ROP_P 0xF0 256static void xp_init_accel(struct tridentfb_par *par, int pitch, int bpp)
258#define masked_point(x, y) ((y & 0xffff)<<16|(x & 0xffff))
259
260static void xp_init_accel(int pitch, int bpp)
261{ 257{
262 int tmp = 0, v1; 258 unsigned char x = bpp == 24 ? 3 : (bpp >> 4);
263 unsigned char x = 0; 259 int v1 = pitch << (bpp == 24 ? 20 : (18 + x));
264
265 switch (bpp) {
266 case 8:
267 x = 0;
268 break;
269 case 16:
270 x = 1;
271 break;
272 case 24:
273 x = 3;
274 break;
275 case 32:
276 x = 2;
277 break;
278 }
279 260
280 switch (pitch << (bpp >> 3)) { 261 switch (pitch << (bpp >> 3)) {
281 case 8192: 262 case 8192:
@@ -293,42 +274,21 @@ static void xp_init_accel(int pitch, int bpp)
293 break; 274 break;
294 } 275 }
295 276
296 t_outb(x, 0x2125); 277 t_outb(par, x, 0x2125);
297
298 eng_oper = x | 0x40;
299
300 switch (bpp) {
301 case 8:
302 tmp = 18;
303 break;
304 case 15:
305 case 16:
306 tmp = 19;
307 break;
308 case 24:
309 case 32:
310 tmp = 20;
311 break;
312 }
313 278
314 v1 = pitch << tmp; 279 par->eng_oper = x | 0x40;
315 280
316 writemmr(0x2154, v1); 281 writemmr(par, 0x2154, v1);
317 writemmr(0x2150, v1); 282 writemmr(par, 0x2150, v1);
318 t_outb(3, 0x2126); 283 t_outb(par, 3, 0x2126);
319} 284}
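The single expression bpp == 24 ? 3 : (bpp >> 4) replaces the old per-depth switch here (and in the blade/TGUI init paths); it produces the same engine depth codes. A throwaway sketch that checks the equivalence (depth_code is an illustrative name):

#include <assert.h>

/* Depth code written to register 0x2125: 8 bpp -> 0, 16 -> 1, 32 -> 2, 24 -> 3. */
static unsigned char depth_code(int bpp)
{
        return bpp == 24 ? 3 : (bpp >> 4);
}

static void check_depth_codes(void)
{
        assert(depth_code(8)  == 0);
        assert(depth_code(16) == 1);
        assert(depth_code(32) == 2);
        assert(depth_code(24) == 3);
}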
320 285
321static void xp_wait_engine(void) 286static void xp_wait_engine(struct tridentfb_par *par)
322{ 287{
323 int busy; 288 int count = 0;
324 int count, timeout; 289 int timeout = 0;
325 290
326 count = 0; 291 while (t_inb(par, STATUS) & 0x80) {
327 timeout = 0;
328 for (;;) {
329 busy = t_inb(STA) & 0x80;
330 if (busy != 0x80)
331 return;
332 count++; 292 count++;
333 if (count == 10000000) { 293 if (count == 10000000) {
334 /* Timeout */ 294 /* Timeout */
@@ -336,30 +296,31 @@ static void xp_wait_engine(void)
336 timeout++; 296 timeout++;
337 if (timeout == 8) { 297 if (timeout == 8) {
338 /* Reset engine */ 298 /* Reset engine */
339 t_outb(0x00, 0x2120); 299 t_outb(par, 0x00, STATUS);
340 return; 300 return;
341 } 301 }
342 } 302 }
303 cpu_relax();
343 } 304 }
344} 305}
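xp_wait_engine keeps the poll loop bounded: after a large burst of busy reads it records a timeout, and after eight timeouts it gives up and resets the engine instead of hanging the box. A generic sketch of that pattern with the hardware poll abstracted behind callbacks (names and bounds here are illustrative, and the driver additionally calls cpu_relax() per iteration):

#include <stdbool.h>

/* Poll until busy() clears; after max_polls iterations count a timeout,
 * and after max_timeouts timeouts call reset() and give up. */
static void wait_engine_bounded(bool (*busy)(void), void (*reset)(void),
                                unsigned long max_polls, int max_timeouts)
{
        unsigned long count = 0;
        int timeouts = 0;

        while (busy()) {
                if (++count == max_polls) {
                        count = 0;
                        if (++timeouts == max_timeouts) {
                                reset();
                                return;
                        }
                }
        }
}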
345 306
346static void xp_fill_rect(u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) 307static void xp_fill_rect(struct tridentfb_par *par,
308 u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop)
347{ 309{
348 writemmr(0x2127, ROP_P); 310 writemmr(par, 0x2127, ROP_P);
349 writemmr(0x2158, c); 311 writemmr(par, 0x2158, c);
350 writemmr(0x2128, 0x4000); 312 writemmr(par, DRAWFL, 0x4000);
351 writemmr(0x2140, masked_point(h, w)); 313 writemmr(par, OLDDIM, point(h, w));
352 writemmr(0x2138, masked_point(y, x)); 314 writemmr(par, OLDDST, point(y, x));
353 t_outb(0x01, 0x2124); 315 t_outb(par, 0x01, OLDCMD);
354 t_outb(eng_oper, 0x2125); 316 t_outb(par, par->eng_oper, 0x2125);
355} 317}
356 318
357static void xp_copy_rect(u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) 319static void xp_copy_rect(struct tridentfb_par *par,
320 u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
358{ 321{
359 int direction;
360 u32 x1_tmp, x2_tmp, y1_tmp, y2_tmp; 322 u32 x1_tmp, x2_tmp, y1_tmp, y2_tmp;
361 323 int direction = 0x0004;
362 direction = 0x0004;
363 324
364 if ((x1 < x2) && (y1 == y2)) { 325 if ((x1 < x2) && (y1 == y2)) {
365 direction |= 0x0200; 326 direction |= 0x0200;
@@ -379,103 +340,152 @@ static void xp_copy_rect(u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
379 y2_tmp = y2; 340 y2_tmp = y2;
380 } 341 }
381 342
382 writemmr(0x2128, direction); 343 writemmr(par, DRAWFL, direction);
383 t_outb(ROP_S, 0x2127); 344 t_outb(par, ROP_S, 0x2127);
384 writemmr(0x213C, masked_point(y1_tmp, x1_tmp)); 345 writemmr(par, OLDSRC, point(y1_tmp, x1_tmp));
385 writemmr(0x2138, masked_point(y2_tmp, x2_tmp)); 346 writemmr(par, OLDDST, point(y2_tmp, x2_tmp));
386 writemmr(0x2140, masked_point(h, w)); 347 writemmr(par, OLDDIM, point(h, w));
387 t_outb(0x01, 0x2124); 348 t_outb(par, 0x01, OLDCMD);
388} 349}
389 350
390static struct accel_switch accel_xp = {
391 xp_init_accel,
392 xp_wait_engine,
393 xp_fill_rect,
394 xp_copy_rect,
395};
396
397/* 351/*
398 * Image specific acceleration functions 352 * Image specific acceleration functions
399 */ 353 */
400static void image_init_accel(int pitch, int bpp) 354static void image_init_accel(struct tridentfb_par *par, int pitch, int bpp)
401{ 355{
402 int tmp = 0; 356 int tmp = bpp == 24 ? 2: (bpp >> 4);
403 switch (bpp) { 357
404 case 8: 358 writemmr(par, 0x2120, 0xF0000000);
405 tmp = 0; 359 writemmr(par, 0x2120, 0x40000000 | tmp);
406 break; 360 writemmr(par, 0x2120, 0x80000000);
407 case 15: 361 writemmr(par, 0x2144, 0x00000000);
408 tmp = 5; 362 writemmr(par, 0x2148, 0x00000000);
409 break; 363 writemmr(par, 0x2150, 0x00000000);
410 case 16: 364 writemmr(par, 0x2154, 0x00000000);
411 tmp = 1; 365 writemmr(par, 0x2120, 0x60000000 | (pitch << 16) | pitch);
412 break; 366 writemmr(par, 0x216C, 0x00000000);
413 case 24: 367 writemmr(par, 0x2170, 0x00000000);
414 case 32: 368 writemmr(par, 0x217C, 0x00000000);
415 tmp = 2; 369 writemmr(par, 0x2120, 0x10000000);
416 break; 370 writemmr(par, 0x2130, (2047 << 16) | 2047);
417 }
418 writemmr(0x2120, 0xF0000000);
419 writemmr(0x2120, 0x40000000 | tmp);
420 writemmr(0x2120, 0x80000000);
421 writemmr(0x2144, 0x00000000);
422 writemmr(0x2148, 0x00000000);
423 writemmr(0x2150, 0x00000000);
424 writemmr(0x2154, 0x00000000);
425 writemmr(0x2120, 0x60000000 | (pitch << 16) | pitch);
426 writemmr(0x216C, 0x00000000);
427 writemmr(0x2170, 0x00000000);
428 writemmr(0x217C, 0x00000000);
429 writemmr(0x2120, 0x10000000);
430 writemmr(0x2130, (2047 << 16) | 2047);
431} 371}
432 372
433static void image_wait_engine(void) 373static void image_wait_engine(struct tridentfb_par *par)
434{ 374{
435 while (readmmr(0x2164) & 0xF0000000) ; 375 while (readmmr(par, 0x2164) & 0xF0000000)
376 cpu_relax();
436} 377}
437 378
438static void image_fill_rect(u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop) 379static void image_fill_rect(struct tridentfb_par *par,
380 u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop)
439{ 381{
440 writemmr(0x2120, 0x80000000); 382 writemmr(par, 0x2120, 0x80000000);
441 writemmr(0x2120, 0x90000000 | ROP_S); 383 writemmr(par, 0x2120, 0x90000000 | ROP_S);
442 384
443 writemmr(0x2144, c); 385 writemmr(par, 0x2144, c);
444 386
445 writemmr(DR1, point(x, y)); 387 writemmr(par, DST1, point(x, y));
446 writemmr(DR2, point(x + w - 1, y + h - 1)); 388 writemmr(par, DST2, point(x + w - 1, y + h - 1));
447 389
448 writemmr(0x2124, 0x80000000 | 3 << 22 | 1 << 10 | 1 << 9); 390 writemmr(par, 0x2124, 0x80000000 | 3 << 22 | 1 << 10 | 1 << 9);
449} 391}
450 392
451static void image_copy_rect(u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h) 393static void image_copy_rect(struct tridentfb_par *par,
394 u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
452{ 395{
453 u32 s1, s2, d1, d2; 396 int direction = 0x4;
454 int direction = 2; 397 u32 s1 = point(x1, y1);
455 s1 = point(x1, y1); 398 u32 s2 = point(x1 + w - 1, y1 + h - 1);
456 s2 = point(x1 + w - 1, y1 + h - 1); 399 u32 d1 = point(x2, y2);
457 d1 = point(x2, y2); 400 u32 d2 = point(x2 + w - 1, y2 + h - 1);
458 d2 = point(x2 + w - 1, y2 + h - 1);
459 401
460 if ((y1 > y2) || ((y1 == y2) && (x1 > x2))) 402 if ((y1 > y2) || ((y1 == y2) && (x1 > x2)))
461 direction = 0; 403 direction = 0;
462 404
463 writemmr(0x2120, 0x80000000); 405 writemmr(par, 0x2120, 0x80000000);
464 writemmr(0x2120, 0x90000000 | ROP_S); 406 writemmr(par, 0x2120, 0x90000000 | ROP_S);
465 407
466 writemmr(SR1, direction ? s2 : s1); 408 writemmr(par, SRC1, direction ? s2 : s1);
467 writemmr(SR2, direction ? s1 : s2); 409 writemmr(par, SRC2, direction ? s1 : s2);
468 writemmr(DR1, direction ? d2 : d1); 410 writemmr(par, DST1, direction ? d2 : d1);
469 writemmr(DR2, direction ? d1 : d2); 411 writemmr(par, DST2, direction ? d1 : d2);
470 writemmr(0x2124, 0x80000000 | 1 << 22 | 1 << 10 | 1 << 7 | direction); 412 writemmr(par, 0x2124,
413 0x80000000 | 1 << 22 | 1 << 10 | 1 << 7 | direction);
471} 414}
472 415
473static struct accel_switch accel_image = { 416/*
474 image_init_accel, 417 * TGUI 9440/96XX acceleration
475 image_wait_engine, 418 */
476 image_fill_rect, 419
477 image_copy_rect, 420static void tgui_init_accel(struct tridentfb_par *par, int pitch, int bpp)
478}; 421{
422 unsigned char x = bpp == 24 ? 3 : (bpp >> 4);
423
424 /* disable clipping */
425 writemmr(par, 0x2148, 0);
426 writemmr(par, 0x214C, point(4095, 2047));
427
428 switch ((pitch * bpp) / 8) {
429 case 8192:
430 case 512:
431 x |= 0x00;
432 break;
433 case 1024:
434 x |= 0x04;
435 break;
436 case 2048:
437 x |= 0x08;
438 break;
439 case 4096:
440 x |= 0x0C;
441 break;
442 }
443
444 fb_writew(x, par->io_virt + 0x2122);
445}
446
447static void tgui_fill_rect(struct tridentfb_par *par,
448 u32 x, u32 y, u32 w, u32 h, u32 c, u32 rop)
449{
450 t_outb(par, ROP_P, 0x2127);
451 writemmr(par, OLDCLR, c);
452 writemmr(par, DRAWFL, 0x4020);
453 writemmr(par, OLDDIM, point(w - 1, h - 1));
454 writemmr(par, OLDDST, point(x, y));
455 t_outb(par, 1, OLDCMD);
456}
457
458static void tgui_copy_rect(struct tridentfb_par *par,
459 u32 x1, u32 y1, u32 x2, u32 y2, u32 w, u32 h)
460{
461 int flags = 0;
462 u16 x1_tmp, x2_tmp, y1_tmp, y2_tmp;
463
464 if ((x1 < x2) && (y1 == y2)) {
465 flags |= 0x0200;
466 x1_tmp = x1 + w - 1;
467 x2_tmp = x2 + w - 1;
468 } else {
469 x1_tmp = x1;
470 x2_tmp = x2;
471 }
472
473 if (y1 < y2) {
474 flags |= 0x0100;
475 y1_tmp = y1 + h - 1;
476 y2_tmp = y2 + h - 1;
477 } else {
478 y1_tmp = y1;
479 y2_tmp = y2;
480 }
481
482 writemmr(par, DRAWFL, 0x4 | flags);
483 t_outb(par, ROP_S, 0x2127);
484 writemmr(par, OLDSRC, point(x1_tmp, y1_tmp));
485 writemmr(par, OLDDST, point(x2_tmp, y2_tmp));
486 writemmr(par, OLDDIM, point(w - 1, h - 1));
487 t_outb(par, 1, OLDCMD);
488}
479 489
480/* 490/*
481 * Accel functions called by the upper layers 491 * Accel functions called by the upper layers
@@ -484,129 +494,162 @@ static struct accel_switch accel_image = {
484static void tridentfb_fillrect(struct fb_info *info, 494static void tridentfb_fillrect(struct fb_info *info,
485 const struct fb_fillrect *fr) 495 const struct fb_fillrect *fr)
486{ 496{
487 int bpp = info->var.bits_per_pixel; 497 struct tridentfb_par *par = info->par;
488 int col = 0; 498 int col;
489 499
490 switch (bpp) { 500 if (info->flags & FBINFO_HWACCEL_DISABLED) {
491 default: 501 cfb_fillrect(info, fr);
492 case 8: 502 return;
493 col |= fr->color; 503 }
504 if (info->var.bits_per_pixel == 8) {
505 col = fr->color;
494 col |= col << 8; 506 col |= col << 8;
495 col |= col << 16; 507 col |= col << 16;
496 break; 508 } else
497 case 16:
498 col = ((u32 *)(info->pseudo_palette))[fr->color]; 509 col = ((u32 *)(info->pseudo_palette))[fr->color];
499 break; 510
500 case 32: 511 par->wait_engine(par);
501 col = ((u32 *)(info->pseudo_palette))[fr->color]; 512 par->fill_rect(par, fr->dx, fr->dy, fr->width,
502 break; 513 fr->height, col, fr->rop);
514}
515
516static void tridentfb_imageblit(struct fb_info *info,
517 const struct fb_image *img)
518{
519 struct tridentfb_par *par = info->par;
520 int col, bgcol;
521
522 if ((info->flags & FBINFO_HWACCEL_DISABLED) || img->depth != 1) {
523 cfb_imageblit(info, img);
524 return;
525 }
526 if (info->var.bits_per_pixel == 8) {
527 col = img->fg_color;
528 col |= col << 8;
529 col |= col << 16;
530 bgcol = img->bg_color;
531 bgcol |= bgcol << 8;
532 bgcol |= bgcol << 16;
533 } else {
534 col = ((u32 *)(info->pseudo_palette))[img->fg_color];
535 bgcol = ((u32 *)(info->pseudo_palette))[img->bg_color];
503 } 536 }
504 537
505 acc->fill_rect(fr->dx, fr->dy, fr->width, fr->height, col, fr->rop); 538 par->wait_engine(par);
506 acc->wait_engine(); 539 if (par->image_blit)
540 par->image_blit(par, img->data, img->dx, img->dy,
541 img->width, img->height, col, bgcol);
542 else
543 cfb_imageblit(info, img);
507} 544}
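For 8 bpp the hardware colour registers still take a 32-bit value, so tridentfb_fillrect and tridentfb_imageblit replicate the palette index into every byte lane before handing it to fill_rect/image_blit. The expansion in isolation (replicate_index is an illustrative helper):

#include <stdint.h>

/* 0x5A -> 0x5A5A5A5A: spread an 8-bit index across all four byte lanes. */
static inline uint32_t replicate_index(uint8_t idx)
{
        uint32_t col = idx;

        col |= col << 8;
        col |= col << 16;
        return col;
}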
545
508static void tridentfb_copyarea(struct fb_info *info, 546static void tridentfb_copyarea(struct fb_info *info,
509 const struct fb_copyarea *ca) 547 const struct fb_copyarea *ca)
510{ 548{
511 acc->copy_rect(ca->sx, ca->sy, ca->dx, ca->dy, ca->width, ca->height); 549 struct tridentfb_par *par = info->par;
512 acc->wait_engine(); 550
551 if (info->flags & FBINFO_HWACCEL_DISABLED) {
552 cfb_copyarea(info, ca);
553 return;
554 }
555 par->wait_engine(par);
556 par->copy_rect(par, ca->sx, ca->sy, ca->dx, ca->dy,
557 ca->width, ca->height);
558}
559
560static int tridentfb_sync(struct fb_info *info)
561{
562 struct tridentfb_par *par = info->par;
563
564 if (!(info->flags & FBINFO_HWACCEL_DISABLED))
565 par->wait_engine(par);
566 return 0;
513} 567}
514#else /* !CONFIG_FB_TRIDENT_ACCEL */ 568#else
515#define tridentfb_fillrect cfb_fillrect 569#define tridentfb_fillrect cfb_fillrect
516#define tridentfb_copyarea cfb_copyarea 570#define tridentfb_copyarea cfb_copyarea
571#define tridentfb_imageblit cfb_imageblit
517#endif /* CONFIG_FB_TRIDENT_ACCEL */ 572#endif /* CONFIG_FB_TRIDENT_ACCEL */
518 573
519
520/* 574/*
521 * Hardware access functions 575 * Hardware access functions
522 */ 576 */
523 577
524static inline unsigned char read3X4(int reg) 578static inline unsigned char read3X4(struct tridentfb_par *par, int reg)
525{ 579{
526 struct tridentfb_par *par = (struct tridentfb_par *)fb_info.par; 580 return vga_mm_rcrt(par->io_virt, reg);
527 writeb(reg, par->io_virt + CRT + 4);
528 return readb(par->io_virt + CRT + 5);
529} 581}
530 582
531static inline void write3X4(int reg, unsigned char val) 583static inline void write3X4(struct tridentfb_par *par, int reg,
584 unsigned char val)
532{ 585{
533 struct tridentfb_par *par = (struct tridentfb_par *)fb_info.par; 586 vga_mm_wcrt(par->io_virt, reg, val);
534 writeb(reg, par->io_virt + CRT + 4);
535 writeb(val, par->io_virt + CRT + 5);
536} 587}
537 588
538static inline unsigned char read3C4(int reg) 589static inline unsigned char read3CE(struct tridentfb_par *par,
590 unsigned char reg)
539{ 591{
540 t_outb(reg, 0x3C4); 592 return vga_mm_rgfx(par->io_virt, reg);
541 return t_inb(0x3C5);
542} 593}
543 594
544static inline void write3C4(int reg, unsigned char val) 595static inline void writeAttr(struct tridentfb_par *par, int reg,
596 unsigned char val)
545{ 597{
546 t_outb(reg, 0x3C4); 598 fb_readb(par->io_virt + VGA_IS1_RC); /* flip-flop to index */
547 t_outb(val, 0x3C5); 599 vga_mm_wattr(par->io_virt, reg, val);
548} 600}
549 601
550static inline unsigned char read3CE(int reg) 602static inline void write3CE(struct tridentfb_par *par, int reg,
603 unsigned char val)
551{ 604{
552 t_outb(reg, 0x3CE); 605 vga_mm_wgfx(par->io_virt, reg, val);
553 return t_inb(0x3CF);
554} 606}
555 607
556static inline void writeAttr(int reg, unsigned char val) 608static void enable_mmio(struct tridentfb_par *par)
557{
558 readb(((struct tridentfb_par *)fb_info.par)->io_virt + CRT + 0x0A); /* flip-flop to index */
559 t_outb(reg, 0x3C0);
560 t_outb(val, 0x3C0);
561}
562
563static inline void write3CE(int reg, unsigned char val)
564{
565 t_outb(reg, 0x3CE);
566 t_outb(val, 0x3CF);
567}
568
569static void enable_mmio(void)
570{ 609{
571 /* Goto New Mode */ 610 /* Goto New Mode */
572 outb(0x0B, 0x3C4); 611 vga_io_rseq(0x0B);
573 inb(0x3C5);
574 612
575 /* Unprotect registers */ 613 /* Unprotect registers */
576 outb(NewMode1, 0x3C4); 614 vga_io_wseq(NewMode1, 0x80);
577 outb(0x80, 0x3C5); 615 if (!is_oldprotect(par->chip_id))
616 vga_io_wseq(Protection, 0x92);
578 617
579 /* Enable MMIO */ 618 /* Enable MMIO */
580 outb(PCIReg, 0x3D4); 619 outb(PCIReg, 0x3D4);
581 outb(inb(0x3D5) | 0x01, 0x3D5); 620 outb(inb(0x3D5) | 0x01, 0x3D5);
582} 621}
583 622
584static void disable_mmio(void) 623static void disable_mmio(struct tridentfb_par *par)
585{ 624{
586 /* Goto New Mode */ 625 /* Goto New Mode */
587 t_outb(0x0B, 0x3C4); 626 vga_mm_rseq(par->io_virt, 0x0B);
588 t_inb(0x3C5);
589 627
590 /* Unprotect registers */ 628 /* Unprotect registers */
591 t_outb(NewMode1, 0x3C4); 629 vga_mm_wseq(par->io_virt, NewMode1, 0x80);
592 t_outb(0x80, 0x3C5); 630 if (!is_oldprotect(par->chip_id))
631 vga_mm_wseq(par->io_virt, Protection, 0x92);
593 632
594 /* Disable MMIO */ 633 /* Disable MMIO */
595 t_outb(PCIReg, 0x3D4); 634 t_outb(par, PCIReg, 0x3D4);
596 t_outb(t_inb(0x3D5) & ~0x01, 0x3D5); 635 t_outb(par, t_inb(par, 0x3D5) & ~0x01, 0x3D5);
597} 636}
598 637
599#define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) 638static inline void crtc_unlock(struct tridentfb_par *par)
639{
640 write3X4(par, VGA_CRTC_V_SYNC_END,
641 read3X4(par, VGA_CRTC_V_SYNC_END) & 0x7F);
642}
600 643
601/* Return flat panel's maximum x resolution */ 644/* Return flat panel's maximum x resolution */
602static int __devinit get_nativex(void) 645static int __devinit get_nativex(struct tridentfb_par *par)
603{ 646{
604 int x, y, tmp; 647 int x, y, tmp;
605 648
606 if (nativex) 649 if (nativex)
607 return nativex; 650 return nativex;
608 651
609 tmp = (read3CE(VertStretch) >> 4) & 3; 652 tmp = (read3CE(par, VertStretch) >> 4) & 3;
610 653
611 switch (tmp) { 654 switch (tmp) {
612 case 0: 655 case 0:
@@ -632,77 +675,92 @@ static int __devinit get_nativex(void)
632} 675}
633 676
634/* Set pitch */ 677/* Set pitch */
635static void set_lwidth(int width) 678static inline void set_lwidth(struct tridentfb_par *par, int width)
636{ 679{
637 write3X4(Offset, width & 0xFF); 680 write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
638 write3X4(AddColReg, 681 write3X4(par, AddColReg,
639 (read3X4(AddColReg) & 0xCF) | ((width & 0x300) >> 4)); 682 (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
640} 683}
641 684
642/* For resolutions smaller than FP resolution stretch */ 685/* For resolutions smaller than FP resolution stretch */
643static void screen_stretch(void) 686static void screen_stretch(struct tridentfb_par *par)
644{ 687{
645 if (chip_id != CYBERBLADEXPAi1) 688 if (par->chip_id != CYBERBLADEXPAi1)
646 write3CE(BiosReg, 0); 689 write3CE(par, BiosReg, 0);
647 else 690 else
648 write3CE(BiosReg, 8); 691 write3CE(par, BiosReg, 8);
649 write3CE(VertStretch, (read3CE(VertStretch) & 0x7C) | 1); 692 write3CE(par, VertStretch, (read3CE(par, VertStretch) & 0x7C) | 1);
650 write3CE(HorStretch, (read3CE(HorStretch) & 0x7C) | 1); 693 write3CE(par, HorStretch, (read3CE(par, HorStretch) & 0x7C) | 1);
651} 694}
652 695
653/* For resolutions smaller than FP resolution center */ 696/* For resolutions smaller than FP resolution center */
654static void screen_center(void) 697static inline void screen_center(struct tridentfb_par *par)
655{ 698{
656 write3CE(VertStretch, (read3CE(VertStretch) & 0x7C) | 0x80); 699 write3CE(par, VertStretch, (read3CE(par, VertStretch) & 0x7C) | 0x80);
657 write3CE(HorStretch, (read3CE(HorStretch) & 0x7C) | 0x80); 700 write3CE(par, HorStretch, (read3CE(par, HorStretch) & 0x7C) | 0x80);
658} 701}
659 702
660/* Address of first shown pixel in display memory */ 703/* Address of first shown pixel in display memory */
661static void set_screen_start(int base) 704static void set_screen_start(struct tridentfb_par *par, int base)
662{ 705{
663 write3X4(StartAddrLow, base & 0xFF); 706 u8 tmp;
664 write3X4(StartAddrHigh, (base & 0xFF00) >> 8); 707 write3X4(par, VGA_CRTC_START_LO, base & 0xFF);
665 write3X4(CRTCModuleTest, 708 write3X4(par, VGA_CRTC_START_HI, (base & 0xFF00) >> 8);
666 (read3X4(CRTCModuleTest) & 0xDF) | ((base & 0x10000) >> 11)); 709 tmp = read3X4(par, CRTCModuleTest) & 0xDF;
667 write3X4(CRTHiOrd, 710 write3X4(par, CRTCModuleTest, tmp | ((base & 0x10000) >> 11));
668 (read3X4(CRTHiOrd) & 0xF8) | ((base & 0xE0000) >> 17)); 711 tmp = read3X4(par, CRTHiOrd) & 0xF8;
712 write3X4(par, CRTHiOrd, tmp | ((base & 0xE0000) >> 17));
669} 713}
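set_screen_start scatters the start address across four registers: bits 0-15 go to the standard VGA start-address pair, bit 16 to CRTCModuleTest bit 5 and bits 17-19 to CRTHiOrd bits 0-2, exactly as the masks and shifts above imply. A register-free sketch of the split (struct and function names are illustrative):

#include <stdint.h>

struct start_fields {
        uint8_t lo;          /* base[7:0]   -> VGA_CRTC_START_LO    */
        uint8_t hi;          /* base[15:8]  -> VGA_CRTC_START_HI    */
        uint8_t module_bit;  /* base[16]    -> CRTCModuleTest bit 5 */
        uint8_t hiord_bits;  /* base[19:17] -> CRTHiOrd bits 2..0   */
};

static struct start_fields split_start(uint32_t base)
{
        struct start_fields f = {
                .lo         = base & 0xFF,
                .hi         = (base >> 8) & 0xFF,
                .module_bit = (base & 0x10000) >> 11, /* lands in bit 5 */
                .hiord_bits = (base & 0xE0000) >> 17,
        };

        return f;
}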
670 714
671/* Set dotclock frequency */ 715/* Set dotclock frequency */
672static void set_vclk(unsigned long freq) 716static void set_vclk(struct tridentfb_par *par, unsigned long freq)
673{ 717{
674 int m, n, k; 718 int m, n, k;
675 unsigned long f, fi, d, di; 719 unsigned long fi, d, di;
676 unsigned char lo = 0, hi = 0; 720 unsigned char best_m = 0, best_n = 0, best_k = 0;
721 unsigned char hi, lo;
722 unsigned char shift = !is_oldclock(par->chip_id) ? 2 : 1;
677 723
678 d = 20000; 724 d = 20000;
679 for (k = 2; k >= 0; k--) 725 for (k = shift; k >= 0; k--)
680 for (m = 0; m < 63; m++) 726 for (m = 1; m < 32; m++) {
681 for (n = 0; n < 128; n++) { 727 n = ((m + 2) << shift) - 8;
728 for (n = (n < 0 ? 0 : n); n < 122; n++) {
682 fi = ((14318l * (n + 8)) / (m + 2)) >> k; 729 fi = ((14318l * (n + 8)) / (m + 2)) >> k;
683 if ((di = abs(fi - freq)) < d) { 730 di = abs(fi - freq);
731 if (di < d || (di == d && k == best_k)) {
684 d = di; 732 d = di;
685 f = fi; 733 best_n = n;
686 lo = n; 734 best_m = m;
687 hi = (k << 6) | m; 735 best_k = k;
688 } 736 }
689 if (fi > freq) 737 if (fi > freq)
690 break; 738 break;
691 } 739 }
692 if (chip3D) { 740 }
693 write3C4(ClockHigh, hi); 741
694 write3C4(ClockLow, lo); 742 if (is_oldclock(par->chip_id)) {
743 lo = best_n | (best_m << 7);
744 hi = (best_m >> 1) | (best_k << 4);
695 } else { 745 } else {
696 outb(lo, 0x43C8); 746 lo = best_n;
697 outb(hi, 0x43C9); 747 hi = best_m | (best_k << 6);
748 }
749
750 if (is3Dchip(par->chip_id)) {
751 vga_mm_wseq(par->io_virt, ClockHigh, hi);
752 vga_mm_wseq(par->io_virt, ClockLow, lo);
753 } else {
754 t_outb(par, lo, 0x43C8);
755 t_outb(par, hi, 0x43C9);
698 } 756 }
699 debug("VCLK = %X %X\n", hi, lo); 757 debug("VCLK = %X %X\n", hi, lo);
700} 758}
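set_vclk is a brute-force PLL search: it tries divisor triples and keeps the one whose output, f = (14318 * (n + 8) / (m + 2)) >> k in kHz (14318 kHz being the 14.31818 MHz reference), lands closest to the requested dotclock; only the winning m/n/k are then encoded into the old- or new-style clock registers. A standalone sketch of the search with the register encoding left out (loop bounds are simplified here; the driver also ties the minimum n to m and k):

/* Nearest-frequency PLL search, f in kHz. Illustrative only. */
static void find_vclk(unsigned long freq, int *best_m, int *best_n, int *best_k)
{
        unsigned long best_d = ~0UL;
        int m, n, k;

        for (k = 2; k >= 0; k--)
                for (m = 1; m < 32; m++)
                        for (n = 0; n < 122; n++) {
                                unsigned long f = (14318UL * (n + 8) / (m + 2)) >> k;
                                unsigned long d = (f > freq) ? f - freq : freq - f;

                                if (d < best_d) {
                                        best_d = d;
                                        *best_m = m;
                                        *best_n = n;
                                        *best_k = k;
                                }
                                if (f > freq)
                                        break;
                        }
}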
701 759
702/* Set number of lines for flat panels*/ 760/* Set number of lines for flat panels*/
703static void set_number_of_lines(int lines) 761static void set_number_of_lines(struct tridentfb_par *par, int lines)
704{ 762{
705 int tmp = read3CE(CyberEnhance) & 0x8F; 763 int tmp = read3CE(par, CyberEnhance) & 0x8F;
706 if (lines > 1024) 764 if (lines > 1024)
707 tmp |= 0x50; 765 tmp |= 0x50;
708 else if (lines > 768) 766 else if (lines > 768)
@@ -711,24 +769,24 @@ static void set_number_of_lines(int lines)
711 tmp |= 0x20; 769 tmp |= 0x20;
712 else if (lines > 480) 770 else if (lines > 480)
713 tmp |= 0x10; 771 tmp |= 0x10;
714 write3CE(CyberEnhance, tmp); 772 write3CE(par, CyberEnhance, tmp);
715} 773}
716 774
717/* 775/*
718 * If we see that FP is active we assume we have one. 776 * If we see that FP is active we assume we have one.
719 * Otherwise we have a CRT display.User can override. 777 * Otherwise we have a CRT display. User can override.
720 */ 778 */
721static unsigned int __devinit get_displaytype(void) 779static int __devinit is_flatpanel(struct tridentfb_par *par)
722{ 780{
723 if (fp) 781 if (fp)
724 return DISPLAY_FP; 782 return 1;
725 if (crt || !chipcyber) 783 if (crt || !iscyber(par->chip_id))
726 return DISPLAY_CRT; 784 return 0;
727 return (read3CE(FPConfig) & 0x10) ? DISPLAY_FP : DISPLAY_CRT; 785 return (read3CE(par, FPConfig) & 0x10) ? 1 : 0;
728} 786}
729 787
730/* Try detecting the video memory size */ 788/* Try detecting the video memory size */
731static unsigned int __devinit get_memsize(void) 789static unsigned int __devinit get_memsize(struct tridentfb_par *par)
732{ 790{
733 unsigned char tmp, tmp2; 791 unsigned char tmp, tmp2;
734 unsigned int k; 792 unsigned int k;
@@ -737,12 +795,12 @@ static unsigned int __devinit get_memsize(void)
737 if (memsize) 795 if (memsize)
738 k = memsize * Kb; 796 k = memsize * Kb;
739 else 797 else
740 switch (chip_id) { 798 switch (par->chip_id) {
741 case CYBER9525DVD: 799 case CYBER9525DVD:
742 k = 2560 * Kb; 800 k = 2560 * Kb;
743 break; 801 break;
744 default: 802 default:
745 tmp = read3X4(SPR) & 0x0F; 803 tmp = read3X4(par, SPR) & 0x0F;
746 switch (tmp) { 804 switch (tmp) {
747 805
748 case 0x01: 806 case 0x01:
@@ -774,7 +832,7 @@ static unsigned int __devinit get_memsize(void)
774 break; 832 break;
775 case 0x0E: /* XP */ 833 case 0x0E: /* XP */
776 834
777 tmp2 = read3C4(0xC1); 835 tmp2 = vga_mm_rseq(par->io_virt, 0xC1);
778 switch (tmp2) { 836 switch (tmp2) {
779 case 0x00: 837 case 0x00:
780 k = 20 * Mb; 838 k = 20 * Mb;
@@ -812,26 +870,67 @@ static unsigned int __devinit get_memsize(void)
812static int tridentfb_check_var(struct fb_var_screeninfo *var, 870static int tridentfb_check_var(struct fb_var_screeninfo *var,
813 struct fb_info *info) 871 struct fb_info *info)
814{ 872{
873 struct tridentfb_par *par = info->par;
815 int bpp = var->bits_per_pixel; 874 int bpp = var->bits_per_pixel;
875 int line_length;
876 int ramdac = 230000; /* 230MHz for most 3D chips */
816 debug("enter\n"); 877 debug("enter\n");
817 878
818 /* check color depth */ 879 /* check color depth */
819 if (bpp == 24) 880 if (bpp == 24)
820 bpp = var->bits_per_pixel = 32; 881 bpp = var->bits_per_pixel = 32;
882 if (bpp != 8 && bpp != 16 && bpp != 32)
883 return -EINVAL;
884 if (par->chip_id == TGUI9440 && bpp == 32)
885 return -EINVAL;
821 /* check whether resolution fits on panel and in memory */ 886 /* check whether resolution fits on panel and in memory */
822 if (flatpanel && nativex && var->xres > nativex) 887 if (par->flatpanel && nativex && var->xres > nativex)
888 return -EINVAL;
889 /* various resolution checks */
890 var->xres = (var->xres + 7) & ~0x7;
891 if (var->xres > var->xres_virtual)
892 var->xres_virtual = var->xres;
893 if (var->yres > var->yres_virtual)
894 var->yres_virtual = var->yres;
895 if (var->xres_virtual > 4095 || var->yres > 2048)
823 return -EINVAL; 896 return -EINVAL;
824 if (var->xres * var->yres_virtual * bpp / 8 > info->fix.smem_len) 897 /* prevent from position overflow for acceleration */
898 if (var->yres_virtual > 0xffff)
899 return -EINVAL;
900 line_length = var->xres_virtual * bpp / 8;
901
902 if (!is3Dchip(par->chip_id) &&
903 !(info->flags & FBINFO_HWACCEL_DISABLED)) {
904 /* acceleration requires line length to be power of 2 */
905 if (line_length <= 512)
906 var->xres_virtual = 512 * 8 / bpp;
907 else if (line_length <= 1024)
908 var->xres_virtual = 1024 * 8 / bpp;
909 else if (line_length <= 2048)
910 var->xres_virtual = 2048 * 8 / bpp;
911 else if (line_length <= 4096)
912 var->xres_virtual = 4096 * 8 / bpp;
913 else if (line_length <= 8192)
914 var->xres_virtual = 8192 * 8 / bpp;
915 else
916 return -EINVAL;
917
918 line_length = var->xres_virtual * bpp / 8;
919 }
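On the pre-3D chips the engine only understands pitches of 512, 1024, 2048, 4096 or 8192 bytes, so when acceleration is enabled check_var widens xres_virtual until the line length reaches the next such value (and rejects anything beyond 8192). The rounding in isolation (round_pitch is an illustrative helper; the driver then recomputes xres_virtual as pitch * 8 / bpp):

/* Round a requested line length in bytes up to the next engine-supported
 * power-of-two pitch; 0 means it exceeds the 8192-byte maximum. */
static int round_pitch(int line_length)
{
        int pitch;

        for (pitch = 512; pitch <= 8192; pitch <<= 1)
                if (line_length <= pitch)
                        return pitch;

        return 0;
}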
920
921 /* datasheet specifies how to set panning only up to 4 MB */
922 if (line_length * (var->yres_virtual - var->yres) > (4 << 20))
923 var->yres_virtual = ((4 << 20) / line_length) + var->yres;
924
925 if (line_length * var->yres_virtual > info->fix.smem_len)
825 return -EINVAL; 926 return -EINVAL;
826 927
827 switch (bpp) { 928 switch (bpp) {
828 case 8: 929 case 8:
829 var->red.offset = 0; 930 var->red.offset = 0;
830 var->green.offset = 0; 931 var->red.length = 8;
831 var->blue.offset = 0; 932 var->green = var->red;
832 var->red.length = 6; 933 var->blue = var->red;
833 var->green.length = 6;
834 var->blue.length = 6;
835 break; 934 break;
836 case 16: 935 case 16:
837 var->red.offset = 11; 936 var->red.offset = 11;
@@ -852,6 +951,33 @@ static int tridentfb_check_var(struct fb_var_screeninfo *var,
852 default: 951 default:
853 return -EINVAL; 952 return -EINVAL;
854 } 953 }
954
955 if (is_xp(par->chip_id))
956 ramdac = 350000;
957
958 switch (par->chip_id) {
959 case TGUI9440:
960 ramdac = (bpp >= 16) ? 45000 : 90000;
961 break;
962 case CYBER9320:
963 case TGUI9660:
964 ramdac = 135000;
965 break;
966 case PROVIDIA9685:
967 case CYBER9388:
968 case CYBER9382:
969 case CYBER9385:
970 ramdac = 170000;
971 break;
972 }
973
974 /* The clock is doubled for 32 bpp */
975 if (bpp == 32)
976 ramdac /= 2;
977
978 if (PICOS2KHZ(var->pixclock) > ramdac)
979 return -EINVAL;
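fb_var_screeninfo carries the pixel clock as a period in picoseconds, so the check converts it with PICOS2KHZ (kHz = 10^9 / picoseconds) and compares against the per-chip RAMDAC limit chosen above, halving the limit at 32 bpp because set_par doubles VCLK for that depth. A sketch of just the comparison (pixclock_ok is an illustrative helper):

/* Return nonzero when the requested mode's dotclock fits the RAMDAC. */
static int pixclock_ok(unsigned long pixclock_ps, int ramdac_khz, int bpp)
{
        unsigned long khz = 1000000000UL / pixclock_ps; /* PICOS2KHZ() */

        if (bpp == 32)
                ramdac_khz /= 2; /* VCLK is doubled for 32 bpp */

        return khz <= (unsigned long)ramdac_khz;
}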
980
855 debug("exit\n"); 981 debug("exit\n");
856 982
857 return 0; 983 return 0;
@@ -862,25 +988,31 @@ static int tridentfb_check_var(struct fb_var_screeninfo *var,
862static int tridentfb_pan_display(struct fb_var_screeninfo *var, 988static int tridentfb_pan_display(struct fb_var_screeninfo *var,
863 struct fb_info *info) 989 struct fb_info *info)
864{ 990{
991 struct tridentfb_par *par = info->par;
865 unsigned int offset; 992 unsigned int offset;
866 993
867 debug("enter\n"); 994 debug("enter\n");
868 offset = (var->xoffset + (var->yoffset * var->xres)) 995 offset = (var->xoffset + (var->yoffset * var->xres_virtual))
869 * var->bits_per_pixel / 32; 996 * var->bits_per_pixel / 32;
870 info->var.xoffset = var->xoffset; 997 set_screen_start(par, offset);
871 info->var.yoffset = var->yoffset;
872 set_screen_start(offset);
873 debug("exit\n"); 998 debug("exit\n");
874 return 0; 999 return 0;
875} 1000}
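The pan offset is programmed in 32-bit units, hence the * bits_per_pixel / 32; for example xoffset 8, yoffset 100 on a 1024-pixel-wide 16 bpp virtual screen gives (8 + 100 * 1024) * 16 / 32 = 51204. The arithmetic on its own (pan_offset is an illustrative helper):

/* First visible pixel, in dwords, as handed to set_screen_start(). */
static unsigned int pan_offset(unsigned int xoffset, unsigned int yoffset,
                               unsigned int xres_virtual, unsigned int bpp)
{
        return (xoffset + yoffset * xres_virtual) * bpp / 32;
}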
876 1001
877#define shadowmode_on() write3CE(CyberControl, read3CE(CyberControl) | 0x81) 1002static inline void shadowmode_on(struct tridentfb_par *par)
878#define shadowmode_off() write3CE(CyberControl, read3CE(CyberControl) & 0x7E) 1003{
1004 write3CE(par, CyberControl, read3CE(par, CyberControl) | 0x81);
1005}
1006
1007static inline void shadowmode_off(struct tridentfb_par *par)
1008{
1009 write3CE(par, CyberControl, read3CE(par, CyberControl) & 0x7E);
1010}
879 1011
880/* Set the hardware to the requested video mode */ 1012/* Set the hardware to the requested video mode */
881static int tridentfb_set_par(struct fb_info *info) 1013static int tridentfb_set_par(struct fb_info *info)
882{ 1014{
883 struct tridentfb_par *par = (struct tridentfb_par *)(info->par); 1015 struct tridentfb_par *par = info->par;
884 u32 htotal, hdispend, hsyncstart, hsyncend, hblankstart, hblankend; 1016 u32 htotal, hdispend, hsyncstart, hsyncend, hblankstart, hblankend;
885 u32 vtotal, vdispend, vsyncstart, vsyncend, vblankstart, vblankend; 1017 u32 vtotal, vdispend, vsyncstart, vsyncend, vblankstart, vblankend;
886 struct fb_var_screeninfo *var = &info->var; 1018 struct fb_var_screeninfo *var = &info->var;
@@ -891,58 +1023,73 @@ static int tridentfb_set_par(struct fb_info *info)
891 debug("enter\n"); 1023 debug("enter\n");
892 hdispend = var->xres / 8 - 1; 1024 hdispend = var->xres / 8 - 1;
893 hsyncstart = (var->xres + var->right_margin) / 8; 1025 hsyncstart = (var->xres + var->right_margin) / 8;
894 hsyncend = var->hsync_len / 8; 1026 hsyncend = (var->xres + var->right_margin + var->hsync_len) / 8;
895 htotal = 1027 htotal = (var->xres + var->left_margin + var->right_margin +
896 (var->xres + var->left_margin + var->right_margin + 1028 var->hsync_len) / 8 - 5;
897 var->hsync_len) / 8 - 10;
898 hblankstart = hdispend + 1; 1029 hblankstart = hdispend + 1;
899 hblankend = htotal + 5; 1030 hblankend = htotal + 3;
900 1031
901 vdispend = var->yres - 1; 1032 vdispend = var->yres - 1;
902 vsyncstart = var->yres + var->lower_margin; 1033 vsyncstart = var->yres + var->lower_margin;
903 vsyncend = var->vsync_len; 1034 vsyncend = vsyncstart + var->vsync_len;
904 vtotal = var->upper_margin + vsyncstart + vsyncend - 2; 1035 vtotal = var->upper_margin + vsyncend - 2;
905 vblankstart = var->yres; 1036 vblankstart = vdispend + 1;
906 vblankend = vtotal + 2; 1037 vblankend = vtotal;
1038
1039 if (info->var.vmode & FB_VMODE_INTERLACED) {
1040 vtotal /= 2;
1041 vdispend /= 2;
1042 vsyncstart /= 2;
1043 vsyncend /= 2;
1044 vblankstart /= 2;
1045 vblankend /= 2;
1046 }
907 1047
908 crtc_unlock(); 1048 enable_mmio(par);
909 write3CE(CyberControl, 8); 1049 crtc_unlock(par);
1050 write3CE(par, CyberControl, 8);
1051 tmp = 0xEB;
1052 if (var->sync & FB_SYNC_HOR_HIGH_ACT)
1053 tmp &= ~0x40;
1054 if (var->sync & FB_SYNC_VERT_HIGH_ACT)
1055 tmp &= ~0x80;
910 1056
911 if (flatpanel && var->xres < nativex) { 1057 if (par->flatpanel && var->xres < nativex) {
912 /* 1058 /*
913 * on flat panels with native size larger 1059 * on flat panels with native size larger
914 * than requested resolution decide whether 1060 * than requested resolution decide whether
915 * we stretch or center 1061 * we stretch or center
916 */ 1062 */
917 t_outb(0xEB, 0x3C2); 1063 t_outb(par, tmp | 0xC0, VGA_MIS_W);
918 1064
919 shadowmode_on(); 1065 shadowmode_on(par);
920 1066
921 if (center) 1067 if (center)
922 screen_center(); 1068 screen_center(par);
923 else if (stretch) 1069 else if (stretch)
924 screen_stretch(); 1070 screen_stretch(par);
925 1071
926 } else { 1072 } else {
927 t_outb(0x2B, 0x3C2); 1073 t_outb(par, tmp, VGA_MIS_W);
928 write3CE(CyberControl, 8); 1074 write3CE(par, CyberControl, 8);
929 } 1075 }
930 1076
931 /* vertical timing values */ 1077 /* vertical timing values */
932 write3X4(CRTVTotal, vtotal & 0xFF); 1078 write3X4(par, VGA_CRTC_V_TOTAL, vtotal & 0xFF);
933 write3X4(CRTVDispEnd, vdispend & 0xFF); 1079 write3X4(par, VGA_CRTC_V_DISP_END, vdispend & 0xFF);
934 write3X4(CRTVSyncStart, vsyncstart & 0xFF); 1080 write3X4(par, VGA_CRTC_V_SYNC_START, vsyncstart & 0xFF);
935 write3X4(CRTVSyncEnd, (vsyncend & 0x0F)); 1081 write3X4(par, VGA_CRTC_V_SYNC_END, (vsyncend & 0x0F));
936 write3X4(CRTVBlankStart, vblankstart & 0xFF); 1082 write3X4(par, VGA_CRTC_V_BLANK_START, vblankstart & 0xFF);
937 write3X4(CRTVBlankEnd, 0 /* p->vblankend & 0xFF */ ); 1083 write3X4(par, VGA_CRTC_V_BLANK_END, vblankend & 0xFF);
938 1084
939 /* horizontal timing values */ 1085 /* horizontal timing values */
940 write3X4(CRTHTotal, htotal & 0xFF); 1086 write3X4(par, VGA_CRTC_H_TOTAL, htotal & 0xFF);
941 write3X4(CRTHDispEnd, hdispend & 0xFF); 1087 write3X4(par, VGA_CRTC_H_DISP, hdispend & 0xFF);
942 write3X4(CRTHSyncStart, hsyncstart & 0xFF); 1088 write3X4(par, VGA_CRTC_H_SYNC_START, hsyncstart & 0xFF);
943 write3X4(CRTHSyncEnd, (hsyncend & 0x1F) | ((hblankend & 0x20) << 2)); 1089 write3X4(par, VGA_CRTC_H_SYNC_END,
944 write3X4(CRTHBlankStart, hblankstart & 0xFF); 1090 (hsyncend & 0x1F) | ((hblankend & 0x20) << 2));
945 write3X4(CRTHBlankEnd, 0 /* (p->hblankend & 0x1F) */ ); 1091 write3X4(par, VGA_CRTC_H_BLANK_START, hblankstart & 0xFF);
1092 write3X4(par, VGA_CRTC_H_BLANK_END, hblankend & 0x1F);
946 1093
947 /* higher bits of vertical timing values */ 1094 /* higher bits of vertical timing values */
948 tmp = 0x10; 1095 tmp = 0x10;
@@ -954,39 +1101,43 @@ static int tridentfb_set_par(struct fb_info *info)
954 if (vtotal & 0x200) tmp |= 0x20; 1101 if (vtotal & 0x200) tmp |= 0x20;
955 if (vdispend & 0x200) tmp |= 0x40; 1102 if (vdispend & 0x200) tmp |= 0x40;
956 if (vsyncstart & 0x200) tmp |= 0x80; 1103 if (vsyncstart & 0x200) tmp |= 0x80;
957 write3X4(CRTOverflow, tmp); 1104 write3X4(par, VGA_CRTC_OVERFLOW, tmp);
958 1105
959 tmp = read3X4(CRTHiOrd) | 0x08; /* line compare bit 10 */ 1106 tmp = read3X4(par, CRTHiOrd) & 0x07;
1107 tmp |= 0x08; /* line compare bit 10 */
960 if (vtotal & 0x400) tmp |= 0x80; 1108 if (vtotal & 0x400) tmp |= 0x80;
961 if (vblankstart & 0x400) tmp |= 0x40; 1109 if (vblankstart & 0x400) tmp |= 0x40;
962 if (vsyncstart & 0x400) tmp |= 0x20; 1110 if (vsyncstart & 0x400) tmp |= 0x20;
963 if (vdispend & 0x400) tmp |= 0x10; 1111 if (vdispend & 0x400) tmp |= 0x10;
964 write3X4(CRTHiOrd, tmp); 1112 write3X4(par, CRTHiOrd, tmp);
965 1113
966 tmp = 0; 1114 tmp = (htotal >> 8) & 0x01;
967 if (htotal & 0x800) tmp |= 0x800 >> 11; 1115 tmp |= (hdispend >> 7) & 0x02;
968 if (hblankstart & 0x800) tmp |= 0x800 >> 7; 1116 tmp |= (hsyncstart >> 5) & 0x08;
969 write3X4(HorizOverflow, tmp); 1117 tmp |= (hblankstart >> 4) & 0x10;
1118 write3X4(par, HorizOverflow, tmp);
970 1119
971 tmp = 0x40; 1120 tmp = 0x40;
972 if (vblankstart & 0x200) tmp |= 0x20; 1121 if (vblankstart & 0x200) tmp |= 0x20;
973//FIXME if (info->var.vmode & FB_VMODE_DOUBLE) tmp |= 0x80; /* double scan for 200 line modes */ 1122//FIXME if (info->var.vmode & FB_VMODE_DOUBLE) tmp |= 0x80; /* double scan for 200 line modes */
974 write3X4(CRTMaxScanLine, tmp); 1123 write3X4(par, VGA_CRTC_MAX_SCAN, tmp);
975 1124
976 write3X4(CRTLineCompare, 0xFF); 1125 write3X4(par, VGA_CRTC_LINE_COMPARE, 0xFF);
977 write3X4(CRTPRowScan, 0); 1126 write3X4(par, VGA_CRTC_PRESET_ROW, 0);
978 write3X4(CRTModeControl, 0xC3); 1127 write3X4(par, VGA_CRTC_MODE, 0xC3);
979 1128
980 write3X4(LinearAddReg, 0x20); /* enable linear addressing */ 1129 write3X4(par, LinearAddReg, 0x20); /* enable linear addressing */
981 1130
982 tmp = (info->var.vmode & FB_VMODE_INTERLACED) ? 0x84 : 0x80; 1131 tmp = (info->var.vmode & FB_VMODE_INTERLACED) ? 0x84 : 0x80;
983 write3X4(CRTCModuleTest, tmp); /* enable access extended memory */ 1132 /* enable access extended memory */
984 1133 write3X4(par, CRTCModuleTest, tmp);
985 write3X4(GraphEngReg, 0x80); /* enable GE for text acceleration */ 1134 tmp = read3CE(par, MiscIntContReg) & ~0x4;
1135 if (info->var.vmode & FB_VMODE_INTERLACED)
1136 tmp |= 0x4;
1137 write3CE(par, MiscIntContReg, tmp);
986 1138
987#ifdef CONFIG_FB_TRIDENT_ACCEL 1139 /* enable GE for text acceleration */
988 acc->init_accel(info->var.xres, bpp); 1140 write3X4(par, GraphEngReg, 0x80);
989#endif
990 1141
991 switch (bpp) { 1142 switch (bpp) {
992 case 8: 1143 case 8:
@@ -1003,57 +1154,59 @@ static int tridentfb_set_par(struct fb_info *info)
1003 break; 1154 break;
1004 } 1155 }
1005 1156
1006 write3X4(PixelBusReg, tmp); 1157 write3X4(par, PixelBusReg, tmp);
1007 1158
1008 tmp = 0x10; 1159 tmp = read3X4(par, DRAMControl);
1009 if (chipcyber) 1160 if (!is_oldprotect(par->chip_id))
1161 tmp |= 0x10;
1162 if (iscyber(par->chip_id))
1010 tmp |= 0x20; 1163 tmp |= 0x20;
1011 write3X4(DRAMControl, tmp); /* both IO, linear enable */ 1164 write3X4(par, DRAMControl, tmp); /* both IO, linear enable */
1012 1165
1013 write3X4(InterfaceSel, read3X4(InterfaceSel) | 0x40); 1166 write3X4(par, InterfaceSel, read3X4(par, InterfaceSel) | 0x40);
1014 write3X4(Performance, 0x92); 1167 if (!is_xp(par->chip_id))
1015 write3X4(PCIReg, 0x07); /* MMIO & PCI read and write burst enable */ 1168 write3X4(par, Performance, read3X4(par, Performance) | 0x10);
1169 /* MMIO & PCI read and write burst enable */
1170 if (par->chip_id != TGUI9440 && par->chip_id != IMAGE975)
1171 write3X4(par, PCIReg, read3X4(par, PCIReg) | 0x06);
1172
1173 vga_mm_wseq(par->io_virt, 0, 3);
1174 vga_mm_wseq(par->io_virt, 1, 1); /* set char clock 8 dots wide */
1175 /* enable 4 maps because needed in chain4 mode */
1176 vga_mm_wseq(par->io_virt, 2, 0x0F);
1177 vga_mm_wseq(par->io_virt, 3, 0);
1178 vga_mm_wseq(par->io_virt, 4, 0x0E); /* memory mode enable bitmaps ?? */
1016 1179
1017 /* convert from picoseconds to kHz */ 1180 /* convert from picoseconds to kHz */
1018 vclk = PICOS2KHZ(info->var.pixclock); 1181 vclk = PICOS2KHZ(info->var.pixclock);
1019 if (bpp == 32) 1182
1183 /* divide clock by 2 if 32bpp chain4 mode display and CPU path */
1184 tmp = read3CE(par, MiscExtFunc) & 0xF0;
1185 if (bpp == 32 || (par->chip_id == TGUI9440 && bpp == 16)) {
1186 tmp |= 8;
1020 vclk *= 2; 1187 vclk *= 2;
1021 set_vclk(vclk);
1022
1023 write3C4(0, 3);
1024 write3C4(1, 1); /* set char clock 8 dots wide */
1025 write3C4(2, 0x0F); /* enable 4 maps because needed in chain4 mode */
1026 write3C4(3, 0);
1027 write3C4(4, 0x0E); /* memory mode enable bitmaps ?? */
1028
1029 write3CE(MiscExtFunc, (bpp == 32) ? 0x1A : 0x12); /* divide clock by 2 if 32bpp */
1030 /* chain4 mode display and CPU path */
1031 write3CE(0x5, 0x40); /* no CGA compat, allow 256 col */
1032 write3CE(0x6, 0x05); /* graphics mode */
1033 write3CE(0x7, 0x0F); /* planes? */
1034
1035 if (chip_id == CYBERBLADEXPAi1) {
1036 /* This fixes snow-effect in 32 bpp */
1037 write3X4(CRTHSyncStart, 0x84);
1038 } 1188 }
1189 set_vclk(par, vclk);
1190 write3CE(par, MiscExtFunc, tmp | 0x12);
1191 write3CE(par, 0x5, 0x40); /* no CGA compat, allow 256 col */
1192 write3CE(par, 0x6, 0x05); /* graphics mode */
1193 write3CE(par, 0x7, 0x0F); /* planes? */
1039 1194
1040 writeAttr(0x10, 0x41); /* graphics mode and support 256 color modes */ 1195 /* graphics mode and support 256 color modes */
1041 writeAttr(0x12, 0x0F); /* planes */ 1196 writeAttr(par, 0x10, 0x41);
1042 writeAttr(0x13, 0); /* horizontal pel panning */ 1197 writeAttr(par, 0x12, 0x0F); /* planes */
1198 writeAttr(par, 0x13, 0); /* horizontal pel panning */
1043 1199
1044 /* colors */ 1200 /* colors */
1045 for (tmp = 0; tmp < 0x10; tmp++) 1201 for (tmp = 0; tmp < 0x10; tmp++)
1046 writeAttr(tmp, tmp); 1202 writeAttr(par, tmp, tmp);
1047 readb(par->io_virt + CRT + 0x0A); /* flip-flop to index */ 1203 fb_readb(par->io_virt + VGA_IS1_RC); /* flip-flop to index */
1048 t_outb(0x20, 0x3C0); /* enable attr */ 1204 t_outb(par, 0x20, VGA_ATT_W); /* enable attr */
1049 1205
1050 switch (bpp) { 1206 switch (bpp) {
1051 case 8: 1207 case 8:
1052 tmp = 0; 1208 tmp = 0;
1053 break; 1209 break;
1054 case 15:
1055 tmp = 0x10;
1056 break;
1057 case 16: 1210 case 16:
1058 tmp = 0x30; 1211 tmp = 0x30;
1059 break; 1212 break;
@@ -1063,19 +1216,23 @@ static int tridentfb_set_par(struct fb_info *info)
1063 break; 1216 break;
1064 } 1217 }
1065 1218
1066 t_inb(0x3C8); 1219 t_inb(par, VGA_PEL_IW);
1067 t_inb(0x3C6); 1220 t_inb(par, VGA_PEL_MSK);
1068 t_inb(0x3C6); 1221 t_inb(par, VGA_PEL_MSK);
1069 t_inb(0x3C6); 1222 t_inb(par, VGA_PEL_MSK);
1070 t_inb(0x3C6); 1223 t_inb(par, VGA_PEL_MSK);
1071 t_outb(tmp, 0x3C6); 1224 t_outb(par, tmp, VGA_PEL_MSK);
1072 t_inb(0x3C8); 1225 t_inb(par, VGA_PEL_IW);
1073 1226
1074 if (flatpanel) 1227 if (par->flatpanel)
1075 set_number_of_lines(info->var.yres); 1228 set_number_of_lines(par, info->var.yres);
1076 set_lwidth(info->var.xres * bpp / (4 * 16)); 1229 info->fix.line_length = info->var.xres_virtual * bpp / 8;
1230 set_lwidth(par, info->fix.line_length / 8);
1231
1232 if (!(info->flags & FBINFO_HWACCEL_DISABLED))
1233 par->init_accel(par, info->var.xres_virtual, bpp);
1234
1077 info->fix.visual = (bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; 1235 info->fix.visual = (bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
1078 info->fix.line_length = info->var.xres * (bpp >> 3);
1079 info->cmap.len = (bpp == 8) ? 256 : 16; 1236 info->cmap.len = (bpp == 8) ? 256 : 16;
1080 debug("exit\n"); 1237 debug("exit\n");
1081 return 0; 1238 return 0;
@@ -1087,17 +1244,18 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
1087 struct fb_info *info) 1244 struct fb_info *info)
1088{ 1245{
1089 int bpp = info->var.bits_per_pixel; 1246 int bpp = info->var.bits_per_pixel;
1247 struct tridentfb_par *par = info->par;
1090 1248
1091 if (regno >= info->cmap.len) 1249 if (regno >= info->cmap.len)
1092 return 1; 1250 return 1;
1093 1251
1094 if (bpp == 8) { 1252 if (bpp == 8) {
1095 t_outb(0xFF, 0x3C6); 1253 t_outb(par, 0xFF, VGA_PEL_MSK);
1096 t_outb(regno, 0x3C8); 1254 t_outb(par, regno, VGA_PEL_IW);
1097 1255
1098 t_outb(red >> 10, 0x3C9); 1256 t_outb(par, red >> 10, VGA_PEL_D);
1099 t_outb(green >> 10, 0x3C9); 1257 t_outb(par, green >> 10, VGA_PEL_D);
1100 t_outb(blue >> 10, 0x3C9); 1258 t_outb(par, blue >> 10, VGA_PEL_D);
1101 1259
1102 } else if (regno < 16) { 1260 } else if (regno < 16) {
1103 if (bpp == 16) { /* RGB 565 */ 1261 if (bpp == 16) { /* RGB 565 */
@@ -1108,28 +1266,28 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
1108 col |= col << 16; 1266 col |= col << 16;
1109 ((u32 *)(info->pseudo_palette))[regno] = col; 1267 ((u32 *)(info->pseudo_palette))[regno] = col;
1110 } else if (bpp == 32) /* ARGB 8888 */ 1268 } else if (bpp == 32) /* ARGB 8888 */
1111 ((u32*)info->pseudo_palette)[regno] = 1269 ((u32 *)info->pseudo_palette)[regno] =
1112 ((transp & 0xFF00) << 16) | 1270 ((transp & 0xFF00) << 16) |
1113 ((red & 0xFF00) << 8) | 1271 ((red & 0xFF00) << 8) |
1114 ((green & 0xFF00)) | 1272 ((green & 0xFF00)) |
1115 ((blue & 0xFF00) >> 8); 1273 ((blue & 0xFF00) >> 8);
1116 } 1274 }
1117 1275
1118/* debug("exit\n"); */
1119 return 0; 1276 return 0;
1120} 1277}
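For truecolor depths the entries land in the software pseudo-palette rather than the DAC; the 16 bpp branch (elided above) packs the 16-bit components into the 5:6:5 layout declared in tridentfb_check_var and doubles the result into both halves of the u32, mirroring what the ARGB 8888 branch does for 32 bpp. A sketch of that packing under those assumptions (rgb565_entry is an illustrative helper, not the exact driver code):

#include <stdint.h>

/* Build a 16 bpp pseudo-palette entry from fb's 16-bit components and
 * replicate it into both 16-bit halves. */
static uint32_t rgb565_entry(uint16_t red, uint16_t green, uint16_t blue)
{
        uint32_t col = (red & 0xF800) |
                       ((green & 0xFC00) >> 5) |
                       ((blue & 0xF800) >> 11);

        return col | (col << 16);
}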
1121 1278
1122/* Try blanking the screen.For flat panels it does nothing */ 1279/* Try blanking the screen. For flat panels it does nothing */
1123static int tridentfb_blank(int blank_mode, struct fb_info *info) 1280static int tridentfb_blank(int blank_mode, struct fb_info *info)
1124{ 1281{
1125 unsigned char PMCont, DPMSCont; 1282 unsigned char PMCont, DPMSCont;
1283 struct tridentfb_par *par = info->par;
1126 1284
1127 debug("enter\n"); 1285 debug("enter\n");
1128 if (flatpanel) 1286 if (par->flatpanel)
1129 return 0; 1287 return 0;
1130 t_outb(0x04, 0x83C8); /* Read DPMS Control */ 1288 t_outb(par, 0x04, 0x83C8); /* Read DPMS Control */
1131 PMCont = t_inb(0x83C6) & 0xFC; 1289 PMCont = t_inb(par, 0x83C6) & 0xFC;
1132 DPMSCont = read3CE(PowerStatus) & 0xFC; 1290 DPMSCont = read3CE(par, PowerStatus) & 0xFC;
1133 switch (blank_mode) { 1291 switch (blank_mode) {
1134 case FB_BLANK_UNBLANK: 1292 case FB_BLANK_UNBLANK:
1135 /* Screen: On, HSync: On, VSync: On */ 1293 /* Screen: On, HSync: On, VSync: On */
@@ -1155,9 +1313,9 @@ static int tridentfb_blank(int blank_mode, struct fb_info *info)
1155 break; 1313 break;
1156 } 1314 }
1157 1315
1158 write3CE(PowerStatus, DPMSCont); 1316 write3CE(par, PowerStatus, DPMSCont);
1159 t_outb(4, 0x83C8); 1317 t_outb(par, 4, 0x83C8);
1160 t_outb(PMCont, 0x83C6); 1318 t_outb(par, PMCont, 0x83C6);
1161 1319
1162 debug("exit\n"); 1320 debug("exit\n");
1163 1321
@@ -1174,33 +1332,46 @@ static struct fb_ops tridentfb_ops = {
1174 .fb_set_par = tridentfb_set_par, 1332 .fb_set_par = tridentfb_set_par,
1175 .fb_fillrect = tridentfb_fillrect, 1333 .fb_fillrect = tridentfb_fillrect,
1176 .fb_copyarea = tridentfb_copyarea, 1334 .fb_copyarea = tridentfb_copyarea,
1177 .fb_imageblit = cfb_imageblit, 1335 .fb_imageblit = tridentfb_imageblit,
1336#ifdef CONFIG_FB_TRIDENT_ACCEL
1337 .fb_sync = tridentfb_sync,
1338#endif
1178}; 1339};
1179 1340
1180static int __devinit trident_pci_probe(struct pci_dev * dev, 1341static int __devinit trident_pci_probe(struct pci_dev *dev,
1181 const struct pci_device_id * id) 1342 const struct pci_device_id *id)
1182{ 1343{
1183 int err; 1344 int err;
1184 unsigned char revision; 1345 unsigned char revision;
1346 struct fb_info *info;
1347 struct tridentfb_par *default_par;
1348 int chip3D;
1349 int chip_id;
1185 1350
1186 err = pci_enable_device(dev); 1351 err = pci_enable_device(dev);
1187 if (err) 1352 if (err)
1188 return err; 1353 return err;
1189 1354
1190 chip_id = id->device; 1355 info = framebuffer_alloc(sizeof(struct tridentfb_par), &dev->dev);
1356 if (!info)
1357 return -ENOMEM;
1358 default_par = info->par;
1191 1359
1192 if (chip_id == CYBERBLADEi1) 1360 chip_id = id->device;
1193 output("*** Please do use cyblafb, Cyberblade/i1 support "
1194 "will soon be removed from tridentfb!\n");
1195 1361
1362#ifndef CONFIG_FB_TRIDENT_ACCEL
1363 noaccel = 1;
1364#endif
1196 1365
1197 /* If PCI id is 0x9660 then further detect chip type */ 1366 /* If PCI id is 0x9660 then further detect chip type */
1198 1367
1199 if (chip_id == TGUI9660) { 1368 if (chip_id == TGUI9660) {
1200 outb(RevisionID, 0x3C4); 1369 revision = vga_io_rseq(RevisionID);
1201 revision = inb(0x3C5);
1202 1370
1203 switch (revision) { 1371 switch (revision) {
1372 case 0x21:
1373 chip_id = PROVIDIA9685;
1374 break;
1204 case 0x22: 1375 case 0x22:
1205 case 0x23: 1376 case 0x23:
1206 chip_id = CYBER9397; 1377 chip_id = CYBER9397;
@@ -1229,123 +1400,170 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
1229 } 1400 }
1230 1401
1231 chip3D = is3Dchip(chip_id); 1402 chip3D = is3Dchip(chip_id);
1232 chipcyber = iscyber(chip_id);
1233 1403
1234 if (is_xp(chip_id)) { 1404 if (is_xp(chip_id)) {
1235 acc = &accel_xp; 1405 default_par->init_accel = xp_init_accel;
1406 default_par->wait_engine = xp_wait_engine;
1407 default_par->fill_rect = xp_fill_rect;
1408 default_par->copy_rect = xp_copy_rect;
1409 tridentfb_fix.accel = FB_ACCEL_TRIDENT_BLADEXP;
1236 } else if (is_blade(chip_id)) { 1410 } else if (is_blade(chip_id)) {
1237 acc = &accel_blade; 1411 default_par->init_accel = blade_init_accel;
1238 } else { 1412 default_par->wait_engine = blade_wait_engine;
1239 acc = &accel_image; 1413 default_par->fill_rect = blade_fill_rect;
1414 default_par->copy_rect = blade_copy_rect;
1415 default_par->image_blit = blade_image_blit;
1416 tridentfb_fix.accel = FB_ACCEL_TRIDENT_BLADE3D;
1417 } else if (chip3D) { /* 3DImage family left */
1418 default_par->init_accel = image_init_accel;
1419 default_par->wait_engine = image_wait_engine;
1420 default_par->fill_rect = image_fill_rect;
1421 default_par->copy_rect = image_copy_rect;
1422 tridentfb_fix.accel = FB_ACCEL_TRIDENT_3DIMAGE;
1423 } else { /* TGUI 9440/96XX family */
1424 default_par->init_accel = tgui_init_accel;
1425 default_par->wait_engine = xp_wait_engine;
1426 default_par->fill_rect = tgui_fill_rect;
1427 default_par->copy_rect = tgui_copy_rect;
1428 tridentfb_fix.accel = FB_ACCEL_TRIDENT_TGUI;
1240 } 1429 }
1241 1430
1242 /* acceleration is on by default for 3D chips */ 1431 default_par->chip_id = chip_id;
1243 defaultaccel = chip3D && !noaccel;
1244
1245 fb_info.par = &default_par;
1246 1432
1247 /* setup MMIO region */ 1433 /* setup MMIO region */
1248 tridentfb_fix.mmio_start = pci_resource_start(dev, 1); 1434 tridentfb_fix.mmio_start = pci_resource_start(dev, 1);
1249 tridentfb_fix.mmio_len = chip3D ? 0x20000 : 0x10000; 1435 tridentfb_fix.mmio_len = pci_resource_len(dev, 1);
1250 1436
1251 if (!request_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len, "tridentfb")) { 1437 if (!request_mem_region(tridentfb_fix.mmio_start,
1438 tridentfb_fix.mmio_len, "tridentfb")) {
1252 debug("request_region failed!\n"); 1439 debug("request_region failed!\n");
1440 framebuffer_release(info);
1253 return -1; 1441 return -1;
1254 } 1442 }
1255 1443
1256 default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); 1444 default_par->io_virt = ioremap_nocache(tridentfb_fix.mmio_start,
1445 tridentfb_fix.mmio_len);
1257 1446
1258 if (!default_par.io_virt) { 1447 if (!default_par->io_virt) {
1259 debug("ioremap failed\n"); 1448 debug("ioremap failed\n");
1260 err = -1; 1449 err = -1;
1261 goto out_unmap1; 1450 goto out_unmap1;
1262 } 1451 }
1263 1452
1264 enable_mmio(); 1453 enable_mmio(default_par);
1265 1454
1266 /* setup framebuffer memory */ 1455 /* setup framebuffer memory */
1267 tridentfb_fix.smem_start = pci_resource_start(dev, 0); 1456 tridentfb_fix.smem_start = pci_resource_start(dev, 0);
1268 tridentfb_fix.smem_len = get_memsize(); 1457 tridentfb_fix.smem_len = get_memsize(default_par);
1269 1458
1270 if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { 1459 if (!request_mem_region(tridentfb_fix.smem_start,
1460 tridentfb_fix.smem_len, "tridentfb")) {
1271 debug("request_mem_region failed!\n"); 1461 debug("request_mem_region failed!\n");
1272 disable_mmio(); 1462 disable_mmio(info->par);
1273 err = -1; 1463 err = -1;
1274 goto out_unmap1; 1464 goto out_unmap1;
1275 } 1465 }
1276 1466
1277 fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, 1467 info->screen_base = ioremap_nocache(tridentfb_fix.smem_start,
1278 tridentfb_fix.smem_len); 1468 tridentfb_fix.smem_len);
1279 1469
1280 if (!fb_info.screen_base) { 1470 if (!info->screen_base) {
1281 debug("ioremap failed\n"); 1471 debug("ioremap failed\n");
1282 err = -1; 1472 err = -1;
1283 goto out_unmap2; 1473 goto out_unmap2;
1284 } 1474 }
1285 1475
1286 output("%s board found\n", pci_name(dev)); 1476 default_par->flatpanel = is_flatpanel(default_par);
1287 displaytype = get_displaytype();
1288 1477
1289 if (flatpanel) 1478 if (default_par->flatpanel)
1290 nativex = get_nativex(); 1479 nativex = get_nativex(default_par);
1291 1480
1292 fb_info.fix = tridentfb_fix; 1481 info->fix = tridentfb_fix;
1293 fb_info.fbops = &tridentfb_ops; 1482 info->fbops = &tridentfb_ops;
1483 info->pseudo_palette = default_par->pseudo_pal;
1294 1484
1485 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
1486 if (!noaccel && default_par->init_accel) {
1487 info->flags &= ~FBINFO_HWACCEL_DISABLED;
1488 info->flags |= FBINFO_HWACCEL_COPYAREA;
1489 info->flags |= FBINFO_HWACCEL_FILLRECT;
1490 } else
1491 info->flags |= FBINFO_HWACCEL_DISABLED;
1295 1492
1296 fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; 1493 info->pixmap.addr = kmalloc(4096, GFP_KERNEL);
1297#ifdef CONFIG_FB_TRIDENT_ACCEL 1494 if (!info->pixmap.addr) {
1298 fb_info.flags |= FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; 1495 err = -ENOMEM;
1299#endif 1496 goto out_unmap2;
1300 fb_info.pseudo_palette = pseudo_pal; 1497 }
1498
1499 info->pixmap.size = 4096;
1500 info->pixmap.buf_align = 4;
1501 info->pixmap.scan_align = 1;
1502 info->pixmap.access_align = 32;
1503 info->pixmap.flags = FB_PIXMAP_SYSTEM;
1301 1504
1302 if (!fb_find_mode(&default_var, &fb_info, 1505 if (default_par->image_blit) {
1506 info->flags |= FBINFO_HWACCEL_IMAGEBLIT;
1507 info->pixmap.scan_align = 4;
1508 }
1509
1510 if (noaccel) {
1511 printk(KERN_DEBUG "disabling acceleration\n");
1512 info->flags |= FBINFO_HWACCEL_DISABLED;
1513 info->pixmap.scan_align = 1;
1514 }
1515
1516 if (!fb_find_mode(&info->var, info,
1303 mode_option, NULL, 0, NULL, bpp)) { 1517 mode_option, NULL, 0, NULL, bpp)) {
1304 err = -EINVAL; 1518 err = -EINVAL;
1305 goto out_unmap2; 1519 goto out_unmap2;
1306 } 1520 }
1307 err = fb_alloc_cmap(&fb_info.cmap, 256, 0); 1521 err = fb_alloc_cmap(&info->cmap, 256, 0);
1308 if (err < 0) 1522 if (err < 0)
1309 goto out_unmap2; 1523 goto out_unmap2;
1310 1524
1311 if (defaultaccel && acc) 1525 info->var.activate |= FB_ACTIVATE_NOW;
1312 default_var.accel_flags |= FB_ACCELF_TEXT; 1526 info->device = &dev->dev;
1313 else 1527 if (register_framebuffer(info) < 0) {
1314 default_var.accel_flags &= ~FB_ACCELF_TEXT; 1528 printk(KERN_ERR "tridentfb: could not register framebuffer\n");
1315 default_var.activate |= FB_ACTIVATE_NOW; 1529 fb_dealloc_cmap(&info->cmap);
1316 fb_info.var = default_var;
1317 fb_info.device = &dev->dev;
1318 if (register_framebuffer(&fb_info) < 0) {
1319 printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n");
1320 fb_dealloc_cmap(&fb_info.cmap);
1321 err = -EINVAL; 1530 err = -EINVAL;
1322 goto out_unmap2; 1531 goto out_unmap2;
1323 } 1532 }
1324 output("fb%d: %s frame buffer device %dx%d-%dbpp\n", 1533 output("fb%d: %s frame buffer device %dx%d-%dbpp\n",
1325 fb_info.node, fb_info.fix.id, default_var.xres, 1534 info->node, info->fix.id, info->var.xres,
1326 default_var.yres, default_var.bits_per_pixel); 1535 info->var.yres, info->var.bits_per_pixel);
1536
1537 pci_set_drvdata(dev, info);
1327 return 0; 1538 return 0;
1328 1539
1329out_unmap2: 1540out_unmap2:
1330 if (fb_info.screen_base) 1541 kfree(info->pixmap.addr);
1331 iounmap(fb_info.screen_base); 1542 if (info->screen_base)
1543 iounmap(info->screen_base);
1332 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); 1544 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
1333 disable_mmio(); 1545 disable_mmio(info->par);
1334out_unmap1: 1546out_unmap1:
1335 if (default_par.io_virt) 1547 if (default_par->io_virt)
1336 iounmap(default_par.io_virt); 1548 iounmap(default_par->io_virt);
1337 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); 1549 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
1550 framebuffer_release(info);
1338 return err; 1551 return err;
1339} 1552}
1340 1553
1341static void __devexit trident_pci_remove(struct pci_dev *dev) 1554static void __devexit trident_pci_remove(struct pci_dev *dev)
1342{ 1555{
1343 struct tridentfb_par *par = (struct tridentfb_par*)fb_info.par; 1556 struct fb_info *info = pci_get_drvdata(dev);
1344 unregister_framebuffer(&fb_info); 1557 struct tridentfb_par *par = info->par;
1558
1559 unregister_framebuffer(info);
1345 iounmap(par->io_virt); 1560 iounmap(par->io_virt);
1346 iounmap(fb_info.screen_base); 1561 iounmap(info->screen_base);
1347 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); 1562 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
1348 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); 1563 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
1564 pci_set_drvdata(dev, NULL);
1565 kfree(info->pixmap.addr);
1566 framebuffer_release(info);
1349} 1567}
1350 1568
1351/* List of boards that we are trying to support */ 1569/* List of boards that we are trying to support */
@@ -1358,6 +1576,7 @@ static struct pci_device_id trident_devices[] = {
1358 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEAi1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1576 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEAi1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1359 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEAi1D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1577 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEAi1D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1360 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEE4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1578 {PCI_VENDOR_ID_TRIDENT, CYBERBLADEE4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1579 {PCI_VENDOR_ID_TRIDENT, TGUI9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1361 {PCI_VENDOR_ID_TRIDENT, TGUI9660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1580 {PCI_VENDOR_ID_TRIDENT, TGUI9660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1362 {PCI_VENDOR_ID_TRIDENT, IMAGE975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1581 {PCI_VENDOR_ID_TRIDENT, IMAGE975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1363 {PCI_VENDOR_ID_TRIDENT, IMAGE985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1582 {PCI_VENDOR_ID_TRIDENT, IMAGE985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -1399,9 +1618,9 @@ static int __init tridentfb_setup(char *options)
1399 if (!strncmp(opt, "noaccel", 7)) 1618 if (!strncmp(opt, "noaccel", 7))
1400 noaccel = 1; 1619 noaccel = 1;
1401 else if (!strncmp(opt, "fp", 2)) 1620 else if (!strncmp(opt, "fp", 2))
1402 displaytype = DISPLAY_FP; 1621 fp = 1;
1403 else if (!strncmp(opt, "crt", 3)) 1622 else if (!strncmp(opt, "crt", 3))
1404 displaytype = DISPLAY_CRT; 1623 fp = 0;
1405 else if (!strncmp(opt, "bpp=", 4)) 1624 else if (!strncmp(opt, "bpp=", 4))
1406 bpp = simple_strtoul(opt + 4, NULL, 0); 1625 bpp = simple_strtoul(opt + 4, NULL, 0);
1407 else if (!strncmp(opt, "center", 6)) 1626 else if (!strncmp(opt, "center", 6))
@@ -1430,7 +1649,6 @@ static int __init tridentfb_init(void)
 		return -ENODEV;
 	tridentfb_setup(option);
 #endif
-	output("Trident framebuffer %s initializing\n", VERSION);
 	return pci_register_driver(&tridentfb_pci_driver);
 }
 
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index cdbb56edb6cb..50744229c7a9 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -2054,8 +2054,8 @@ MODULE_PARM_DESC(maxhf,
 module_param(maxvf, ushort, 0);
 MODULE_PARM_DESC(maxvf,
 	"Maximum vertical frequency [Hz], overrides EDID data");
-module_param_named(mode, mode_option, charp, 0);
-MODULE_PARM_DESC(mode,
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option,
 	"Specify initial video mode as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
 module_param(vbemode, ushort, 0);
 MODULE_PARM_DESC(vbemode,
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 072638a9528a..93fe08d6c78f 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -443,19 +443,29 @@ static int vfb_mmap(struct fb_info *info,
 }
 
 #ifndef MODULE
+/*
+ * The virtual framebuffer driver is only enabled if explicitly
+ * requested by passing 'video=vfb:' (or any actual options).
+ */
 static int __init vfb_setup(char *options)
 {
 	char *this_opt;
 
+	vfb_enable = 0;
+
+	if (!options)
+		return 1;
+
 	vfb_enable = 1;
 
-	if (!options || !*options)
+	if (!*options)
 		return 1;
 
 	while ((this_opt = strsep(&options, ",")) != NULL) {
 		if (!*this_opt)
 			continue;
-		if (!strncmp(this_opt, "disable", 7))
+		/* Test disable for backwards compatibility */
+		if (!strcmp(this_opt, "disable"))
 			vfb_enable = 0;
 	}
 	return 1;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 9b3c5923365e..e31bca8a0cb2 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -26,18 +26,6 @@
 #include <asm/io.h>
 #include <video/vga.h>
 
-#define GRAPHICS_ADDR_REG VGA_GFX_I	/* Graphics address register. */
-#define GRAPHICS_DATA_REG VGA_GFX_D	/* Graphics data register. */
-
-#define SET_RESET_INDEX VGA_GFX_SR_VALUE	/* Set/Reset Register index. */
-#define ENABLE_SET_RESET_INDEX VGA_GFX_SR_ENABLE	/* Enable Set/Reset Register index. */
-#define DATA_ROTATE_INDEX VGA_GFX_DATA_ROTATE	/* Data Rotate Register index. */
-#define GRAPHICS_MODE_INDEX VGA_GFX_MODE	/* Graphics Mode Register index. */
-#define BIT_MASK_INDEX VGA_GFX_BIT_MASK	/* Bit Mask Register index. */
-
-#define dac_reg (VGA_PEL_IW)
-#define dac_val (VGA_PEL_D)
-
 #define VGA_FB_PHYS 0xA0000
 #define VGA_FB_PHYS_LEN 65536
 
@@ -108,7 +96,7 @@ static struct fb_fix_screeninfo vga16fb_fix __initdata = {
 	.visual = FB_VISUAL_PSEUDOCOLOR,
 	.xpanstep = 8,
 	.ypanstep = 1,
-	.line_length = 640/8,
+	.line_length = 640 / 8,
 	.accel = FB_ACCEL_NONE
 };
 
@@ -135,23 +123,22 @@ static inline int setmode(int mode)
 {
 	int oldmode;
 
-	vga_io_w(GRAPHICS_ADDR_REG, GRAPHICS_MODE_INDEX);
-	oldmode = vga_io_r(GRAPHICS_DATA_REG);
-	vga_io_w(GRAPHICS_DATA_REG, mode);
+	oldmode = vga_io_rgfx(VGA_GFX_MODE);
+	vga_io_w(VGA_GFX_D, mode);
 	return oldmode;
 }
 
 /* Select the Bit Mask Register and return its value. */
 static inline int selectmask(void)
 {
-	return vga_io_rgfx(BIT_MASK_INDEX);
+	return vga_io_rgfx(VGA_GFX_BIT_MASK);
 }
 
 /* Set the value of the Bit Mask Register.  It must already have been
    selected with selectmask(). */
 static inline void setmask(int mask)
 {
-	vga_io_w(GRAPHICS_DATA_REG, mask);
+	vga_io_w(VGA_GFX_D, mask);
 }
 
 /* Set the Data Rotate Register and return its old value.
@@ -161,9 +148,8 @@ static inline int setop(int op)
 {
 	int oldop;
 
-	vga_io_w(GRAPHICS_ADDR_REG, DATA_ROTATE_INDEX);
-	oldop = vga_io_r(GRAPHICS_DATA_REG);
-	vga_io_w(GRAPHICS_DATA_REG, op);
+	oldop = vga_io_rgfx(VGA_GFX_DATA_ROTATE);
+	vga_io_w(VGA_GFX_D, op);
 	return oldop;
 }
 
@@ -173,9 +159,8 @@ static inline int setsr(int sr)
 {
 	int oldsr;
 
-	vga_io_w(GRAPHICS_ADDR_REG, ENABLE_SET_RESET_INDEX);
-	oldsr = vga_io_r(GRAPHICS_DATA_REG);
-	vga_io_w(GRAPHICS_DATA_REG, sr);
+	oldsr = vga_io_rgfx(VGA_GFX_SR_ENABLE);
+	vga_io_w(VGA_GFX_D, sr);
 	return oldsr;
 }
 
@@ -184,22 +169,21 @@ static inline int setcolor(int color)
 {
 	int oldcolor;
 
-	vga_io_w(GRAPHICS_ADDR_REG, SET_RESET_INDEX);
-	oldcolor = vga_io_r(GRAPHICS_DATA_REG);
-	vga_io_w(GRAPHICS_DATA_REG, color);
+	oldcolor = vga_io_rgfx(VGA_GFX_SR_VALUE);
+	vga_io_w(VGA_GFX_D, color);
 	return oldcolor;
 }
 
 /* Return the value in the Graphics Address Register. */
 static inline int getindex(void)
 {
-	return vga_io_r(GRAPHICS_ADDR_REG);
+	return vga_io_r(VGA_GFX_I);
 }
 
 /* Set the value in the Graphics Address Register. */
 static inline void setindex(int index)
 {
-	vga_io_w(GRAPHICS_ADDR_REG, index);
+	vga_io_w(VGA_GFX_I, index);
 }
 
 static void vga16fb_pan_var(struct fb_info *info,
@@ -672,10 +656,10 @@ static void ega16_setpalette(int regno, unsigned red, unsigned green, unsigned b
 
 static void vga16_setpalette(int regno, unsigned red, unsigned green, unsigned blue)
 {
-	outb(regno, dac_reg);
-	outb(red >> 10, dac_val);
-	outb(green >> 10, dac_val);
-	outb(blue >> 10, dac_val);
+	outb(regno, VGA_PEL_IW);
+	outb(red >> 10, VGA_PEL_D);
+	outb(green >> 10, VGA_PEL_D);
+	outb(blue >> 10, VGA_PEL_D);
 }
 
 static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -719,28 +703,15 @@ static int vga16fb_pan_display(struct fb_var_screeninfo *var,
    blanking code was originally by Huang shi chao, and modified by
    Christoph Rimek (chrimek@toppoint.de) and todd j. derr
    (tjd@barefoot.org) for Linux. */
-#define attrib_port VGA_ATC_IW
-#define seq_port_reg VGA_SEQ_I
-#define seq_port_val VGA_SEQ_D
-#define gr_port_reg VGA_GFX_I
-#define gr_port_val VGA_GFX_D
-#define video_misc_rd VGA_MIS_R
-#define video_misc_wr VGA_MIS_W
-#define vga_video_port_reg VGA_CRT_IC
-#define vga_video_port_val VGA_CRT_DC
 
 static void vga_vesa_blank(struct vga16fb_par *par, int mode)
 {
-	unsigned char SeqCtrlIndex;
-	unsigned char CrtCtrlIndex;
+	unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
+	unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);
 
-	//cli();
-	SeqCtrlIndex = vga_io_r(seq_port_reg);
-	CrtCtrlIndex = vga_io_r(vga_video_port_reg);
-
 	/* save original values of VGA controller registers */
 	if(!par->vesa_blanked) {
-		par->vga_state.CrtMiscIO = vga_io_r(video_misc_rd);
+		par->vga_state.CrtMiscIO = vga_io_r(VGA_MIS_R);
 		//sti();
 
 		par->vga_state.HorizontalTotal = vga_io_rcrt(0x00);	/* HorizontalTotal */
@@ -756,12 +727,11 @@ static void vga_vesa_blank(struct vga16fb_par *par, int mode)
 
 	/* assure that video is enabled */
 	/* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
-	//cli();
 	vga_io_wseq(0x01, par->vga_state.ClockingMode | 0x20);
 
 	/* test for vertical retrace in process.... */
 	if ((par->vga_state.CrtMiscIO & 0x80) == 0x80)
-		vga_io_w(video_misc_wr, par->vga_state.CrtMiscIO & 0xef);
+		vga_io_w(VGA_MIS_W, par->vga_state.CrtMiscIO & 0xef);
 
 	/*
 	 * Set <End of vertical retrace> to minimum (0) and
@@ -769,12 +739,10 @@ static void vga_vesa_blank(struct vga16fb_par *par, int mode)
 	 * Result: turn off vertical sync (VSync) pulse.
 	 */
 	if (mode & FB_BLANK_VSYNC_SUSPEND) {
-		outb_p(0x10,vga_video_port_reg);	/* StartVertRetrace */
-		outb_p(0xff,vga_video_port_val);	/* maximum value */
-		outb_p(0x11,vga_video_port_reg);	/* EndVertRetrace */
-		outb_p(0x40,vga_video_port_val);	/* minimum (bits 0..3) */
-		outb_p(0x07,vga_video_port_reg);	/* Overflow */
-		outb_p(par->vga_state.Overflow | 0x84,vga_video_port_val); /* bits 9,10 of vert. retrace */
+		vga_io_wcrt(VGA_CRTC_V_SYNC_START, 0xff);
+		vga_io_wcrt(VGA_CRTC_V_SYNC_END, 0x40);
+		/* bits 9,10 of vert. retrace */
+		vga_io_wcrt(VGA_CRTC_OVERFLOW, par->vga_state.Overflow | 0x84);
 	}
 
 	if (mode & FB_BLANK_HSYNC_SUSPEND) {
@@ -783,29 +751,22 @@ static void vga_vesa_blank(struct vga16fb_par *par, int mode)
 		 * <Start of horizontal Retrace> to maximum
 		 * Result: turn off horizontal sync (HSync) pulse.
 		 */
-		outb_p(0x04,vga_video_port_reg);	/* StartHorizRetrace */
-		outb_p(0xff,vga_video_port_val);	/* maximum */
-		outb_p(0x05,vga_video_port_reg);	/* EndHorizRetrace */
-		outb_p(0x00,vga_video_port_val);	/* minimum (0) */
+		vga_io_wcrt(VGA_CRTC_H_SYNC_START, 0xff);
+		vga_io_wcrt(VGA_CRTC_H_SYNC_END, 0x00);
 	}
 
 	/* restore both index registers */
-	outb_p(SeqCtrlIndex,seq_port_reg);
-	outb_p(CrtCtrlIndex,vga_video_port_reg);
-	//sti();
+	outb_p(SeqCtrlIndex, VGA_SEQ_I);
+	outb_p(CrtCtrlIndex, VGA_CRT_IC);
 }
 
 static void vga_vesa_unblank(struct vga16fb_par *par)
 {
-	unsigned char SeqCtrlIndex;
-	unsigned char CrtCtrlIndex;
+	unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
+	unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);
 
-	//cli();
-	SeqCtrlIndex = vga_io_r(seq_port_reg);
-	CrtCtrlIndex = vga_io_r(vga_video_port_reg);
-
 	/* restore original values of VGA controller registers */
-	vga_io_w(video_misc_wr, par->vga_state.CrtMiscIO);
+	vga_io_w(VGA_MIS_W, par->vga_state.CrtMiscIO);
 
 	/* HorizontalTotal */
 	vga_io_wcrt(0x00, par->vga_state.HorizontalTotal);
@@ -827,9 +788,8 @@ static void vga_vesa_unblank(struct vga16fb_par *par)
 	vga_io_wseq(0x01, par->vga_state.ClockingMode);
 
 	/* restore index/control registers */
-	vga_io_w(seq_port_reg, SeqCtrlIndex);
-	vga_io_w(vga_video_port_reg, CrtCtrlIndex);
-	//sti();
+	vga_io_w(VGA_SEQ_I, SeqCtrlIndex);
+	vga_io_w(VGA_CRT_IC, CrtCtrlIndex);
 }
 
 static void vga_pal_blank(void)
@@ -837,10 +797,10 @@ static void vga_pal_blank(void)
 	int i;
 
 	for (i=0; i<16; i++) {
-		outb_p (i, dac_reg) ;
-		outb_p (0, dac_val) ;
-		outb_p (0, dac_val) ;
-		outb_p (0, dac_val) ;
+		outb_p(i, VGA_PEL_IW);
+		outb_p(0, VGA_PEL_D);
+		outb_p(0, VGA_PEL_D);
+		outb_p(0, VGA_PEL_D);
 	}
 }
 
@@ -1087,12 +1047,15 @@ static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *are
 		width = x2 - dx;
 		height = y2 - dy;
 
+		if (sx + dx < old_dx || sy + dy < old_dy)
+			return;
+
 		/* update sx1,sy1 */
 		sx += (dx - old_dx);
 		sy += (dy - old_dy);
 
 		/* the source must be completely inside the virtual screen */
-		if (sx < 0 || sy < 0 || (sx + width) > vxres || (sy + height) > vyres)
+		if (sx + width > vxres || sy + height > vyres)
 			return;
 
 		switch (info->fix.type) {
@@ -1482,6 +1445,7 @@ static void __exit vga16fb_exit(void)
 	platform_driver_unregister(&vga16fb_driver);
 }
 
+MODULE_DESCRIPTION("Legacy VGA framebuffer device driver");
 MODULE_LICENSE("GPL");
 module_init(vga16fb_init);
 module_exit(vga16fb_exit);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ccb78f66c2b6..48399e134c0d 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -788,8 +788,6 @@ config WATCHDOG_RIO
 	  machines.  The watchdog timeout period is normally one minute but
 	  can be changed with a boot-time parameter.
 
-# V850 Architecture
-
 # XTENSA Architecture
 
 #
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 25b352b664d9..edd305a64e63 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -119,8 +119,6 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
 
 # SPARC64 Architecture
 
-# V850 Architecture
-
 # XTENSA Architecture
 
 # Architecture Independant
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 3da712cc7708..5290552d2ef7 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -15,7 +15,6 @@
 #include <linux/zorro.h>
 #include <linux/stat.h>
 #include <linux/string.h>
-#include <linux/fs.h>
 
 #include "zorro.h"
 