Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/sata_mv.c | 2
-rw-r--r--  drivers/atm/ambassador.c | 4
-rw-r--r--  drivers/atm/zatm.c | 4
-rw-r--r--  drivers/block/Kconfig | 6
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/loop.c | 11
-rw-r--r--  drivers/block/pktcdvd.c | 1
-rw-r--r--  drivers/block/xsysace.c | 1164
-rw-r--r--  drivers/block/z2ram.c | 4
-rw-r--r--  drivers/char/Kconfig | 10
-rw-r--r--  drivers/char/apm-emulation.c | 12
-rw-r--r--  drivers/char/cyclades.c | 367
-rw-r--r--  drivers/char/drm/drm_stub.c | 2
-rw-r--r--  drivers/char/drm/sis_mm.c | 2
-rw-r--r--  drivers/char/hvc_console.c | 1
-rw-r--r--  drivers/char/isicom.c | 93
-rw-r--r--  drivers/char/istallion.c | 9
-rw-r--r--  drivers/char/moxa.c | 37
-rw-r--r--  drivers/char/riscom8.c | 12
-rw-r--r--  drivers/char/specialix.c | 16
-rw-r--r--  drivers/char/stallion.c | 5
-rw-r--r--  drivers/char/vt.c | 35
-rw-r--r--  drivers/char/watchdog/Kconfig | 9
-rw-r--r--  drivers/char/watchdog/Makefile | 3
-rw-r--r--  drivers/char/watchdog/at32ap700x_wdt.c | 386
-rw-r--r--  drivers/char/watchdog/ep93xx_wdt.c | 4
-rw-r--r--  drivers/char/watchdog/mixcomwd.c | 127
-rw-r--r--  drivers/char/watchdog/pnx4008_wdt.c | 4
-rw-r--r--  drivers/char/watchdog/s3c2410_wdt.c | 41
-rw-r--r--  drivers/edac/edac_mc.c | 1
-rw-r--r--  drivers/hwmon/lm70.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c | 2
-rw-r--r--  drivers/i2c/chips/Kconfig | 10
-rw-r--r--  drivers/i2c/chips/Makefile | 1
-rw-r--r--  drivers/i2c/chips/menelaus.c | 1281
-rw-r--r--  drivers/ide/ide-probe.c | 4
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 3
-rw-r--r--  drivers/ieee1394/nodemgr.c | 1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 4
-rw-r--r--  drivers/input/gameport/gameport.c | 1
-rw-r--r--  drivers/input/serio/serio.c | 1
-rw-r--r--  drivers/input/touchscreen/ucb1400_ts.c | 1
-rw-r--r--  drivers/isdn/Kconfig | 15
-rw-r--r--  drivers/isdn/capi/Kconfig | 7
-rw-r--r--  drivers/isdn/capi/capi.c | 2
-rw-r--r--  drivers/isdn/capi/kcapi.c | 6
-rw-r--r--  drivers/isdn/capi/kcapi_proc.c | 28
-rw-r--r--  drivers/isdn/hardware/Kconfig | 1
-rw-r--r--  drivers/isdn/hardware/avm/Kconfig | 23
-rw-r--r--  drivers/isdn/hardware/eicon/Kconfig | 22
-rw-r--r--  drivers/isdn/hardware/eicon/idifunc.c | 1
-rw-r--r--  drivers/isdn/hisax/bkm_a4t.c | 108
-rw-r--r--  drivers/isdn/hisax/config.c | 243
-rw-r--r--  drivers/isdn/hisax/enternow_pci.c | 165
-rw-r--r--  drivers/isdn/hisax/hfc_pci.c | 191
-rw-r--r--  drivers/isdn/hisax/nj_s.c | 194
-rw-r--r--  drivers/isdn/hisax/nj_u.c | 167
-rw-r--r--  drivers/isdn/hisax/sedlbauer.c | 8
-rw-r--r--  drivers/isdn/i4l/Kconfig | 7
-rw-r--r--  drivers/kvm/Kconfig | 9
-rw-r--r--  drivers/kvm/kvm.h | 116
-rw-r--r--  drivers/kvm/kvm_main.c | 456
-rw-r--r--  drivers/kvm/mmu.c | 292
-rw-r--r--  drivers/kvm/paging_tmpl.h | 273
-rw-r--r--  drivers/kvm/svm.c | 59
-rw-r--r--  drivers/kvm/svm.h | 3
-rw-r--r--  drivers/kvm/vmx.c | 652
-rw-r--r--  drivers/kvm/x86_emulate.c | 44
-rw-r--r--  drivers/macintosh/therm_adt746x.c | 1
-rw-r--r--  drivers/macintosh/windfarm_core.c | 1
-rw-r--r--  drivers/md/Kconfig | 15
-rw-r--r--  drivers/md/bitmap.c | 169
-rw-r--r--  drivers/md/dm.c | 4
-rw-r--r--  drivers/md/md.c | 71
-rw-r--r--  drivers/md/raid1.c | 3
-rw-r--r--  drivers/md/raid10.c | 3
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_frontend.c | 1
-rw-r--r--  drivers/media/video/cx88/cx88-tvaudio.c | 1
-rw-r--r--  drivers/media/video/msp3400-kthreads.c | 6
-rw-r--r--  drivers/media/video/tvaudio.c | 2
-rw-r--r--  drivers/media/video/video-buf-dvb.c | 1
-rw-r--r--  drivers/media/video/vivi.c | 1
-rw-r--r--  drivers/message/i2o/debug.c | 134
-rw-r--r--  drivers/message/i2o/exec-osm.c | 6
-rw-r--r--  drivers/message/i2o/i2o_block.c | 2
-rw-r--r--  drivers/message/i2o/i2o_config.c | 62
-rw-r--r--  drivers/mfd/ucb1x00-ts.c | 1
-rw-r--r--  drivers/misc/Kconfig | 2
-rw-r--r--  drivers/misc/ibmasm/command.c | 14
-rw-r--r--  drivers/misc/ibmasm/dot_command.c | 10
-rw-r--r--  drivers/misc/ibmasm/dot_command.h | 2
-rw-r--r--  drivers/misc/ibmasm/event.c | 8
-rw-r--r--  drivers/misc/ibmasm/heartbeat.c | 2
-rw-r--r--  drivers/misc/ibmasm/i2o.h | 10
-rw-r--r--  drivers/misc/ibmasm/ibmasm.h | 70
-rw-r--r--  drivers/misc/ibmasm/ibmasmfs.c | 24
-rw-r--r--  drivers/misc/ibmasm/lowlevel.c | 2
-rw-r--r--  drivers/misc/ibmasm/lowlevel.h | 16
-rw-r--r--  drivers/misc/ibmasm/module.c | 10
-rw-r--r--  drivers/misc/ibmasm/r_heartbeat.c | 10
-rw-r--r--  drivers/misc/ibmasm/remote.c | 37
-rw-r--r--  drivers/misc/ibmasm/remote.h | 8
-rw-r--r--  drivers/misc/ibmasm/uart.c | 2
-rw-r--r--  drivers/mmc/card/queue.c | 7
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 3
-rw-r--r--  drivers/mtd/ubi/eba.c | 4
-rw-r--r--  drivers/mtd/ubi/wl.c | 1
-rw-r--r--  drivers/net/atl1/atl1_main.c | 1
-rw-r--r--  drivers/net/eepro100.c | 7
-rw-r--r--  drivers/net/natsemi.c | 2
-rw-r--r--  drivers/net/ne2k-pci.c | 7
-rw-r--r--  drivers/net/r8169.c | 2
-rw-r--r--  drivers/net/tokenring/smctr.c | 6
-rw-r--r--  drivers/net/wan/pc300_drv.c | 2
-rw-r--r--  drivers/net/wan/sbni.c | 7
-rw-r--r--  drivers/net/wireless/airo.c | 3
-rw-r--r--  drivers/net/wireless/libertas/main.c | 1
-rw-r--r--  drivers/parisc/hppb.c | 1
-rw-r--r--  drivers/pcmcia/cs.c | 1
-rw-r--r--  drivers/pcmcia/m8xx_pcmcia.c | 548
-rw-r--r--  drivers/pnp/pnpbios/core.c | 1
-rw-r--r--  drivers/rtc/Kconfig | 64
-rw-r--r--  drivers/rtc/Makefile | 4
-rw-r--r--  drivers/rtc/rtc-at32ap700x.c | 317
-rw-r--r--  drivers/rtc/rtc-dev.c | 2
-rw-r--r--  drivers/rtc/rtc-ds1216.c | 226
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 300
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 917
-rw-r--r--  drivers/rtc/rtc-m48t59.c | 491
-rw-r--r--  drivers/rtc/rtc-rs5c372.c | 95
-rw-r--r--  drivers/s390/block/dcssblk.c | 7
-rw-r--r--  drivers/sbus/char/jsflash.c | 3
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 3
-rw-r--r--  drivers/scsi/scsi_error.c | 3
-rw-r--r--  drivers/serial/Kconfig | 28
-rw-r--r--  drivers/serial/Makefile | 1
-rw-r--r--  drivers/serial/sb1250-duart.c | 972
-rw-r--r--  drivers/spi/Kconfig | 45
-rw-r--r--  drivers/spi/Makefile | 5
-rw-r--r--  drivers/spi/atmel_spi.c | 185
-rw-r--r--  drivers/spi/au1550_spi.c | 9
-rw-r--r--  drivers/spi/mpc52xx_psc_spi.c | 9
-rw-r--r--  drivers/spi/omap2_mcspi.c | 1081
-rw-r--r--  drivers/spi/omap_uwire.c | 9
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 9
-rw-r--r--  drivers/spi/spi.c | 11
-rw-r--r--  drivers/spi/spi_bitbang.c | 8
-rw-r--r--  drivers/spi/spi_imx.c | 24
-rw-r--r--  drivers/spi/spi_lm70llp.c | 361
-rw-r--r--  drivers/spi/spi_mpc83xx.c | 47
-rw-r--r--  drivers/spi/spi_s3c24xx.c | 8
-rw-r--r--  drivers/spi/spi_txx9.c | 474
-rw-r--r--  drivers/spi/spidev.c | 6
-rw-r--r--  drivers/spi/tle62x0.c | 328
-rw-r--r--  drivers/spi/xilinx_spi.c | 434
-rw-r--r--  drivers/telephony/Kconfig | 1
-rw-r--r--  drivers/telephony/ixj.c | 7
-rw-r--r--  drivers/usb/Kconfig | 2
-rw-r--r--  drivers/usb/atm/ueagle-atm.c | 1
-rw-r--r--  drivers/usb/core/hub.c | 1
-rw-r--r--  drivers/usb/gadget/file_storage.c | 3
-rw-r--r--  drivers/usb/misc/auerswald.c | 4
-rw-r--r--  drivers/usb/storage/usb.c | 3
-rw-r--r--  drivers/video/68328fb.c | 2
-rw-r--r--  drivers/video/Kconfig | 6
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/aty/ati_ids.h | 1
-rw-r--r--  drivers/video/aty/atyfb_base.c | 2
-rw-r--r--  drivers/video/aty/radeon_base.c | 2
-rw-r--r--  drivers/video/aty/radeonfb.h | 2
-rw-r--r--  drivers/video/console/Kconfig | 16
-rw-r--r--  drivers/video/console/fbcon.c | 366
-rw-r--r--  drivers/video/controlfb.c | 2
-rw-r--r--  drivers/video/cyblafb.c | 21
-rw-r--r--  drivers/video/epson1355fb.c | 21
-rw-r--r--  drivers/video/fbmem.c | 299
-rw-r--r--  drivers/video/fm2fb.c | 16
-rw-r--r--  drivers/video/gbefb.c | 41
-rw-r--r--  drivers/video/i810/i810.h | 2
-rw-r--r--  drivers/video/intelfb/intelfb.h | 2
-rw-r--r--  drivers/video/logo/Kconfig | 5
-rw-r--r--  drivers/video/logo/Makefile | 2
-rw-r--r--  drivers/video/logo/logo_spe_clut224.ppm | 283
-rw-r--r--  drivers/video/macfb.c | 93
-rw-r--r--  drivers/video/macmodes.c | 5
-rw-r--r--  drivers/video/macmodes.h | 8
-rw-r--r--  drivers/video/matrox/matroxfb_accel.c | 11
-rw-r--r--  drivers/video/matrox/matroxfb_base.c | 4
-rw-r--r--  drivers/video/matrox/matroxfb_base.h | 2
-rw-r--r--  drivers/video/matrox/matroxfb_crtc2.c | 6
-rw-r--r--  drivers/video/matrox/matroxfb_crtc2.h | 2
-rw-r--r--  drivers/video/matrox/matroxfb_maven.c | 9
-rw-r--r--  drivers/video/nvidia/nv_hw.c | 62
-rw-r--r--  drivers/video/nvidia/nv_setup.c | 12
-rw-r--r--  drivers/video/nvidia/nv_type.h | 1
-rw-r--r--  drivers/video/nvidia/nvidia.c | 9
-rw-r--r--  drivers/video/offb.c | 2
-rw-r--r--  drivers/video/omap/Kconfig | 58
-rw-r--r--  drivers/video/omap/Makefile | 29
-rw-r--r--  drivers/video/omap/blizzard.c | 1568
-rw-r--r--  drivers/video/omap/dispc.c | 1502
-rw-r--r--  drivers/video/omap/dispc.h | 43
-rw-r--r--  drivers/video/omap/hwa742.c | 1077
-rw-r--r--  drivers/video/omap/lcd_h3.c | 141
-rw-r--r--  drivers/video/omap/lcd_h4.c | 117
-rw-r--r--  drivers/video/omap/lcd_inn1510.c | 124
-rw-r--r--  drivers/video/omap/lcd_inn1610.c | 150
-rw-r--r--  drivers/video/omap/lcd_osk.c | 144
-rw-r--r--  drivers/video/omap/lcd_palmte.c | 123
-rw-r--r--  drivers/video/omap/lcd_palmtt.c | 127
-rw-r--r--  drivers/video/omap/lcd_palmz71.c | 123
-rw-r--r--  drivers/video/omap/lcd_sx1.c | 334
-rw-r--r--  drivers/video/omap/lcdc.c | 893
-rw-r--r--  drivers/video/omap/lcdc.h | 7
-rw-r--r--  drivers/video/omap/omapfb_main.c | 1941
-rw-r--r--  drivers/video/omap/rfbi.c | 588
-rw-r--r--  drivers/video/omap/sossi.c | 686
-rw-r--r--  drivers/video/platinumfb.c | 2
-rw-r--r--  drivers/video/pm2fb.c | 202
-rw-r--r--  drivers/video/pm3fb.c | 270
-rw-r--r--  drivers/video/ps3fb.c | 1
-rw-r--r--  drivers/video/pvr2fb.c | 7
-rw-r--r--  drivers/video/q40fb.c | 2
-rw-r--r--  drivers/video/riva/riva_hw.c | 7
-rw-r--r--  drivers/video/sgivwfb.c | 2
-rw-r--r--  drivers/video/sis/sis.h | 2
-rw-r--r--  drivers/video/sis/sis_main.c | 6
-rw-r--r--  drivers/video/tgafb.c | 2
-rw-r--r--  drivers/video/tridentfb.c | 30
-rw-r--r--  drivers/video/tx3912fb.c | 2
-rw-r--r--  drivers/video/vt8623fb.c | 42
-rw-r--r--  drivers/w1/w1.c | 1
232 files changed, 23288 insertions(+), 2984 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 5d576435fccc..fb8a749423ca 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2666,7 +2666,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mv_print_info(host);
 
 	pci_set_master(pdev);
-	pci_set_mwi(pdev);
+	pci_try_set_mwi(pdev);
 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
 }
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 59651abfa4f8..b34b3829f6a9 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1040,7 +1040,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
   struct atm_qos * qos;
   struct atm_trafprm * txtp;
   struct atm_trafprm * rxtp;
-  u16 tx_rate_bits;
+  u16 tx_rate_bits = -1; // hush gcc
   u16 tx_vc_bits = -1; // hush gcc
   u16 tx_frame_bits = -1; // hush gcc
 
@@ -1096,6 +1096,8 @@ static int amb_open (struct atm_vcc * atm_vcc)
 	r = round_up;
       }
       error = make_rate (pcr, r, &tx_rate_bits, NULL);
+      if (error)
+	return error;
       tx_vc_bits = TX_UBR_CAPPED;
       tx_frame_bits = TX_FRAME_CAPPED;
     }
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 020a87a476c8..58583c6ac5be 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -915,7 +915,7 @@ static int open_tx_first(struct atm_vcc *vcc)
 	unsigned long flags;
 	u32 *loop;
 	unsigned short chan;
-	int pcr,unlimited;
+	int unlimited;
 
 	DPRINTK("open_tx_first\n");
 	zatm_dev = ZATM_DEV(vcc->dev);
@@ -936,6 +936,8 @@ static int open_tx_first(struct atm_vcc *vcc)
 	    vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
 	if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
 	else {
+		int uninitialized_var(pcr);
+
 		if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
 		if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
 		    vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index c5a61571a076..8f65b88cf711 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -421,4 +421,10 @@ config SUNVDC
 
 source "drivers/s390/block/Kconfig"
 
+config XILINX_SYSACE
+	tristate "Xilinx SystemACE support"
+	depends on 4xx
+	help
+	  Include support for the Xilinx SystemACE CompactFlash interface
+
 endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 7926be8c9fb7..9ee08ab4ffa8 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_BLK_DEV_XD) += xd.o
 obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
 obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
 obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
+obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
 obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
 obj-$(CONFIG_SUNVDC) += sunvdc.o
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4503290da407..e425daa1eac3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,6 +68,7 @@
 #include <linux/loop.h>
 #include <linux/compat.h>
 #include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>	/* for invalidate_bdev() */
 #include <linux/completion.h>
@@ -600,13 +601,6 @@ static int loop_thread(void *data)
 	struct loop_device *lo = data;
 	struct bio *bio;
 
-	/*
-	 * loop can be used in an encrypted device,
-	 * hence, it mustn't be stopped at all
-	 * because it could be indirectly used during suspension
-	 */
-	current->flags |= PF_NOFREEZE;
-
 	set_user_nice(current, -20);
 
 	while (!kthread_should_stop() || lo->lo_bio) {
@@ -1574,8 +1568,7 @@ static void __exit loop_exit(void)
 		loop_del_one(lo);
 
 	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
-	if (unregister_blkdev(LOOP_MAJOR, "loop"))
-		printk(KERN_WARNING "loop: cannot unregister blkdev\n");
+	unregister_blkdev(LOOP_MAJOR, "loop");
 }
 
 module_init(loop_init);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7c294a40002e..31be33e4f119 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1593,6 +1593,7 @@ static int kcdrwd(void *foobar)
 	long min_sleep_time, residue;
 
 	set_user_nice(current, -20);
+	set_freezable();
 
 	for (;;) {
 		DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
new file mode 100644
index 000000000000..732ec63b6e9c
--- /dev/null
+++ b/drivers/block/xsysace.c
@@ -0,0 +1,1164 @@
1/*
2 * Xilinx SystemACE device driver
3 *
4 * Copyright 2007 Secret Lab Technologies Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11/*
12 * The SystemACE chip is designed to configure FPGAs by loading an FPGA
13 * bitstream from a file on a CF card and squirting it into FPGAs connected
14 * to the SystemACE JTAG chain. It also has the advantage of providing an
15 * MPU interface which can be used to control the FPGA configuration process
16 * and to use the attached CF card for general purpose storage.
17 *
18 * This driver is a block device driver for the SystemACE.
19 *
20 * Initialization:
21 * The driver registers itself as a platform_device driver at module
22 * load time. The platform bus will take care of calling the
23 * ace_probe() method for all SystemACE instances in the system. Any
24 * number of SystemACE instances are supported. ace_probe() calls
25 * ace_setup() which initializes all data structures, reads the CF
26 * id structure and registers the device.
27 *
28 * Processing:
29 * Just about all of the heavy lifting in this driver is performed by
30 * a Finite State Machine (FSM). The driver needs to wait on a number
31 * of events; some raised by interrupts, some which need to be polled
32 * for. Describing all of the behaviour in a FSM seems to be the
33 * easiest way to keep the complexity low and make it easy to
34 * understand what the driver is doing. If the block ops or the
35 * request function need to interact with the hardware, then they
36 * simply need to flag the request and kick off FSM processing.
37 *
38 * The FSM itself is atomic-safe code which can be run from any
39 * context. The general process flow is:
40 * 1. obtain the ace->lock spinlock.
41 * 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
42 * cleared.
43 * 3. release the lock.
44 *
45 * Individual states do not sleep in any way. If a condition needs to
46 * be waited for then the state must clear the fsm_continue flag and
47 * either schedule the FSM to be run again at a later time, or expect
48 * an interrupt to call the FSM when the desired condition is met.
49 *
50 * In normal operation, the FSM is processed at interrupt context
51 * either when the driver's tasklet is scheduled, or when an irq is
52 * raised by the hardware. The tasklet can be scheduled at any time.
53 * The request method in particular schedules the tasklet when a new
54 * request has been indicated by the block layer. Once started, the
55 * FSM proceeds as far as it can processing the request until it
56 * needs to wait on a hardware event. At this point, it must yield execution.
57 *
58 * A state has two options when yielding execution:
59 * 1. ace_fsm_yield()
60 * - Call if need to poll for event.
61 * - clears the fsm_continue flag to exit the processing loop
62 * - reschedules the tasklet to run again as soon as possible
63 * 2. ace_fsm_yieldirq()
64 * - Call if an irq is expected from the HW
65 * - clears the fsm_continue flag to exit the processing loop
66 * - does not reschedule the tasklet so the FSM will not be processed
67 * again until an irq is received.
68 * After calling a yield function, the state must return control back
69 * to the FSM main loop.
70 *
71 * Additionally, the driver maintains a kernel timer which can process
72 * the FSM. If the FSM gets stalled, typically due to a missed
73 * interrupt, then the kernel timer will expire and the driver can
74 * continue where it left off.
75 *
76 * To Do:
77 * - Add FPGA configuration control interface.
78 * - Request major number from lanana
79 */
80
81#undef DEBUG
82
83#include <linux/module.h>
84#include <linux/ctype.h>
85#include <linux/init.h>
86#include <linux/interrupt.h>
87#include <linux/errno.h>
88#include <linux/kernel.h>
89#include <linux/delay.h>
90#include <linux/slab.h>
91#include <linux/blkdev.h>
92#include <linux/hdreg.h>
93#include <linux/platform_device.h>
94
95MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
96MODULE_DESCRIPTION("Xilinx SystemACE device driver");
97MODULE_LICENSE("GPL");
98
99/* SystemACE register definitions */
100#define ACE_BUSMODE (0x00)
101
102#define ACE_STATUS (0x04)
103#define ACE_STATUS_CFGLOCK (0x00000001)
104#define ACE_STATUS_MPULOCK (0x00000002)
105#define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */
106#define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */
107#define ACE_STATUS_CFDETECT (0x00000010)
108#define ACE_STATUS_DATABUFRDY (0x00000020)
109#define ACE_STATUS_DATABUFMODE (0x00000040)
110#define ACE_STATUS_CFGDONE (0x00000080)
111#define ACE_STATUS_RDYFORCFCMD (0x00000100)
112#define ACE_STATUS_CFGMODEPIN (0x00000200)
113#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
114#define ACE_STATUS_CFBSY (0x00020000)
115#define ACE_STATUS_CFRDY (0x00040000)
116#define ACE_STATUS_CFDWF (0x00080000)
117#define ACE_STATUS_CFDSC (0x00100000)
118#define ACE_STATUS_CFDRQ (0x00200000)
119#define ACE_STATUS_CFCORR (0x00400000)
120#define ACE_STATUS_CFERR (0x00800000)
121
122#define ACE_ERROR (0x08)
123#define ACE_CFGLBA (0x0c)
124#define ACE_MPULBA (0x10)
125
126#define ACE_SECCNTCMD (0x14)
127#define ACE_SECCNTCMD_RESET (0x0100)
128#define ACE_SECCNTCMD_IDENTIFY (0x0200)
129#define ACE_SECCNTCMD_READ_DATA (0x0300)
130#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
131#define ACE_SECCNTCMD_ABORT (0x0600)
132
133#define ACE_VERSION (0x16)
134#define ACE_VERSION_REVISION_MASK (0x00FF)
135#define ACE_VERSION_MINOR_MASK (0x0F00)
136#define ACE_VERSION_MAJOR_MASK (0xF000)
137
138#define ACE_CTRL (0x18)
139#define ACE_CTRL_FORCELOCKREQ (0x0001)
140#define ACE_CTRL_LOCKREQ (0x0002)
141#define ACE_CTRL_FORCECFGADDR (0x0004)
142#define ACE_CTRL_FORCECFGMODE (0x0008)
143#define ACE_CTRL_CFGMODE (0x0010)
144#define ACE_CTRL_CFGSTART (0x0020)
145#define ACE_CTRL_CFGSEL (0x0040)
146#define ACE_CTRL_CFGRESET (0x0080)
147#define ACE_CTRL_DATABUFRDYIRQ (0x0100)
148#define ACE_CTRL_ERRORIRQ (0x0200)
149#define ACE_CTRL_CFGDONEIRQ (0x0400)
150#define ACE_CTRL_RESETIRQ (0x0800)
151#define ACE_CTRL_CFGPROG (0x1000)
152#define ACE_CTRL_CFGADDR_MASK (0xe000)
153
154#define ACE_FATSTAT (0x1c)
155
156#define ACE_NUM_MINORS 16
157#define ACE_SECTOR_SIZE (512)
158#define ACE_FIFO_SIZE (32)
159#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
160
161struct ace_reg_ops;
162
163struct ace_device {
164 /* driver state data */
165 int id;
166 int media_change;
167 int users;
168 struct list_head list;
169
170 /* finite state machine data */
171 struct tasklet_struct fsm_tasklet;
172 uint fsm_task; /* Current activity (ACE_TASK_*) */
173 uint fsm_state; /* Current state (ACE_FSM_STATE_*) */
174 uint fsm_continue_flag; /* cleared to exit FSM mainloop */
175 uint fsm_iter_num;
176 struct timer_list stall_timer;
177
178 /* Transfer state/result, use for both id and block request */
179 struct request *req; /* request being processed */
180 void *data_ptr; /* pointer to I/O buffer */
181 int data_count; /* number of buffers remaining */
182 int data_result; /* Result of transfer; 0 := success */
183
184 int id_req_count; /* count of id requests */
185 int id_result;
186 struct completion id_completion; /* used when id req finishes */
187 int in_irq;
188
189 /* Details of hardware device */
190 unsigned long physaddr;
191 void *baseaddr;
192 int irq;
193 int bus_width; /* 0 := 8 bit; 1 := 16 bit */
194 struct ace_reg_ops *reg_ops;
195 int lock_count;
196
197 /* Block device data structures */
198 spinlock_t lock;
199 struct device *dev;
200 struct request_queue *queue;
201 struct gendisk *gd;
202
203 /* Inserted CF card parameters */
204 struct hd_driveid cf_id;
205};
206
207static int ace_major;
208
209/* ---------------------------------------------------------------------
210 * Low level register access
211 */
212
213struct ace_reg_ops {
214 u16(*in) (struct ace_device * ace, int reg);
215 void (*out) (struct ace_device * ace, int reg, u16 val);
216 void (*datain) (struct ace_device * ace);
217 void (*dataout) (struct ace_device * ace);
218};
219
220/* 8 Bit bus width */
221static u16 ace_in_8(struct ace_device *ace, int reg)
222{
223 void *r = ace->baseaddr + reg;
224 return in_8(r) | (in_8(r + 1) << 8);
225}
226
227static void ace_out_8(struct ace_device *ace, int reg, u16 val)
228{
229 void *r = ace->baseaddr + reg;
230 out_8(r, val);
231 out_8(r + 1, val >> 8);
232}
233
234static void ace_datain_8(struct ace_device *ace)
235{
236 void *r = ace->baseaddr + 0x40;
237 u8 *dst = ace->data_ptr;
238 int i = ACE_FIFO_SIZE;
239 while (i--)
240 *dst++ = in_8(r++);
241 ace->data_ptr = dst;
242}
243
244static void ace_dataout_8(struct ace_device *ace)
245{
246 void *r = ace->baseaddr + 0x40;
247 u8 *src = ace->data_ptr;
248 int i = ACE_FIFO_SIZE;
249 while (i--)
250 out_8(r++, *src++);
251 ace->data_ptr = src;
252}
253
254static struct ace_reg_ops ace_reg_8_ops = {
255 .in = ace_in_8,
256 .out = ace_out_8,
257 .datain = ace_datain_8,
258 .dataout = ace_dataout_8,
259};
260
261/* 16 bit big endian bus attachment */
262static u16 ace_in_be16(struct ace_device *ace, int reg)
263{
264 return in_be16(ace->baseaddr + reg);
265}
266
267static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
268{
269 out_be16(ace->baseaddr + reg, val);
270}
271
272static void ace_datain_be16(struct ace_device *ace)
273{
274 int i = ACE_FIFO_SIZE / 2;
275 u16 *dst = ace->data_ptr;
276 while (i--)
277 *dst++ = in_le16(ace->baseaddr + 0x40);
278 ace->data_ptr = dst;
279}
280
281static void ace_dataout_be16(struct ace_device *ace)
282{
283 int i = ACE_FIFO_SIZE / 2;
284 u16 *src = ace->data_ptr;
285 while (i--)
286 out_le16(ace->baseaddr + 0x40, *src++);
287 ace->data_ptr = src;
288}
289
290/* 16 bit little endian bus attachment */
291static u16 ace_in_le16(struct ace_device *ace, int reg)
292{
293 return in_le16(ace->baseaddr + reg);
294}
295
296static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
297{
298 out_le16(ace->baseaddr + reg, val);
299}
300
301static void ace_datain_le16(struct ace_device *ace)
302{
303 int i = ACE_FIFO_SIZE / 2;
304 u16 *dst = ace->data_ptr;
305 while (i--)
306 *dst++ = in_be16(ace->baseaddr + 0x40);
307 ace->data_ptr = dst;
308}
309
310static void ace_dataout_le16(struct ace_device *ace)
311{
312 int i = ACE_FIFO_SIZE / 2;
313 u16 *src = ace->data_ptr;
314 while (i--)
315 out_be16(ace->baseaddr + 0x40, *src++);
316 ace->data_ptr = src;
317}
318
319static struct ace_reg_ops ace_reg_be16_ops = {
320 .in = ace_in_be16,
321 .out = ace_out_be16,
322 .datain = ace_datain_be16,
323 .dataout = ace_dataout_be16,
324};
325
326static struct ace_reg_ops ace_reg_le16_ops = {
327 .in = ace_in_le16,
328 .out = ace_out_le16,
329 .datain = ace_datain_le16,
330 .dataout = ace_dataout_le16,
331};
332
333static inline u16 ace_in(struct ace_device *ace, int reg)
334{
335 return ace->reg_ops->in(ace, reg);
336}
337
338static inline u32 ace_in32(struct ace_device *ace, int reg)
339{
340 return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
341}
342
343static inline void ace_out(struct ace_device *ace, int reg, u16 val)
344{
345 ace->reg_ops->out(ace, reg, val);
346}
347
348static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
349{
350 ace_out(ace, reg, val);
351 ace_out(ace, reg + 2, val >> 16);
352}
353
354/* ---------------------------------------------------------------------
355 * Debug support functions
356 */
357
358#if defined(DEBUG)
359static void ace_dump_mem(void *base, int len)
360{
361 const char *ptr = base;
362 int i, j;
363
364 for (i = 0; i < len; i += 16) {
365 printk(KERN_INFO "%.8x:", i);
366 for (j = 0; j < 16; j++) {
367 if (!(j % 4))
368 printk(" ");
369 printk("%.2x", ptr[i + j]);
370 }
371 printk(" ");
372 for (j = 0; j < 16; j++)
373 printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
374 printk("\n");
375 }
376}
377#else
378static inline void ace_dump_mem(void *base, int len)
379{
380}
381#endif
382
383static void ace_dump_regs(struct ace_device *ace)
384{
385 dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
386 " status:%.8x mpu_lba:%.8x busmode:%4x\n"
387 " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
388 ace_in32(ace, ACE_CTRL),
389 ace_in(ace, ACE_SECCNTCMD),
390 ace_in(ace, ACE_VERSION),
391 ace_in32(ace, ACE_STATUS),
392 ace_in32(ace, ACE_MPULBA),
393 ace_in(ace, ACE_BUSMODE),
394 ace_in32(ace, ACE_ERROR),
395 ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
396}
397
398void ace_fix_driveid(struct hd_driveid *id)
399{
400#if defined(__BIG_ENDIAN)
401 u16 *buf = (void *)id;
402 int i;
403
404 /* All half words have wrong byte order; swap the bytes */
405 for (i = 0; i < sizeof(struct hd_driveid); i += 2, buf++)
406 *buf = le16_to_cpu(*buf);
407
408 /* Some of the data values are 32bit; swap the half words */
409 id->lba_capacity = ((id->lba_capacity >> 16) & 0x0000FFFF) |
410 ((id->lba_capacity << 16) & 0xFFFF0000);
411 id->spg = ((id->spg >> 16) & 0x0000FFFF) |
412 ((id->spg << 16) & 0xFFFF0000);
413#endif
414}
415
416/* ---------------------------------------------------------------------
417 * Finite State Machine (FSM) implementation
418 */
419
420/* FSM tasks; used to direct state transitions */
421#define ACE_TASK_IDLE 0
422#define ACE_TASK_IDENTIFY 1
423#define ACE_TASK_READ 2
424#define ACE_TASK_WRITE 3
425#define ACE_FSM_NUM_TASKS 4
426
427/* FSM state definitions */
428#define ACE_FSM_STATE_IDLE 0
429#define ACE_FSM_STATE_REQ_LOCK 1
430#define ACE_FSM_STATE_WAIT_LOCK 2
431#define ACE_FSM_STATE_WAIT_CFREADY 3
432#define ACE_FSM_STATE_IDENTIFY_PREPARE 4
433#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5
434#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6
435#define ACE_FSM_STATE_REQ_PREPARE 7
436#define ACE_FSM_STATE_REQ_TRANSFER 8
437#define ACE_FSM_STATE_REQ_COMPLETE 9
438#define ACE_FSM_STATE_ERROR 10
439#define ACE_FSM_NUM_STATES 11
440
441/* Set flag to exit FSM loop and reschedule tasklet */
442static inline void ace_fsm_yield(struct ace_device *ace)
443{
444 dev_dbg(ace->dev, "ace_fsm_yield()\n");
445 tasklet_schedule(&ace->fsm_tasklet);
446 ace->fsm_continue_flag = 0;
447}
448
449/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
450static inline void ace_fsm_yieldirq(struct ace_device *ace)
451{
452 dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
453
454 if (ace->irq == NO_IRQ)
455 /* No IRQ assigned, so need to poll */
456 tasklet_schedule(&ace->fsm_tasklet);
457 ace->fsm_continue_flag = 0;
458}
459
460/* Get the next read/write request; ending requests that we don't handle */
461struct request *ace_get_next_request(request_queue_t * q)
462{
463 struct request *req;
464
465 while ((req = elv_next_request(q)) != NULL) {
466 if (blk_fs_request(req))
467 break;
468 end_request(req, 0);
469 }
470 return req;
471}
472
473static void ace_fsm_dostate(struct ace_device *ace)
474{
475 struct request *req;
476 u32 status;
477 u16 val;
478 int count;
479 int i;
480
481#if defined(DEBUG)
482 dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
483 ace->fsm_state, ace->id_req_count);
484#endif
485
486 switch (ace->fsm_state) {
487 case ACE_FSM_STATE_IDLE:
488 /* See if there is anything to do */
489 if (ace->id_req_count || ace_get_next_request(ace->queue)) {
490 ace->fsm_iter_num++;
491 ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
492 mod_timer(&ace->stall_timer, jiffies + HZ);
493 if (!timer_pending(&ace->stall_timer))
494 add_timer(&ace->stall_timer);
495 break;
496 }
497 del_timer(&ace->stall_timer);
498 ace->fsm_continue_flag = 0;
499 break;
500
501 case ACE_FSM_STATE_REQ_LOCK:
502 if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
503 /* Already have the lock, jump to next state */
504 ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
505 break;
506 }
507
508 /* Request the lock */
509 val = ace_in(ace, ACE_CTRL);
510 ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
511 ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
512 break;
513
514 case ACE_FSM_STATE_WAIT_LOCK:
515 if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
516 /* got the lock; move to next state */
517 ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
518 break;
519 }
520
521 /* wait a bit for the lock */
522 ace_fsm_yield(ace);
523 break;
524
525 case ACE_FSM_STATE_WAIT_CFREADY:
526 status = ace_in32(ace, ACE_STATUS);
527 if (!(status & ACE_STATUS_RDYFORCFCMD) ||
528 (status & ACE_STATUS_CFBSY)) {
529 /* CF card isn't ready; it needs to be polled */
530 ace_fsm_yield(ace);
531 break;
532 }
533
534 /* Device is ready for command; determine what to do next */
535 if (ace->id_req_count)
536 ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
537 else
538 ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
539 break;
540
541 case ACE_FSM_STATE_IDENTIFY_PREPARE:
542 /* Send identify command */
543 ace->fsm_task = ACE_TASK_IDENTIFY;
544 ace->data_ptr = &ace->cf_id;
545 ace->data_count = ACE_BUF_PER_SECTOR;
546 ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
547
548 /* As per datasheet, put config controller in reset */
549 val = ace_in(ace, ACE_CTRL);
550 ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
551
552 /* irq handler takes over from this point; wait for the
553 * transfer to complete */
554 ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
555 ace_fsm_yieldirq(ace);
556 break;
557
558 case ACE_FSM_STATE_IDENTIFY_TRANSFER:
559 /* Check that the sysace is ready to receive data */
560 status = ace_in32(ace, ACE_STATUS);
561 if (status & ACE_STATUS_CFBSY) {
562 dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
563 ace->fsm_task, ace->fsm_iter_num,
564 ace->data_count);
565 ace_fsm_yield(ace);
566 break;
567 }
568 if (!(status & ACE_STATUS_DATABUFRDY)) {
569 ace_fsm_yield(ace);
570 break;
571 }
572
573 /* Transfer the next buffer */
574 ace->reg_ops->datain(ace);
575 ace->data_count--;
576
577	/* If there are still buffers to be transferred; jump out here */
578 if (ace->data_count != 0) {
579 ace_fsm_yieldirq(ace);
580 break;
581 }
582
583 /* transfer finished; kick state machine */
584 dev_dbg(ace->dev, "identify finished\n");
585 ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
586 break;
587
588 case ACE_FSM_STATE_IDENTIFY_COMPLETE:
589 ace_fix_driveid(&ace->cf_id);
590 ace_dump_mem(&ace->cf_id, 512); /* Debug: Dump out disk ID */
591
592 if (ace->data_result) {
593		/* Error occurred, disable the disk */
594 ace->media_change = 1;
595 set_capacity(ace->gd, 0);
596 dev_err(ace->dev, "error fetching CF id (%i)\n",
597 ace->data_result);
598 } else {
599 ace->media_change = 0;
600
601 /* Record disk parameters */
602 set_capacity(ace->gd, ace->cf_id.lba_capacity);
603 dev_info(ace->dev, "capacity: %i sectors\n",
604 ace->cf_id.lba_capacity);
605 }
606
607 /* We're done, drop to IDLE state and notify waiters */
608 ace->fsm_state = ACE_FSM_STATE_IDLE;
609 ace->id_result = ace->data_result;
610 while (ace->id_req_count) {
611 complete(&ace->id_completion);
612 ace->id_req_count--;
613 }
614 break;
615
616 case ACE_FSM_STATE_REQ_PREPARE:
617 req = ace_get_next_request(ace->queue);
618 if (!req) {
619 ace->fsm_state = ACE_FSM_STATE_IDLE;
620 break;
621 }
622
623 /* Okay, it's a data request, set it up for transfer */
624 dev_dbg(ace->dev,
625 "request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n",
626 req->sector, req->hard_nr_sectors,
627 req->current_nr_sectors, rq_data_dir(req));
628
629 ace->req = req;
630 ace->data_ptr = req->buffer;
631 ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
632 ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
633
634 count = req->hard_nr_sectors;
635 if (rq_data_dir(req)) {
636 /* Kick off write request */
637 dev_dbg(ace->dev, "write data\n");
638 ace->fsm_task = ACE_TASK_WRITE;
639 ace_out(ace, ACE_SECCNTCMD,
640 count | ACE_SECCNTCMD_WRITE_DATA);
641 } else {
642 /* Kick off read request */
643 dev_dbg(ace->dev, "read data\n");
644 ace->fsm_task = ACE_TASK_READ;
645 ace_out(ace, ACE_SECCNTCMD,
646 count | ACE_SECCNTCMD_READ_DATA);
647 }
648
649 /* As per datasheet, put config controller in reset */
650 val = ace_in(ace, ACE_CTRL);
651 ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
652
653 /* Move to the transfer state. The systemace will raise
654 * an interrupt once there is something to do
655 */
656 ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
657 if (ace->fsm_task == ACE_TASK_READ)
658 ace_fsm_yieldirq(ace); /* wait for data ready */
659 break;
660
661 case ACE_FSM_STATE_REQ_TRANSFER:
662 /* Check that the sysace is ready to receive data */
663 status = ace_in32(ace, ACE_STATUS);
664 if (status & ACE_STATUS_CFBSY) {
665 dev_dbg(ace->dev,
666 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
667 ace->fsm_task, ace->fsm_iter_num,
668 ace->req->current_nr_sectors * 16,
669 ace->data_count, ace->in_irq);
670 ace_fsm_yield(ace); /* need to poll CFBSY bit */
671 break;
672 }
673 if (!(status & ACE_STATUS_DATABUFRDY)) {
674 dev_dbg(ace->dev,
675 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
676 ace->fsm_task, ace->fsm_iter_num,
677 ace->req->current_nr_sectors * 16,
678 ace->data_count, ace->in_irq);
679 ace_fsm_yieldirq(ace);
680 break;
681 }
682
683 /* Transfer the next buffer */
684 i = 16;
685 if (ace->fsm_task == ACE_TASK_WRITE)
686 ace->reg_ops->dataout(ace);
687 else
688 ace->reg_ops->datain(ace);
689 ace->data_count--;
690
691	/* If there are still buffers to be transferred; jump out here */
692 if (ace->data_count != 0) {
693 ace_fsm_yieldirq(ace);
694 break;
695 }
696
697 /* bio finished; is there another one? */
698 i = ace->req->current_nr_sectors;
699 if (end_that_request_first(ace->req, 1, i)) {
700 /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
701 * ace->req->hard_nr_sectors,
702 * ace->req->current_nr_sectors);
703 */
704 ace->data_ptr = ace->req->buffer;
705 ace->data_count = ace->req->current_nr_sectors * 16;
706 ace_fsm_yieldirq(ace);
707 break;
708 }
709
710 ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
711 break;
712
713 case ACE_FSM_STATE_REQ_COMPLETE:
714 /* Complete the block request */
715 blkdev_dequeue_request(ace->req);
716 end_that_request_last(ace->req, 1);
717 ace->req = NULL;
718
719 /* Finished request; go to idle state */
720 ace->fsm_state = ACE_FSM_STATE_IDLE;
721 break;
722
723 default:
724 ace->fsm_state = ACE_FSM_STATE_IDLE;
725 break;
726 }
727}
728
729static void ace_fsm_tasklet(unsigned long data)
730{
731 struct ace_device *ace = (void *)data;
732 unsigned long flags;
733
734 spin_lock_irqsave(&ace->lock, flags);
735
736 /* Loop over state machine until told to stop */
737 ace->fsm_continue_flag = 1;
738 while (ace->fsm_continue_flag)
739 ace_fsm_dostate(ace);
740
741 spin_unlock_irqrestore(&ace->lock, flags);
742}
743
744static void ace_stall_timer(unsigned long data)
745{
746 struct ace_device *ace = (void *)data;
747 unsigned long flags;
748
749 dev_warn(ace->dev,
750 "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
751 ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
752 ace->data_count);
753 spin_lock_irqsave(&ace->lock, flags);
754
755 /* Rearm the stall timer *before* entering FSM (which may then
756 * delete the timer) */
757 mod_timer(&ace->stall_timer, jiffies + HZ);
758
759 /* Loop over state machine until told to stop */
760 ace->fsm_continue_flag = 1;
761 while (ace->fsm_continue_flag)
762 ace_fsm_dostate(ace);
763
764 spin_unlock_irqrestore(&ace->lock, flags);
765}
766
767/* ---------------------------------------------------------------------
768 * Interrupt handling routines
769 */
770static int ace_interrupt_checkstate(struct ace_device *ace)
771{
772 u32 sreg = ace_in32(ace, ACE_STATUS);
773 u16 creg = ace_in(ace, ACE_CTRL);
774
775 /* Check for error occurance */
776 if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
777 (creg & ACE_CTRL_ERRORIRQ)) {
778 dev_err(ace->dev, "transfer failure\n");
779 ace_dump_regs(ace);
780 return -EIO;
781 }
782
783 return 0;
784}
785
786static irqreturn_t ace_interrupt(int irq, void *dev_id)
787{
788 u16 creg;
789 struct ace_device *ace = dev_id;
790
791 /* be safe and get the lock */
792 spin_lock(&ace->lock);
793 ace->in_irq = 1;
794
795 /* clear the interrupt */
796 creg = ace_in(ace, ACE_CTRL);
797 ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
798 ace_out(ace, ACE_CTRL, creg);
799
800 /* check for IO failures */
801 if (ace_interrupt_checkstate(ace))
802 ace->data_result = -EIO;
803
804 if (ace->fsm_task == 0) {
805 dev_err(ace->dev,
806 "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
807 ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
808 ace_in(ace, ACE_SECCNTCMD));
809 dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
810 ace->fsm_task, ace->fsm_state, ace->data_count);
811 }
812
813 /* Loop over state machine until told to stop */
814 ace->fsm_continue_flag = 1;
815 while (ace->fsm_continue_flag)
816 ace_fsm_dostate(ace);
817
818 /* done with interrupt; drop the lock */
819 ace->in_irq = 0;
820 spin_unlock(&ace->lock);
821
822 return IRQ_HANDLED;
823}
824
825/* ---------------------------------------------------------------------
826 * Block ops
827 */
828static void ace_request(request_queue_t * q)
829{
830 struct request *req;
831 struct ace_device *ace;
832
833 req = ace_get_next_request(q);
834
835 if (req) {
836 ace = req->rq_disk->private_data;
837 tasklet_schedule(&ace->fsm_tasklet);
838 }
839}
840
841static int ace_media_changed(struct gendisk *gd)
842{
843 struct ace_device *ace = gd->private_data;
844 dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
845
846 return ace->media_change;
847}
848
849static int ace_revalidate_disk(struct gendisk *gd)
850{
851 struct ace_device *ace = gd->private_data;
852 unsigned long flags;
853
854 dev_dbg(ace->dev, "ace_revalidate_disk()\n");
855
856 if (ace->media_change) {
857 dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
858
859 spin_lock_irqsave(&ace->lock, flags);
860 ace->id_req_count++;
861 spin_unlock_irqrestore(&ace->lock, flags);
862
863 tasklet_schedule(&ace->fsm_tasklet);
864 wait_for_completion(&ace->id_completion);
865 }
866
867 dev_dbg(ace->dev, "revalidate complete\n");
868 return ace->id_result;
869}
870
871static int ace_open(struct inode *inode, struct file *filp)
872{
873 struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
874 unsigned long flags;
875
876 dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
877
878 filp->private_data = ace;
879 spin_lock_irqsave(&ace->lock, flags);
880 ace->users++;
881 spin_unlock_irqrestore(&ace->lock, flags);
882
883 check_disk_change(inode->i_bdev);
884 return 0;
885}
886
887static int ace_release(struct inode *inode, struct file *filp)
888{
889 struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
890 unsigned long flags;
891 u16 val;
892
893 dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
894
895 spin_lock_irqsave(&ace->lock, flags);
896 ace->users--;
897 if (ace->users == 0) {
898 val = ace_in(ace, ACE_CTRL);
899 ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
900 }
901 spin_unlock_irqrestore(&ace->lock, flags);
902 return 0;
903}
904
905static int ace_ioctl(struct inode *inode, struct file *filp,
906 unsigned int cmd, unsigned long arg)
907{
908 struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
909 struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
910 struct hd_geometry g;
911 dev_dbg(ace->dev, "ace_ioctl()\n");
912
913 switch (cmd) {
914 case HDIO_GETGEO:
915 g.heads = ace->cf_id.heads;
916 g.sectors = ace->cf_id.sectors;
917 g.cylinders = ace->cf_id.cyls;
918 g.start = 0;
919 return copy_to_user(geo, &g, sizeof(g)) ? -EFAULT : 0;
920
921 default:
922 return -ENOTTY;
923 }
924 return -ENOTTY;
925}
926
927static struct block_device_operations ace_fops = {
928 .owner = THIS_MODULE,
929 .open = ace_open,
930 .release = ace_release,
931 .media_changed = ace_media_changed,
932 .revalidate_disk = ace_revalidate_disk,
933 .ioctl = ace_ioctl,
934};
935
936/* --------------------------------------------------------------------
937 * SystemACE device setup/teardown code
938 */
939static int __devinit ace_setup(struct ace_device *ace)
940{
941 u16 version;
942 u16 val;
943
944 int rc;
945
946 spin_lock_init(&ace->lock);
947 init_completion(&ace->id_completion);
948
949 /*
950 * Map the device
951 */
952 ace->baseaddr = ioremap(ace->physaddr, 0x80);
953 if (!ace->baseaddr)
954 goto err_ioremap;
955
956 if (ace->irq != NO_IRQ) {
957 rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
958 if (rc) {
959 /* Failure - fall back to polled mode */
960 dev_err(ace->dev, "request_irq failed\n");
961 ace->irq = NO_IRQ;
962 }
963 }
964
965 /*
966 * Initialize the state machine tasklet and stall timer
967 */
968 tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
969 setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
970
971 /*
972 * Initialize the request queue
973 */
974 ace->queue = blk_init_queue(ace_request, &ace->lock);
975 if (ace->queue == NULL)
976 goto err_blk_initq;
977 blk_queue_hardsect_size(ace->queue, 512);
978
979 /*
980 * Allocate and initialize GD structure
981 */
982 ace->gd = alloc_disk(ACE_NUM_MINORS);
983 if (!ace->gd)
984 goto err_alloc_disk;
985
986 ace->gd->major = ace_major;
987 ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
988 ace->gd->fops = &ace_fops;
989 ace->gd->queue = ace->queue;
990 ace->gd->private_data = ace;
991 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
992
993 /* set bus width */
994 if (ace->bus_width == 1) {
995 /* 0x0101 should work regardless of endianess */
996 ace_out_le16(ace, ACE_BUSMODE, 0x0101);
997
998 /* read it back to determine endianess */
999 if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
1000 ace->reg_ops = &ace_reg_le16_ops;
1001 else
1002 ace->reg_ops = &ace_reg_be16_ops;
1003 } else {
1004 ace_out_8(ace, ACE_BUSMODE, 0x00);
1005 ace->reg_ops = &ace_reg_8_ops;
1006 }
1007
1008 /* Make sure version register is sane */
1009 version = ace_in(ace, ACE_VERSION);
1010 if ((version == 0) || (version == 0xFFFF))
1011 goto err_read;
1012
1013 /* Put sysace in a sane state by clearing most control reg bits */
1014 ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
1015 ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
1016
1017 /* Enable interrupts */
1018 val = ace_in(ace, ACE_CTRL);
1019 val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
1020 ace_out(ace, ACE_CTRL, val);
1021
1022 /* Print the identification */
1023 dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
1024 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
1025 dev_dbg(ace->dev, "physaddr 0x%lx, mapped to 0x%p, irq=%i\n",
1026 ace->physaddr, ace->baseaddr, ace->irq);
1027
1028 ace->media_change = 1;
1029 ace_revalidate_disk(ace->gd);
1030
1031 /* Make the sysace device 'live' */
1032 add_disk(ace->gd);
1033
1034 return 0;
1035
1036 err_read:
1037 put_disk(ace->gd);
1038 err_alloc_disk:
1039 blk_cleanup_queue(ace->queue);
1040 err_blk_initq:
1041 iounmap(ace->baseaddr);
1042 if (ace->irq != NO_IRQ)
1043 free_irq(ace->irq, ace);
1044 err_ioremap:
1045 printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
1046 ace->physaddr);
1047 return -ENOMEM;
1048}
1049
1050static void __devexit ace_teardown(struct ace_device *ace)
1051{
1052 if (ace->gd) {
1053 del_gendisk(ace->gd);
1054 put_disk(ace->gd);
1055 }
1056
1057 if (ace->queue)
1058 blk_cleanup_queue(ace->queue);
1059
1060 tasklet_kill(&ace->fsm_tasklet);
1061
1062 if (ace->irq != NO_IRQ)
1063 free_irq(ace->irq, ace);
1064
1065 iounmap(ace->baseaddr);
1066}
1067
1068/* ---------------------------------------------------------------------
1069 * Platform Bus Support
1070 */
1071
1072static int __devinit ace_probe(struct device *device)
1073{
1074 struct platform_device *dev = to_platform_device(device);
1075 struct ace_device *ace;
1076 int i;
1077
1078 dev_dbg(device, "ace_probe(%p)\n", device);
1079
1080 /*
1081 * Allocate the ace device structure
1082 */
1083 ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
1084 if (!ace)
1085 goto err_alloc;
1086
1087 ace->dev = device;
1088 ace->id = dev->id;
1089 ace->irq = NO_IRQ;
1090
1091 for (i = 0; i < dev->num_resources; i++) {
1092 if (dev->resource[i].flags & IORESOURCE_MEM)
1093 ace->physaddr = dev->resource[i].start;
1094 if (dev->resource[i].flags & IORESOURCE_IRQ)
1095 ace->irq = dev->resource[i].start;
1096 }
1097
1098 /* FIXME: Should get bus_width from the platform_device struct */
1099 ace->bus_width = 1;
1100
1101 dev_set_drvdata(&dev->dev, ace);
1102
1103 /* Call the bus-independant setup code */
1104 if (ace_setup(ace) != 0)
1105 goto err_setup;
1106
1107 return 0;
1108
1109 err_setup:
1110 dev_set_drvdata(&dev->dev, NULL);
1111 kfree(ace);
1112 err_alloc:
1113 printk(KERN_ERR "xsysace: could not initialize device\n");
1114 return -ENOMEM;
1115}
1116
1117/*
1118 * Platform bus remove() method
1119 */
1120static int __devexit ace_remove(struct device *device)
1121{
1122 struct ace_device *ace = dev_get_drvdata(device);
1123
1124 dev_dbg(device, "ace_remove(%p)\n", device);
1125
1126 if (ace) {
1127 ace_teardown(ace);
1128 kfree(ace);
1129 }
1130
1131 return 0;
1132}
1133
1134static struct device_driver ace_driver = {
1135 .name = "xsysace",
1136 .bus = &platform_bus_type,
1137 .probe = ace_probe,
1138 .remove = __devexit_p(ace_remove),
1139};
1140
1141/* ---------------------------------------------------------------------
1142 * Module init/exit routines
1143 */
1144static int __init ace_init(void)
1145{
1146 ace_major = register_blkdev(ace_major, "xsysace");
1147 if (ace_major <= 0) {
1148 printk(KERN_WARNING "xsysace: register_blkdev() failed\n");
1149 return ace_major;
1150 }
1151
1152 pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major);
1153 return driver_register(&ace_driver);
1154}
1155
1156static void __exit ace_exit(void)
1157{
1158 pr_debug("Unregistering Xilinx SystemACE driver\n");
1159 driver_unregister(&ace_driver);
1160 unregister_blkdev(ace_major, "xsysace");
1161}
1162
1163module_init(ace_init);
1164module_exit(ace_exit);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 2abf94cc3137..e40fa98842e5 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -371,9 +371,7 @@ static void __exit z2_exit(void)
 {
 	int i, j;
 	blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
-	if ( unregister_blkdev( Z2RAM_MAJOR, DEVICE_NAME ) != 0 )
-		printk( KERN_ERR DEVICE_NAME ": unregister of device failed\n");
-
+	unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
 	del_gendisk(z2ram_gendisk);
 	put_disk(z2ram_gendisk);
 	blk_cleanup_queue(z2_queue);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ec9dc3d53f18..d8d7125529c4 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -114,7 +114,7 @@ config COMPUTONE
 
 config ROCKETPORT
 	tristate "Comtrol RocketPort support"
-	depends on SERIAL_NONSTANDARD
+	depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
 	help
 	  This driver supports Comtrol RocketPort and RocketModem PCI boards.
 	  These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
@@ -157,7 +157,7 @@ config CYZ_INTR
 
 config DIGIEPCA
 	tristate "Digiboard Intelligent Async Support"
-	depends on SERIAL_NONSTANDARD
+	depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
 	---help---
 	  This is a driver for Digi International's Xx, Xeve, and Xem series
 	  of cards which provide multiple serial ports. You would need
@@ -213,8 +213,6 @@ config MOXA_SMARTIO_NEW
 	  This is upgraded (1.9.1) driver from original Moxa drivers with
 	  changes finally resulting in PCI probing.
 
-	  Use at your own risk.
-
 	  This driver can also be built as a module. The module will be called
 	  mxser_new. If you want to do that, say M here.
 
@@ -354,7 +352,7 @@ config STALDRV
 
 config STALLION
 	tristate "Stallion EasyIO or EC8/32 support"
-	depends on STALDRV && BROKEN_ON_SMP
+	depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
 	help
 	  If you have an EasyIO or EasyConnection 8/32 multiport Stallion
 	  card, then this is for you; say Y.  Make sure to read
@@ -365,7 +363,7 @@ config STALLION
 
 config ISTALLION
 	tristate "Stallion EC8/64, ONboard, Brumby support"
-	depends on STALDRV && BROKEN_ON_SMP
+	depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
 	help
 	  If you have an EasyConnection 8/64, ONboard, Brumby or Stallion
 	  serial multiport card, say Y here. Make sure to read
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 179c7a3b6e75..ec116df919d9 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -20,6 +20,7 @@
 #include <linux/sched.h>
 #include <linux/pm.h>
 #include <linux/apm-emulation.h>
+#include <linux/freezer.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -329,13 +330,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 		/*
 		 * Wait for the suspend/resume to complete. If there
 		 * are pending acknowledges, we wait here for them.
-		 *
-		 * Note: we need to ensure that the PM subsystem does
-		 * not kick us out of the wait when it suspends the
-		 * threads.
 		 */
 		flags = current->flags;
-		current->flags |= PF_NOFREEZE;
 
 		wait_event(apm_suspend_waitqueue,
 			   as->suspend_state == SUSPEND_DONE);
@@ -365,13 +361,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 		/*
 		 * Wait for the suspend/resume to complete. If there
 		 * are pending acknowledges, we wait here for them.
-		 *
-		 * Note: we need to ensure that the PM subsystem does
-		 * not kick us out of the wait when it suspends the
-		 * threads.
 		 */
 		flags = current->flags;
-		current->flags |= PF_NOFREEZE;
 
 		wait_event_interruptible(apm_suspend_waitqueue,
 					 as->suspend_state == SUSPEND_DONE);
@@ -598,7 +589,6 @@ static int __init apm_init(void)
 		kapmd_tsk = NULL;
 		return ret;
 	}
-	kapmd_tsk->flags |= PF_NOFREEZE;
 	wake_up_process(kapmd_tsk);
 
 #ifdef CONFIG_PROC_FS
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e04005b5f8a6..9e0adfe27c12 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -646,6 +646,7 @@
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/bitops.h>
+#include <linux/firmware.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -680,6 +681,44 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
 
 #define STD_COM_FLAGS (0)
 
+/* firmware stuff */
+#define ZL_MAX_BLOCKS	16
+#define DRIVER_VERSION	0x02010203
+#define RAM_SIZE 0x80000
+
+#define Z_FPGA_LOADED(X)	((readl(&(X)->init_ctrl) & (1<<17)) != 0)
+
+enum zblock_type {
+	ZBLOCK_PRG = 0,
+	ZBLOCK_FPGA = 1
+};
+
+struct zfile_header {
+	char name[64];
+	char date[32];
+	char aux[32];
+	u32 n_config;
+	u32 config_offset;
+	u32 n_blocks;
+	u32 block_offset;
+	u32 reserved[9];
+} __attribute__ ((packed));
+
+struct zfile_config {
+	char name[64];
+	u32 mailbox;
+	u32 function;
+	u32 n_blocks;
+	u32 block_list[ZL_MAX_BLOCKS];
+} __attribute__ ((packed));
+
+struct zfile_block {
+	u32 type;
+	u32 file_offset;
+	u32 ram_offset;
+	u32 size;
+} __attribute__ ((packed));
+
 static struct tty_driver *cy_serial_driver;
 
 #ifdef CONFIG_ISA
@@ -1851,11 +1890,11 @@ static void cyz_poll(unsigned long arg)
1851 struct cyclades_card *cinfo; 1890 struct cyclades_card *cinfo;
1852 struct cyclades_port *info; 1891 struct cyclades_port *info;
1853 struct tty_struct *tty; 1892 struct tty_struct *tty;
1854 static struct FIRM_ID *firm_id; 1893 struct FIRM_ID __iomem *firm_id;
1855 static struct ZFW_CTRL *zfw_ctrl; 1894 struct ZFW_CTRL __iomem *zfw_ctrl;
1856 static struct BOARD_CTRL *board_ctrl; 1895 struct BOARD_CTRL __iomem *board_ctrl;
1857 static struct CH_CTRL *ch_ctrl; 1896 struct CH_CTRL __iomem *ch_ctrl;
1858 static struct BUF_CTRL *buf_ctrl; 1897 struct BUF_CTRL __iomem *buf_ctrl;
1859 unsigned long expires = jiffies + HZ; 1898 unsigned long expires = jiffies + HZ;
1860 int card, port; 1899 int card, port;
1861 1900
@@ -1999,7 +2038,6 @@ static int startup(struct cyclades_port *info)
1999 struct ZFW_CTRL __iomem *zfw_ctrl; 2038 struct ZFW_CTRL __iomem *zfw_ctrl;
2000 struct BOARD_CTRL __iomem *board_ctrl; 2039 struct BOARD_CTRL __iomem *board_ctrl;
2001 struct CH_CTRL __iomem *ch_ctrl; 2040 struct CH_CTRL __iomem *ch_ctrl;
2002 int retval;
2003 2041
2004 base_addr = card->base_addr; 2042 base_addr = card->base_addr;
2005 2043
@@ -2371,7 +2409,6 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
2371 struct ZFW_CTRL __iomem *zfw_ctrl; 2409 struct ZFW_CTRL __iomem *zfw_ctrl;
2372 struct BOARD_CTRL __iomem *board_ctrl; 2410 struct BOARD_CTRL __iomem *board_ctrl;
2373 struct CH_CTRL __iomem *ch_ctrl; 2411 struct CH_CTRL __iomem *ch_ctrl;
2374 int retval;
2375 2412
2376 base_addr = cinfo->base_addr; 2413 base_addr = cinfo->base_addr;
2377 firm_id = base_addr + ID_ADDRESS; 2414 firm_id = base_addr + ID_ADDRESS;
@@ -4429,10 +4466,10 @@ static void cy_hangup(struct tty_struct *tty)
4429static int __devinit cy_init_card(struct cyclades_card *cinfo) 4466static int __devinit cy_init_card(struct cyclades_card *cinfo)
4430{ 4467{
4431 struct cyclades_port *info; 4468 struct cyclades_port *info;
4432 u32 mailbox; 4469 u32 uninitialized_var(mailbox);
4433 unsigned int nports; 4470 unsigned int nports;
4434 unsigned short chip_number; 4471 unsigned short chip_number;
4435 int index, port; 4472 int uninitialized_var(index), port;
4436 4473
4437 spin_lock_init(&cinfo->card_lock); 4474 spin_lock_init(&cinfo->card_lock);
4438 4475
@@ -4735,17 +4772,295 @@ static int __init cy_detect_isa(void)
4735} /* cy_detect_isa */ 4772} /* cy_detect_isa */
4736 4773
4737#ifdef CONFIG_PCI 4774#ifdef CONFIG_PCI
4738static void __devinit plx_init(void __iomem * addr, __u32 initctl) 4775static inline int __devinit cyc_isfwstr(const char *str, unsigned int size)
4776{
4777 unsigned int a;
4778
4779 for (a = 0; a < size && *str; a++, str++)
4780 if (*str & 0x80)
4781 return -EINVAL;
4782
4783 for (; a < size; a++, str++)
4784 if (*str)
4785 return -EINVAL;
4786
4787 return 0;
4788}
4789
4790static inline void __devinit cyz_fpga_copy(void __iomem *fpga, u8 *data,
4791 unsigned int size)
4792{
4793 for (; size > 0; size--) {
4794 cy_writel(fpga, *data++);
4795 udelay(10);
4796 }
4797}
4798
4799static void __devinit plx_init(struct pci_dev *pdev, int irq,
4800 struct RUNTIME_9060 __iomem *addr)
4739{ 4801{
4740 /* Reset PLX */ 4802 /* Reset PLX */
4741 cy_writel(addr + initctl, readl(addr + initctl) | 0x40000000); 4803 cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000);
4742 udelay(100L); 4804 udelay(100L);
4743 cy_writel(addr + initctl, readl(addr + initctl) & ~0x40000000); 4805 cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000);
4744 4806
4745 /* Reload Config. Registers from EEPROM */ 4807 /* Reload Config. Registers from EEPROM */
4746 cy_writel(addr + initctl, readl(addr + initctl) | 0x20000000); 4808 cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000);
4747 udelay(100L); 4809 udelay(100L);
4748 cy_writel(addr + initctl, readl(addr + initctl) & ~0x20000000); 4810 cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000);
4811
4812 /* For some yet unknown reason, once the PLX9060 reloads the EEPROM,
4813 * the IRQ is lost and, thus, we have to re-write it to the PCI config.
4814 * registers. This will remain here until we find a permanent fix.
4815 */
4816 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
4817}
4818
4819static int __devinit __cyz_load_fw(const struct firmware *fw,
4820 const char *name, const u32 mailbox, void __iomem *base,
4821 void __iomem *fpga)
4822{
4823 void *ptr = fw->data;
4824 struct zfile_header *h = ptr;
4825 struct zfile_config *c, *cs;
4826 struct zfile_block *b, *bs;
4827 unsigned int a, tmp, len = fw->size;
4828#define BAD_FW KERN_ERR "Bad firmware: "
4829 if (len < sizeof(*h)) {
4830 printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h));
4831 return -EINVAL;
4832 }
4833
4834 cs = ptr + h->config_offset;
4835 bs = ptr + h->block_offset;
4836
4837 if ((void *)(cs + h->n_config) > ptr + len ||
4838 (void *)(bs + h->n_blocks) > ptr + len) {
 4839 printk(BAD_FW "too short\n");
4840 return -EINVAL;
4841 }
4842
4843 if (cyc_isfwstr(h->name, sizeof(h->name)) ||
4844 cyc_isfwstr(h->date, sizeof(h->date))) {
 4845 printk(BAD_FW "badly formatted header string\n");
4846 return -EINVAL;
4847 }
4848
4849 if (strncmp(name, h->name, sizeof(h->name))) {
4850 printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name);
4851 return -EINVAL;
4852 }
4853
4854 tmp = 0;
4855 for (c = cs; c < cs + h->n_config; c++) {
4856 for (a = 0; a < c->n_blocks; a++)
4857 if (c->block_list[a] > h->n_blocks) {
4858 printk(BAD_FW "bad block ref number in cfgs\n");
4859 return -EINVAL;
4860 }
4861 if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */
4862 tmp++;
4863 }
4864 if (!tmp) {
4865 printk(BAD_FW "nothing appropriate\n");
4866 return -EINVAL;
4867 }
4868
4869 for (b = bs; b < bs + h->n_blocks; b++)
4870 if (b->file_offset + b->size > len) {
4871 printk(BAD_FW "bad block data offset\n");
4872 return -EINVAL;
4873 }
4874
4875 /* everything is OK, let's seek'n'load it */
4876 for (c = cs; c < cs + h->n_config; c++)
4877 if (c->mailbox == mailbox && c->function == 0)
4878 break;
4879
4880 for (a = 0; a < c->n_blocks; a++) {
4881 b = &bs[c->block_list[a]];
4882 if (b->type == ZBLOCK_FPGA) {
4883 if (fpga != NULL)
4884 cyz_fpga_copy(fpga, ptr + b->file_offset,
4885 b->size);
4886 } else {
4887 if (base != NULL)
4888 memcpy_toio(base + b->ram_offset,
4889 ptr + b->file_offset, b->size);
4890 }
4891 }
4892#undef BAD_FW
4893 return 0;
4894}
4895
4896static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4897 struct RUNTIME_9060 __iomem *ctl_addr, int irq)
4898{
4899 const struct firmware *fw;
4900 struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS;
4901 struct CUSTOM_REG __iomem *cust = base_addr;
4902 struct ZFW_CTRL __iomem *pt_zfwctrl;
4903 void __iomem *tmp;
4904 u32 mailbox, status;
4905 unsigned int i;
4906 int retval;
4907
4908 retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
4909 if (retval) {
4910 dev_err(&pdev->dev, "can't get firmware\n");
4911 goto err;
4912 }
4913
4914 /* Check whether the firmware is already loaded and running. If
 4915 so, skip this board */
4916 if (Z_FPGA_LOADED(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
4917 u32 cntval = readl(base_addr + 0x190);
4918
4919 udelay(100);
4920 if (cntval != readl(base_addr + 0x190)) {
4921 /* FW counter is working, FW is running */
4922 dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. "
4923 "Skipping board.\n");
4924 retval = 0;
4925 goto err_rel;
4926 }
4927 }
4928
4929 /* start boot */
4930 cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) &
4931 ~0x00030800UL);
4932
4933 mailbox = readl(&ctl_addr->mail_box_0);
4934
4935 if (mailbox == 0 || Z_FPGA_LOADED(ctl_addr)) {
4936 /* stops CPU and set window to beginning of RAM */
4937 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
4938 cy_writel(&cust->cpu_stop, 0);
4939 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4940 udelay(100);
4941 }
4942
4943 plx_init(pdev, irq, ctl_addr);
4944
4945 if (mailbox != 0) {
4946 /* load FPGA */
4947 retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL,
4948 base_addr);
4949 if (retval)
4950 goto err_rel;
4951 if (!Z_FPGA_LOADED(ctl_addr)) {
4952 dev_err(&pdev->dev, "fw upload successful, but fw is "
4953 "not loaded\n");
4954 goto err_rel;
4955 }
4956 }
4957
4958 /* stops CPU and set window to beginning of RAM */
4959 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
4960 cy_writel(&cust->cpu_stop, 0);
4961 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4962 udelay(100);
4963
4964 /* clear memory */
4965 for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
4966 cy_writeb(tmp, 255);
4967 if (mailbox != 0) {
4968 /* set window to last 512K of RAM */
4969 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
4970 //sleep(1);
4971 for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
4972 cy_writeb(tmp, 255);
4973 /* set window to beginning of RAM */
4974 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4975 //sleep(1);
4976 }
4977
4978 retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
4979 release_firmware(fw);
4980 if (retval)
4981 goto err;
4982
4983 /* finish boot and start boards */
4984 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
4985 cy_writel(&cust->cpu_start, 0);
4986 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4987 i = 0;
4988 while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40)
4989 msleep(100);
4990 if (status != ZFIRM_ID) {
4991 if (status == ZFIRM_HLT) {
4992 dev_err(&pdev->dev, "you need an external power supply "
4993 "for this number of ports. Firmware halted and "
4994 "board reset.\n");
4995 retval = -EIO;
4996 goto err;
4997 }
4998 dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting "
4999 "some more time\n", status);
5000 while ((status = readl(&fid->signature)) != ZFIRM_ID &&
5001 i++ < 200)
5002 msleep(100);
5003 if (status != ZFIRM_ID) {
5004 dev_err(&pdev->dev, "Board not started in 20 seconds! "
5005 "Giving up. (fid->signature = 0x%x)\n",
5006 status);
5007 dev_info(&pdev->dev, "*** Warning ***: if you are "
5008 "upgrading the FW, please power cycle the "
5009 "system before loading the new FW to the "
5010 "Cyclades-Z.\n");
5011
5012 if (Z_FPGA_LOADED(ctl_addr))
5013 plx_init(pdev, irq, ctl_addr);
5014
5015 retval = -EIO;
5016 goto err;
5017 }
5018 dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n",
5019 i / 10);
5020 }
5021 pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr);
5022
5023 dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n",
5024 base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
5025 base_addr + readl(&fid->zfwctrl_addr));
5026
5027 dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
5028 readl(&pt_zfwctrl->board_ctrl.fw_version),
5029 readl(&pt_zfwctrl->board_ctrl.n_channel));
5030
5031 if (readl(&pt_zfwctrl->board_ctrl.n_channel) == 0) {
5032 dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
5033 "check the connection between the Z host card and the "
5034 "serial expanders.\n");
5035
5036 if (Z_FPGA_LOADED(ctl_addr))
5037 plx_init(pdev, irq, ctl_addr);
5038
5039 dev_info(&pdev->dev, "Null number of ports detected. Board "
5040 "reset.\n");
5041 retval = 0;
5042 goto err;
5043 }
5044
5045 cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX);
5046 cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION);
5047
5048 /*
5049 Early firmware failed to start looking for commands.
5050 This enables firmware interrupts for those commands.
5051 */
5052 cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
5053 (1 << 17));
5054 cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
5055 0x00030800UL);
5056
5057 plx_init(pdev, irq, ctl_addr);
5058
5059 return 0;
5060err_rel:
5061 release_firmware(fw);
5062err:
5063 return retval;
4749} 5064}
4750 5065
4751static int __devinit cy_pci_probe(struct pci_dev *pdev, 5066static int __devinit cy_pci_probe(struct pci_dev *pdev,
@@ -4827,16 +5142,9 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
4827 } 5142 }
4828 5143
4829 /* Disable interrupts on the PLX before resetting it */ 5144 /* Disable interrupts on the PLX before resetting it */
4830 cy_writew(addr0 + 0x68, 5145 cy_writew(addr0 + 0x68, readw(addr0 + 0x68) & ~0x0900);
4831 readw(addr0 + 0x68) & ~0x0900);
4832 5146
4833 plx_init(addr0, 0x6c); 5147 plx_init(pdev, irq, addr0);
4834 /* For some yet unknown reason, once the PLX9060 reloads
4835 the EEPROM, the IRQ is lost and, thus, we have to
4836 re-write it to the PCI config. registers.
4837 This will remain here until we find a permanent
4838 fix. */
4839 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
4840 5148
4841 mailbox = (u32)readl(&ctl_addr->mail_box_0); 5149 mailbox = (u32)readl(&ctl_addr->mail_box_0);
4842 5150
@@ -4877,6 +5185,9 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
4877 if ((mailbox == ZO_V1) || (mailbox == ZO_V2)) 5185 if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
4878 cy_writel(addr2 + ID_ADDRESS, 0L); 5186 cy_writel(addr2 + ID_ADDRESS, 0L);
4879 5187
5188 retval = cyz_load_fw(pdev, addr2, addr0, irq);
5189 if (retval)
5190 goto err_unmap;
4880 /* This must be a Cyclades-8Zo/PCI. The extendable 5191 /* This must be a Cyclades-8Zo/PCI. The extendable
4881 version will have a different device_id and will 5192 version will have a different device_id and will
4882 be allocated its maximum number of ports. */ 5193 be allocated its maximum number of ports. */
@@ -4953,15 +5264,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
4953 case PLX_9060: 5264 case PLX_9060:
4954 case PLX_9080: 5265 case PLX_9080:
4955 default: /* Old boards, use PLX_9060 */ 5266 default: /* Old boards, use PLX_9060 */
4956 5267 plx_init(pdev, irq, addr0);
4957 plx_init(addr0, 0x6c);
4958 /* For some yet unknown reason, once the PLX9060 reloads
4959 the EEPROM, the IRQ is lost and, thus, we have to
4960 re-write it to the PCI config. registers.
4961 This will remain here until we find a permanent
4962 fix. */
4963 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
4964
4965 cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900); 5268 cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900);
4966 break; 5269 break;
4967 } 5270 }
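
Aside (illustration, not part of the patch): the zfile_header/zfile_block structures added above describe the on-disk layout of cyzfirm.bin, and __cyz_load_fw() rejects images whose tables or blocks point outside the file. The same bounds checks can be sketched as a small stand-alone user-space checker (hypothetical tool; configs are ignored and error handling is minimal):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct zfile_header {
	char name[64], date[32], aux[32];
	uint32_t n_config, config_offset, n_blocks, block_offset;
	uint32_t reserved[9];
} __attribute__((packed));

struct zfile_block {
	uint32_t type, file_offset, ram_offset, size;
} __attribute__((packed));

int main(int argc, char **argv)
{
	FILE *f = fopen(argc > 1 ? argv[1] : "cyzfirm.bin", "rb");
	unsigned char *buf;
	long len;
	uint32_t i;

	if (!f) { perror("open"); return 1; }
	fseek(f, 0, SEEK_END);
	len = ftell(f);
	rewind(f);
	buf = malloc(len);
	if (!buf || fread(buf, 1, len, f) != (size_t)len) return 1;

	if ((size_t)len < sizeof(struct zfile_header)) {
		fprintf(stderr, "too short\n");
		return 1;
	}
	struct zfile_header *h = (struct zfile_header *)buf;
	struct zfile_block *bs = (struct zfile_block *)(buf + h->block_offset);

	/* block table itself must lie inside the file */
	if ((unsigned char *)(bs + h->n_blocks) > buf + len) {
		fprintf(stderr, "block table out of bounds\n");
		return 1;
	}
	/* every block's payload must lie inside the file */
	for (i = 0; i < h->n_blocks; i++)
		if (bs[i].file_offset + bs[i].size > (uint32_t)len) {
			fprintf(stderr, "block %u data out of bounds\n", i);
			return 1;
		}
	printf("%.64s (%.32s): %u blocks, %u configs\n",
	       h->name, h->date, h->n_blocks, h->n_config);
	return 0;
}
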
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 9138b49e676e..ee83ff9efed6 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -72,6 +72,8 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
72 mutex_init(&dev->struct_mutex); 72 mutex_init(&dev->struct_mutex);
73 mutex_init(&dev->ctxlist_mutex); 73 mutex_init(&dev->ctxlist_mutex);
74 74
75 idr_init(&dev->drw_idr);
76
75 dev->pdev = pdev; 77 dev->pdev = pdev;
76 dev->pci_device = pdev->device; 78 dev->pci_device = pdev->device;
77 dev->pci_vendor = pdev->vendor; 79 dev->pci_vendor = pdev->vendor;
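
Aside (illustration, not from the patch): dev->drw_idr is an idr, the kernel's small-integer-to-pointer map, which must be initialized before the drawable code allocates IDs from it. The idr API of this era is used roughly as below; the surrounding names are hypothetical:

#include <linux/idr.h>

static DEFINE_IDR(example_idr);

static int example_store(void *object)
{
	int id, ret;

	if (!idr_pre_get(&example_idr, GFP_KERNEL))
		return -ENOMEM;                 /* preallocate an idr layer */
	ret = idr_get_new(&example_idr, object, &id);
	if (ret)
		return ret;
	return id;      /* later: idr_find(&example_idr, id) / idr_remove() */
}
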
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index 0580fa33cb77..441bbdbf1510 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -94,7 +94,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
94 mutex_lock(&dev->struct_mutex); 94 mutex_lock(&dev->struct_mutex);
95#if defined(CONFIG_FB_SIS) 95#if defined(CONFIG_FB_SIS)
96 { 96 {
97 drm_sman_mm_t sman_mm; 97 struct drm_sman_mm sman_mm;
98 sman_mm.private = (void *)0xFFFFFFFF; 98 sman_mm.private = (void *)0xFFFFFFFF;
99 sman_mm.allocate = sis_sman_mm_allocate; 99 sman_mm.allocate = sis_sman_mm_allocate;
100 sman_mm.free = sis_sman_mm_free; 100 sman_mm.free = sis_sman_mm_free;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index b3ab42e0dd4a..83c1151ec7a2 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -679,6 +679,7 @@ static int khvcd(void *unused)
679 int poll_mask; 679 int poll_mask;
680 struct hvc_struct *hp; 680 struct hvc_struct *hp;
681 681
682 set_freezable();
682 __set_current_state(TASK_RUNNING); 683 __set_current_state(TASK_RUNNING);
683 do { 684 do {
684 poll_mask = 0; 685 poll_mask = 0;
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 761f77740d67..77a7a4a06620 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -171,9 +171,6 @@ static struct pci_driver isicom_driver = {
171static int prev_card = 3; /* start servicing isi_card[0] */ 171static int prev_card = 3; /* start servicing isi_card[0] */
172static struct tty_driver *isicom_normal; 172static struct tty_driver *isicom_normal;
173 173
174static DECLARE_COMPLETION(isi_timerdone);
175static char re_schedule = 1;
176
177static void isicom_tx(unsigned long _data); 174static void isicom_tx(unsigned long _data);
178static void isicom_start(struct tty_struct *tty); 175static void isicom_start(struct tty_struct *tty);
179 176
@@ -187,7 +184,7 @@ static signed char linuxb_to_isib[] = {
187 184
188struct isi_board { 185struct isi_board {
189 unsigned long base; 186 unsigned long base;
190 unsigned char irq; 187 int irq;
191 unsigned char port_count; 188 unsigned char port_count;
192 unsigned short status; 189 unsigned short status;
193 unsigned short port_status; /* each bit for each port */ 190 unsigned short port_status; /* each bit for each port */
@@ -227,7 +224,7 @@ static struct isi_port isi_ports[PORT_COUNT];
227 * it wants to talk. 224 * it wants to talk.
228 */ 225 */
229 226
230static inline int WaitTillCardIsFree(u16 base) 227static inline int WaitTillCardIsFree(unsigned long base)
231{ 228{
232 unsigned int count = 0; 229 unsigned int count = 0;
233 unsigned int a = in_atomic(); /* do we run under spinlock? */ 230 unsigned int a = in_atomic(); /* do we run under spinlock? */
@@ -243,17 +240,18 @@ static inline int WaitTillCardIsFree(u16 base)
243 240
244static int lock_card(struct isi_board *card) 241static int lock_card(struct isi_board *card)
245{ 242{
246 char retries;
247 unsigned long base = card->base; 243 unsigned long base = card->base;
244 unsigned int retries, a;
248 245
249 for (retries = 0; retries < 100; retries++) { 246 for (retries = 0; retries < 10; retries++) {
250 spin_lock_irqsave(&card->card_lock, card->flags); 247 spin_lock_irqsave(&card->card_lock, card->flags);
251 if (inw(base + 0xe) & 0x1) { 248 for (a = 0; a < 10; a++) {
252 return 1; 249 if (inw(base + 0xe) & 0x1)
253 } else { 250 return 1;
254 spin_unlock_irqrestore(&card->card_lock, card->flags); 251 udelay(10);
255 udelay(1000); /* 1ms */
256 } 252 }
253 spin_unlock_irqrestore(&card->card_lock, card->flags);
254 msleep(10);
257 } 255 }
258 printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n", 256 printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
259 card->base); 257 card->base);
@@ -261,23 +259,6 @@ static int lock_card(struct isi_board *card)
261 return 0; /* Failed to acquire the card! */ 259 return 0; /* Failed to acquire the card! */
262} 260}
263 261
264static int lock_card_at_interrupt(struct isi_board *card)
265{
266 unsigned char retries;
267 unsigned long base = card->base;
268
269 for (retries = 0; retries < 200; retries++) {
270 spin_lock_irqsave(&card->card_lock, card->flags);
271
272 if (inw(base + 0xe) & 0x1)
273 return 1;
274 else
275 spin_unlock_irqrestore(&card->card_lock, card->flags);
276 }
277 /* Failing in interrupt is an acceptable event */
278 return 0; /* Failed to acquire the card! */
279}
280
281static void unlock_card(struct isi_board *card) 262static void unlock_card(struct isi_board *card)
282{ 263{
283 spin_unlock_irqrestore(&card->card_lock, card->flags); 264 spin_unlock_irqrestore(&card->card_lock, card->flags);
@@ -415,7 +396,9 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
415 396
416static void isicom_tx(unsigned long _data) 397static void isicom_tx(unsigned long _data)
417{ 398{
418 short count = (BOARD_COUNT-1), card, base; 399 unsigned long flags, base;
400 unsigned int retries;
401 short count = (BOARD_COUNT-1), card;
419 short txcount, wrd, residue, word_count, cnt; 402 short txcount, wrd, residue, word_count, cnt;
420 struct isi_port *port; 403 struct isi_port *port;
421 struct tty_struct *tty; 404 struct tty_struct *tty;
@@ -435,32 +418,34 @@ static void isicom_tx(unsigned long _data)
435 count = isi_card[card].port_count; 418 count = isi_card[card].port_count;
436 port = isi_card[card].ports; 419 port = isi_card[card].ports;
437 base = isi_card[card].base; 420 base = isi_card[card].base;
421
422 spin_lock_irqsave(&isi_card[card].card_lock, flags);
423 for (retries = 0; retries < 100; retries++) {
424 if (inw(base + 0xe) & 0x1)
425 break;
426 udelay(2);
427 }
428 if (retries >= 100)
429 goto unlock;
430
438 for (;count > 0;count--, port++) { 431 for (;count > 0;count--, port++) {
439 if (!lock_card_at_interrupt(&isi_card[card]))
440 continue;
441 /* port not active or tx disabled to force flow control */ 432 /* port not active or tx disabled to force flow control */
442 if (!(port->flags & ASYNC_INITIALIZED) || 433 if (!(port->flags & ASYNC_INITIALIZED) ||
443 !(port->status & ISI_TXOK)) 434 !(port->status & ISI_TXOK))
444 unlock_card(&isi_card[card]);
445 continue; 435 continue;
446 436
447 tty = port->tty; 437 tty = port->tty;
448 438
449 439 if (tty == NULL)
450 if (tty == NULL) {
451 unlock_card(&isi_card[card]);
452 continue; 440 continue;
453 }
454 441
455 txcount = min_t(short, TX_SIZE, port->xmit_cnt); 442 txcount = min_t(short, TX_SIZE, port->xmit_cnt);
456 if (txcount <= 0 || tty->stopped || tty->hw_stopped) { 443 if (txcount <= 0 || tty->stopped || tty->hw_stopped)
457 unlock_card(&isi_card[card]);
458 continue; 444 continue;
459 } 445
460 if (!(inw(base + 0x02) & (1 << port->channel))) { 446 if (!(inw(base + 0x02) & (1 << port->channel)))
461 unlock_card(&isi_card[card]);
462 continue; 447 continue;
463 } 448
464 pr_dbg("txing %d bytes, port%d.\n", txcount, 449 pr_dbg("txing %d bytes, port%d.\n", txcount,
465 port->channel + 1); 450 port->channel + 1);
466 outw((port->channel << isi_card[card].shift_count) | txcount, 451 outw((port->channel << isi_card[card].shift_count) | txcount,
@@ -508,16 +493,12 @@ static void isicom_tx(unsigned long _data)
508 port->status &= ~ISI_TXOK; 493 port->status &= ~ISI_TXOK;
509 if (port->xmit_cnt <= WAKEUP_CHARS) 494 if (port->xmit_cnt <= WAKEUP_CHARS)
510 tty_wakeup(tty); 495 tty_wakeup(tty);
511 unlock_card(&isi_card[card]);
512 } 496 }
513 497
498unlock:
499 spin_unlock_irqrestore(&isi_card[card].card_lock, flags);
514 /* schedule another tx for hopefully in about 10ms */ 500 /* schedule another tx for hopefully in about 10ms */
515sched_again: 501sched_again:
516 if (!re_schedule) {
517 complete(&isi_timerdone);
518 return;
519 }
520
521 mod_timer(&tx, jiffies + msecs_to_jiffies(10)); 502 mod_timer(&tx, jiffies + msecs_to_jiffies(10));
522} 503}
523 504
@@ -1749,17 +1730,13 @@ static unsigned int card_count;
1749static int __devinit isicom_probe(struct pci_dev *pdev, 1730static int __devinit isicom_probe(struct pci_dev *pdev,
1750 const struct pci_device_id *ent) 1731 const struct pci_device_id *ent)
1751{ 1732{
1752 unsigned int ioaddr, signature, index; 1733 unsigned int signature, index;
1753 int retval = -EPERM; 1734 int retval = -EPERM;
1754 u8 pciirq;
1755 struct isi_board *board = NULL; 1735 struct isi_board *board = NULL;
1756 1736
1757 if (card_count >= BOARD_COUNT) 1737 if (card_count >= BOARD_COUNT)
1758 goto err; 1738 goto err;
1759 1739
1760 ioaddr = pci_resource_start(pdev, 3);
1761 /* i.e at offset 0x1c in the PCI configuration register space. */
1762 pciirq = pdev->irq;
1763 dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); 1740 dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
1764 1741
1765 /* allot the first empty slot in the array */ 1742 /* allot the first empty slot in the array */
@@ -1770,8 +1747,8 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
1770 } 1747 }
1771 1748
1772 board->index = index; 1749 board->index = index;
1773 board->base = ioaddr; 1750 board->base = pci_resource_start(pdev, 3);
1774 board->irq = pciirq; 1751 board->irq = pdev->irq;
1775 card_count++; 1752 card_count++;
1776 1753
1777 pci_set_drvdata(pdev, board); 1754 pci_set_drvdata(pdev, board);
@@ -1901,9 +1878,7 @@ error:
1901 1878
1902static void __exit isicom_exit(void) 1879static void __exit isicom_exit(void)
1903{ 1880{
1904 re_schedule = 0; 1881 del_timer_sync(&tx);
1905
1906 wait_for_completion_timeout(&isi_timerdone, HZ);
1907 1882
1908 pci_unregister_driver(&isicom_driver); 1883 pci_unregister_driver(&isicom_driver);
1909 tty_unregister_driver(isicom_normal); 1884 tty_unregister_driver(isicom_normal);
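
Aside (illustration, not from the patch): with re_schedule and the isi_timerdone completion gone, isicom_tx() is simply a self-re-arming timer, and teardown only needs del_timer_sync(), which also waits for a handler still running on another CPU. The shape of that pattern, with placeholder names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_poll(unsigned long data);
static DEFINE_TIMER(example_timer, example_poll, 0, 0);

static void example_poll(unsigned long data)
{
	/* ... service the hardware ... */
	mod_timer(&example_timer, jiffies + msecs_to_jiffies(10)); /* re-arm */
}

static void example_exit(void)
{
	del_timer_sync(&example_timer); /* no handler runs after this returns */
}
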
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 809409922996..3c66f402f9d7 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -2163,14 +2163,10 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne
2163 cdkhdr_t __iomem *hdrp; 2163 cdkhdr_t __iomem *hdrp;
2164 cdkctrl_t __iomem *cp; 2164 cdkctrl_t __iomem *cp;
2165 unsigned char __iomem *bits; 2165 unsigned char __iomem *bits;
2166 unsigned long flags;
2167
2168 spin_lock_irqsave(&brd_lock, flags);
2169 2166
2170 if (test_bit(ST_CMDING, &portp->state)) { 2167 if (test_bit(ST_CMDING, &portp->state)) {
2171 printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n", 2168 printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n",
2172 (int) cmd); 2169 (int) cmd);
2173 spin_unlock_irqrestore(&brd_lock, flags);
2174 return; 2170 return;
2175 } 2171 }
2176 2172
@@ -2191,7 +2187,6 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne
2191 writeb(readb(bits) | portp->portbit, bits); 2187 writeb(readb(bits) | portp->portbit, bits);
2192 set_bit(ST_CMDING, &portp->state); 2188 set_bit(ST_CMDING, &portp->state);
2193 EBRDDISABLE(brdp); 2189 EBRDDISABLE(brdp);
2194 spin_unlock_irqrestore(&brd_lock, flags);
2195} 2190}
2196 2191
2197static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback) 2192static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
@@ -3215,13 +3210,13 @@ static int stli_initecp(struct stlibrd *brdp)
3215 goto err; 3210 goto err;
3216 } 3211 }
3217 3212
3213 brdp->iosize = ECP_IOSIZE;
3214
3218 if (!request_region(brdp->iobase, brdp->iosize, "istallion")) { 3215 if (!request_region(brdp->iobase, brdp->iosize, "istallion")) {
3219 retval = -EIO; 3216 retval = -EIO;
3220 goto err; 3217 goto err;
3221 } 3218 }
3222 3219
3223 brdp->iosize = ECP_IOSIZE;
3224
3225/* 3220/*
3226 * Based on the specific board type setup the common vars to access 3221 * Based on the specific board type setup the common vars to access
3227 * and enable shared memory. Set all board specific information now 3222 * and enable shared memory. Set all board specific information now
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index e0d35c20c04f..ed76f0a127fd 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1405,7 +1405,6 @@ static int moxaCard;
1405static struct mon_str moxaLog; 1405static struct mon_str moxaLog;
1406static int moxaFuncTout = HZ / 2; 1406static int moxaFuncTout = HZ / 2;
1407 1407
1408static void moxadelay(int);
1409static void moxafunc(void __iomem *, int, ushort); 1408static void moxafunc(void __iomem *, int, ushort);
1410static void wait_finish(void __iomem *); 1409static void wait_finish(void __iomem *);
1411static void low_water_check(void __iomem *); 1410static void low_water_check(void __iomem *);
@@ -2404,10 +2403,10 @@ void MoxaPortSendBreak(int port, int ms100)
2404 ofsAddr = moxa_ports[port].tableAddr; 2403 ofsAddr = moxa_ports[port].tableAddr;
2405 if (ms100) { 2404 if (ms100) {
2406 moxafunc(ofsAddr, FC_SendBreak, Magic_code); 2405 moxafunc(ofsAddr, FC_SendBreak, Magic_code);
2407 moxadelay(ms100 * (HZ / 10)); 2406 msleep(ms100 * 10);
2408 } else { 2407 } else {
2409 moxafunc(ofsAddr, FC_SendBreak, Magic_code); 2408 moxafunc(ofsAddr, FC_SendBreak, Magic_code);
2410 moxadelay(HZ / 4); /* 250 ms */ 2409 msleep(250);
2411 } 2410 }
2412 moxafunc(ofsAddr, FC_StopBreak, Magic_code); 2411 moxafunc(ofsAddr, FC_StopBreak, Magic_code);
2413} 2412}
@@ -2476,18 +2475,6 @@ static int moxa_set_serial_info(struct moxa_port *info,
2476/***************************************************************************** 2475/*****************************************************************************
2477 * Static local functions: * 2476 * Static local functions: *
2478 *****************************************************************************/ 2477 *****************************************************************************/
2479/*
2480 * moxadelay - delays a specified number ticks
2481 */
2482static void moxadelay(int tick)
2483{
2484 unsigned long st, et;
2485
2486 st = jiffies;
2487 et = st + tick;
2488 while (time_before(jiffies, et));
2489}
2490
2491static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg) 2478static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg)
2492{ 2479{
2493 2480
@@ -2535,7 +2522,7 @@ static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
2535 return -EFAULT; 2522 return -EFAULT;
2536 baseAddr = moxa_boards[cardno].basemem; 2523 baseAddr = moxa_boards[cardno].basemem;
2537 writeb(HW_reset, baseAddr + Control_reg); /* reset */ 2524 writeb(HW_reset, baseAddr + Control_reg); /* reset */
2538 moxadelay(1); /* delay 10 ms */ 2525 msleep(10);
2539 for (i = 0; i < 4096; i++) 2526 for (i = 0; i < 4096; i++)
2540 writeb(0, baseAddr + i); /* clear fix page */ 2527 writeb(0, baseAddr + i); /* clear fix page */
2541 for (i = 0; i < len; i++) 2528 for (i = 0; i < len; i++)
@@ -2713,7 +2700,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2713 for (i = 0; i < 100; i++) { 2700 for (i = 0; i < 100; i++) {
2714 if (readw(baseAddr + C218_key) == keycode) 2701 if (readw(baseAddr + C218_key) == keycode)
2715 break; 2702 break;
2716 moxadelay(1); /* delay 10 ms */ 2703 msleep(10);
2717 } 2704 }
2718 if (readw(baseAddr + C218_key) != keycode) { 2705 if (readw(baseAddr + C218_key) != keycode) {
2719 return (-1); 2706 return (-1);
@@ -2725,7 +2712,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2725 for (i = 0; i < 100; i++) { 2712 for (i = 0; i < 100; i++) {
2726 if (readw(baseAddr + C218_key) == keycode) 2713 if (readw(baseAddr + C218_key) == keycode)
2727 break; 2714 break;
2728 moxadelay(1); /* delay 10 ms */ 2715 msleep(10);
2729 } 2716 }
2730 retry++; 2717 retry++;
2731 } while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3)); 2718 } while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3));
@@ -2736,7 +2723,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2736 for (i = 0; i < 100; i++) { 2723 for (i = 0; i < 100; i++) {
2737 if (readw(baseAddr + Magic_no) == Magic_code) 2724 if (readw(baseAddr + Magic_no) == Magic_code)
2738 break; 2725 break;
2739 moxadelay(1); /* delay 10 ms */ 2726 msleep(10);
2740 } 2727 }
2741 if (readw(baseAddr + Magic_no) != Magic_code) { 2728 if (readw(baseAddr + Magic_no) != Magic_code) {
2742 return (-1); 2729 return (-1);
@@ -2746,7 +2733,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2746 for (i = 0; i < 100; i++) { 2733 for (i = 0; i < 100; i++) {
2747 if (readw(baseAddr + Magic_no) == Magic_code) 2734 if (readw(baseAddr + Magic_no) == Magic_code)
2748 break; 2735 break;
2749 moxadelay(1); /* delay 10 ms */ 2736 msleep(10);
2750 } 2737 }
2751 if (readw(baseAddr + Magic_no) != Magic_code) { 2738 if (readw(baseAddr + Magic_no) != Magic_code) {
2752 return (-1); 2739 return (-1);
@@ -2788,7 +2775,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2788 for (i = 0; i < 10; i++) { 2775 for (i = 0; i < 10; i++) {
2789 if (readw(baseAddr + C320_key) == C320_KeyCode) 2776 if (readw(baseAddr + C320_key) == C320_KeyCode)
2790 break; 2777 break;
2791 moxadelay(1); 2778 msleep(10);
2792 } 2779 }
2793 if (readw(baseAddr + C320_key) != C320_KeyCode) 2780 if (readw(baseAddr + C320_key) != C320_KeyCode)
2794 return (-1); 2781 return (-1);
@@ -2799,7 +2786,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2799 for (i = 0; i < 10; i++) { 2786 for (i = 0; i < 10; i++) {
2800 if (readw(baseAddr + C320_key) == C320_KeyCode) 2787 if (readw(baseAddr + C320_key) == C320_KeyCode)
2801 break; 2788 break;
2802 moxadelay(1); 2789 msleep(10);
2803 } 2790 }
2804 retry++; 2791 retry++;
2805 } while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3)); 2792 } while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3));
@@ -2809,7 +2796,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2809 for (i = 0; i < 600; i++) { 2796 for (i = 0; i < 600; i++) {
2810 if (readw(baseAddr + Magic_no) == Magic_code) 2797 if (readw(baseAddr + Magic_no) == Magic_code)
2811 break; 2798 break;
2812 moxadelay(1); 2799 msleep(10);
2813 } 2800 }
2814 if (readw(baseAddr + Magic_no) != Magic_code) 2801 if (readw(baseAddr + Magic_no) != Magic_code)
2815 return (-100); 2802 return (-100);
@@ -2828,7 +2815,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2828 for (i = 0; i < 500; i++) { 2815 for (i = 0; i < 500; i++) {
2829 if (readw(baseAddr + Magic_no) == Magic_code) 2816 if (readw(baseAddr + Magic_no) == Magic_code)
2830 break; 2817 break;
2831 moxadelay(1); 2818 msleep(10);
2832 } 2819 }
2833 if (readw(baseAddr + Magic_no) != Magic_code) 2820 if (readw(baseAddr + Magic_no) != Magic_code)
2834 return (-102); 2821 return (-102);
@@ -2842,7 +2829,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2842 for (i = 0; i < 600; i++) { 2829 for (i = 0; i < 600; i++) {
2843 if (readw(baseAddr + Magic_no) == Magic_code) 2830 if (readw(baseAddr + Magic_no) == Magic_code)
2844 break; 2831 break;
2845 moxadelay(1); 2832 msleep(10);
2846 } 2833 }
2847 if (readw(baseAddr + Magic_no) != Magic_code) 2834 if (readw(baseAddr + Magic_no) != Magic_code)
2848 return (-102); 2835 return (-102);
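
Aside (illustration, not from the patch): moxadelay() burned CPU by spinning on jiffies for a tick count, which only equals 10 ms per tick when HZ is 100. The conversions above hard-code the equivalent millisecond values; a HZ-independent replacement for a variable tick count would look like this hypothetical helper:

#include <linux/delay.h>
#include <linux/jiffies.h>

/* HZ-independent stand-in for a tick-based delay helper */
static void example_delay_ticks(int tick)
{
	msleep(jiffies_to_msecs(tick));    /* sleeps instead of spinning */
}
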
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 3494e3fc44bf..b37e626f4faa 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -213,14 +213,6 @@ static inline void rc_release_io_range(struct riscom_board * const bp)
213 release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1); 213 release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
214} 214}
215 215
216/* Must be called with enabled interrupts */
217static inline void rc_long_delay(unsigned long delay)
218{
219 unsigned long i;
220
221 for (i = jiffies + delay; time_after(i,jiffies); ) ;
222}
223
224/* Reset and setup CD180 chip */ 216/* Reset and setup CD180 chip */
225static void __init rc_init_CD180(struct riscom_board const * bp) 217static void __init rc_init_CD180(struct riscom_board const * bp)
226{ 218{
@@ -231,7 +223,7 @@ static void __init rc_init_CD180(struct riscom_board const * bp)
231 rc_wait_CCR(bp); /* Wait for CCR ready */ 223 rc_wait_CCR(bp); /* Wait for CCR ready */
232 rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */ 224 rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */
233 sti(); 225 sti();
234 rc_long_delay(HZ/20); /* Delay 0.05 sec */ 226 msleep(50); /* Delay 0.05 sec */
235 cli(); 227 cli();
236 rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */ 228 rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */
237 rc_out(bp, CD180_GICR, 0); /* Clear all bits */ 229 rc_out(bp, CD180_GICR, 0); /* Clear all bits */
@@ -280,7 +272,7 @@ static int __init rc_probe(struct riscom_board *bp)
280 rc_wait_CCR(bp); 272 rc_wait_CCR(bp);
281 rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */ 273 rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */
282 rc_out(bp, CD180_IER, IER_TXRDY); /* Enable tx empty intr */ 274 rc_out(bp, CD180_IER, IER_TXRDY); /* Enable tx empty intr */
283 rc_long_delay(HZ/20); 275 msleep(50);
284 irqs = probe_irq_off(irqs); 276 irqs = probe_irq_off(irqs);
285 val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */ 277 val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */
286 val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */ 278 val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index baf7234b6e66..455855631aef 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -345,18 +345,6 @@ static inline void sx_release_io_range(struct specialix_board * bp)
345} 345}
346 346
347 347
348/* Must be called with enabled interrupts */
349/* Ugly. Very ugly. Don't use this for anything else than initialization
350 code */
351static inline void sx_long_delay(unsigned long delay)
352{
353 unsigned long i;
354
355 for (i = jiffies + delay; time_after(i, jiffies); ) ;
356}
357
358
359
360/* Set the IRQ using the RTS lines that run to the PAL on the board.... */ 348/* Set the IRQ using the RTS lines that run to the PAL on the board.... */
361static int sx_set_irq ( struct specialix_board *bp) 349static int sx_set_irq ( struct specialix_board *bp)
362{ 350{
@@ -397,7 +385,7 @@ static int sx_init_CD186x(struct specialix_board * bp)
397 spin_lock_irqsave(&bp->lock, flags); 385 spin_lock_irqsave(&bp->lock, flags);
398 sx_out_off(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */ 386 sx_out_off(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */
399 spin_unlock_irqrestore(&bp->lock, flags); 387 spin_unlock_irqrestore(&bp->lock, flags);
400 sx_long_delay(HZ/20); /* Delay 0.05 sec */ 388 msleep(50); /* Delay 0.05 sec */
401 spin_lock_irqsave(&bp->lock, flags); 389 spin_lock_irqsave(&bp->lock, flags);
402 sx_out_off(bp, CD186x_GIVR, SX_ID); /* Set ID for this chip */ 390 sx_out_off(bp, CD186x_GIVR, SX_ID); /* Set ID for this chip */
403 sx_out_off(bp, CD186x_GICR, 0); /* Clear all bits */ 391 sx_out_off(bp, CD186x_GICR, 0); /* Clear all bits */
@@ -533,7 +521,7 @@ static int sx_probe(struct specialix_board *bp)
533 sx_wait_CCR(bp); 521 sx_wait_CCR(bp);
534 sx_out(bp, CD186x_CCR, CCR_TXEN); /* Enable transmitter */ 522 sx_out(bp, CD186x_CCR, CCR_TXEN); /* Enable transmitter */
535 sx_out(bp, CD186x_IER, IER_TXRDY); /* Enable tx empty intr */ 523 sx_out(bp, CD186x_IER, IER_TXRDY); /* Enable tx empty intr */
536 sx_long_delay(HZ/20); 524 msleep(50);
537 irqs = probe_irq_off(irqs); 525 irqs = probe_irq_off(irqs);
538 526
539 dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR)); 527 dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR));
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 8c73ccb8830f..93d0bb8b4c0f 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -1788,7 +1788,6 @@ static void stl_offintr(struct work_struct *work)
1788 if (tty == NULL) 1788 if (tty == NULL)
1789 return; 1789 return;
1790 1790
1791 lock_kernel();
1792 if (test_bit(ASYI_TXLOW, &portp->istate)) 1791 if (test_bit(ASYI_TXLOW, &portp->istate))
1793 tty_wakeup(tty); 1792 tty_wakeup(tty);
1794 1793
@@ -1802,7 +1801,6 @@ static void stl_offintr(struct work_struct *work)
1802 if (portp->flags & ASYNC_CHECK_CD) 1801 if (portp->flags & ASYNC_CHECK_CD)
1803 tty_hangup(tty); /* FIXME: module removal race here - AKPM */ 1802 tty_hangup(tty); /* FIXME: module removal race here - AKPM */
1804 } 1803 }
1805 unlock_kernel();
1806} 1804}
1807 1805
1808/*****************************************************************************/ 1806/*****************************************************************************/
@@ -2357,9 +2355,6 @@ static int __devinit stl_pciprobe(struct pci_dev *pdev,
2357 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) 2355 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
2358 goto err; 2356 goto err;
2359 2357
2360 dev_info(&pdev->dev, "please, report this to LKML: %x/%x/%x\n",
2361 pdev->vendor, pdev->device, pdev->class);
2362
2363 retval = pci_enable_device(pdev); 2358 retval = pci_enable_device(pdev);
2364 if (retval) 2359 if (retval)
2365 goto err; 2360 goto err;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 6650ae1c088f..edb7002a3216 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -729,10 +729,9 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
729 /* although the numbers above are not valid since long ago, the 729 /* although the numbers above are not valid since long ago, the
730 point is still up-to-date and the comment still has its value 730 point is still up-to-date and the comment still has its value
731 even if only as a historical artifact. --mj, July 1998 */ 731 even if only as a historical artifact. --mj, July 1998 */
732 vc = kmalloc(sizeof(struct vc_data), GFP_KERNEL); 732 vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
733 if (!vc) 733 if (!vc)
734 return -ENOMEM; 734 return -ENOMEM;
735 memset(vc, 0, sizeof(*vc));
736 vc_cons[currcons].d = vc; 735 vc_cons[currcons].d = vc;
737 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); 736 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
738 visual_init(vc, currcons, 1); 737 visual_init(vc, currcons, 1);
@@ -1991,8 +1990,7 @@ static int is_double_width(uint32_t ucs)
1991 { 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 }, 1990 { 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
1992 { 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD } 1991 { 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
1993 }; 1992 };
1994 return bisearch(ucs, double_width, 1993 return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
1995 sizeof(double_width) / sizeof(*double_width) - 1);
1996} 1994}
1997 1995
1998/* acquires console_sem */ 1996/* acquires console_sem */
@@ -2989,8 +2987,24 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
2989 return retval; 2987 return retval;
2990} 2988}
2991 2989
2992static int unbind_con_driver(const struct consw *csw, int first, int last, 2990/**
2993 int deflt) 2991 * unbind_con_driver - unbind a console driver
2992 * @csw: pointer to console driver to unregister
2993 * @first: first in range of consoles that @csw should be unbound from
2994 * @last: last in range of consoles that @csw should be unbound from
2995 * @deflt: should next bound console driver be default after @csw is unbound?
2996 *
2997 * To unbind a driver from all possible consoles, pass 0 as @first and
2998 * %MAX_NR_CONSOLES as @last.
2999 *
3000 * @deflt controls whether the console that ends up replacing @csw should be
3001 * the default console.
3002 *
3003 * RETURNS:
3004 * -ENODEV if @csw isn't a registered console driver or can't be unregistered
3005 * or 0 on success.
3006 */
3007int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
2994{ 3008{
2995 struct module *owner = csw->owner; 3009 struct module *owner = csw->owner;
2996 const struct consw *defcsw = NULL; 3010 const struct consw *defcsw = NULL;
@@ -3075,6 +3089,7 @@ err:
3075 return retval; 3089 return retval;
3076 3090
3077} 3091}
3092EXPORT_SYMBOL(unbind_con_driver);
3078 3093
3079static int vt_bind(struct con_driver *con) 3094static int vt_bind(struct con_driver *con)
3080{ 3095{
@@ -3491,9 +3506,6 @@ void do_blank_screen(int entering_gfx)
3491 } 3506 }
3492 return; 3507 return;
3493 } 3508 }
3494 if (blank_state != blank_normal_wait)
3495 return;
3496 blank_state = blank_off;
3497 3509
3498 /* entering graphics mode? */ 3510 /* entering graphics mode? */
3499 if (entering_gfx) { 3511 if (entering_gfx) {
@@ -3501,10 +3513,15 @@ void do_blank_screen(int entering_gfx)
3501 save_screen(vc); 3513 save_screen(vc);
3502 vc->vc_sw->con_blank(vc, -1, 1); 3514 vc->vc_sw->con_blank(vc, -1, 1);
3503 console_blanked = fg_console + 1; 3515 console_blanked = fg_console + 1;
3516 blank_state = blank_off;
3504 set_origin(vc); 3517 set_origin(vc);
3505 return; 3518 return;
3506 } 3519 }
3507 3520
3521 if (blank_state != blank_normal_wait)
3522 return;
3523 blank_state = blank_off;
3524
3508 /* don't blank graphics */ 3525 /* don't blank graphics */
3509 if (vc->vc_mode != KD_TEXT) { 3526 if (vc->vc_mode != KD_TEXT) {
3510 console_blanked = fg_console + 1; 3527 console_blanked = fg_console + 1;
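
Aside (illustration, not from the patch): the kernel-doc added above defines the contract of the newly exported unbind_con_driver(). Per that description, a module that registered its own consw and wants to detach from every virtual console would call it roughly as below; the driver name is hypothetical and the header placement of the prototype is assumed:

#include <linux/console.h>
#include <linux/vt_kern.h>

static const struct consw examplecon_sw;   /* hypothetical console driver */

static void examplecon_unbind_all(void)
{
	/* unbind from all consoles; let the next bound driver become default */
	if (unbind_con_driver(&examplecon_sw, 0, MAX_NR_CONSOLES, 1))
		printk(KERN_WARNING "examplecon: unbind failed\n");
}
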
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 53f5538c0c05..2f48ba329961 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -187,6 +187,15 @@ config PNX4008_WATCHDOG
187 187
188 Say N if you are unsure. 188 Say N if you are unsure.
189 189
190# AVR32 Architecture
191
192config AT32AP700X_WDT
193 tristate "AT32AP700x watchdog"
194 depends on WATCHDOG && CPU_AT32AP7000
195 help
196 Watchdog timer embedded into AT32AP700x devices. This will reboot
197 your system when the timeout is reached.
198
190# X86 (i386 + ia64 + x86_64) Architecture 199# X86 (i386 + ia64 + x86_64) Architecture
191 200
192config ACQUIRE_WDT 201config ACQUIRE_WDT
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index d90f649038c2..3907ec04a4e5 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -36,6 +36,9 @@ obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o
36obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o 36obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
37obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o 37obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
38 38
39# AVR32 Architecture
40obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
41
39# X86 (i386 + ia64 + x86_64) Architecture 42# X86 (i386 + ia64 + x86_64) Architecture
40obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o 43obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
41obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o 44obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
diff --git a/drivers/char/watchdog/at32ap700x_wdt.c b/drivers/char/watchdog/at32ap700x_wdt.c
new file mode 100644
index 000000000000..54a516169d07
--- /dev/null
+++ b/drivers/char/watchdog/at32ap700x_wdt.c
@@ -0,0 +1,386 @@
1/*
2 * Watchdog driver for Atmel AT32AP700X devices
3 *
4 * Copyright (C) 2005-2006 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/miscdevice.h>
16#include <linux/fs.h>
17#include <linux/platform_device.h>
18#include <linux/watchdog.h>
19#include <linux/uaccess.h>
20#include <linux/io.h>
21#include <linux/spinlock.h>
22
23#define TIMEOUT_MIN 1
24#define TIMEOUT_MAX 2
25#define TIMEOUT_DEFAULT TIMEOUT_MAX
26
27/* module parameters */
28static int timeout = TIMEOUT_DEFAULT;
29module_param(timeout, int, 0);
30MODULE_PARM_DESC(timeout,
31 "Timeout value. Limited to be 1 or 2 seconds. (default="
32 __MODULE_STRING(TIMEOUT_DEFAULT) ")");
33
34static int nowayout = WATCHDOG_NOWAYOUT;
35module_param(nowayout, int, 0);
36MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
37 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
38
39/* Watchdog registers and write/read macro */
40#define WDT_CTRL 0x00
41#define WDT_CTRL_EN 0
42#define WDT_CTRL_PSEL 8
43#define WDT_CTRL_KEY 24
44
45#define WDT_CLR 0x04
46
47#define WDT_BIT(name) (1 << WDT_##name)
48#define WDT_BF(name, value) ((value) << WDT_##name)
49
50#define wdt_readl(dev, reg) \
51 __raw_readl((dev)->regs + WDT_##reg)
52#define wdt_writel(dev, reg, value) \
53 __raw_writel((value), (dev)->regs + WDT_##reg)
54
55struct wdt_at32ap700x {
56 void __iomem *regs;
57 spinlock_t io_lock;
58 int timeout;
59 unsigned long users;
60 struct miscdevice miscdev;
61};
62
63static struct wdt_at32ap700x *wdt;
64static char expect_release;
65
66/*
67 * Disable the watchdog.
68 */
69static inline void at32_wdt_stop(void)
70{
71 unsigned long psel;
72
73 spin_lock(&wdt->io_lock);
74 psel = wdt_readl(wdt, CTRL) & WDT_BF(CTRL_PSEL, 0x0f);
75 wdt_writel(wdt, CTRL, psel | WDT_BF(CTRL_KEY, 0x55));
76 wdt_writel(wdt, CTRL, psel | WDT_BF(CTRL_KEY, 0xaa));
77 spin_unlock(&wdt->io_lock);
78}
79
80/*
81 * Enable and reset the watchdog.
82 */
83static inline void at32_wdt_start(void)
84{
85 /* 0xf is 2^16 divider = 2 sec, 0xe is 2^15 divider = 1 sec */
86 unsigned long psel = (wdt->timeout > 1) ? 0xf : 0xe;
87
88 spin_lock(&wdt->io_lock);
89 wdt_writel(wdt, CTRL, WDT_BIT(CTRL_EN)
90 | WDT_BF(CTRL_PSEL, psel)
91 | WDT_BF(CTRL_KEY, 0x55));
92 wdt_writel(wdt, CTRL, WDT_BIT(CTRL_EN)
93 | WDT_BF(CTRL_PSEL, psel)
94 | WDT_BF(CTRL_KEY, 0xaa));
95 spin_unlock(&wdt->io_lock);
96}
97
98/*
99 * Pat the watchdog timer.
100 */
101static inline void at32_wdt_pat(void)
102{
103 spin_lock(&wdt->io_lock);
104 wdt_writel(wdt, CLR, 0x42);
105 spin_unlock(&wdt->io_lock);
106}
107
108/*
109 * Watchdog device is opened, and watchdog starts running.
110 */
111static int at32_wdt_open(struct inode *inode, struct file *file)
112{
113 if (test_and_set_bit(1, &wdt->users))
114 return -EBUSY;
115
116 at32_wdt_start();
117 return nonseekable_open(inode, file);
118}
119
120/*
121 * Close the watchdog device.
122 */
123static int at32_wdt_close(struct inode *inode, struct file *file)
124{
125 if (expect_release == 42) {
126 at32_wdt_stop();
127 } else {
128 dev_dbg(wdt->miscdev.parent,
129 "Unexpected close, not stopping watchdog!\n");
130 at32_wdt_pat();
131 }
132 clear_bit(1, &wdt->users);
133 expect_release = 0;
134 return 0;
135}
136
137/*
138 * Change the watchdog time interval.
139 */
140static int at32_wdt_settimeout(int time)
141{
142 /*
143 * All counting occurs at 1 / SLOW_CLOCK (32 kHz) and max prescaler is
144 * 2 ^ 16 allowing up to 2 seconds timeout.
145 */
146 if ((time < TIMEOUT_MIN) || (time > TIMEOUT_MAX))
147 return -EINVAL;
148
149 /*
150 * Set new watchdog time. It will be used when at32_wdt_start() is
151 * called.
152 */
153 wdt->timeout = time;
154 return 0;
155}
156
157static struct watchdog_info at32_wdt_info = {
158 .identity = "at32ap700x watchdog",
159 .options = WDIOF_SETTIMEOUT |
160 WDIOF_KEEPALIVEPING |
161 WDIOF_MAGICCLOSE,
162};
163
164/*
165 * Handle commands from user-space.
166 */
167static int at32_wdt_ioctl(struct inode *inode, struct file *file,
168 unsigned int cmd, unsigned long arg)
169{
170 int ret = -ENOTTY;
171 int time;
172 void __user *argp = (void __user *)arg;
173 int __user *p = argp;
174
175 switch (cmd) {
176 case WDIOC_KEEPALIVE:
177 at32_wdt_pat();
178 ret = 0;
179 break;
180 case WDIOC_GETSUPPORT:
181 ret = copy_to_user(argp, &at32_wdt_info,
182 sizeof(at32_wdt_info)) ? -EFAULT : 0;
183 break;
184 case WDIOC_SETTIMEOUT:
185 ret = get_user(time, p);
186 if (ret)
187 break;
188 ret = at32_wdt_settimeout(time);
189 if (ret)
190 break;
191 /* Enable new time value */
192 at32_wdt_start();
193 /* fall through */
194 case WDIOC_GETTIMEOUT:
195 ret = put_user(wdt->timeout, p);
196 break;
197 case WDIOC_GETSTATUS: /* fall through */
198 case WDIOC_GETBOOTSTATUS:
199 ret = put_user(0, p);
200 break;
201 case WDIOC_SETOPTIONS:
202 ret = get_user(time, p);
203 if (ret)
204 break;
205 if (time & WDIOS_DISABLECARD)
206 at32_wdt_stop();
207 if (time & WDIOS_ENABLECARD)
208 at32_wdt_start();
209 ret = 0;
210 break;
211 }
212
213 return ret;
214}
215
216static ssize_t at32_wdt_write(struct file *file, const char __user *data,
217 size_t len, loff_t *ppos)
218{
219 /* See if we got the magic character 'V' and reload the timer */
220 if (len) {
221 if (!nowayout) {
222 size_t i;
223
224 /*
225 * note: just in case someone wrote the magic
226 * character five months ago...
227 */
228 expect_release = 0;
229
230 /*
231 * scan to see whether or not we got the magic
232 * character
233 */
234 for (i = 0; i != len; i++) {
235 char c;
236 if (get_user(c, data+i))
237 return -EFAULT;
238 if (c == 'V')
239 expect_release = 42;
240 }
241 }
242 /* someone wrote to us, we should pat the watchdog */
243 at32_wdt_pat();
244 }
245 return len;
246}
247
248static const struct file_operations at32_wdt_fops = {
249 .owner = THIS_MODULE,
250 .llseek = no_llseek,
251 .ioctl = at32_wdt_ioctl,
252 .open = at32_wdt_open,
253 .release = at32_wdt_close,
254 .write = at32_wdt_write,
255};
256
257static int __init at32_wdt_probe(struct platform_device *pdev)
258{
259 struct resource *regs;
260 int ret;
261
262 if (wdt) {
263 dev_dbg(&pdev->dev, "only 1 wdt instance supported.\n");
264 return -EBUSY;
265 }
266
267 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
268 if (!regs) {
269 dev_dbg(&pdev->dev, "missing mmio resource\n");
270 return -ENXIO;
271 }
272
273 wdt = kzalloc(sizeof(struct wdt_at32ap700x), GFP_KERNEL);
274 if (!wdt) {
275 dev_dbg(&pdev->dev, "no memory for wdt structure\n");
276 return -ENOMEM;
277 }
278
279 wdt->regs = ioremap(regs->start, regs->end - regs->start + 1);
280 if (!wdt->regs) {
281 ret = -ENOMEM;
282 dev_dbg(&pdev->dev, "could not map I/O memory\n");
283 goto err_free;
284 }
285 spin_lock_init(&wdt->io_lock);
286 wdt->users = 0;
287 wdt->miscdev.minor = WATCHDOG_MINOR;
288 wdt->miscdev.name = "watchdog";
289 wdt->miscdev.fops = &at32_wdt_fops;
290
291 if (at32_wdt_settimeout(timeout)) {
292 at32_wdt_settimeout(TIMEOUT_DEFAULT);
293 dev_dbg(&pdev->dev,
294 "default timeout invalid, set to %d sec.\n",
295 TIMEOUT_DEFAULT);
296 }
297
298 ret = misc_register(&wdt->miscdev);
299 if (ret) {
300 dev_dbg(&pdev->dev, "failed to register wdt miscdev\n");
301 goto err_iounmap;
302 }
303
304 platform_set_drvdata(pdev, wdt);
305 wdt->miscdev.parent = &pdev->dev;
306 dev_info(&pdev->dev,
307 "AT32AP700X WDT at 0x%p, timeout %d sec (nowayout=%d)\n",
308 wdt->regs, wdt->timeout, nowayout);
309
310 return 0;
311
312err_iounmap:
313 iounmap(wdt->regs);
314err_free:
315 kfree(wdt);
316 wdt = NULL;
317 return ret;
318}
319
320static int __exit at32_wdt_remove(struct platform_device *pdev)
321{
322 if (wdt && platform_get_drvdata(pdev) == wdt) {
323 /* Stop the timer before we leave */
324 if (!nowayout)
325 at32_wdt_stop();
326
327 misc_deregister(&wdt->miscdev);
328 iounmap(wdt->regs);
329 kfree(wdt);
330 wdt = NULL;
331 platform_set_drvdata(pdev, NULL);
332 }
333
334 return 0;
335}
336
337static void at32_wdt_shutdown(struct platform_device *pdev)
338{
339 at32_wdt_stop();
340}
341
342#ifdef CONFIG_PM
343static int at32_wdt_suspend(struct platform_device *pdev, pm_message_t message)
344{
345 at32_wdt_stop();
346 return 0;
347}
348
349static int at32_wdt_resume(struct platform_device *pdev)
350{
351 if (wdt->users)
352 at32_wdt_start();
353 return 0;
354}
355#else
356#define at32_wdt_suspend NULL
357#define at32_wdt_resume NULL
358#endif
359
360static struct platform_driver at32_wdt_driver = {
361 .remove = __exit_p(at32_wdt_remove),
362 .suspend = at32_wdt_suspend,
363 .resume = at32_wdt_resume,
364 .driver = {
365 .name = "at32_wdt",
366 .owner = THIS_MODULE,
367 },
368 .shutdown = at32_wdt_shutdown,
369};
370
371static int __init at32_wdt_init(void)
372{
373 return platform_driver_probe(&at32_wdt_driver, at32_wdt_probe);
374}
375module_init(at32_wdt_init);
376
377static void __exit at32_wdt_exit(void)
378{
379 platform_driver_unregister(&at32_wdt_driver);
380}
381module_exit(at32_wdt_exit);
382
383MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
384MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
385MODULE_LICENSE("GPL");
386MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
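
Aside (illustration, not part of the patch): from user space this driver is driven through the standard watchdog interface — open /dev/watchdog, pat it periodically, and write the magic 'V' before closing so at32_wdt_close() actually stops the timer instead of treating it as an unexpected close. A minimal sketch, assuming the usual device node and the <linux/watchdog.h> ioctls:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 2, i;

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);  /* driver allows 1 or 2 s */
	for (i = 0; i < 10; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);  /* pat the watchdog */
		sleep(1);
	}
	write(fd, "V", 1);   /* magic close: sets expect_release = 42 */
	close(fd);           /* watchdog is stopped, no reboot follows */
	return 0;
}
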
diff --git a/drivers/char/watchdog/ep93xx_wdt.c b/drivers/char/watchdog/ep93xx_wdt.c
index 01cf123b1616..0e4787a0bb87 100644
--- a/drivers/char/watchdog/ep93xx_wdt.c
+++ b/drivers/char/watchdog/ep93xx_wdt.c
@@ -107,10 +107,6 @@ static ssize_t
107ep93xx_wdt_write(struct file *file, const char __user *data, size_t len, 107ep93xx_wdt_write(struct file *file, const char __user *data, size_t len,
108 loff_t *ppos) 108 loff_t *ppos)
109{ 109{
110 /* Can't seek (pwrite) on this device */
111 if (*ppos != file->f_pos)
112 return -ESPIPE;
113
114 if (len) { 110 if (len) {
115 if (!nowayout) { 111 if (!nowayout) {
116 size_t i; 112 size_t i;
diff --git a/drivers/char/watchdog/mixcomwd.c b/drivers/char/watchdog/mixcomwd.c
index f35e2848aa3e..db2ccb864412 100644
--- a/drivers/char/watchdog/mixcomwd.c
+++ b/drivers/char/watchdog/mixcomwd.c
@@ -29,11 +29,18 @@
29 * - support for one more type board 29 * - support for one more type board
30 * 30 *
31 * Version 0.5 (2001/12/14) Matt Domsch <Matt_Domsch@dell.com> 31 * Version 0.5 (2001/12/14) Matt Domsch <Matt_Domsch@dell.com>
32 * - added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT 32 * - added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
33 *
34 * Version 0.6 (2002/04/12): Rob Radez <rob@osinvestor.com>
35 * - make mixcomwd_opened unsigned,
36 * removed lock_kernel/unlock_kernel from mixcomwd_release,
37 * modified ioctl a bit to conform to API
33 * 38 *
34 */ 39 */
35 40
36#define VERSION "0.5" 41#define VERSION "0.6"
42#define WATCHDOG_NAME "mixcomwd"
43#define PFX WATCHDOG_NAME ": "
37 44
38#include <linux/module.h> 45#include <linux/module.h>
39#include <linux/moduleparam.h> 46#include <linux/moduleparam.h>
@@ -49,12 +56,46 @@
49#include <asm/uaccess.h> 56#include <asm/uaccess.h>
50#include <asm/io.h> 57#include <asm/io.h>
51 58
52static int mixcomwd_ioports[] = { 0x180, 0x280, 0x380, 0x000 }; 59/*
53 60 * We have two types of cards that can be probed:
54#define MIXCOM_WATCHDOG_OFFSET 0xc10 61 * 1) The Mixcom cards: these cards can be found at addresses
62 * 0x180, 0x280, 0x380 with an additional offset of 0xc10.
63 * (Or 0xd90, 0xe90, 0xf90).
64 * 2) The FlashCOM cards: these cards can be set up at
65 * 0x300 -> 0x378, in 0x8 jumps with an offset of 0x04.
66 * (Or 0x304 -> 0x37c in 0x8 jumps).
 67 * Each card has its own ID.
68 */
55#define MIXCOM_ID 0x11 69#define MIXCOM_ID 0x11
56#define FLASHCOM_WATCHDOG_OFFSET 0x4
57#define FLASHCOM_ID 0x18 70#define FLASHCOM_ID 0x18
71static struct {
72 int ioport;
73 int id;
74} mixcomwd_io_info[] __devinitdata = {
75 /* The Mixcom cards */
76 {0x0d90, MIXCOM_ID},
77 {0x0e90, MIXCOM_ID},
78 {0x0f90, MIXCOM_ID},
79 /* The FlashCOM cards */
80 {0x0304, FLASHCOM_ID},
81 {0x030c, FLASHCOM_ID},
82 {0x0314, FLASHCOM_ID},
83 {0x031c, FLASHCOM_ID},
84 {0x0324, FLASHCOM_ID},
85 {0x032c, FLASHCOM_ID},
86 {0x0334, FLASHCOM_ID},
87 {0x033c, FLASHCOM_ID},
88 {0x0344, FLASHCOM_ID},
89 {0x034c, FLASHCOM_ID},
90 {0x0354, FLASHCOM_ID},
91 {0x035c, FLASHCOM_ID},
92 {0x0364, FLASHCOM_ID},
93 {0x036c, FLASHCOM_ID},
94 {0x0374, FLASHCOM_ID},
95 {0x037c, FLASHCOM_ID},
96 /* The end of the list */
97 {0x0000, 0},
98};
58 99
59static void mixcomwd_timerfun(unsigned long d); 100static void mixcomwd_timerfun(unsigned long d);
60 101
@@ -113,13 +154,13 @@ static int mixcomwd_release(struct inode *inode, struct file *file)
113{ 154{
114 if (expect_close == 42) { 155 if (expect_close == 42) {
115 if(mixcomwd_timer_alive) { 156 if(mixcomwd_timer_alive) {
116 printk(KERN_ERR "mixcomwd: release called while internal timer alive"); 157 printk(KERN_ERR PFX "release called while internal timer alive");
117 return -EBUSY; 158 return -EBUSY;
118 } 159 }
119 mixcomwd_timer_alive=1; 160 mixcomwd_timer_alive=1;
120 mod_timer(&mixcomwd_timer, jiffies + 5 * HZ); 161 mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
121 } else { 162 } else {
122 printk(KERN_CRIT "mixcomwd: WDT device closed unexpectedly. WDT will not stop!\n"); 163 printk(KERN_CRIT PFX "WDT device closed unexpectedly. WDT will not stop!\n");
123 } 164 }
124 165
125 clear_bit(0,&mixcomwd_opened); 166 clear_bit(0,&mixcomwd_opened);
@@ -188,8 +229,7 @@ static int mixcomwd_ioctl(struct inode *inode, struct file *file,
188 return 0; 229 return 0;
189} 230}
190 231
191static const struct file_operations mixcomwd_fops= 232static const struct file_operations mixcomwd_fops = {
192{
193 .owner = THIS_MODULE, 233 .owner = THIS_MODULE,
194 .llseek = no_llseek, 234 .llseek = no_llseek,
195 .write = mixcomwd_write, 235 .write = mixcomwd_write,
@@ -198,46 +238,30 @@ static const struct file_operations mixcomwd_fops=
198 .release = mixcomwd_release, 238 .release = mixcomwd_release,
199}; 239};
200 240
201static struct miscdevice mixcomwd_miscdev= 241static struct miscdevice mixcomwd_miscdev = {
202{
203 .minor = WATCHDOG_MINOR, 242 .minor = WATCHDOG_MINOR,
204 .name = "watchdog", 243 .name = "watchdog",
205 .fops = &mixcomwd_fops, 244 .fops = &mixcomwd_fops,
206}; 245};
207 246
208static int __init mixcomwd_checkcard(int port) 247static int __init checkcard(int port, int card_id)
209{ 248{
210 int id; 249 int id;
211 250
212 port += MIXCOM_WATCHDOG_OFFSET;
213 if (!request_region(port, 1, "MixCOM watchdog")) {
214 return 0;
215 }
216
217 id=inb_p(port) & 0x3f;
218 if(id!=MIXCOM_ID) {
219 release_region(port, 1);
220 return 0;
221 }
222 return port;
223}
224
225static int __init flashcom_checkcard(int port)
226{
227 int id;
228
229 port += FLASHCOM_WATCHDOG_OFFSET;
230 if (!request_region(port, 1, "MixCOM watchdog")) { 251 if (!request_region(port, 1, "MixCOM watchdog")) {
231 return 0; 252 return 0;
232 } 253 }
233 254
234 id=inb_p(port); 255 id=inb_p(port);
235 if(id!=FLASHCOM_ID) { 256 if (card_id==MIXCOM_ID)
257 id &= 0x3f;
258
259 if (id!=card_id) {
236 release_region(port, 1); 260 release_region(port, 1);
237 return 0; 261 return 0;
238 } 262 }
239 return port; 263 return 1;
240 } 264}
241 265
242static int __init mixcomwd_init(void) 266static int __init mixcomwd_init(void)
243{ 267{
@@ -245,50 +269,50 @@ static int __init mixcomwd_init(void)
245 int ret; 269 int ret;
246 int found=0; 270 int found=0;
247 271
248 for (i = 0; !found && mixcomwd_ioports[i] != 0; i++) { 272 for (i = 0; !found && mixcomwd_io_info[i].ioport != 0; i++) {
249 watchdog_port = mixcomwd_checkcard(mixcomwd_ioports[i]); 273 if (checkcard(mixcomwd_io_info[i].ioport,
250 if (watchdog_port) { 274 mixcomwd_io_info[i].id)) {
251 found = 1;
252 }
253 }
254
255 /* The FlashCOM card can be set up at 0x300 -> 0x378, in 0x8 jumps */
256 for (i = 0x300; !found && i < 0x380; i+=0x8) {
257 watchdog_port = flashcom_checkcard(i);
258 if (watchdog_port) {
259 found = 1; 275 found = 1;
276 watchdog_port = mixcomwd_io_info[i].ioport;
260 } 277 }
261 } 278 }
262 279
263 if (!found) { 280 if (!found) {
264 printk("mixcomwd: No card detected, or port not available.\n"); 281 printk(KERN_ERR PFX "No card detected, or port not available.\n");
265 return -ENODEV; 282 return -ENODEV;
266 } 283 }
267 284
268 ret = misc_register(&mixcomwd_miscdev); 285 ret = misc_register(&mixcomwd_miscdev);
269 if (ret) 286 if (ret)
270 { 287 {
271 release_region(watchdog_port, 1); 288 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
272 return ret; 289 WATCHDOG_MINOR, ret);
290 goto error_misc_register_watchdog;
273 } 291 }
274 292
275 printk(KERN_INFO "MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",VERSION,watchdog_port); 293 printk(KERN_INFO "MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",
294 VERSION, watchdog_port);
276 295
277 return 0; 296 return 0;
297
298error_misc_register_watchdog:
299 release_region(watchdog_port, 1);
300 watchdog_port = 0x0000;
301 return ret;
278} 302}
279 303
280static void __exit mixcomwd_exit(void) 304static void __exit mixcomwd_exit(void)
281{ 305{
282 if (!nowayout) { 306 if (!nowayout) {
283 if(mixcomwd_timer_alive) { 307 if(mixcomwd_timer_alive) {
284 printk(KERN_WARNING "mixcomwd: I quit now, hardware will" 308 printk(KERN_WARNING PFX "I quit now, hardware will"
285 " probably reboot!\n"); 309 " probably reboot!\n");
286 del_timer_sync(&mixcomwd_timer); 310 del_timer_sync(&mixcomwd_timer);
287 mixcomwd_timer_alive=0; 311 mixcomwd_timer_alive=0;
288 } 312 }
289 } 313 }
290 release_region(watchdog_port,1);
291 misc_deregister(&mixcomwd_miscdev); 314 misc_deregister(&mixcomwd_miscdev);
315 release_region(watchdog_port,1);
292} 316}
293 317
294module_init(mixcomwd_init); 318module_init(mixcomwd_init);
@@ -296,5 +320,6 @@ module_exit(mixcomwd_exit);
296 320
297MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>"); 321MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>");
298MODULE_DESCRIPTION("MixCom Watchdog driver"); 322MODULE_DESCRIPTION("MixCom Watchdog driver");
323MODULE_VERSION(VERSION);
299MODULE_LICENSE("GPL"); 324MODULE_LICENSE("GPL");
300MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 325MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
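The mixcomwd rewrite above replaces two near-identical probe helpers with a single checkcard() driven by the mixcomwd_io_info[] table. The sketch below restates that table-driven probe pattern in isolation; the two ID values and the request_region()/inb_p() sequence come from the hunk above, everything else is hypothetical.

/* Sketch of the table-driven ISA probe used above (hypothetical names). */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/io.h>

struct probe_entry {
	int ioport;
	int id;
};

static const struct probe_entry probe_table[] = {
	{ 0x0d90, 0x11 },	/* MixCOM-style card */
	{ 0x0304, 0x18 },	/* FlashCOM-style card */
	{ 0x0000, 0 },		/* terminator */
};

static int __init probe_one(int port, int card_id)
{
	int id;

	if (!request_region(port, 1, "example watchdog"))
		return 0;

	id = inb_p(port);
	if (card_id == 0x11)		/* MixCOM IDs use the low 6 bits */
		id &= 0x3f;
	if (id != card_id) {
		release_region(port, 1);
		return 0;
	}
	return 1;			/* region stays claimed by the caller */
}

static int __init probe_all(void)
{
	int i;

	for (i = 0; probe_table[i].ioport != 0; i++)
		if (probe_one(probe_table[i].ioport, probe_table[i].id))
			return probe_table[i].ioport;
	return 0;			/* nothing found */
}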
diff --git a/drivers/char/watchdog/pnx4008_wdt.c b/drivers/char/watchdog/pnx4008_wdt.c
index 5991add702b0..22f8873dd092 100644
--- a/drivers/char/watchdog/pnx4008_wdt.c
+++ b/drivers/char/watchdog/pnx4008_wdt.c
@@ -148,10 +148,6 @@ static ssize_t
148pnx4008_wdt_write(struct file *file, const char *data, size_t len, 148pnx4008_wdt_write(struct file *file, const char *data, size_t len,
149 loff_t * ppos) 149 loff_t * ppos)
150{ 150{
151 /* Can't seek (pwrite) on this device */
152 if (ppos != &file->f_pos)
153 return -ESPIPE;
154
155 if (len) { 151 if (len) {
156 if (!nowayout) { 152 if (!nowayout) {
157 size_t i; 153 size_t i;
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 20fa29ca7404..50430bced2f2 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -92,6 +92,7 @@ typedef enum close_state {
92 92
93static DECLARE_MUTEX(open_lock); 93static DECLARE_MUTEX(open_lock);
94 94
95static struct device *wdt_dev; /* platform device attached to */
95static struct resource *wdt_mem; 96static struct resource *wdt_mem;
96static struct resource *wdt_irq; 97static struct resource *wdt_irq;
97static struct clk *wdt_clock; 98static struct clk *wdt_clock;
@@ -180,7 +181,7 @@ static int s3c2410wdt_set_heartbeat(int timeout)
180 } 181 }
181 182
182 if ((count / divisor) >= 0x10000) { 183 if ((count / divisor) >= 0x10000) {
183 printk(KERN_ERR PFX "timeout %d too big\n", timeout); 184 dev_err(wdt_dev, "timeout %d too big\n", timeout);
184 return -EINVAL; 185 return -EINVAL;
185 } 186 }
186 } 187 }
@@ -233,7 +234,7 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
233 if (allow_close == CLOSE_STATE_ALLOW) { 234 if (allow_close == CLOSE_STATE_ALLOW) {
234 s3c2410wdt_stop(); 235 s3c2410wdt_stop();
235 } else { 236 } else {
236 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); 237 dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n");
237 s3c2410wdt_keepalive(); 238 s3c2410wdt_keepalive();
238 } 239 }
239 240
@@ -338,7 +339,7 @@ static struct miscdevice s3c2410wdt_miscdev = {
338 339
339static irqreturn_t s3c2410wdt_irq(int irqno, void *param) 340static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
340{ 341{
341 printk(KERN_INFO PFX "Watchdog timer expired!\n"); 342 dev_info(wdt_dev, "watchdog timer expired (irq)\n");
342 343
343 s3c2410wdt_keepalive(); 344 s3c2410wdt_keepalive();
344 return IRQ_HANDLED; 345 return IRQ_HANDLED;
@@ -348,31 +349,36 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
348static int s3c2410wdt_probe(struct platform_device *pdev) 349static int s3c2410wdt_probe(struct platform_device *pdev)
349{ 350{
350 struct resource *res; 351 struct resource *res;
352 struct device *dev;
353 unsigned int wtcon;
351 int started = 0; 354 int started = 0;
352 int ret; 355 int ret;
353 int size; 356 int size;
354 357
355 DBG("%s: probe=%p\n", __FUNCTION__, pdev); 358 DBG("%s: probe=%p\n", __FUNCTION__, pdev);
356 359
360 dev = &pdev->dev;
361 wdt_dev = &pdev->dev;
362
357 /* get the memory region for the watchdog timer */ 363 /* get the memory region for the watchdog timer */
358 364
359 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 365 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
360 if (res == NULL) { 366 if (res == NULL) {
361 printk(KERN_INFO PFX "failed to get memory region resouce\n"); 367 dev_err(dev, "no memory resource specified\n");
362 return -ENOENT; 368 return -ENOENT;
363 } 369 }
364 370
365 size = (res->end-res->start)+1; 371 size = (res->end-res->start)+1;
366 wdt_mem = request_mem_region(res->start, size, pdev->name); 372 wdt_mem = request_mem_region(res->start, size, pdev->name);
367 if (wdt_mem == NULL) { 373 if (wdt_mem == NULL) {
368 printk(KERN_INFO PFX "failed to get memory region\n"); 374 dev_err(dev, "failed to get memory region\n");
369 ret = -ENOENT; 375 ret = -ENOENT;
370 goto err_req; 376 goto err_req;
371 } 377 }
372 378
373 wdt_base = ioremap(res->start, size); 379 wdt_base = ioremap(res->start, size);
374 if (wdt_base == 0) { 380 if (wdt_base == 0) {
375 printk(KERN_INFO PFX "failed to ioremap() region\n"); 381 dev_err(dev, "failed to ioremap() region\n");
376 ret = -EINVAL; 382 ret = -EINVAL;
377 goto err_req; 383 goto err_req;
378 } 384 }
@@ -381,20 +387,20 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
381 387
382 wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 388 wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
383 if (wdt_irq == NULL) { 389 if (wdt_irq == NULL) {
384 printk(KERN_INFO PFX "failed to get irq resource\n"); 390 dev_err(dev, "no irq resource specified\n");
385 ret = -ENOENT; 391 ret = -ENOENT;
386 goto err_map; 392 goto err_map;
387 } 393 }
388 394
389 ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev); 395 ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
390 if (ret != 0) { 396 if (ret != 0) {
391 printk(KERN_INFO PFX "failed to install irq (%d)\n", ret); 397 dev_err(dev, "failed to install irq (%d)\n", ret);
392 goto err_map; 398 goto err_map;
393 } 399 }
394 400
395 wdt_clock = clk_get(&pdev->dev, "watchdog"); 401 wdt_clock = clk_get(&pdev->dev, "watchdog");
396 if (IS_ERR(wdt_clock)) { 402 if (IS_ERR(wdt_clock)) {
397 printk(KERN_INFO PFX "failed to find watchdog clock source\n"); 403 dev_err(dev, "failed to find watchdog clock source\n");
398 ret = PTR_ERR(wdt_clock); 404 ret = PTR_ERR(wdt_clock);
399 goto err_irq; 405 goto err_irq;
400 } 406 }
@@ -408,22 +414,22 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
408 started = s3c2410wdt_set_heartbeat(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); 414 started = s3c2410wdt_set_heartbeat(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
409 415
410 if (started == 0) { 416 if (started == 0) {
411 printk(KERN_INFO PFX "tmr_margin value out of range, default %d used\n", 417 dev_info(dev,"tmr_margin value out of range, default %d used\n",
412 CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); 418 CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
413 } else { 419 } else {
414 printk(KERN_INFO PFX "default timer value is out of range, cannot start\n"); 420 dev_info(dev, "default timer value is out of range, cannot start\n");
415 } 421 }
416 } 422 }
417 423
418 ret = misc_register(&s3c2410wdt_miscdev); 424 ret = misc_register(&s3c2410wdt_miscdev);
419 if (ret) { 425 if (ret) {
420 printk (KERN_ERR PFX "cannot register miscdev on minor=%d (%d)\n", 426 dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
421 WATCHDOG_MINOR, ret); 427 WATCHDOG_MINOR, ret);
422 goto err_clk; 428 goto err_clk;
423 } 429 }
424 430
425 if (tmr_atboot && started == 0) { 431 if (tmr_atboot && started == 0) {
426 printk(KERN_INFO PFX "Starting Watchdog Timer\n"); 432 dev_info(dev, "starting watchdog timer\n");
427 s3c2410wdt_start(); 433 s3c2410wdt_start();
428 } else if (!tmr_atboot) { 434 } else if (!tmr_atboot) {
429 /* if we're not enabling the watchdog, then ensure it is 435 /* if we're not enabling the watchdog, then ensure it is
@@ -433,6 +439,15 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
433 s3c2410wdt_stop(); 439 s3c2410wdt_stop();
434 } 440 }
435 441
442 /* print out a statement of readiness */
443
444 wtcon = readl(wdt_base + S3C2410_WTCON);
445
446 dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
447 (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
448 (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis",
449 (wtcon & S3C2410_WTCON_INTEN) ? "" : "en");
450
436 return 0; 451 return 0;
437 452
438 err_clk: 453 err_clk:
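Most of the churn in the s3c2410_wdt diff swaps printk(KERN_xxx PFX ...) for the dev_*() helpers bound to the platform device. A minimal before/after sketch of that conversion (illustrative only, not driver code):

/* Illustrative before/after of the printk -> dev_err() conversion. */
#include <linux/device.h>
#include <linux/kernel.h>

#define PFX "example-wdt: "

static void report_bad_timeout(struct device *dev, int timeout)
{
	/* old style: every message carries a hand-rolled prefix */
	printk(KERN_ERR PFX "timeout %d too big\n", timeout);

	/* new style: dev_err() prefixes driver and device names itself,
	 * so the line identifies the exact device instance */
	dev_err(dev, "timeout %d too big\n", timeout);
}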
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7b622300d0e5..804875de5801 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -1906,6 +1906,7 @@ static void do_edac_check(void)
1906 1906
1907static int edac_kernel_thread(void *arg) 1907static int edac_kernel_thread(void *arg)
1908{ 1908{
1909 set_freezable();
1909 while (!kthread_should_stop()) { 1910 while (!kthread_should_stop()) {
1910 do_edac_check(); 1911 do_edac_check();
1911 1912
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 7eaae3834e15..275d392eca61 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -96,6 +96,10 @@ static int __devinit lm70_probe(struct spi_device *spi)
96 struct lm70 *p_lm70; 96 struct lm70 *p_lm70;
97 int status; 97 int status;
98 98
99 /* signaling is SPI_MODE_0 on a 3-wire link (shared SI/SO) */
100 if ((spi->mode & (SPI_CPOL|SPI_CPHA)) || !(spi->mode & SPI_3WIRE))
101 return -EINVAL;
102
99 p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL); 103 p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
100 if (!p_lm70) 104 if (!p_lm70)
101 return -ENOMEM; 105 return -ENOMEM;
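The new lm70 probe check refuses to bind unless the SPI link is described as mode 0 with the 3-wire flag set. A hedged sketch of the corresponding board-side registration follows; the bus number, chip select and clock rate are invented for illustration.

/* Hypothetical board table satisfying the lm70 mode check above. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		.modalias	= "lm70",
		.max_speed_hz	= 100 * 1000,	/* invented rate */
		.bus_num	= 0,		/* invented bus */
		.chip_select	= 1,		/* invented chip select */
		/* mode 0 (CPOL=0, CPHA=0) plus the shared SI/SO wire */
		.mode		= SPI_MODE_0 | SPI_3WIRE,
	},
};

static int __init example_board_spi_init(void)
{
	return spi_register_board_info(example_spi_board_info,
				       ARRAY_SIZE(example_spi_board_info));
}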
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 03188d277af1..17cecf1ea797 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -630,7 +630,7 @@ static struct i2c_adapter pmcmsptwi_adapter = {
630static struct platform_driver pmcmsptwi_driver = { 630static struct platform_driver pmcmsptwi_driver = {
631 .probe = pmcmsptwi_probe, 631 .probe = pmcmsptwi_probe,
632 .remove = __devexit_p(pmcmsptwi_remove), 632 .remove = __devexit_p(pmcmsptwi_remove),
633 .driver { 633 .driver = {
634 .name = DRV_NAME, 634 .name = DRV_NAME,
635 .owner = THIS_MODULE, 635 .owner = THIS_MODULE,
636 }, 636 },
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 3944e889cb21..2e1c24f671cf 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -153,4 +153,14 @@ config SENSORS_TSL2550
153 This driver can also be built as a module. If so, the module 153 This driver can also be built as a module. If so, the module
154 will be called tsl2550. 154 will be called tsl2550.
155 155
156config MENELAUS
157 bool "TWL92330/Menelaus PM chip"
158 depends on I2C=y && ARCH_OMAP24XX
159 help
160 If you say yes here you get support for the Texas Instruments
 161 TWL92330/Menelaus Power Management chip. This includes voltage
 162 regulators, dual-slot memory card transceivers, a real-time clock
163 and other features that are often used in portable devices like
164 cell phones and PDAs.
165
156endmenu 166endmenu
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index d8cbeb3f4b63..ca924e105959 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
13obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o 13obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
14obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o 14obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
15obj-$(CONFIG_TPS65010) += tps65010.o 15obj-$(CONFIG_TPS65010) += tps65010.o
16obj-$(CONFIG_MENELAUS) += menelaus.o
16obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o 17obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
17 18
18ifeq ($(CONFIG_I2C_DEBUG_CHIP),y) 19ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
diff --git a/drivers/i2c/chips/menelaus.c b/drivers/i2c/chips/menelaus.c
new file mode 100644
index 000000000000..48a7e2f0bdd3
--- /dev/null
+++ b/drivers/i2c/chips/menelaus.c
@@ -0,0 +1,1281 @@
1#define DEBUG
2/*
3 * Copyright (C) 2004 Texas Instruments, Inc.
4 *
 5 * Some parts based on tps65010.c:
6 * Copyright (C) 2004 Texas Instruments and
7 * Copyright (C) 2004-2005 David Brownell
8 *
9 * Some parts based on tlv320aic24.c:
10 * Copyright (C) by Kai Svahn <kai.svahn@nokia.com>
11 *
12 * Changes for interrupt handling and clean-up by
13 * Tony Lindgren <tony@atomide.com> and Imre Deak <imre.deak@nokia.com>
14 * Cleanup and generalized support for voltage setting by
15 * Juha Yrjola
16 * Added support for controlling VCORE and regulator sleep states,
17 * Amit Kucheria <amit.kucheria@nokia.com>
18 * Copyright (C) 2005, 2006 Nokia Corporation
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 */
34
35#include <linux/module.h>
36#include <linux/i2c.h>
37#include <linux/interrupt.h>
38#include <linux/sched.h>
39#include <linux/mutex.h>
40#include <linux/workqueue.h>
41#include <linux/delay.h>
42#include <linux/rtc.h>
43#include <linux/bcd.h>
44
45#include <asm/mach-types.h>
46#include <asm/mach/irq.h>
47
48#include <asm/arch/gpio.h>
49#include <asm/arch/menelaus.h>
50
51#define DRIVER_NAME "menelaus"
52
 53#define pr_err(fmt, arg...) printk(KERN_ERR DRIVER_NAME ": " fmt "\n", ## arg)
54
55#define MENELAUS_I2C_ADDRESS 0x72
56
57#define MENELAUS_REV 0x01
58#define MENELAUS_VCORE_CTRL1 0x02
59#define MENELAUS_VCORE_CTRL2 0x03
60#define MENELAUS_VCORE_CTRL3 0x04
61#define MENELAUS_VCORE_CTRL4 0x05
62#define MENELAUS_VCORE_CTRL5 0x06
63#define MENELAUS_DCDC_CTRL1 0x07
64#define MENELAUS_DCDC_CTRL2 0x08
65#define MENELAUS_DCDC_CTRL3 0x09
66#define MENELAUS_LDO_CTRL1 0x0A
67#define MENELAUS_LDO_CTRL2 0x0B
68#define MENELAUS_LDO_CTRL3 0x0C
69#define MENELAUS_LDO_CTRL4 0x0D
70#define MENELAUS_LDO_CTRL5 0x0E
71#define MENELAUS_LDO_CTRL6 0x0F
72#define MENELAUS_LDO_CTRL7 0x10
73#define MENELAUS_LDO_CTRL8 0x11
74#define MENELAUS_SLEEP_CTRL1 0x12
75#define MENELAUS_SLEEP_CTRL2 0x13
76#define MENELAUS_DEVICE_OFF 0x14
77#define MENELAUS_OSC_CTRL 0x15
78#define MENELAUS_DETECT_CTRL 0x16
79#define MENELAUS_INT_MASK1 0x17
80#define MENELAUS_INT_MASK2 0x18
81#define MENELAUS_INT_STATUS1 0x19
82#define MENELAUS_INT_STATUS2 0x1A
83#define MENELAUS_INT_ACK1 0x1B
84#define MENELAUS_INT_ACK2 0x1C
85#define MENELAUS_GPIO_CTRL 0x1D
86#define MENELAUS_GPIO_IN 0x1E
87#define MENELAUS_GPIO_OUT 0x1F
88#define MENELAUS_BBSMS 0x20
89#define MENELAUS_RTC_CTRL 0x21
90#define MENELAUS_RTC_UPDATE 0x22
91#define MENELAUS_RTC_SEC 0x23
92#define MENELAUS_RTC_MIN 0x24
93#define MENELAUS_RTC_HR 0x25
94#define MENELAUS_RTC_DAY 0x26
95#define MENELAUS_RTC_MON 0x27
96#define MENELAUS_RTC_YR 0x28
97#define MENELAUS_RTC_WKDAY 0x29
98#define MENELAUS_RTC_AL_SEC 0x2A
99#define MENELAUS_RTC_AL_MIN 0x2B
100#define MENELAUS_RTC_AL_HR 0x2C
101#define MENELAUS_RTC_AL_DAY 0x2D
102#define MENELAUS_RTC_AL_MON 0x2E
103#define MENELAUS_RTC_AL_YR 0x2F
104#define MENELAUS_RTC_COMP_MSB 0x30
105#define MENELAUS_RTC_COMP_LSB 0x31
106#define MENELAUS_S1_PULL_EN 0x32
107#define MENELAUS_S1_PULL_DIR 0x33
108#define MENELAUS_S2_PULL_EN 0x34
109#define MENELAUS_S2_PULL_DIR 0x35
110#define MENELAUS_MCT_CTRL1 0x36
111#define MENELAUS_MCT_CTRL2 0x37
112#define MENELAUS_MCT_CTRL3 0x38
113#define MENELAUS_MCT_PIN_ST 0x39
114#define MENELAUS_DEBOUNCE1 0x3A
115
116#define IH_MENELAUS_IRQS 12
117#define MENELAUS_MMC_S1CD_IRQ 0 /* MMC slot 1 card change */
118#define MENELAUS_MMC_S2CD_IRQ 1 /* MMC slot 2 card change */
119#define MENELAUS_MMC_S1D1_IRQ 2 /* MMC DAT1 low in slot 1 */
120#define MENELAUS_MMC_S2D1_IRQ 3 /* MMC DAT1 low in slot 2 */
121#define MENELAUS_LOWBAT_IRQ 4 /* Low battery */
122#define MENELAUS_HOTDIE_IRQ 5 /* Hot die detect */
123#define MENELAUS_UVLO_IRQ 6 /* UVLO detect */
124#define MENELAUS_TSHUT_IRQ 7 /* Thermal shutdown */
125#define MENELAUS_RTCTMR_IRQ 8 /* RTC timer */
126#define MENELAUS_RTCALM_IRQ 9 /* RTC alarm */
127#define MENELAUS_RTCERR_IRQ 10 /* RTC error */
128#define MENELAUS_PSHBTN_IRQ 11 /* Push button */
129#define MENELAUS_RESERVED12_IRQ 12 /* Reserved */
130#define MENELAUS_RESERVED13_IRQ 13 /* Reserved */
131#define MENELAUS_RESERVED14_IRQ 14 /* Reserved */
132#define MENELAUS_RESERVED15_IRQ 15 /* Reserved */
133
134static void menelaus_work(struct work_struct *_menelaus);
135
136struct menelaus_chip {
137 struct mutex lock;
138 struct i2c_client *client;
139 struct work_struct work;
140#ifdef CONFIG_RTC_DRV_TWL92330
141 struct rtc_device *rtc;
142 u8 rtc_control;
143 unsigned uie:1;
144#endif
145 unsigned vcore_hw_mode:1;
146 u8 mask1, mask2;
147 void (*handlers[16])(struct menelaus_chip *);
148 void (*mmc_callback)(void *data, u8 mask);
149 void *mmc_callback_data;
150};
151
152static struct menelaus_chip *the_menelaus;
153
154static int menelaus_write_reg(int reg, u8 value)
155{
156 int val = i2c_smbus_write_byte_data(the_menelaus->client, reg, value);
157
158 if (val < 0) {
159 pr_err("write error");
160 return val;
161 }
162
163 return 0;
164}
165
166static int menelaus_read_reg(int reg)
167{
168 int val = i2c_smbus_read_byte_data(the_menelaus->client, reg);
169
170 if (val < 0)
171 pr_err("read error");
172
173 return val;
174}
175
176static int menelaus_enable_irq(int irq)
177{
178 if (irq > 7) {
179 irq -= 8;
180 the_menelaus->mask2 &= ~(1 << irq);
181 return menelaus_write_reg(MENELAUS_INT_MASK2,
182 the_menelaus->mask2);
183 } else {
184 the_menelaus->mask1 &= ~(1 << irq);
185 return menelaus_write_reg(MENELAUS_INT_MASK1,
186 the_menelaus->mask1);
187 }
188}
189
190static int menelaus_disable_irq(int irq)
191{
192 if (irq > 7) {
193 irq -= 8;
194 the_menelaus->mask2 |= (1 << irq);
195 return menelaus_write_reg(MENELAUS_INT_MASK2,
196 the_menelaus->mask2);
197 } else {
198 the_menelaus->mask1 |= (1 << irq);
199 return menelaus_write_reg(MENELAUS_INT_MASK1,
200 the_menelaus->mask1);
201 }
202}
203
204static int menelaus_ack_irq(int irq)
205{
206 if (irq > 7)
207 return menelaus_write_reg(MENELAUS_INT_ACK2, 1 << (irq - 8));
208 else
209 return menelaus_write_reg(MENELAUS_INT_ACK1, 1 << irq);
210}
211
212/* Adds a handler for an interrupt. Does not run in interrupt context */
213static int menelaus_add_irq_work(int irq,
214 void (*handler)(struct menelaus_chip *))
215{
216 int ret = 0;
217
218 mutex_lock(&the_menelaus->lock);
219 the_menelaus->handlers[irq] = handler;
220 ret = menelaus_enable_irq(irq);
221 mutex_unlock(&the_menelaus->lock);
222
223 return ret;
224}
225
226/* Removes handler for an interrupt */
227static int menelaus_remove_irq_work(int irq)
228{
229 int ret = 0;
230
231 mutex_lock(&the_menelaus->lock);
232 ret = menelaus_disable_irq(irq);
233 the_menelaus->handlers[irq] = NULL;
234 mutex_unlock(&the_menelaus->lock);
235
236 return ret;
237}
238
239/*
240 * Gets scheduled when a card detect interrupt happens. Note that in some cases
 241 * this line is wired to the card cover switch rather than the card detect switch
242 * in each slot. In this case the cards are not seen by menelaus.
243 * FIXME: Add handling for D1 too
244 */
245static void menelaus_mmc_cd_work(struct menelaus_chip *menelaus_hw)
246{
247 int reg;
248 unsigned char card_mask = 0;
249
250 reg = menelaus_read_reg(MENELAUS_MCT_PIN_ST);
251 if (reg < 0)
252 return;
253
254 if (!(reg & 0x1))
255 card_mask |= (1 << 0);
256
257 if (!(reg & 0x2))
258 card_mask |= (1 << 1);
259
260 if (menelaus_hw->mmc_callback)
261 menelaus_hw->mmc_callback(menelaus_hw->mmc_callback_data,
262 card_mask);
263}
264
265/*
266 * Toggles the MMC slots between open-drain and push-pull mode.
267 */
268int menelaus_set_mmc_opendrain(int slot, int enable)
269{
270 int ret, val;
271
272 if (slot != 1 && slot != 2)
273 return -EINVAL;
274 mutex_lock(&the_menelaus->lock);
275 ret = menelaus_read_reg(MENELAUS_MCT_CTRL1);
276 if (ret < 0) {
277 mutex_unlock(&the_menelaus->lock);
278 return ret;
279 }
280 val = ret;
281 if (slot == 1) {
282 if (enable)
283 val |= 1 << 2;
284 else
285 val &= ~(1 << 2);
286 } else {
287 if (enable)
288 val |= 1 << 3;
289 else
290 val &= ~(1 << 3);
291 }
292 ret = menelaus_write_reg(MENELAUS_MCT_CTRL1, val);
293 mutex_unlock(&the_menelaus->lock);
294
295 return ret;
296}
297EXPORT_SYMBOL(menelaus_set_mmc_opendrain);
298
299int menelaus_set_slot_sel(int enable)
300{
301 int ret;
302
303 mutex_lock(&the_menelaus->lock);
304 ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
305 if (ret < 0)
306 goto out;
307 ret |= 0x02;
308 if (enable)
309 ret |= 1 << 5;
310 else
311 ret &= ~(1 << 5);
312 ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
313out:
314 mutex_unlock(&the_menelaus->lock);
315 return ret;
316}
317EXPORT_SYMBOL(menelaus_set_slot_sel);
318
319int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en)
320{
321 int ret, val;
322
323 if (slot != 1 && slot != 2)
324 return -EINVAL;
325 if (power >= 3)
326 return -EINVAL;
327
328 mutex_lock(&the_menelaus->lock);
329
330 ret = menelaus_read_reg(MENELAUS_MCT_CTRL2);
331 if (ret < 0)
332 goto out;
333 val = ret;
334 if (slot == 1) {
335 if (cd_en)
336 val |= (1 << 4) | (1 << 6);
337 else
338 val &= ~((1 << 4) | (1 << 6));
339 } else {
340 if (cd_en)
341 val |= (1 << 5) | (1 << 7);
342 else
343 val &= ~((1 << 5) | (1 << 7));
344 }
345 ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, val);
346 if (ret < 0)
347 goto out;
348
349 ret = menelaus_read_reg(MENELAUS_MCT_CTRL3);
350 if (ret < 0)
351 goto out;
352 val = ret;
353 if (slot == 1) {
354 if (enable)
355 val |= 1 << 0;
356 else
357 val &= ~(1 << 0);
358 } else {
359 int b;
360
 361 if (enable)
 362 val |= 1 << 1;
 363 else
 364 val &= ~(1 << 1);
365 b = menelaus_read_reg(MENELAUS_MCT_CTRL2);
366 b &= ~0x03;
367 b |= power;
368 ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, b);
369 if (ret < 0)
370 goto out;
371 }
372 /* Disable autonomous shutdown */
373 val &= ~(0x03 << 2);
374 ret = menelaus_write_reg(MENELAUS_MCT_CTRL3, val);
375out:
376 mutex_unlock(&the_menelaus->lock);
377 return ret;
378}
379EXPORT_SYMBOL(menelaus_set_mmc_slot);
380
381int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask),
382 void *data)
383{
384 int ret = 0;
385
386 the_menelaus->mmc_callback_data = data;
387 the_menelaus->mmc_callback = callback;
388 ret = menelaus_add_irq_work(MENELAUS_MMC_S1CD_IRQ,
389 menelaus_mmc_cd_work);
390 if (ret < 0)
391 return ret;
392 ret = menelaus_add_irq_work(MENELAUS_MMC_S2CD_IRQ,
393 menelaus_mmc_cd_work);
394 if (ret < 0)
395 return ret;
396 ret = menelaus_add_irq_work(MENELAUS_MMC_S1D1_IRQ,
397 menelaus_mmc_cd_work);
398 if (ret < 0)
399 return ret;
400 ret = menelaus_add_irq_work(MENELAUS_MMC_S2D1_IRQ,
401 menelaus_mmc_cd_work);
402
403 return ret;
404}
405EXPORT_SYMBOL(menelaus_register_mmc_callback);
406
407void menelaus_unregister_mmc_callback(void)
408{
409 menelaus_remove_irq_work(MENELAUS_MMC_S1CD_IRQ);
410 menelaus_remove_irq_work(MENELAUS_MMC_S2CD_IRQ);
411 menelaus_remove_irq_work(MENELAUS_MMC_S1D1_IRQ);
412 menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ);
413
414 the_menelaus->mmc_callback = NULL;
415 the_menelaus->mmc_callback_data = 0;
416}
417EXPORT_SYMBOL(menelaus_unregister_mmc_callback);
418
419struct menelaus_vtg {
420 const char *name;
421 u8 vtg_reg;
422 u8 vtg_shift;
423 u8 vtg_bits;
424 u8 mode_reg;
425};
426
427struct menelaus_vtg_value {
428 u16 vtg;
429 u16 val;
430};
431
432static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV,
433 int vtg_val, int mode)
434{
435 int val, ret;
436 struct i2c_client *c = the_menelaus->client;
437
438 mutex_lock(&the_menelaus->lock);
439 if (vtg == 0)
440 goto set_voltage;
441
442 ret = menelaus_read_reg(vtg->vtg_reg);
443 if (ret < 0)
444 goto out;
445 val = ret & ~(((1 << vtg->vtg_bits) - 1) << vtg->vtg_shift);
446 val |= vtg_val << vtg->vtg_shift;
447
448 dev_dbg(&c->dev, "Setting voltage '%s'"
449 "to %d mV (reg 0x%02x, val 0x%02x)\n",
450 vtg->name, mV, vtg->vtg_reg, val);
451
452 ret = menelaus_write_reg(vtg->vtg_reg, val);
453 if (ret < 0)
454 goto out;
455set_voltage:
456 ret = menelaus_write_reg(vtg->mode_reg, mode);
457out:
458 mutex_unlock(&the_menelaus->lock);
459 if (ret == 0) {
460 /* Wait for voltage to stabilize */
461 msleep(1);
462 }
463 return ret;
464}
465
466static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl,
467 int n)
468{
469 int i;
470
471 for (i = 0; i < n; i++, tbl++)
472 if (tbl->vtg == vtg)
473 return tbl->val;
474 return -EINVAL;
475}
476
477/*
478 * Vcore can be programmed in two ways:
479 * SW-controlled: Required voltage is programmed into VCORE_CTRL1
480 * HW-controlled: Required range (roof-floor) is programmed into VCORE_CTRL3
481 * and VCORE_CTRL4
482 *
483 * Call correct 'set' function accordingly
484 */
485
486static const struct menelaus_vtg_value vcore_values[] = {
487 { 1000, 0 },
488 { 1025, 1 },
489 { 1050, 2 },
490 { 1075, 3 },
491 { 1100, 4 },
492 { 1125, 5 },
493 { 1150, 6 },
494 { 1175, 7 },
495 { 1200, 8 },
496 { 1225, 9 },
497 { 1250, 10 },
498 { 1275, 11 },
499 { 1300, 12 },
500 { 1325, 13 },
501 { 1350, 14 },
502 { 1375, 15 },
503 { 1400, 16 },
504 { 1425, 17 },
505 { 1450, 18 },
506};
507
508int menelaus_set_vcore_sw(unsigned int mV)
509{
510 int val, ret;
511 struct i2c_client *c = the_menelaus->client;
512
513 val = menelaus_get_vtg_value(mV, vcore_values,
514 ARRAY_SIZE(vcore_values));
515 if (val < 0)
516 return -EINVAL;
517
518 dev_dbg(&c->dev, "Setting VCORE to %d mV (val 0x%02x)\n", mV, val);
519
520 /* Set SW mode and the voltage in one go. */
521 mutex_lock(&the_menelaus->lock);
522 ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
523 if (ret == 0)
524 the_menelaus->vcore_hw_mode = 0;
525 mutex_unlock(&the_menelaus->lock);
526 msleep(1);
527
528 return ret;
529}
530
531int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV)
532{
533 int fval, rval, val, ret;
534 struct i2c_client *c = the_menelaus->client;
535
536 rval = menelaus_get_vtg_value(roof_mV, vcore_values,
537 ARRAY_SIZE(vcore_values));
538 if (rval < 0)
539 return -EINVAL;
540 fval = menelaus_get_vtg_value(floor_mV, vcore_values,
541 ARRAY_SIZE(vcore_values));
542 if (fval < 0)
543 return -EINVAL;
544
545 dev_dbg(&c->dev, "Setting VCORE FLOOR to %d mV and ROOF to %d mV\n",
546 floor_mV, roof_mV);
547
548 mutex_lock(&the_menelaus->lock);
549 ret = menelaus_write_reg(MENELAUS_VCORE_CTRL3, fval);
550 if (ret < 0)
551 goto out;
552 ret = menelaus_write_reg(MENELAUS_VCORE_CTRL4, rval);
553 if (ret < 0)
554 goto out;
555 if (!the_menelaus->vcore_hw_mode) {
556 val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
557 /* HW mode, turn OFF byte comparator */
558 val |= ((1 << 7) | (1 << 5));
559 ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
560 the_menelaus->vcore_hw_mode = 1;
561 }
562 msleep(1);
563out:
564 mutex_unlock(&the_menelaus->lock);
565 return ret;
566}
567
568static const struct menelaus_vtg vmem_vtg = {
569 .name = "VMEM",
570 .vtg_reg = MENELAUS_LDO_CTRL1,
571 .vtg_shift = 0,
572 .vtg_bits = 2,
573 .mode_reg = MENELAUS_LDO_CTRL3,
574};
575
576static const struct menelaus_vtg_value vmem_values[] = {
577 { 1500, 0 },
578 { 1800, 1 },
579 { 1900, 2 },
580 { 2500, 3 },
581};
582
583int menelaus_set_vmem(unsigned int mV)
584{
585 int val;
586
587 if (mV == 0)
588 return menelaus_set_voltage(&vmem_vtg, 0, 0, 0);
589
590 val = menelaus_get_vtg_value(mV, vmem_values, ARRAY_SIZE(vmem_values));
591 if (val < 0)
592 return -EINVAL;
593 return menelaus_set_voltage(&vmem_vtg, mV, val, 0x02);
594}
595EXPORT_SYMBOL(menelaus_set_vmem);
596
597static const struct menelaus_vtg vio_vtg = {
598 .name = "VIO",
599 .vtg_reg = MENELAUS_LDO_CTRL1,
600 .vtg_shift = 2,
601 .vtg_bits = 2,
602 .mode_reg = MENELAUS_LDO_CTRL4,
603};
604
605static const struct menelaus_vtg_value vio_values[] = {
606 { 1500, 0 },
607 { 1800, 1 },
608 { 2500, 2 },
609 { 2800, 3 },
610};
611
612int menelaus_set_vio(unsigned int mV)
613{
614 int val;
615
616 if (mV == 0)
617 return menelaus_set_voltage(&vio_vtg, 0, 0, 0);
618
619 val = menelaus_get_vtg_value(mV, vio_values, ARRAY_SIZE(vio_values));
620 if (val < 0)
621 return -EINVAL;
622 return menelaus_set_voltage(&vio_vtg, mV, val, 0x02);
623}
624EXPORT_SYMBOL(menelaus_set_vio);
625
626static const struct menelaus_vtg_value vdcdc_values[] = {
627 { 1500, 0 },
628 { 1800, 1 },
629 { 2000, 2 },
630 { 2200, 3 },
631 { 2400, 4 },
632 { 2800, 5 },
633 { 3000, 6 },
634 { 3300, 7 },
635};
636
637static const struct menelaus_vtg vdcdc2_vtg = {
638 .name = "VDCDC2",
639 .vtg_reg = MENELAUS_DCDC_CTRL1,
640 .vtg_shift = 0,
641 .vtg_bits = 3,
642 .mode_reg = MENELAUS_DCDC_CTRL2,
643};
644
645static const struct menelaus_vtg vdcdc3_vtg = {
646 .name = "VDCDC3",
647 .vtg_reg = MENELAUS_DCDC_CTRL1,
648 .vtg_shift = 3,
649 .vtg_bits = 3,
650 .mode_reg = MENELAUS_DCDC_CTRL3,
651};
652
653int menelaus_set_vdcdc(int dcdc, unsigned int mV)
654{
655 const struct menelaus_vtg *vtg;
656 int val;
657
658 if (dcdc != 2 && dcdc != 3)
659 return -EINVAL;
660 if (dcdc == 2)
661 vtg = &vdcdc2_vtg;
662 else
663 vtg = &vdcdc3_vtg;
664
665 if (mV == 0)
666 return menelaus_set_voltage(vtg, 0, 0, 0);
667
668 val = menelaus_get_vtg_value(mV, vdcdc_values,
669 ARRAY_SIZE(vdcdc_values));
670 if (val < 0)
671 return -EINVAL;
672 return menelaus_set_voltage(vtg, mV, val, 0x03);
673}
674
675static const struct menelaus_vtg_value vmmc_values[] = {
676 { 1850, 0 },
677 { 2800, 1 },
678 { 3000, 2 },
679 { 3100, 3 },
680};
681
682static const struct menelaus_vtg vmmc_vtg = {
683 .name = "VMMC",
684 .vtg_reg = MENELAUS_LDO_CTRL1,
685 .vtg_shift = 6,
686 .vtg_bits = 2,
687 .mode_reg = MENELAUS_LDO_CTRL7,
688};
689
690int menelaus_set_vmmc(unsigned int mV)
691{
692 int val;
693
694 if (mV == 0)
695 return menelaus_set_voltage(&vmmc_vtg, 0, 0, 0);
696
697 val = menelaus_get_vtg_value(mV, vmmc_values, ARRAY_SIZE(vmmc_values));
698 if (val < 0)
699 return -EINVAL;
700 return menelaus_set_voltage(&vmmc_vtg, mV, val, 0x02);
701}
702EXPORT_SYMBOL(menelaus_set_vmmc);
703
704
705static const struct menelaus_vtg_value vaux_values[] = {
706 { 1500, 0 },
707 { 1800, 1 },
708 { 2500, 2 },
709 { 2800, 3 },
710};
711
712static const struct menelaus_vtg vaux_vtg = {
713 .name = "VAUX",
714 .vtg_reg = MENELAUS_LDO_CTRL1,
715 .vtg_shift = 4,
716 .vtg_bits = 2,
717 .mode_reg = MENELAUS_LDO_CTRL6,
718};
719
720int menelaus_set_vaux(unsigned int mV)
721{
722 int val;
723
724 if (mV == 0)
725 return menelaus_set_voltage(&vaux_vtg, 0, 0, 0);
726
727 val = menelaus_get_vtg_value(mV, vaux_values, ARRAY_SIZE(vaux_values));
728 if (val < 0)
729 return -EINVAL;
730 return menelaus_set_voltage(&vaux_vtg, mV, val, 0x02);
731}
732EXPORT_SYMBOL(menelaus_set_vaux);
733
734int menelaus_get_slot_pin_states(void)
735{
736 return menelaus_read_reg(MENELAUS_MCT_PIN_ST);
737}
738EXPORT_SYMBOL(menelaus_get_slot_pin_states);
739
740int menelaus_set_regulator_sleep(int enable, u32 val)
741{
742 int t, ret;
743 struct i2c_client *c = the_menelaus->client;
744
745 mutex_lock(&the_menelaus->lock);
746 ret = menelaus_write_reg(MENELAUS_SLEEP_CTRL2, val);
747 if (ret < 0)
748 goto out;
749
750 dev_dbg(&c->dev, "regulator sleep configuration: %02x\n", val);
751
752 ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
753 if (ret < 0)
754 goto out;
755 t = ((1 << 6) | 0x04);
756 if (enable)
757 ret |= t;
758 else
759 ret &= ~t;
760 ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
761out:
762 mutex_unlock(&the_menelaus->lock);
763 return ret;
764}
765
766/*-----------------------------------------------------------------------*/
767
768/* Handles Menelaus interrupts. Does not run in interrupt context */
769static void menelaus_work(struct work_struct *_menelaus)
770{
771 struct menelaus_chip *menelaus =
772 container_of(_menelaus, struct menelaus_chip, work);
773 void (*handler)(struct menelaus_chip *menelaus);
774
775 while (1) {
776 unsigned isr;
777
778 isr = (menelaus_read_reg(MENELAUS_INT_STATUS2)
779 & ~menelaus->mask2) << 8;
780 isr |= menelaus_read_reg(MENELAUS_INT_STATUS1)
781 & ~menelaus->mask1;
782 if (!isr)
783 break;
784
785 while (isr) {
786 int irq = fls(isr) - 1;
787 isr &= ~(1 << irq);
788
789 mutex_lock(&menelaus->lock);
790 menelaus_disable_irq(irq);
791 menelaus_ack_irq(irq);
792 handler = menelaus->handlers[irq];
793 if (handler)
794 handler(menelaus);
795 menelaus_enable_irq(irq);
796 mutex_unlock(&menelaus->lock);
797 }
798 }
799 enable_irq(menelaus->client->irq);
800}
801
802/*
803 * We cannot use I2C in interrupt context, so we just schedule work.
804 */
805static irqreturn_t menelaus_irq(int irq, void *_menelaus)
806{
807 struct menelaus_chip *menelaus = _menelaus;
808
809 disable_irq_nosync(irq);
810 (void)schedule_work(&menelaus->work);
811
812 return IRQ_HANDLED;
813}
814
815/*-----------------------------------------------------------------------*/
816
817/*
818 * The RTC needs to be set once, then it runs on backup battery power.
819 * It supports alarms, including system wake alarms (from some modes);
820 * and 1/second IRQs if requested.
821 */
822#ifdef CONFIG_RTC_DRV_TWL92330
823
824#define RTC_CTRL_RTC_EN (1 << 0)
825#define RTC_CTRL_AL_EN (1 << 1)
826#define RTC_CTRL_MODE12 (1 << 2)
827#define RTC_CTRL_EVERY_MASK (3 << 3)
828#define RTC_CTRL_EVERY_SEC (0 << 3)
829#define RTC_CTRL_EVERY_MIN (1 << 3)
830#define RTC_CTRL_EVERY_HR (2 << 3)
831#define RTC_CTRL_EVERY_DAY (3 << 3)
832
833#define RTC_UPDATE_EVERY 0x08
834
835#define RTC_HR_PM (1 << 7)
836
837static void menelaus_to_time(char *regs, struct rtc_time *t)
838{
839 t->tm_sec = BCD2BIN(regs[0]);
840 t->tm_min = BCD2BIN(regs[1]);
841 if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
842 t->tm_hour = BCD2BIN(regs[2] & 0x1f) - 1;
843 if (regs[2] & RTC_HR_PM)
844 t->tm_hour += 12;
845 } else
846 t->tm_hour = BCD2BIN(regs[2] & 0x3f);
847 t->tm_mday = BCD2BIN(regs[3]);
848 t->tm_mon = BCD2BIN(regs[4]) - 1;
849 t->tm_year = BCD2BIN(regs[5]) + 100;
850}
851
852static int time_to_menelaus(struct rtc_time *t, int regnum)
853{
854 int hour, status;
855
856 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_sec));
857 if (status < 0)
858 goto fail;
859
860 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_min));
861 if (status < 0)
862 goto fail;
863
864 if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
865 hour = t->tm_hour + 1;
866 if (hour > 12)
867 hour = RTC_HR_PM | BIN2BCD(hour - 12);
868 else
869 hour = BIN2BCD(hour);
870 } else
871 hour = BIN2BCD(t->tm_hour);
872 status = menelaus_write_reg(regnum++, hour);
873 if (status < 0)
874 goto fail;
875
876 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mday));
877 if (status < 0)
878 goto fail;
879
880 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mon + 1));
881 if (status < 0)
882 goto fail;
883
884 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_year - 100));
885 if (status < 0)
886 goto fail;
887
888 return 0;
889fail:
890 dev_err(&the_menelaus->client->dev, "rtc write reg %02x, err %d\n",
891 --regnum, status);
892 return status;
893}
894
895static int menelaus_read_time(struct device *dev, struct rtc_time *t)
896{
897 struct i2c_msg msg[2];
898 char regs[7];
899 int status;
900
901 /* block read date and time registers */
902 regs[0] = MENELAUS_RTC_SEC;
903
904 msg[0].addr = MENELAUS_I2C_ADDRESS;
905 msg[0].flags = 0;
906 msg[0].len = 1;
907 msg[0].buf = regs;
908
909 msg[1].addr = MENELAUS_I2C_ADDRESS;
910 msg[1].flags = I2C_M_RD;
911 msg[1].len = sizeof(regs);
912 msg[1].buf = regs;
913
914 status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
915 if (status != 2) {
916 dev_err(dev, "%s error %d\n", "read", status);
917 return -EIO;
918 }
919
920 menelaus_to_time(regs, t);
921 t->tm_wday = BCD2BIN(regs[6]);
922
923 return 0;
924}
925
926static int menelaus_set_time(struct device *dev, struct rtc_time *t)
927{
928 int status;
929
930 /* write date and time registers */
931 status = time_to_menelaus(t, MENELAUS_RTC_SEC);
932 if (status < 0)
933 return status;
934 status = menelaus_write_reg(MENELAUS_RTC_WKDAY, BIN2BCD(t->tm_wday));
935 if (status < 0) {
936 dev_err(&the_menelaus->client->dev, "rtc write reg %02x",
937 "err %d\n", MENELAUS_RTC_WKDAY, status);
938 return status;
939 }
940
941 /* now commit the write */
942 status = menelaus_write_reg(MENELAUS_RTC_UPDATE, RTC_UPDATE_EVERY);
943 if (status < 0)
944 dev_err(&the_menelaus->client->dev, "rtc commit time, err %d\n",
945 status);
946
947 return 0;
948}
949
950static int menelaus_read_alarm(struct device *dev, struct rtc_wkalrm *w)
951{
952 struct i2c_msg msg[2];
953 char regs[6];
954 int status;
955
956 /* block read alarm registers */
957 regs[0] = MENELAUS_RTC_AL_SEC;
958
959 msg[0].addr = MENELAUS_I2C_ADDRESS;
960 msg[0].flags = 0;
961 msg[0].len = 1;
962 msg[0].buf = regs;
963
964 msg[1].addr = MENELAUS_I2C_ADDRESS;
965 msg[1].flags = I2C_M_RD;
966 msg[1].len = sizeof(regs);
967 msg[1].buf = regs;
968
969 status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
970 if (status != 2) {
971 dev_err(dev, "%s error %d\n", "alarm read", status);
972 return -EIO;
973 }
974
975 menelaus_to_time(regs, &w->time);
976
977 w->enabled = !!(the_menelaus->rtc_control & RTC_CTRL_AL_EN);
978
979 /* NOTE we *could* check if actually pending... */
980 w->pending = 0;
981
982 return 0;
983}
984
985static int menelaus_set_alarm(struct device *dev, struct rtc_wkalrm *w)
986{
987 int status;
988
989 if (the_menelaus->client->irq <= 0 && w->enabled)
990 return -ENODEV;
991
992 /* clear previous alarm enable */
993 if (the_menelaus->rtc_control & RTC_CTRL_AL_EN) {
994 the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
995 status = menelaus_write_reg(MENELAUS_RTC_CTRL,
996 the_menelaus->rtc_control);
997 if (status < 0)
998 return status;
999 }
1000
1001 /* write alarm registers */
1002 status = time_to_menelaus(&w->time, MENELAUS_RTC_AL_SEC);
1003 if (status < 0)
1004 return status;
1005
1006 /* enable alarm if requested */
1007 if (w->enabled) {
1008 the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
1009 status = menelaus_write_reg(MENELAUS_RTC_CTRL,
1010 the_menelaus->rtc_control);
1011 }
1012
1013 return status;
1014}
1015
1016#ifdef CONFIG_RTC_INTF_DEV
1017
1018static void menelaus_rtc_update_work(struct menelaus_chip *m)
1019{
1020 /* report 1/sec update */
1021 local_irq_disable();
1022 rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_UF);
1023 local_irq_enable();
1024}
1025
1026static int menelaus_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
1027{
1028 int status;
1029
1030 if (the_menelaus->client->irq <= 0)
1031 return -ENOIOCTLCMD;
1032
1033 switch (cmd) {
1034 /* alarm IRQ */
1035 case RTC_AIE_ON:
1036 if (the_menelaus->rtc_control & RTC_CTRL_AL_EN)
1037 return 0;
1038 the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
1039 break;
1040 case RTC_AIE_OFF:
1041 if (!(the_menelaus->rtc_control & RTC_CTRL_AL_EN))
1042 return 0;
1043 the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
1044 break;
1045 /* 1/second "update" IRQ */
1046 case RTC_UIE_ON:
1047 if (the_menelaus->uie)
1048 return 0;
1049 status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
1050 status = menelaus_add_irq_work(MENELAUS_RTCTMR_IRQ,
1051 menelaus_rtc_update_work);
1052 if (status == 0)
1053 the_menelaus->uie = 1;
1054 return status;
1055 case RTC_UIE_OFF:
1056 if (!the_menelaus->uie)
1057 return 0;
1058 status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
1059 if (status == 0)
1060 the_menelaus->uie = 0;
1061 return status;
1062 default:
1063 return -ENOIOCTLCMD;
1064 }
1065 return menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
1066}
1067
1068#else
1069#define menelaus_ioctl NULL
1070#endif
1071
1072/* REVISIT no compensation register support ... */
1073
1074static const struct rtc_class_ops menelaus_rtc_ops = {
1075 .ioctl = menelaus_ioctl,
1076 .read_time = menelaus_read_time,
1077 .set_time = menelaus_set_time,
1078 .read_alarm = menelaus_read_alarm,
1079 .set_alarm = menelaus_set_alarm,
1080};
1081
1082static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
1083{
1084 /* report alarm */
1085 local_irq_disable();
1086 rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_AF);
1087 local_irq_enable();
1088
1089 /* then disable it; alarms are oneshot */
1090 the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
1091 menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
1092}
1093
1094static inline void menelaus_rtc_init(struct menelaus_chip *m)
1095{
1096 int alarm = (m->client->irq > 0);
1097
1098 /* assume 32KDETEN pin is pulled high */
1099 if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
1100 dev_dbg(&m->client->dev, "no 32k oscillator\n");
1101 return;
1102 }
1103
1104 /* support RTC alarm; it can issue wakeups */
1105 if (alarm) {
1106 if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
1107 menelaus_rtc_alarm_work) < 0) {
1108 dev_err(&m->client->dev, "can't handle RTC alarm\n");
1109 return;
1110 }
1111 device_init_wakeup(&m->client->dev, 1);
1112 }
1113
1114 /* be sure RTC is enabled; allow 1/sec irqs; leave 12hr mode alone */
1115 m->rtc_control = menelaus_read_reg(MENELAUS_RTC_CTRL);
1116 if (!(m->rtc_control & RTC_CTRL_RTC_EN)
1117 || (m->rtc_control & RTC_CTRL_AL_EN)
1118 || (m->rtc_control & RTC_CTRL_EVERY_MASK)) {
1119 if (!(m->rtc_control & RTC_CTRL_RTC_EN)) {
1120 dev_warn(&m->client->dev, "rtc clock needs setting\n");
1121 m->rtc_control |= RTC_CTRL_RTC_EN;
1122 }
1123 m->rtc_control &= ~RTC_CTRL_EVERY_MASK;
1124 m->rtc_control &= ~RTC_CTRL_AL_EN;
1125 menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
1126 }
1127
1128 m->rtc = rtc_device_register(DRIVER_NAME,
1129 &m->client->dev,
1130 &menelaus_rtc_ops, THIS_MODULE);
1131 if (IS_ERR(m->rtc)) {
1132 if (alarm) {
1133 menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
1134 device_init_wakeup(&m->client->dev, 0);
1135 }
1136 dev_err(&m->client->dev, "can't register RTC: %d\n",
1137 (int) PTR_ERR(m->rtc));
1138 the_menelaus->rtc = NULL;
1139 }
1140}
1141
1142#else
1143
1144static inline void menelaus_rtc_init(struct menelaus_chip *m)
1145{
1146 /* nothing */
1147}
1148
1149#endif
1150
1151/*-----------------------------------------------------------------------*/
1152
1153static struct i2c_driver menelaus_i2c_driver;
1154
1155static int menelaus_probe(struct i2c_client *client)
1156{
1157 struct menelaus_chip *menelaus;
1158 int rev = 0, val;
1159 int err = 0;
1160 struct menelaus_platform_data *menelaus_pdata =
1161 client->dev.platform_data;
1162
1163 if (the_menelaus) {
1164 dev_dbg(&client->dev, "only one %s for now\n",
1165 DRIVER_NAME);
1166 return -ENODEV;
1167 }
1168
1169 menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
1170 if (!menelaus)
1171 return -ENOMEM;
1172
1173 i2c_set_clientdata(client, menelaus);
1174
1175 the_menelaus = menelaus;
1176 menelaus->client = client;
1177
 1178 /* If a true probe, check the device */
1179 rev = menelaus_read_reg(MENELAUS_REV);
1180 if (rev < 0) {
1181 pr_err("device not found");
1182 err = -ENODEV;
1183 goto fail1;
1184 }
1185
1186 /* Ack and disable all Menelaus interrupts */
1187 menelaus_write_reg(MENELAUS_INT_ACK1, 0xff);
1188 menelaus_write_reg(MENELAUS_INT_ACK2, 0xff);
1189 menelaus_write_reg(MENELAUS_INT_MASK1, 0xff);
1190 menelaus_write_reg(MENELAUS_INT_MASK2, 0xff);
1191 menelaus->mask1 = 0xff;
1192 menelaus->mask2 = 0xff;
1193
1194 /* Set output buffer strengths */
1195 menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73);
1196
1197 if (client->irq > 0) {
1198 err = request_irq(client->irq, menelaus_irq, IRQF_DISABLED,
1199 DRIVER_NAME, menelaus);
1200 if (err) {
1201 dev_dbg(&client->dev, "can't get IRQ %d, err %d",
1202 client->irq, err);
1203 goto fail1;
1204 }
1205 }
1206
1207 mutex_init(&menelaus->lock);
1208 INIT_WORK(&menelaus->work, menelaus_work);
1209
1210 pr_info("Menelaus rev %d.%d\n", rev >> 4, rev & 0x0f);
1211
1212 val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
1213 if (val < 0)
1214 goto fail2;
1215 if (val & (1 << 7))
1216 menelaus->vcore_hw_mode = 1;
1217 else
1218 menelaus->vcore_hw_mode = 0;
1219
1220 if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
1221 err = menelaus_pdata->late_init(&client->dev);
1222 if (err < 0)
1223 goto fail2;
1224 }
1225
1226 menelaus_rtc_init(menelaus);
1227
1228 return 0;
1229fail2:
1230 free_irq(client->irq, menelaus);
1231 flush_scheduled_work();
1232fail1:
1233 kfree(menelaus);
1234 return err;
1235}
1236
1237static int __exit menelaus_remove(struct i2c_client *client)
1238{
1239 struct menelaus_chip *menelaus = i2c_get_clientdata(client);
1240
1241 free_irq(client->irq, menelaus);
1242 kfree(menelaus);
1243 i2c_set_clientdata(client, NULL);
1244 the_menelaus = NULL;
1245 return 0;
1246}
1247
1248static struct i2c_driver menelaus_i2c_driver = {
1249 .driver = {
1250 .name = DRIVER_NAME,
1251 },
1252 .probe = menelaus_probe,
1253 .remove = __exit_p(menelaus_remove),
1254};
1255
1256static int __init menelaus_init(void)
1257{
1258 int res;
1259
1260 res = i2c_add_driver(&menelaus_i2c_driver);
1261 if (res < 0) {
1262 pr_err("driver registration failed\n");
1263 return res;
1264 }
1265
1266 return 0;
1267}
1268
1269static void __exit menelaus_exit(void)
1270{
1271 i2c_del_driver(&menelaus_i2c_driver);
1272
1273 /* FIXME: Shutdown menelaus parts that can be shut down */
1274}
1275
1276MODULE_AUTHOR("Texas Instruments, Inc. (and others)");
1277MODULE_DESCRIPTION("I2C interface for Menelaus.");
1278MODULE_LICENSE("GPL");
1279
1280module_init(menelaus_init);
1281module_exit(menelaus_exit);
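menelaus.c exports a small board-facing API (menelaus_set_vmmc(), menelaus_set_mmc_slot(), menelaus_register_mmc_callback() and friends). The sketch below shows how OMAP board code might consume it; the callback, the 3.0 V value and the slot arguments are illustrative choices, not taken from any board file.

/* Hypothetical board glue using the helpers exported by menelaus.c above. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/arch/menelaus.h>

static void example_mmc_callback(void *data, u8 card_mask)
{
	/* bits 0 and 1 report the card (or cover) state of slots 1 and 2 */
	pr_debug("menelaus card mask now %02x\n", card_mask);
}

/* called from board init code (or wired up via the platform data hooks) */
static int __init example_board_menelaus_init(void)
{
	int err;

	err = menelaus_set_vmmc(3000);		/* 3.0 V card supply */
	if (err < 0)
		return err;

	/* slot 1: enabled, power setting 0, card-detect IRQ on */
	err = menelaus_set_mmc_slot(1, 1, 0, 1);
	if (err < 0)
		return err;

	return menelaus_register_mmc_callback(example_mmc_callback, NULL);
}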
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index cc5801399467..5a4c5ea12f89 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1073,14 +1073,14 @@ static int init_irq (ide_hwif_t *hwif)
1073 hwgroup->hwif->next = hwif; 1073 hwgroup->hwif->next = hwif;
1074 spin_unlock_irq(&ide_lock); 1074 spin_unlock_irq(&ide_lock);
1075 } else { 1075 } else {
1076 hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL, 1076 hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
1077 GFP_KERNEL | __GFP_ZERO,
1077 hwif_to_node(hwif->drives[0].hwif)); 1078 hwif_to_node(hwif->drives[0].hwif));
1078 if (!hwgroup) 1079 if (!hwgroup)
1079 goto out_up; 1080 goto out_up;
1080 1081
1081 hwif->hwgroup = hwgroup; 1082 hwif->hwgroup = hwgroup;
1082 1083
1083 memset(hwgroup, 0, sizeof(ide_hwgroup_t));
1084 hwgroup->hwif = hwif->next = hwif; 1084 hwgroup->hwif = hwif->next = hwif;
1085 hwgroup->rq = NULL; 1085 hwgroup->rq = NULL;
1086 hwgroup->handler = NULL; 1086 hwgroup->handler = NULL;
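The ide-probe hunk replaces an explicit memset() after kmalloc_node() with __GFP_ZERO at allocation time. The same idea in isolation, as a sketch with a made-up structure (kzalloc_node() wraps the identical combination):

/* Sketch: zero at allocation time instead of a follow-up memset(). */
#include <linux/slab.h>
#include <linux/string.h>

struct example {
	int a;
	void *b;
};

/* old pattern: allocate, then clear by hand */
static struct example *example_alloc_old(int node)
{
	struct example *p = kmalloc_node(sizeof(*p), GFP_KERNEL, node);

	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

/* new pattern: the allocator hands back already-zeroed memory */
static struct example *example_alloc_new(int node)
{
	return kmalloc_node(sizeof(struct example),
			    GFP_KERNEL | __GFP_ZERO, node);
}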
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 0fc8c6e559e4..ee45259573c8 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -30,6 +30,7 @@
30#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
31#include <linux/bitops.h> 31#include <linux/bitops.h>
32#include <linux/kdev_t.h> 32#include <linux/kdev_t.h>
33#include <linux/freezer.h>
33#include <linux/suspend.h> 34#include <linux/suspend.h>
34#include <linux/kthread.h> 35#include <linux/kthread.h>
35#include <linux/preempt.h> 36#include <linux/preempt.h>
@@ -1128,8 +1129,6 @@ static int hpsbpkt_thread(void *__hi)
1128 struct list_head tmp; 1129 struct list_head tmp;
1129 int may_schedule; 1130 int may_schedule;
1130 1131
1131 current->flags |= PF_NOFREEZE;
1132
1133 while (!kthread_should_stop()) { 1132 while (!kthread_should_stop()) {
1134 1133
1135 INIT_LIST_HEAD(&tmp); 1134 INIT_LIST_HEAD(&tmp);
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 51a12062ed36..2ffd53461db6 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1699,6 +1699,7 @@ static int nodemgr_host_thread(void *__hi)
1699 unsigned int g, generation = 0; 1699 unsigned int g, generation = 0;
1700 int i, reset_cycles = 0; 1700 int i, reset_cycles = 0;
1701 1701
1702 set_freezable();
1702 /* Setup our device-model entries */ 1703 /* Setup our device-model entries */
1703 nodemgr_create_host_dev_files(host); 1704 nodemgr_create_host_dev_files(host);
1704 1705
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index eef415b12b2e..11f1d99db40b 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1591,7 +1591,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1591 int i; 1591 int i;
1592 int size; 1592 int size;
1593 int size0 = 0; 1593 int size0 = 0;
1594 u32 f0; 1594 u32 f0 = 0;
1595 int ind; 1595 int ind;
1596 u8 op0 = 0; 1596 u8 op0 = 0;
1597 1597
@@ -1946,7 +1946,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1946 int i; 1946 int i;
1947 int size; 1947 int size;
1948 int size0 = 0; 1948 int size0 = 0;
1949 u32 f0; 1949 u32 f0 = 0;
1950 int ind; 1950 int ind;
1951 u8 op0 = 0; 1951 u8 op0 = 0;
1952 1952
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index bd686a2a517d..20896d5e5f0e 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -445,6 +445,7 @@ static struct gameport *gameport_get_pending_child(struct gameport *parent)
445 445
446static int gameport_thread(void *nothing) 446static int gameport_thread(void *nothing)
447{ 447{
448 set_freezable();
448 do { 449 do {
449 gameport_handle_event(); 450 gameport_handle_event();
450 wait_event_interruptible(gameport_wait, 451 wait_event_interruptible(gameport_wait,
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a8f3bc1dff22..372ca4931194 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -384,6 +384,7 @@ static struct serio *serio_get_pending_child(struct serio *parent)
384 384
385static int serio_thread(void *nothing) 385static int serio_thread(void *nothing)
386{ 386{
387 set_freezable();
387 do { 388 do {
388 serio_handle_event(); 389 serio_handle_event();
389 wait_event_interruptible(serio_wait, 390 wait_event_interruptible(serio_wait,
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f0cbcdb008ed..36f944019158 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -292,6 +292,7 @@ static int ucb1400_ts_thread(void *_ucb)
292 292
293 sched_setscheduler(tsk, SCHED_FIFO, &param); 293 sched_setscheduler(tsk, SCHED_FIFO, &param);
294 294
295 set_freezable();
295 while (!kthread_should_stop()) { 296 while (!kthread_should_stop()) {
296 unsigned int x, y, p; 297 unsigned int x, y, p;
297 long timeout; 298 long timeout;
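The freezer-related hunks above (ieee1394_core.c, nodemgr.c, gameport.c, serio.c, ucb1400_ts.c) all follow the same convention change: kernel threads are no longer freezable by default, so instead of opting out with current->flags |= PF_NOFREEZE, a thread that should take part in suspend/resume now opts in with set_freezable(). A minimal sketch of the resulting loop shape; the wait queue and work step are illustrative and not taken from any of these drivers:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);

static int example_thread(void *unused)
{
	set_freezable();		/* opt in: kthreads are non-freezable by default */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here while the system suspends */
		/* ... handle pending work ... */
		wait_event_interruptible(example_wait, kthread_should_stop());
	}
	return 0;
}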
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 3e088c42b222..cf906c8cee4d 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -2,12 +2,10 @@
2# ISDN device configuration 2# ISDN device configuration
3# 3#
4 4
5menu "ISDN subsystem" 5menuconfig ISDN
6 depends on !S390
7
8config ISDN
9 tristate "ISDN support" 6 tristate "ISDN support"
10 depends on NET 7 depends on NET
8 depends on !S390
11 ---help--- 9 ---help---
12 ISDN ("Integrated Services Digital Networks", called RNIS in France) 10 ISDN ("Integrated Services Digital Networks", called RNIS in France)
13 is a special type of fully digital telephone service; it's mostly 11 is a special type of fully digital telephone service; it's mostly
@@ -21,9 +19,9 @@ config ISDN
21 19
22 Select this option if you want your kernel to support ISDN. 20 Select this option if you want your kernel to support ISDN.
23 21
22if ISDN
24 23
25menu "Old ISDN4Linux" 24menu "Old ISDN4Linux"
26 depends on NET && ISDN
27 25
28config ISDN_I4L 26config ISDN_I4L
29 tristate "Old ISDN4Linux (deprecated)" 27 tristate "Old ISDN4Linux (deprecated)"
@@ -50,20 +48,21 @@ endif
50endmenu 48endmenu
51 49
52comment "CAPI subsystem" 50comment "CAPI subsystem"
53 depends on NET && ISDN
54 51
55config ISDN_CAPI 52config ISDN_CAPI
56 tristate "CAPI2.0 support" 53 tristate "CAPI2.0 support"
57 depends on ISDN
58 help 54 help
59 This provides the CAPI (Common ISDN Application Programming 55 This provides the CAPI (Common ISDN Application Programming
60 Interface, a standard making it easy for programs to access ISDN 56 Interface, a standard making it easy for programs to access ISDN
61 hardware, see <http://www.capi.org/>. This is needed for AVM's set 57 hardware, see <http://www.capi.org/>. This is needed for AVM's set
62 of active ISDN controllers like B1, T1, M1. 58 of active ISDN controllers like B1, T1, M1.
63 59
60if ISDN_CAPI
61
64source "drivers/isdn/capi/Kconfig" 62source "drivers/isdn/capi/Kconfig"
65 63
66source "drivers/isdn/hardware/Kconfig" 64source "drivers/isdn/hardware/Kconfig"
67 65
68endmenu 66endif # ISDN_CAPI
69 67
68endif # ISDN
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c92f9d764fce..e1afd60924fb 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -3,7 +3,6 @@
3# 3#
4config ISDN_DRV_AVMB1_VERBOSE_REASON 4config ISDN_DRV_AVMB1_VERBOSE_REASON
5 bool "Verbose reason code reporting" 5 bool "Verbose reason code reporting"
6 depends on ISDN_CAPI
7 default y 6 default y
8 help 7 help
9 If you say Y here, the CAPI drivers will give verbose reasons for 8 If you say Y here, the CAPI drivers will give verbose reasons for
@@ -12,7 +11,6 @@ config ISDN_DRV_AVMB1_VERBOSE_REASON
12 11
13config CAPI_TRACE 12config CAPI_TRACE
14 bool "CAPI trace support" 13 bool "CAPI trace support"
15 depends on ISDN_CAPI
16 default y 14 default y
17 help 15 help
18 If you say Y here, the kernelcapi driver can make verbose traces 16 If you say Y here, the kernelcapi driver can make verbose traces
@@ -23,7 +21,7 @@ config CAPI_TRACE
23 21
24config ISDN_CAPI_MIDDLEWARE 22config ISDN_CAPI_MIDDLEWARE
25 bool "CAPI2.0 Middleware support (EXPERIMENTAL)" 23 bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
26 depends on ISDN_CAPI && EXPERIMENTAL 24 depends on EXPERIMENTAL
27 help 25 help
28 This option will enhance the capabilities of the /dev/capi20 26 This option will enhance the capabilities of the /dev/capi20
29 interface. It will provide a means of moving a data connection, 27 interface. It will provide a means of moving a data connection,
@@ -33,7 +31,6 @@ config ISDN_CAPI_MIDDLEWARE
33 31
34config ISDN_CAPI_CAPI20 32config ISDN_CAPI_CAPI20
35 tristate "CAPI2.0 /dev/capi support" 33 tristate "CAPI2.0 /dev/capi support"
36 depends on ISDN_CAPI
37 help 34 help
38 This option will provide the CAPI 2.0 interface to userspace 35 This option will provide the CAPI 2.0 interface to userspace
39 applications via /dev/capi20. Applications should use the 36 applications via /dev/capi20. Applications should use the
@@ -56,7 +53,7 @@ config ISDN_CAPI_CAPIFS
56 53
57config ISDN_CAPI_CAPIDRV 54config ISDN_CAPI_CAPIDRV
58 tristate "CAPI2.0 capidrv interface support" 55 tristate "CAPI2.0 capidrv interface support"
59 depends on ISDN_CAPI && ISDN_I4L 56 depends on ISDN_I4L
60 help 57 help
61 This option provides the glue code to hook up CAPI driven cards to 58 This option provides the glue code to hook up CAPI driven cards to
62 the legacy isdn4linux link layer. If you have a card which is 59 the legacy isdn4linux link layer. If you have a card which is
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 81661b8bd3a8..f449daef3eed 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -549,7 +549,7 @@ static int handle_minor_send(struct capiminor *mp)
549 capimsg_setu8 (skb->data, 5, CAPI_REQ); 549 capimsg_setu8 (skb->data, 5, CAPI_REQ);
550 capimsg_setu16(skb->data, 6, mp->msgid++); 550 capimsg_setu16(skb->data, 6, mp->msgid++);
551 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */ 551 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
552 capimsg_setu32(skb->data, 12, (u32) skb->data); /* Data32 */ 552 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
553 capimsg_setu16(skb->data, 16, len); /* Data length */ 553 capimsg_setu16(skb->data, 16, len); /* Data length */
554 capimsg_setu16(skb->data, 18, datahandle); 554 capimsg_setu16(skb->data, 18, datahandle);
555 capimsg_setu16(skb->data, 20, 0); /* Flags */ 555 capimsg_setu16(skb->data, 20, 0); /* Flags */
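The one-line capi.c change fills the Data32 field with (u32)(long)skb->data instead of (u32)skb->data. A small illustration of why the intermediate cast matters on 64-bit builds; the helper name is invented for this sketch:

#include <linux/types.h>

static u32 example_data32(void *p)
{
	/*
	 * On 64-bit a direct (u32)p cast draws a "cast from pointer to
	 * integer of different size" warning; casting through long first
	 * keeps the low 32 bits and makes the truncation explicit, which
	 * is all a 32-bit field like Data32 can carry anyway.
	 */
	return (u32)(long)p;
}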
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 3ed34f7a1c4f..9f73bc2727c2 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -258,7 +258,7 @@ static void recv_handler(struct work_struct *work)
258 if ((!ap) || (ap->release_in_progress)) 258 if ((!ap) || (ap->release_in_progress))
259 return; 259 return;
260 260
261 down(&ap->recv_sem); 261 mutex_lock(&ap->recv_mtx);
262 while ((skb = skb_dequeue(&ap->recv_queue))) { 262 while ((skb = skb_dequeue(&ap->recv_queue))) {
263 if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND) 263 if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND)
264 ap->nrecvdatapkt++; 264 ap->nrecvdatapkt++;
@@ -267,7 +267,7 @@ static void recv_handler(struct work_struct *work)
267 267
268 ap->recv_message(ap, skb); 268 ap->recv_message(ap, skb);
269 } 269 }
270 up(&ap->recv_sem); 270 mutex_unlock(&ap->recv_mtx);
271} 271}
272 272
273void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb) 273void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb)
@@ -547,7 +547,7 @@ u16 capi20_register(struct capi20_appl *ap)
547 ap->nsentctlpkt = 0; 547 ap->nsentctlpkt = 0;
548 ap->nsentdatapkt = 0; 548 ap->nsentdatapkt = 0;
549 ap->callback = NULL; 549 ap->callback = NULL;
550 init_MUTEX(&ap->recv_sem); 550 mutex_init(&ap->recv_mtx);
551 skb_queue_head_init(&ap->recv_queue); 551 skb_queue_head_init(&ap->recv_queue);
552 INIT_WORK(&ap->recv_work, recv_handler); 552 INIT_WORK(&ap->recv_work, recv_handler);
553 ap->release_in_progress = 0; 553 ap->release_in_progress = 0;
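The kcapi.c hunks convert the per-application receive lock from a semaphore used as a mutex (init_MUTEX/down/up on recv_sem) to a real struct mutex (mutex_init/mutex_lock/mutex_unlock on recv_mtx). A minimal sketch of that conversion on an illustrative structure, not the real struct capi20_appl:

#include <linux/mutex.h>

struct example_appl {
	struct mutex recv_mtx;		/* was: struct semaphore recv_sem */
	/* ... */
};

static void example_register(struct example_appl *ap)
{
	mutex_init(&ap->recv_mtx);	/* was: init_MUTEX(&ap->recv_sem) */
}

static void example_recv(struct example_appl *ap)
{
	mutex_lock(&ap->recv_mtx);	/* was: down(&ap->recv_sem) */
	/* ... drain the receive queue ... */
	mutex_unlock(&ap->recv_mtx);	/* was: up(&ap->recv_sem) */
}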
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 31f4fd8b8b0a..845a797b0030 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -243,36 +243,15 @@ create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
243 243
244// --------------------------------------------------------------------------- 244// ---------------------------------------------------------------------------
245 245
246
247static __inline__ struct capi_driver *capi_driver_get_idx(loff_t pos)
248{
249 struct capi_driver *drv = NULL;
250 struct list_head *l;
251 loff_t i;
252
253 i = 0;
254 list_for_each(l, &capi_drivers) {
255 drv = list_entry(l, struct capi_driver, list);
256 if (i++ == pos)
257 return drv;
258 }
259 return NULL;
260}
261
262static void *capi_driver_start(struct seq_file *seq, loff_t *pos) 246static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
263{ 247{
264 struct capi_driver *drv;
265 read_lock(&capi_drivers_list_lock); 248 read_lock(&capi_drivers_list_lock);
266 drv = capi_driver_get_idx(*pos); 249 return seq_list_start(&capi_drivers, *pos);
267 return drv;
268} 250}
269 251
270static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos) 252static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
271{ 253{
272 struct capi_driver *drv = (struct capi_driver *)v; 254 return seq_list_next(v, &capi_drivers, pos);
273 ++*pos;
274 if (drv->list.next == &capi_drivers) return NULL;
275 return list_entry(drv->list.next, struct capi_driver, list);
276} 255}
277 256
278static void capi_driver_stop(struct seq_file *seq, void *v) 257static void capi_driver_stop(struct seq_file *seq, void *v)
@@ -282,7 +261,8 @@ static void capi_driver_stop(struct seq_file *seq, void *v)
282 261
283static int capi_driver_show(struct seq_file *seq, void *v) 262static int capi_driver_show(struct seq_file *seq, void *v)
284{ 263{
285 struct capi_driver *drv = (struct capi_driver *)v; 264 struct capi_driver *drv = list_entry(v, struct capi_driver, list);
265
286 seq_printf(seq, "%-32s %s\n", drv->name, drv->revision); 266 seq_printf(seq, "%-32s %s\n", drv->name, drv->revision);
287 return 0; 267 return 0;
288} 268}
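In kcapi_proc.c the hand-rolled capi_driver_get_idx()/next() iterator is dropped in favour of the generic seq_list_start()/seq_list_next() helpers; because those helpers hand back a struct list_head pointer, ->show() recovers the element with list_entry(). A minimal sketch of a seq_file iterator built on the same helpers (the example names are invented; the real code additionally takes capi_drivers_list_lock in ->start() and releases it in ->stop()):

#include <linux/seq_file.h>
#include <linux/list.h>

struct example_item {
	struct list_head list;
	const char *name;
};

static LIST_HEAD(example_list);

static void *example_start(struct seq_file *seq, loff_t *pos)
{
	return seq_list_start(&example_list, *pos);	/* NULL once *pos is past the end */
}

static void *example_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &example_list, pos);	/* also advances *pos */
}

static void example_stop(struct seq_file *seq, void *v)
{
	/* a real iterator would drop the list lock taken in ->start() here */
}

static int example_show(struct seq_file *seq, void *v)
{
	struct example_item *it = list_entry(v, struct example_item, list);

	seq_printf(seq, "%s\n", it->name);
	return 0;
}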
diff --git a/drivers/isdn/hardware/Kconfig b/drivers/isdn/hardware/Kconfig
index 139f19797713..30d028d24955 100644
--- a/drivers/isdn/hardware/Kconfig
+++ b/drivers/isdn/hardware/Kconfig
@@ -2,7 +2,6 @@
2# ISDN hardware drivers 2# ISDN hardware drivers
3# 3#
4comment "CAPI hardware drivers" 4comment "CAPI hardware drivers"
5 depends on NET && ISDN && ISDN_CAPI
6 5
7source "drivers/isdn/hardware/avm/Kconfig" 6source "drivers/isdn/hardware/avm/Kconfig"
8 7
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig
index 29a32a8830c0..5dbcbe3a54a6 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/isdn/hardware/avm/Kconfig
@@ -2,23 +2,22 @@
2# ISDN AVM drivers 2# ISDN AVM drivers
3# 3#
4 4
5menu "Active AVM cards" 5menuconfig CAPI_AVM
6 depends on NET && ISDN && ISDN_CAPI!=n 6 bool "Active AVM cards"
7
8config CAPI_AVM
9 bool "Support AVM cards"
10 help 7 help
11 Enable support for AVM active ISDN cards. 8 Enable support for AVM active ISDN cards.
12 9
10if CAPI_AVM
11
13config ISDN_DRV_AVMB1_B1ISA 12config ISDN_DRV_AVMB1_B1ISA
14 tristate "AVM B1 ISA support" 13 tristate "AVM B1 ISA support"
15 depends on CAPI_AVM && ISDN_CAPI && ISA 14 depends on ISA
16 help 15 help
17 Enable support for the ISA version of the AVM B1 card. 16 Enable support for the ISA version of the AVM B1 card.
18 17
19config ISDN_DRV_AVMB1_B1PCI 18config ISDN_DRV_AVMB1_B1PCI
20 tristate "AVM B1 PCI support" 19 tristate "AVM B1 PCI support"
21 depends on CAPI_AVM && ISDN_CAPI && PCI 20 depends on PCI
22 help 21 help
23 Enable support for the PCI version of the AVM B1 card. 22 Enable support for the PCI version of the AVM B1 card.
24 23
@@ -30,14 +29,13 @@ config ISDN_DRV_AVMB1_B1PCIV4
30 29
31config ISDN_DRV_AVMB1_T1ISA 30config ISDN_DRV_AVMB1_T1ISA
32 tristate "AVM T1/T1-B ISA support" 31 tristate "AVM T1/T1-B ISA support"
33 depends on CAPI_AVM && ISDN_CAPI && ISA 32 depends on ISA
34 help 33 help
35 Enable support for the AVM T1 T1B card. 34 Enable support for the AVM T1 T1B card.
36 Note: This is a PRI card and handle 30 B-channels. 35 Note: This is a PRI card and handle 30 B-channels.
37 36
38config ISDN_DRV_AVMB1_B1PCMCIA 37config ISDN_DRV_AVMB1_B1PCMCIA
39 tristate "AVM B1/M1/M2 PCMCIA support" 38 tristate "AVM B1/M1/M2 PCMCIA support"
40 depends on CAPI_AVM && ISDN_CAPI
41 help 39 help
42 Enable support for the PCMCIA version of the AVM B1 card. 40 Enable support for the PCMCIA version of the AVM B1 card.
43 41
@@ -50,17 +48,16 @@ config ISDN_DRV_AVMB1_AVM_CS
50 48
51config ISDN_DRV_AVMB1_T1PCI 49config ISDN_DRV_AVMB1_T1PCI
52 tristate "AVM T1/T1-B PCI support" 50 tristate "AVM T1/T1-B PCI support"
53 depends on CAPI_AVM && ISDN_CAPI && PCI 51 depends on PCI
54 help 52 help
55 Enable support for the AVM T1 T1B card. 53 Enable support for the AVM T1 T1B card.
56 Note: This is a PRI card and handle 30 B-channels. 54 Note: This is a PRI card and handle 30 B-channels.
57 55
58config ISDN_DRV_AVMB1_C4 56config ISDN_DRV_AVMB1_C4
59 tristate "AVM C4/C2 support" 57 tristate "AVM C4/C2 support"
60 depends on CAPI_AVM && ISDN_CAPI && PCI 58 depends on PCI
61 help 59 help
62 Enable support for the AVM C4/C2 PCI cards. 60 Enable support for the AVM C4/C2 PCI cards.
63 These cards handle 4/2 BRI ISDN lines (8/4 channels). 61 These cards handle 4/2 BRI ISDN lines (8/4 channels).
64 62
65endmenu 63endif # CAPI_AVM
66
diff --git a/drivers/isdn/hardware/eicon/Kconfig b/drivers/isdn/hardware/eicon/Kconfig
index 01d4afd9d843..6082b6a5ced3 100644
--- a/drivers/isdn/hardware/eicon/Kconfig
+++ b/drivers/isdn/hardware/eicon/Kconfig
@@ -2,52 +2,50 @@
2# ISDN DIVAS Eicon driver 2# ISDN DIVAS Eicon driver
3# 3#
4 4
5menu "Active Eicon DIVA Server cards" 5menuconfig CAPI_EICON
6 depends on NET && ISDN && ISDN_CAPI!=n 6 bool "Active Eicon DIVA Server cards"
7
8config CAPI_EICON
9 bool "Support Eicon cards"
10 help 7 help
11 Enable support for Eicon Networks active ISDN cards. 8 Enable support for Eicon Networks active ISDN cards.
12 9
10if CAPI_EICON
11
13config ISDN_DIVAS 12config ISDN_DIVAS
14 tristate "Support Eicon DIVA Server cards" 13 tristate "Support Eicon DIVA Server cards"
15 depends on CAPI_EICON && PROC_FS && PCI 14 depends on PROC_FS && PCI
16 help 15 help
17 Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card. 16 Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card.
18 In order to use this card, additional firmware is necessary, which 17 In order to use this card, additional firmware is necessary, which
19 has to be downloaded into the card using the divactrl utility. 18 has to be downloaded into the card using the divactrl utility.
20 19
20if ISDN_DIVAS
21
21config ISDN_DIVAS_BRIPCI 22config ISDN_DIVAS_BRIPCI
22 bool "DIVA Server BRI/PCI support" 23 bool "DIVA Server BRI/PCI support"
23 depends on ISDN_DIVAS
24 help 24 help
25 Enable support for DIVA Server BRI-PCI. 25 Enable support for DIVA Server BRI-PCI.
26 26
27config ISDN_DIVAS_PRIPCI 27config ISDN_DIVAS_PRIPCI
28 bool "DIVA Server PRI/PCI support" 28 bool "DIVA Server PRI/PCI support"
29 depends on ISDN_DIVAS
30 help 29 help
31 Enable support for DIVA Server PRI-PCI. 30 Enable support for DIVA Server PRI-PCI.
32 31
33config ISDN_DIVAS_DIVACAPI 32config ISDN_DIVAS_DIVACAPI
34 tristate "DIVA CAPI2.0 interface support" 33 tristate "DIVA CAPI2.0 interface support"
35 depends on ISDN_DIVAS && ISDN_CAPI
36 help 34 help
37 You need this to provide the CAPI interface 35 You need this to provide the CAPI interface
38 for DIVA Server cards. 36 for DIVA Server cards.
39 37
40config ISDN_DIVAS_USERIDI 38config ISDN_DIVAS_USERIDI
41 tristate "DIVA User-IDI interface support" 39 tristate "DIVA User-IDI interface support"
42 depends on ISDN_DIVAS
43 help 40 help
44 Enable support for user-mode IDI interface. 41 Enable support for user-mode IDI interface.
45 42
46config ISDN_DIVAS_MAINT 43config ISDN_DIVAS_MAINT
47 tristate "DIVA Maint driver support" 44 tristate "DIVA Maint driver support"
48 depends on ISDN_DIVAS && m 45 depends on m
49 help 46 help
50 Enable Divas Maintenance driver. 47 Enable Divas Maintenance driver.
51 48
52endmenu 49endif # ISDN_DIVAS
53 50
51endif # CAPI_EICON
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
index 4cbc68cf4dba..db87d5105422 100644
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ b/drivers/isdn/hardware/eicon/idifunc.c
@@ -106,6 +106,7 @@ static void um_new_card(DESCRIPTOR * d)
106 } else { 106 } else {
107 DBG_ERR(("could not create user mode idi card %d", 107 DBG_ERR(("could not create user mode idi card %d",
108 adapter_nr)); 108 adapter_nr));
109 diva_os_free(0, card);
109 } 110 }
110} 111}
111 112
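The idifunc.c change plugs a small memory leak: when the user-mode IDI adapter cannot be created, the card structure allocated just before was simply forgotten; the fix releases it on that error branch. A generic sketch of the pattern, with stand-in names and allocator (the real code uses diva_os_malloc/diva_os_free):

#include <linux/slab.h>
#include <linux/kernel.h>

struct example_card { int adapter_nr; };

static int example_create_adapter(struct example_card *card)
{
	return 0;			/* stand-in: pretend creation failed */
}

static void example_new_card(void)
{
	struct example_card *card = kzalloc(sizeof(*card), GFP_KERNEL);

	if (!card)
		return;
	if (!example_create_adapter(card)) {
		printk(KERN_ERR "could not create adapter\n");
		kfree(card);		/* the fix: don't leak the allocation on failure */
	}
}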
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
index 871310d56a6e..3d1bdc8431ad 100644
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ b/drivers/isdn/hisax/bkm_a4t.c
@@ -255,54 +255,38 @@ BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
255 return (0); 255 return (0);
256} 256}
257 257
258static struct pci_dev *dev_a4t __devinitdata = NULL; 258static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
259 struct IsdnCardState *cs,
260 u_int *found,
261 u_int *pci_memaddr)
262{
263 u16 sub_sys;
264 u16 sub_vendor;
265
266 sub_vendor = dev_a4t->subsystem_vendor;
267 sub_sys = dev_a4t->subsystem_device;
268 if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
269 if (pci_enable_device(dev_a4t))
270 return (0); /* end loop & function */
271 *found = 1;
272 *pci_memaddr = pci_resource_start(dev_a4t, 0);
273 cs->irq = dev_a4t->irq;
274 return (1); /* end loop */
275 }
259 276
260int __devinit 277 return (-1); /* continue looping */
261setup_bkm_a4t(struct IsdnCard *card) 278}
279
280static int __devinit a4t_cs_init(struct IsdnCard *card,
281 struct IsdnCardState *cs,
282 u_int pci_memaddr)
262{ 283{
263 struct IsdnCardState *cs = card->cs;
264 char tmp[64];
265 u_int pci_memaddr = 0, found = 0;
266 I20_REGISTER_FILE *pI20_Regs; 284 I20_REGISTER_FILE *pI20_Regs;
267#ifdef CONFIG_PCI
268#endif
269
270 strcpy(tmp, bkm_a4t_revision);
271 printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
272 if (cs->typ == ISDN_CTYPE_BKM_A4T) {
273 cs->subtyp = BKM_A4T;
274 } else
275 return (0);
276 285
277#ifdef CONFIG_PCI
278 while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
279 PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
280 u16 sub_sys;
281 u16 sub_vendor;
282
283 sub_vendor = dev_a4t->subsystem_vendor;
284 sub_sys = dev_a4t->subsystem_device;
285 if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
286 if (pci_enable_device(dev_a4t))
287 return(0);
288 found = 1;
289 pci_memaddr = pci_resource_start(dev_a4t, 0);
290 cs->irq = dev_a4t->irq;
291 break;
292 }
293 }
294 if (!found) {
295 printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
296 return (0);
297 }
298 if (!cs->irq) { /* IRQ range check ?? */ 286 if (!cs->irq) { /* IRQ range check ?? */
299 printk(KERN_WARNING "HiSax: %s: No IRQ\n", CardType[card->typ]); 287 printk(KERN_WARNING "HiSax: %s: No IRQ\n", CardType[card->typ]);
300 return (0); 288 return (0);
301 } 289 }
302 if (!pci_memaddr) {
303 printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
304 return (0);
305 }
306 cs->hw.ax.base = (long) ioremap(pci_memaddr, 4096); 290 cs->hw.ax.base = (long) ioremap(pci_memaddr, 4096);
307 /* Check suspecious address */ 291 /* Check suspecious address */
308 pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base); 292 pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
@@ -317,11 +301,7 @@ setup_bkm_a4t(struct IsdnCard *card)
317 cs->hw.ax.jade_adr = cs->hw.ax.base + PO_OFFSET; 301 cs->hw.ax.jade_adr = cs->hw.ax.base + PO_OFFSET;
318 cs->hw.ax.isac_ale = GCS_1; 302 cs->hw.ax.isac_ale = GCS_1;
319 cs->hw.ax.jade_ale = GCS_3; 303 cs->hw.ax.jade_ale = GCS_3;
320#else 304
321 printk(KERN_WARNING "HiSax: %s: NO_PCI_BIOS\n", CardType[card->typ]);
322 printk(KERN_WARNING "HiSax: %s: unable to configure\n", CardType[card->typ]);
323 return (0);
324#endif /* CONFIG_PCI */
325 printk(KERN_INFO "HiSax: %s: Card configured at 0x%lX IRQ %d\n", 305 printk(KERN_INFO "HiSax: %s: Card configured at 0x%lX IRQ %d\n",
326 CardType[card->typ], cs->hw.ax.base, cs->irq); 306 CardType[card->typ], cs->hw.ax.base, cs->irq);
327 307
@@ -339,5 +319,43 @@ setup_bkm_a4t(struct IsdnCard *card)
339 ISACVersion(cs, "Telekom A4T:"); 319 ISACVersion(cs, "Telekom A4T:");
340 /* Jade version */ 320 /* Jade version */
341 JadeVersion(cs, "Telekom A4T:"); 321 JadeVersion(cs, "Telekom A4T:");
322
342 return (1); 323 return (1);
343} 324}
325
326static struct pci_dev *dev_a4t __devinitdata = NULL;
327
328int __devinit
329setup_bkm_a4t(struct IsdnCard *card)
330{
331 struct IsdnCardState *cs = card->cs;
332 char tmp[64];
333 u_int pci_memaddr = 0, found = 0;
334 int ret;
335
336 strcpy(tmp, bkm_a4t_revision);
337 printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
338 if (cs->typ == ISDN_CTYPE_BKM_A4T) {
339 cs->subtyp = BKM_A4T;
340 } else
341 return (0);
342
343 while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
344 PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
345 ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr);
346 if (!ret)
347 return (0);
348 if (ret > 0)
349 break;
350 }
351 if (!found) {
352 printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
353 return (0);
354 }
355 if (!pci_memaddr) {
356 printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
357 return (0);
358 }
359
360 return a4t_cs_init(card, cs, pci_memaddr);
361}
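setup_bkm_a4t() is restructured so the body of the pci_find_device() loop lives in a4t_pci_probe(), which steers the caller through its return value: 0 aborts the whole setup, 1 ends the search successfully, and -1 means keep scanning. The NETjet-S and enter:now setups later in this patch use the same shape. A compressed sketch of that control flow, with a placeholder match test and state structure:

#include <linux/pci.h>

struct example_state { int irq; unsigned long base; };

/* Probe one candidate: <0 = not ours, keep scanning; 0 = fatal; >0 = claimed. */
static int example_probe_one(struct pci_dev *dev, struct example_state *st)
{
	if (dev->subsystem_vendor != 0x55)	/* placeholder match test */
		return -1;
	if (pci_enable_device(dev))
		return 0;
	st->irq  = dev->irq;
	st->base = pci_resource_start(dev, 0);
	return 1;
}

static int example_setup(struct example_state *st)
{
	struct pci_dev *dev = NULL;
	int ret;

	while ((dev = pci_find_device(PCI_VENDOR_ID_TIGERJET,
				      PCI_DEVICE_ID_TIGERJET_300, dev))) {
		ret = example_probe_one(dev, st);
		if (!ret)
			return 0;	/* hard failure, stop here */
		if (ret > 0)
			break;		/* this one is ours */
	}
	if (!dev)
		return 0;		/* scanned everything, nothing matched */
	/* ... continue with card-state initialisation ... */
	return 1;
}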
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 8d53a7fd2671..5f7907e57090 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -361,11 +361,11 @@ module_param_array(io1, int, NULL, 0);
361 361
362int nrcards; 362int nrcards;
363 363
364extern char *l1_revision; 364extern const char *l1_revision;
365extern char *l2_revision; 365extern const char *l2_revision;
366extern char *l3_revision; 366extern const char *l3_revision;
367extern char *lli_revision; 367extern const char *lli_revision;
368extern char *tei_revision; 368extern const char *tei_revision;
369 369
370char *HiSax_getrev(const char *revision) 370char *HiSax_getrev(const char *revision)
371{ 371{
@@ -847,95 +847,10 @@ static int init_card(struct IsdnCardState *cs)
847 return 3; 847 return 3;
848} 848}
849 849
850static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner) 850static int hisax_cs_setup_card(struct IsdnCard *card)
851{ 851{
852 int ret = 0; 852 int ret;
853 struct IsdnCard *card = cards + cardnr;
854 struct IsdnCardState *cs;
855 853
856 cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
857 if (!cs) {
858 printk(KERN_WARNING
859 "HiSax: No memory for IsdnCardState(card %d)\n",
860 cardnr + 1);
861 goto out;
862 }
863 card->cs = cs;
864 spin_lock_init(&cs->statlock);
865 spin_lock_init(&cs->lock);
866 cs->chanlimit = 2; /* maximum B-channel number */
867 cs->logecho = 0; /* No echo logging */
868 cs->cardnr = cardnr;
869 cs->debug = L1_DEB_WARN;
870 cs->HW_Flags = 0;
871 cs->busy_flag = busy_flag;
872 cs->irq_flags = I4L_IRQ_FLAG;
873#if TEI_PER_CARD
874 if (card->protocol == ISDN_PTYPE_NI1)
875 test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
876#else
877 test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
878#endif
879 cs->protocol = card->protocol;
880
881 if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
882 printk(KERN_WARNING
883 "HiSax: Card Type %d out of range\n", card->typ);
884 goto outf_cs;
885 }
886 if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
887 printk(KERN_WARNING
888 "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
889 goto outf_cs;
890 }
891 if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
892 printk(KERN_WARNING
893 "HiSax: No memory for status_buf(card %d)\n",
894 cardnr + 1);
895 goto outf_dlog;
896 }
897 cs->stlist = NULL;
898 cs->status_read = cs->status_buf;
899 cs->status_write = cs->status_buf;
900 cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
901 cs->typ = card->typ;
902#ifdef MODULE
903 cs->iif.owner = lockowner;
904#endif
905 strcpy(cs->iif.id, id);
906 cs->iif.channels = 2;
907 cs->iif.maxbufsize = MAX_DATA_SIZE;
908 cs->iif.hl_hdrlen = MAX_HEADER_LEN;
909 cs->iif.features =
910 ISDN_FEATURE_L2_X75I |
911 ISDN_FEATURE_L2_HDLC |
912 ISDN_FEATURE_L2_HDLC_56K |
913 ISDN_FEATURE_L2_TRANS |
914 ISDN_FEATURE_L3_TRANS |
915#ifdef CONFIG_HISAX_1TR6
916 ISDN_FEATURE_P_1TR6 |
917#endif
918#ifdef CONFIG_HISAX_EURO
919 ISDN_FEATURE_P_EURO |
920#endif
921#ifdef CONFIG_HISAX_NI1
922 ISDN_FEATURE_P_NI1 |
923#endif
924 0;
925
926 cs->iif.command = HiSax_command;
927 cs->iif.writecmd = NULL;
928 cs->iif.writebuf_skb = HiSax_writebuf_skb;
929 cs->iif.readstat = HiSax_readstatus;
930 register_isdn(&cs->iif);
931 cs->myid = cs->iif.channels;
932 printk(KERN_INFO
933 "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
934 (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
935 (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
936 (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
937 (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
938 "NONE", cs->iif.id, cs->myid);
939 switch (card->typ) { 854 switch (card->typ) {
940#if CARD_TELES0 855#if CARD_TELES0
941 case ISDN_CTYPE_16_0: 856 case ISDN_CTYPE_16_0:
@@ -1094,13 +1009,115 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
1094 printk(KERN_WARNING 1009 printk(KERN_WARNING
1095 "HiSax: Support for %s Card not selected\n", 1010 "HiSax: Support for %s Card not selected\n",
1096 CardType[card->typ]); 1011 CardType[card->typ]);
1097 ll_unload(cs); 1012 ret = 0;
1013 break;
1014 }
1015
1016 return ret;
1017}
1018
1019static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
1020 struct IsdnCardState **cs_out, int *busy_flag,
1021 struct module *lockowner)
1022{
1023 struct IsdnCardState *cs;
1024
1025 *cs_out = NULL;
1026
1027 cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
1028 if (!cs) {
1029 printk(KERN_WARNING
1030 "HiSax: No memory for IsdnCardState(card %d)\n",
1031 cardnr + 1);
1032 goto out;
1033 }
1034 card->cs = cs;
1035 spin_lock_init(&cs->statlock);
1036 spin_lock_init(&cs->lock);
1037 cs->chanlimit = 2; /* maximum B-channel number */
1038 cs->logecho = 0; /* No echo logging */
1039 cs->cardnr = cardnr;
1040 cs->debug = L1_DEB_WARN;
1041 cs->HW_Flags = 0;
1042 cs->busy_flag = busy_flag;
1043 cs->irq_flags = I4L_IRQ_FLAG;
1044#if TEI_PER_CARD
1045 if (card->protocol == ISDN_PTYPE_NI1)
1046 test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
1047#else
1048 test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
1049#endif
1050 cs->protocol = card->protocol;
1051
1052 if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
1053 printk(KERN_WARNING
1054 "HiSax: Card Type %d out of range\n", card->typ);
1098 goto outf_cs; 1055 goto outf_cs;
1099 } 1056 }
1100 if (!ret) { 1057 if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
1101 ll_unload(cs); 1058 printk(KERN_WARNING
1059 "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
1102 goto outf_cs; 1060 goto outf_cs;
1103 } 1061 }
1062 if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
1063 printk(KERN_WARNING
1064 "HiSax: No memory for status_buf(card %d)\n",
1065 cardnr + 1);
1066 goto outf_dlog;
1067 }
1068 cs->stlist = NULL;
1069 cs->status_read = cs->status_buf;
1070 cs->status_write = cs->status_buf;
1071 cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
1072 cs->typ = card->typ;
1073#ifdef MODULE
1074 cs->iif.owner = lockowner;
1075#endif
1076 strcpy(cs->iif.id, id);
1077 cs->iif.channels = 2;
1078 cs->iif.maxbufsize = MAX_DATA_SIZE;
1079 cs->iif.hl_hdrlen = MAX_HEADER_LEN;
1080 cs->iif.features =
1081 ISDN_FEATURE_L2_X75I |
1082 ISDN_FEATURE_L2_HDLC |
1083 ISDN_FEATURE_L2_HDLC_56K |
1084 ISDN_FEATURE_L2_TRANS |
1085 ISDN_FEATURE_L3_TRANS |
1086#ifdef CONFIG_HISAX_1TR6
1087 ISDN_FEATURE_P_1TR6 |
1088#endif
1089#ifdef CONFIG_HISAX_EURO
1090 ISDN_FEATURE_P_EURO |
1091#endif
1092#ifdef CONFIG_HISAX_NI1
1093 ISDN_FEATURE_P_NI1 |
1094#endif
1095 0;
1096
1097 cs->iif.command = HiSax_command;
1098 cs->iif.writecmd = NULL;
1099 cs->iif.writebuf_skb = HiSax_writebuf_skb;
1100 cs->iif.readstat = HiSax_readstatus;
1101 register_isdn(&cs->iif);
1102 cs->myid = cs->iif.channels;
1103
1104 *cs_out = cs;
1105 return 1; /* success */
1106
1107outf_dlog:
1108 kfree(cs->dlog);
1109outf_cs:
1110 kfree(cs);
1111 card->cs = NULL;
1112out:
1113 return 0; /* error */
1114}
1115
1116static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
1117 struct IsdnCardState *cs)
1118{
1119 int ret;
1120
1104 if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) { 1121 if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
1105 printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n"); 1122 printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
1106 ll_unload(cs); 1123 ll_unload(cs);
@@ -1143,11 +1160,41 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
1143 if (!test_bit(HW_ISAR, &cs->HW_Flags)) 1160 if (!test_bit(HW_ISAR, &cs->HW_Flags))
1144 ll_run(cs, 0); 1161 ll_run(cs, 0);
1145 1162
1146 ret = 1; 1163 return 1;
1164
1165outf_cs:
1166 kfree(cs);
1167 card->cs = NULL;
1168 return ret;
1169}
1170
1171static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
1172{
1173 int ret;
1174 struct IsdnCard *card = cards + cardnr;
1175 struct IsdnCardState *cs;
1176
1177 ret = hisax_cs_new(cardnr, id, card, &cs, busy_flag, lockowner);
1178 if (!ret)
1179 return 0;
1180
1181 printk(KERN_INFO
1182 "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
1183 (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
1184 (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
1185 (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
1186 (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
1187 "NONE", cs->iif.id, cs->myid);
1188
1189 ret = hisax_cs_setup_card(card);
1190 if (!ret) {
1191 ll_unload(cs);
1192 goto outf_cs;
1193 }
1194
1195 ret = hisax_cs_setup(cardnr, card, cs);
1147 goto out; 1196 goto out;
1148 1197
1149 outf_dlog:
1150 kfree(cs->dlog);
1151 outf_cs: 1198 outf_cs:
1152 kfree(cs); 1199 kfree(cs);
1153 card->cs = NULL; 1200 card->cs = NULL;
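The large config.c hunk splits checkcard() into hisax_cs_new() (allocate and register the card state), hisax_cs_setup_card() (the per-type switch) and hisax_cs_setup() (buffers and ll_run), each stage unwinding its own allocations through the usual goto labels. A minimal sketch of that unwinding idiom, with invented names but the same label order as hisax_cs_new(); MAX_DLOG_SPACE and HISAX_STATUS_BUFSIZE are assumed to come from hisax.h:

#include <linux/slab.h>

struct example_cs { char *dlog; char *status_buf; };

static int example_cs_new(struct example_cs **out)
{
	struct example_cs *cs;

	*out = NULL;
	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		goto out;
	cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC);
	if (!cs->dlog)
		goto outf_cs;
	cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC);
	if (!cs->status_buf)
		goto outf_dlog;

	*out = cs;
	return 1;			/* success */

outf_dlog:
	kfree(cs->dlog);		/* unwind in reverse order of allocation */
outf_cs:
	kfree(cs);
out:
	return 0;			/* error */
}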
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index b45de9d408d1..b73027ff50e8 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -300,98 +300,72 @@ enpci_interrupt(int intno, void *dev_id)
300 return IRQ_HANDLED; 300 return IRQ_HANDLED;
301} 301}
302 302
303 303static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
304static struct pci_dev *dev_netjet __devinitdata = NULL; 304 struct IsdnCardState *cs)
305
306/* called by config.c */
307int __devinit
308setup_enternow_pci(struct IsdnCard *card)
309{ 305{
310 int bytecnt; 306 if (pci_enable_device(dev_netjet))
311 struct IsdnCardState *cs = card->cs;
312 char tmp[64];
313
314#ifdef CONFIG_PCI
315#ifdef __BIG_ENDIAN
316#error "not running on big endian machines now"
317#endif
318 strcpy(tmp, enternow_pci_rev);
319 printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
320 if (cs->typ != ISDN_CTYPE_ENTERNOW)
321 return(0); 307 return(0);
322 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 308 cs->irq = dev_netjet->irq;
323 309 if (!cs->irq) {
324 for ( ;; ) 310 printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
325 { 311 return(0);
326 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, 312 }
327 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { 313 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
328 if (pci_enable_device(dev_netjet)) 314 if (!cs->hw.njet.base) {
329 return(0); 315 printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
330 cs->irq = dev_netjet->irq; 316 return(0);
331 if (!cs->irq) { 317 }
332 printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n"); 318 /* checks Sub-Vendor ID because system crashes with Traverse-Card */
333 return(0); 319 if ((dev_netjet->subsystem_vendor != 0x55) ||
334 } 320 (dev_netjet->subsystem_device != 0x02)) {
335 cs->hw.njet.base = pci_resource_start(dev_netjet, 0); 321 printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
336 if (!cs->hw.njet.base) { 322 printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
337 printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n"); 323 return(0);
338 return(0); 324 }
339 }
340 /* checks Sub-Vendor ID because system crashes with Traverse-Card */
341 if ((dev_netjet->subsystem_vendor != 0x55) ||
342 (dev_netjet->subsystem_device != 0x02)) {
343 printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
344 printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
345 return(0);
346 }
347 } else {
348 printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
349 return(0);
350 }
351
352 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
353 cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
354
355 /* Reset an */
356 cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
357 outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
358 /* 20 ms Pause */
359 mdelay(20);
360 325
361 cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */ 326 return(1);
362 outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL); 327}
363 mdelay(10);
364 328
365 cs->hw.njet.auxd = 0x00; // war 0xc0 329static void __devinit en_cs_init(struct IsdnCard *card,
366 cs->hw.njet.dmactrl = 0; 330 struct IsdnCardState *cs)
331{
332 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
333 cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
367 334
368 outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL); 335 /* Reset an */
369 outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1); 336 cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
370 outb(cs->hw.njet.auxd, cs->hw.njet.auxa); 337 outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
338 /* 20 ms Pause */
339 mdelay(20);
371 340
372 break; 341 cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */
373 } 342 outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
374#else 343 mdelay(10);
375 344
376 printk(KERN_WARNING "enter:now PCI: NO_PCI_BIOS\n"); 345 cs->hw.njet.auxd = 0x00; // war 0xc0
377 printk(KERN_WARNING "enter:now PCI: unable to config Formula-n enter:now ISDN PCI ab\n"); 346 cs->hw.njet.dmactrl = 0;
378 return (0);
379 347
380#endif /* CONFIG_PCI */ 348 outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
349 outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
350 outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
351}
381 352
382 bytecnt = 256; 353static int __devinit en_cs_init_rest(struct IsdnCard *card,
354 struct IsdnCardState *cs)
355{
356 const int bytecnt = 256;
383 357
384 printk(KERN_INFO 358 printk(KERN_INFO
385 "enter:now PCI: PCI card configured at 0x%lx IRQ %d\n", 359 "enter:now PCI: PCI card configured at 0x%lx IRQ %d\n",
386 cs->hw.njet.base, cs->irq); 360 cs->hw.njet.base, cs->irq);
387 if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) { 361 if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) {
388 printk(KERN_WARNING 362 printk(KERN_WARNING
389 "HiSax: %s config port %lx-%lx already in use\n", 363 "HiSax: enter:now config port %lx-%lx already in use\n",
390 CardType[card->typ], 364 cs->hw.njet.base,
391 cs->hw.njet.base, 365 cs->hw.njet.base + bytecnt);
392 cs->hw.njet.base + bytecnt);
393 return (0); 366 return (0);
394 } 367 }
368
395 setup_Amd7930(cs); 369 setup_Amd7930(cs);
396 cs->hw.njet.last_is0 = 0; 370 cs->hw.njet.last_is0 = 0;
397 /* macro rByteAMD */ 371 /* macro rByteAMD */
@@ -407,5 +381,44 @@ setup_enternow_pci(struct IsdnCard *card)
407 cs->irq_func = &enpci_interrupt; 381 cs->irq_func = &enpci_interrupt;
408 cs->irq_flags |= IRQF_SHARED; 382 cs->irq_flags |= IRQF_SHARED;
409 383
410 return (1); 384 return (1);
385}
386
387static struct pci_dev *dev_netjet __devinitdata = NULL;
388
389/* called by config.c */
390int __devinit
391setup_enternow_pci(struct IsdnCard *card)
392{
393 int ret;
394 struct IsdnCardState *cs = card->cs;
395 char tmp[64];
396
397#ifdef __BIG_ENDIAN
398#error "not running on big endian machines now"
399#endif
400
401 strcpy(tmp, enternow_pci_rev);
402 printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
403 if (cs->typ != ISDN_CTYPE_ENTERNOW)
404 return(0);
405 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
406
407 for ( ;; )
408 {
409 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
410 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
411 ret = en_pci_probe(dev_netjet, cs);
412 if (!ret)
413 return(0);
414 } else {
415 printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
416 return(0);
417 }
418
419 en_cs_init(card, cs);
420 break;
421 }
422
423 return en_cs_init_rest(card, cs);
411} 424}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8a48a3ce0a55..077080aca799 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -6,7 +6,7 @@
6 * based on existing driver for CCD hfc ISA cards 6 * based on existing driver for CCD hfc ISA cards
7 * Copyright by Werner Cornelius <werner@isdn4linux.de> 7 * Copyright by Werner Cornelius <werner@isdn4linux.de>
8 * by Karsten Keil <keil@isdn4linux.de> 8 * by Karsten Keil <keil@isdn4linux.de>
9 * 9 *
10 * This software may be used and distributed according to the terms 10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference. 11 * of the GNU General Public License, incorporated herein by reference.
12 * 12 *
@@ -67,8 +67,6 @@ static const PCI_ENTRY id_list[] =
67}; 67};
68 68
69 69
70#ifdef CONFIG_PCI
71
72/******************************************/ 70/******************************************/
73/* free hardware resources used by driver */ 71/* free hardware resources used by driver */
74/******************************************/ 72/******************************************/
@@ -237,7 +235,7 @@ static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
237 if (fifo_state) 235 if (fifo_state)
238 cs->hw.hfcpci.fifo_en |= fifo_state; 236 cs->hw.hfcpci.fifo_en |= fifo_state;
239 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); 237 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
240} 238}
241 239
242/***************************************/ 240/***************************************/
243/* clear the desired B-channel tx fifo */ 241/* clear the desired B-channel tx fifo */
@@ -263,7 +261,7 @@ static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
263 if (fifo_state) 261 if (fifo_state)
264 cs->hw.hfcpci.fifo_en |= fifo_state; 262 cs->hw.hfcpci.fifo_en |= fifo_state;
265 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); 263 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
266} 264}
267 265
268/*********************************************/ 266/*********************************************/
269/* read a complete B-frame out of the buffer */ 267/* read a complete B-frame out of the buffer */
@@ -511,7 +509,6 @@ main_rec_hfcpci(struct BCState *bcs)
511 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 509 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
512 if (count && receive) 510 if (count && receive)
513 goto Begin; 511 goto Begin;
514 return;
515} 512}
516 513
517/**************************/ 514/**************************/
@@ -582,7 +579,6 @@ hfcpci_fill_dfifo(struct IsdnCardState *cs)
582 579
583 dev_kfree_skb_any(cs->tx_skb); 580 dev_kfree_skb_any(cs->tx_skb);
584 cs->tx_skb = NULL; 581 cs->tx_skb = NULL;
585 return;
586} 582}
587 583
588/**************************/ 584/**************************/
@@ -729,7 +725,6 @@ hfcpci_fill_fifo(struct BCState *bcs)
729 dev_kfree_skb_any(bcs->tx_skb); 725 dev_kfree_skb_any(bcs->tx_skb);
730 bcs->tx_skb = NULL; 726 bcs->tx_skb = NULL;
731 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); 727 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
732 return;
733} 728}
734 729
735/**********************************************/ 730/**********************************************/
@@ -924,7 +919,6 @@ receive_emsg(struct IsdnCardState *cs)
924 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 919 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
925 if (count && receive) 920 if (count && receive)
926 goto Begin; 921 goto Begin;
927 return;
928} /* receive_emsg */ 922} /* receive_emsg */
929 923
930/*********************/ 924/*********************/
@@ -1350,13 +1344,13 @@ mode_hfcpci(struct BCState *bcs, int mode, int bc)
1350 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA; 1344 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1351 } 1345 }
1352 if (fifo2) { 1346 if (fifo2) {
1353 cs->hw.hfcpci.last_bfifo_cnt[1] = 0; 1347 cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
1354 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2; 1348 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1355 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); 1349 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1356 cs->hw.hfcpci.ctmt &= ~2; 1350 cs->hw.hfcpci.ctmt &= ~2;
1357 cs->hw.hfcpci.conn &= ~0x18; 1351 cs->hw.hfcpci.conn &= ~0x18;
1358 } else { 1352 } else {
1359 cs->hw.hfcpci.last_bfifo_cnt[0] = 0; 1353 cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
1360 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1; 1354 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1361 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); 1355 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1362 cs->hw.hfcpci.ctmt &= ~1; 1356 cs->hw.hfcpci.ctmt &= ~1;
@@ -1642,8 +1636,6 @@ hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1642/* this variable is used as card index when more than one cards are present */ 1636/* this variable is used as card index when more than one cards are present */
1643static struct pci_dev *dev_hfcpci __devinitdata = NULL; 1637static struct pci_dev *dev_hfcpci __devinitdata = NULL;
1644 1638
1645#endif /* CONFIG_PCI */
1646
1647int __devinit 1639int __devinit
1648setup_hfcpci(struct IsdnCard *card) 1640setup_hfcpci(struct IsdnCard *card)
1649{ 1641{
@@ -1656,96 +1648,99 @@ setup_hfcpci(struct IsdnCard *card)
1656#ifdef __BIG_ENDIAN 1648#ifdef __BIG_ENDIAN
1657#error "not running on big endian machines now" 1649#error "not running on big endian machines now"
1658#endif 1650#endif
1651
1659 strcpy(tmp, hfcpci_revision); 1652 strcpy(tmp, hfcpci_revision);
1660 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); 1653 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1661#ifdef CONFIG_PCI 1654
1662 cs->hw.hfcpci.int_s1 = 0; 1655 cs->hw.hfcpci.int_s1 = 0;
1663 cs->dc.hfcpci.ph_state = 0; 1656 cs->dc.hfcpci.ph_state = 0;
1664 cs->hw.hfcpci.fifo = 255; 1657 cs->hw.hfcpci.fifo = 255;
1665 if (cs->typ == ISDN_CTYPE_HFC_PCI) { 1658 if (cs->typ != ISDN_CTYPE_HFC_PCI)
1666 i = 0; 1659 return(0);
1667 while (id_list[i].vendor_id) { 1660
1668 tmp_hfcpci = pci_find_device(id_list[i].vendor_id, 1661 i = 0;
1669 id_list[i].device_id, 1662 while (id_list[i].vendor_id) {
1670 dev_hfcpci); 1663 tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
1671 i++; 1664 id_list[i].device_id,
1672 if (tmp_hfcpci) { 1665 dev_hfcpci);
1673 if (pci_enable_device(tmp_hfcpci)) 1666 i++;
1674 continue;
1675 pci_set_master(tmp_hfcpci);
1676 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
1677 continue;
1678 else
1679 break;
1680 }
1681 }
1682
1683 if (tmp_hfcpci) { 1667 if (tmp_hfcpci) {
1684 i--; 1668 if (pci_enable_device(tmp_hfcpci))
1685 dev_hfcpci = tmp_hfcpci; /* old device */ 1669 continue;
1686 cs->hw.hfcpci.dev = dev_hfcpci; 1670 pci_set_master(tmp_hfcpci);
1687 cs->irq = dev_hfcpci->irq; 1671 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
1688 if (!cs->irq) { 1672 continue;
1689 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n"); 1673 else
1690 return (0); 1674 break;
1691 }
1692 cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
1693 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1694 } else {
1695 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1696 return (0);
1697 }
1698 if (!cs->hw.hfcpci.pci_io) {
1699 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1700 return (0);
1701 }
1702 /* Allocate memory for FIFOS */
1703 /* Because the HFC-PCI needs a 32K physical alignment, we */
1704 /* need to allocate the double mem and align the address */
1705 if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
1706 printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
1707 return 0;
1708 } 1675 }
1709 cs->hw.hfcpci.fifos = (void *) 1676 }
1710 (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000; 1677
1711 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos)); 1678 if (!tmp_hfcpci) {
1712 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256); 1679 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1713 printk(KERN_INFO 1680 return (0);
1714 "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n", 1681 }
1715 cs->hw.hfcpci.pci_io, 1682
1716 cs->hw.hfcpci.fifos, 1683 i--;
1717 (u_int) virt_to_bus(cs->hw.hfcpci.fifos), 1684 dev_hfcpci = tmp_hfcpci; /* old device */
1718 cs->irq, HZ); 1685 cs->hw.hfcpci.dev = dev_hfcpci;
1719 spin_lock_irqsave(&cs->lock, flags); 1686 cs->irq = dev_hfcpci->irq;
1720 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */ 1687 if (!cs->irq) {
1721 cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */ 1688 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1722 cs->hw.hfcpci.int_m1 = 0; 1689 return (0);
1723 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); 1690 }
1724 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); 1691 cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
1725 /* At this point the needed PCI config is done */ 1692 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1726 /* fifos are still not enabled */ 1693
1727 INIT_WORK(&cs->tqueue, hfcpci_bh); 1694 if (!cs->hw.hfcpci.pci_io) {
1728 cs->setstack_d = setstack_hfcpci; 1695 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1729 cs->BC_Send_Data = &hfcpci_send_data; 1696 return (0);
1730 cs->readisac = NULL; 1697 }
1731 cs->writeisac = NULL; 1698 /* Allocate memory for FIFOS */
1732 cs->readisacfifo = NULL; 1699 /* Because the HFC-PCI needs a 32K physical alignment, we */
1733 cs->writeisacfifo = NULL; 1700 /* need to allocate the double mem and align the address */
1734 cs->BC_Read_Reg = NULL; 1701 if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
1735 cs->BC_Write_Reg = NULL; 1702 printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
1736 cs->irq_func = &hfcpci_interrupt; 1703 return 0;
1737 cs->irq_flags |= IRQF_SHARED; 1704 }
1738 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer; 1705 cs->hw.hfcpci.fifos = (void *)
1739 cs->hw.hfcpci.timer.data = (long) cs; 1706 (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
1740 init_timer(&cs->hw.hfcpci.timer); 1707 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
1741 cs->cardmsg = &hfcpci_card_msg; 1708 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
1742 cs->auxcmd = &hfcpci_auxcmd; 1709 printk(KERN_INFO
1743 spin_unlock_irqrestore(&cs->lock, flags); 1710 "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
1744 return (1); 1711 cs->hw.hfcpci.pci_io,
1745 } else 1712 cs->hw.hfcpci.fifos,
1746 return (0); /* no valid card type */ 1713 (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
1747#else 1714 cs->irq, HZ);
1748 printk(KERN_WARNING "HFC-PCI: NO_PCI_BIOS\n"); 1715
1749 return (0); 1716 spin_lock_irqsave(&cs->lock, flags);
1750#endif /* CONFIG_PCI */ 1717
1718 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
1719 cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */
1720 cs->hw.hfcpci.int_m1 = 0;
1721 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1722 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1723 /* At this point the needed PCI config is done */
1724 /* fifos are still not enabled */
1725
1726 INIT_WORK(&cs->tqueue, hfcpci_bh);
1727 cs->setstack_d = setstack_hfcpci;
1728 cs->BC_Send_Data = &hfcpci_send_data;
1729 cs->readisac = NULL;
1730 cs->writeisac = NULL;
1731 cs->readisacfifo = NULL;
1732 cs->writeisacfifo = NULL;
1733 cs->BC_Read_Reg = NULL;
1734 cs->BC_Write_Reg = NULL;
1735 cs->irq_func = &hfcpci_interrupt;
1736 cs->irq_flags |= IRQF_SHARED;
1737 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
1738 cs->hw.hfcpci.timer.data = (long) cs;
1739 init_timer(&cs->hw.hfcpci.timer);
1740 cs->cardmsg = &hfcpci_card_msg;
1741 cs->auxcmd = &hfcpci_auxcmd;
1742
1743 spin_unlock_irqrestore(&cs->lock, flags);
1744
1745 return (1);
1751} 1746}
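Besides dropping the CONFIG_PCI conditionals, setup_hfcpci() is flattened: the single large "if (cs->typ == ISDN_CTYPE_HFC_PCI) { ... } else return 0;" branch becomes an early guard, and the redundant "return;" statements at the end of several void functions go away. The guard-clause shape, reduced to a toy example with invented names:

struct example_card_state { int typ; };

#define EXAMPLE_TYPE 1			/* stand-in for the real card-type constant */

static int example_setup(struct example_card_state *cs)
{
	if (cs->typ != EXAMPLE_TYPE)	/* guard clause: reject the wrong type up front */
		return 0;

	/* ... the real card setup now runs at a single indentation level ... */
	return 1;
}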
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
index c09ffb135330..fa2db87667c8 100644
--- a/drivers/isdn/hisax/nj_s.c
+++ b/drivers/isdn/hisax/nj_s.c
@@ -148,107 +148,87 @@ NETjet_S_card_msg(struct IsdnCardState *cs, int mt, void *arg)
148 return(0); 148 return(0);
149} 149}
150 150
151static struct pci_dev *dev_netjet __devinitdata = NULL; 151static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
152 152 struct IsdnCardState *cs)
153int __devinit
154setup_netjet_s(struct IsdnCard *card)
155{ 153{
156 int bytecnt,cfg; 154 int cfg;
157 struct IsdnCardState *cs = card->cs;
158 char tmp[64];
159 155
160#ifdef __BIG_ENDIAN 156 if (pci_enable_device(dev_netjet))
161#error "not running on big endian machines now"
162#endif
163 strcpy(tmp, NETjet_S_revision);
164 printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
165 if (cs->typ != ISDN_CTYPE_NETJET_S)
166 return(0); 157 return(0);
167 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 158 pci_set_master(dev_netjet);
159 cs->irq = dev_netjet->irq;
160 if (!cs->irq) {
161 printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
162 return(0);
163 }
164 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
165 if (!cs->hw.njet.base) {
166 printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
167 return(0);
168 }
169 /* the TJ300 and TJ320 must be detected, the IRQ handling is different
170 * unfortunatly the chips use the same device ID, but the TJ320 has
171 * the bit20 in status PCI cfg register set
172 */
173 pci_read_config_dword(dev_netjet, 0x04, &cfg);
174 if (cfg & 0x00100000)
175 cs->subtyp = 1; /* TJ320 */
176 else
177 cs->subtyp = 0; /* TJ300 */
178 /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
179 if ((dev_netjet->subsystem_vendor == 0x55) &&
180 (dev_netjet->subsystem_device == 0x02)) {
181 printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
182 printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
183 return(0);
184 }
185 /* end new code */
168 186
169#ifdef CONFIG_PCI 187 return(1);
188}
170 189
171 for ( ;; ) 190static int __devinit njs_cs_init(struct IsdnCard *card,
172 { 191 struct IsdnCardState *cs)
173 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, 192{
174 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
175 if (pci_enable_device(dev_netjet))
176 return(0);
177 pci_set_master(dev_netjet);
178 cs->irq = dev_netjet->irq;
179 if (!cs->irq) {
180 printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
181 return(0);
182 }
183 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
184 if (!cs->hw.njet.base) {
185 printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
186 return(0);
187 }
188 /* the TJ300 and TJ320 must be detected, the IRQ handling is different
189 * unfortunatly the chips use the same device ID, but the TJ320 has
190 * the bit20 in status PCI cfg register set
191 */
192 pci_read_config_dword(dev_netjet, 0x04, &cfg);
193 if (cfg & 0x00100000)
194 cs->subtyp = 1; /* TJ320 */
195 else
196 cs->subtyp = 0; /* TJ300 */
197 /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
198 if ((dev_netjet->subsystem_vendor == 0x55) &&
199 (dev_netjet->subsystem_device == 0x02)) {
200 printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
201 printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
202 return(0);
203 }
204 /* end new code */
205 } else {
206 printk(KERN_WARNING "NETjet-S: No PCI card found\n");
207 return(0);
208 }
209 193
210 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA; 194 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
211 cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF; 195 cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
212 196
213 cs->hw.njet.ctrl_reg = 0xff; /* Reset On */ 197 cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
214 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); 198 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
215 mdelay(10); 199 mdelay(10);
216 200
217 cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */ 201 cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
218 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); 202 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
219 mdelay(10); 203 mdelay(10);
220 204
221 cs->hw.njet.auxd = 0xC0; 205 cs->hw.njet.auxd = 0xC0;
222 cs->hw.njet.dmactrl = 0; 206 cs->hw.njet.dmactrl = 0;
223 207
224 byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ); 208 byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
225 byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ); 209 byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
226 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd); 210 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
227 211
228 switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) ) 212 switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
229 { 213 {
230 case 0 : 214 case 0 :
231 break; 215 return 1; /* end loop */
232 216
233 case 3 : 217 case 3 :
234 printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" ); 218 printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
235 continue; 219 return -1; /* continue looping */
236 220
237 default : 221 default :
238 printk( KERN_WARNING "NETjet-S: No PCI card found\n" ); 222 printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
239 return 0; 223 return 0; /* end loop & function */
240 }
241 break;
242 } 224 }
243#else 225 return 1; /* end loop */
244 226}
245 printk(KERN_WARNING "NETjet-S: NO_PCI_BIOS\n");
246 printk(KERN_WARNING "NETjet-S: unable to config NETJET-S PCI\n");
247 return (0);
248
249#endif /* CONFIG_PCI */
250 227
251 bytecnt = 256; 228static int __devinit njs_cs_init_rest(struct IsdnCard *card,
229 struct IsdnCardState *cs)
230{
231 const int bytecnt = 256;
252 232
253 printk(KERN_INFO 233 printk(KERN_INFO
254 "NETjet-S: %s card configured at %#lx IRQ %d\n", 234 "NETjet-S: %s card configured at %#lx IRQ %d\n",
@@ -273,5 +253,47 @@ setup_netjet_s(struct IsdnCard *card)
273 cs->irq_func = &netjet_s_interrupt; 253 cs->irq_func = &netjet_s_interrupt;
274 cs->irq_flags |= IRQF_SHARED; 254 cs->irq_flags |= IRQF_SHARED;
275 ISACVersion(cs, "NETjet-S:"); 255 ISACVersion(cs, "NETjet-S:");
256
276 return (1); 257 return (1);
277} 258}
259
260static struct pci_dev *dev_netjet __devinitdata = NULL;
261
262int __devinit
263setup_netjet_s(struct IsdnCard *card)
264{
265 int ret;
266 struct IsdnCardState *cs = card->cs;
267 char tmp[64];
268
269#ifdef __BIG_ENDIAN
270#error "not running on big endian machines now"
271#endif
272 strcpy(tmp, NETjet_S_revision);
273 printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
274 if (cs->typ != ISDN_CTYPE_NETJET_S)
275 return(0);
276 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
277
278 for ( ;; )
279 {
280 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
281 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
282 ret = njs_pci_probe(dev_netjet, cs);
283 if (!ret)
284 return(0);
285 } else {
286 printk(KERN_WARNING "NETjet-S: No PCI card found\n");
287 return(0);
288 }
289
290 ret = njs_cs_init(card, cs);
291 if (!ret)
292 return(0);
293 if (ret > 0)
294 break;
295 /* otherwise, ret < 0, continue looping */
296 }
297
298 return njs_cs_init_rest(card, cs);
299}
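
The nj_s.c rewrite above splits the old single-function probe loop into njs_pci_probe(), njs_cs_init() and njs_cs_init_rest(), tied together by a three-valued return convention: 0 aborts setup, a positive value accepts the card, and a negative value tells setup_netjet_s() to keep scanning the PCI bus (the case that used to be a bare "continue" inside the switch). The standalone sketch below shows only that control-flow convention; the fake_* helpers are invented stand-ins for the real PCI lookup and card-state initialisation.

/*
 * Sketch of the refactored probe loop: helpers return 0 to abort,
 * > 0 to accept the card, < 0 to keep scanning for another device.
 * fake_find_device/fake_pci_probe/fake_cs_init are illustrative only.
 */
#include <stdio.h>

static int fake_find_device(int *cookie)
{
	return (*cookie)++ < 2;		/* pretend two candidate devices exist */
}

static int fake_pci_probe(void)
{
	return 1;			/* 0 would be a hard failure */
}

static int fake_cs_init(int candidate)
{
	return candidate == 1 ? -1 : 1;	/* first candidate is the wrong card type */
}

static int setup_card(void)
{
	int cookie = 0, ret;

	for (;;) {
		if (!fake_find_device(&cookie)) {
			fprintf(stderr, "no PCI card found\n");
			return 0;
		}
		if (!fake_pci_probe())
			return 0;
		ret = fake_cs_init(cookie);
		if (!ret)
			return 0;
		if (ret > 0)
			break;		/* card accepted */
		/* ret < 0: wrong card answered, keep looking */
	}
	return 1;			/* caller would now run the _init_rest() part */
}

int main(void)
{
	return setup_card() ? 0 : 1;
}

Splitting the loop this way also lets the NO_PCI_BIOS branch and the nested switch disappear from setup_netjet_s() itself, which is what the removed lines in the hunks above show; the nj_u.c diff that follows applies the same treatment to the NETspider-U variant.
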
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
index 8202cf34ecae..f017d3816b1d 100644
--- a/drivers/isdn/hisax/nj_u.c
+++ b/drivers/isdn/hisax/nj_u.c
@@ -128,93 +128,69 @@ NETjet_U_card_msg(struct IsdnCardState *cs, int mt, void *arg)
128 return(0); 128 return(0);
129} 129}
130 130
131static struct pci_dev *dev_netjet __devinitdata = NULL; 131static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
132 132 struct IsdnCardState *cs)
133int __devinit
134setup_netjet_u(struct IsdnCard *card)
135{ 133{
136 int bytecnt; 134 if (pci_enable_device(dev_netjet))
137 struct IsdnCardState *cs = card->cs;
138 char tmp[64];
139#ifdef CONFIG_PCI
140#endif
141#ifdef __BIG_ENDIAN
142#error "not running on big endian machines now"
143#endif
144 strcpy(tmp, NETjet_U_revision);
145 printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
146 if (cs->typ != ISDN_CTYPE_NETJET_U)
147 return(0); 135 return(0);
148 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 136 pci_set_master(dev_netjet);
149 137 cs->irq = dev_netjet->irq;
150#ifdef CONFIG_PCI 138 if (!cs->irq) {
139 printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
140 return(0);
141 }
142 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
143 if (!cs->hw.njet.base) {
144 printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
145 return(0);
146 }
151 147
152 for ( ;; ) 148 return (1);
153 { 149}
154 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
155 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
156 if (pci_enable_device(dev_netjet))
157 return(0);
158 pci_set_master(dev_netjet);
159 cs->irq = dev_netjet->irq;
160 if (!cs->irq) {
161 printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
162 return(0);
163 }
164 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
165 if (!cs->hw.njet.base) {
166 printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
167 return(0);
168 }
169 } else {
170 printk(KERN_WARNING "NETspider-U: No PCI card found\n");
171 return(0);
172 }
173 150
174 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA; 151static int __devinit nju_cs_init(struct IsdnCard *card,
175 cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF; 152 struct IsdnCardState *cs)
176 mdelay(10); 153{
154 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
155 cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
156 mdelay(10);
177 157
178 cs->hw.njet.ctrl_reg = 0xff; /* Reset On */ 158 cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
179 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); 159 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
180 mdelay(10); 160 mdelay(10);
181 161
182 cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */ 162 cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
183 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); 163 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
184 mdelay(10); 164 mdelay(10);
185 165
186 cs->hw.njet.auxd = 0xC0; 166 cs->hw.njet.auxd = 0xC0;
187 cs->hw.njet.dmactrl = 0; 167 cs->hw.njet.dmactrl = 0;
188 168
189 byteout(cs->hw.njet.auxa, 0); 169 byteout(cs->hw.njet.auxa, 0);
190 byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ); 170 byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
191 byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ); 171 byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
192 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd); 172 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
193 173
194 switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) ) 174 switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) )
195 { 175 {
196 case 3 : 176 case 3 :
197 break; 177 return 1; /* end loop */
198 178
199 case 0 : 179 case 0 :
200 printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" ); 180 printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" );
201 continue; 181 return -1; /* continue looping */
202 182
203 default : 183 default :
204 printk( KERN_WARNING "NETspider-U: No PCI card found\n" ); 184 printk( KERN_WARNING "NETspider-U: No PCI card found\n" );
205 return 0; 185 return 0; /* end loop & function */
206 }
207 break;
208 } 186 }
209#else 187 return 1; /* end loop */
210 188}
211 printk(KERN_WARNING "NETspider-U: NO_PCI_BIOS\n");
212 printk(KERN_WARNING "NETspider-U: unable to config NETspider-U PCI\n");
213 return (0);
214
215#endif /* CONFIG_PCI */
216 189
217 bytecnt = 256; 190static int __devinit nju_cs_init_rest(struct IsdnCard *card,
191 struct IsdnCardState *cs)
192{
193 const int bytecnt = 256;
218 194
219 printk(KERN_INFO 195 printk(KERN_INFO
220 "NETspider-U: PCI card configured at %#lx IRQ %d\n", 196 "NETspider-U: PCI card configured at %#lx IRQ %d\n",
@@ -239,5 +215,48 @@ setup_netjet_u(struct IsdnCard *card)
239 cs->irq_func = &netjet_u_interrupt; 215 cs->irq_func = &netjet_u_interrupt;
240 cs->irq_flags |= IRQF_SHARED; 216 cs->irq_flags |= IRQF_SHARED;
241 ICCVersion(cs, "NETspider-U:"); 217 ICCVersion(cs, "NETspider-U:");
218
242 return (1); 219 return (1);
243} 220}
221
222static struct pci_dev *dev_netjet __devinitdata = NULL;
223
224int __devinit
225setup_netjet_u(struct IsdnCard *card)
226{
227 int ret;
228 struct IsdnCardState *cs = card->cs;
229 char tmp[64];
230
231#ifdef __BIG_ENDIAN
232#error "not running on big endian machines now"
233#endif
234
235 strcpy(tmp, NETjet_U_revision);
236 printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
237 if (cs->typ != ISDN_CTYPE_NETJET_U)
238 return(0);
239 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
240
241 for ( ;; )
242 {
243 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
244 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
245 ret = nju_pci_probe(dev_netjet, cs);
246 if (!ret)
247 return(0);
248 } else {
249 printk(KERN_WARNING "NETspider-U: No PCI card found\n");
250 return(0);
251 }
252
253 ret = nju_cs_init(card, cs);
254 if (!ret)
255 return (0);
256 if (ret > 0)
257 break;
258 /* ret < 0 == continue looping */
259 }
260
261 return nju_cs_init_rest(card, cs);
262}
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 030d1625c5c6..ad06f3cc60fb 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -451,6 +451,9 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
451 spin_unlock_irqrestore(&cs->lock, flags); 451 spin_unlock_irqrestore(&cs->lock, flags);
452 return(0); 452 return(0);
453 case CARD_RELEASE: 453 case CARD_RELEASE:
454 if (cs->hw.sedl.bus == SEDL_BUS_PCI)
455 /* disable all IRQ */
456 byteout(cs->hw.sedl.cfg_reg+ 5, 0);
454 if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) { 457 if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
455 spin_lock_irqsave(&cs->lock, flags); 458 spin_lock_irqsave(&cs->lock, flags);
456 writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, 459 writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
@@ -468,6 +471,9 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
468 return(0); 471 return(0);
469 case CARD_INIT: 472 case CARD_INIT:
470 spin_lock_irqsave(&cs->lock, flags); 473 spin_lock_irqsave(&cs->lock, flags);
474 if (cs->hw.sedl.bus == SEDL_BUS_PCI)
475 /* enable all IRQ */
476 byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
471 reset_sedlbauer(cs); 477 reset_sedlbauer(cs);
472 if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) { 478 if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
473 clear_pending_isac_ints(cs); 479 clear_pending_isac_ints(cs);
@@ -667,7 +673,7 @@ setup_sedlbauer(struct IsdnCard *card)
667 byteout(cs->hw.sedl.cfg_reg, 0xff); 673 byteout(cs->hw.sedl.cfg_reg, 0xff);
668 byteout(cs->hw.sedl.cfg_reg, 0x00); 674 byteout(cs->hw.sedl.cfg_reg, 0x00);
669 byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd); 675 byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
670 byteout(cs->hw.sedl.cfg_reg+ 5, 0x02); 676 byteout(cs->hw.sedl.cfg_reg+ 5, 0); /* disable all IRQ */
671 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on); 677 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
672 mdelay(2); 678 mdelay(2);
673 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off); 679 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
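
The sedlbauer.c hunks above keep the PCI card's interrupts masked from probe time until CARD_INIT and mask them again on CARD_RELEASE, using the value written to cfg_reg + 5 (0x02 to enable, 0 to disable). The tiny standalone sketch below only models that mask/unmask life cycle, with an ordinary variable standing in for the register; the helper names are made up.

#include <stdio.h>

static unsigned char irq_mask_reg;		/* stands in for cfg_reg + 5 */

static void card_irqs_enable(void)  { irq_mask_reg = 0x02; }
static void card_irqs_disable(void) { irq_mask_reg = 0x00; }

int main(void)
{
	card_irqs_disable();			/* probe: leave IRQs off until init */
	card_irqs_enable();			/* CARD_INIT: unmask */
	printf("after init: %#x\n", (unsigned)irq_mask_reg);
	card_irqs_disable();			/* CARD_RELEASE: mask again */
	printf("after release: %#x\n", (unsigned)irq_mask_reg);
	return 0;
}
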
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 3ef567b99c74..e91c187992dd 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -86,7 +86,6 @@ config ISDN_X25
86 86
87 87
88menu "ISDN feature submodules" 88menu "ISDN feature submodules"
89 depends on ISDN
90 89
91config ISDN_DRV_LOOP 90config ISDN_DRV_LOOP
92 tristate "isdnloop support" 91 tristate "isdnloop support"
@@ -100,7 +99,7 @@ config ISDN_DRV_LOOP
100 99
101config ISDN_DIVERSION 100config ISDN_DIVERSION
102 tristate "Support isdn diversion services" 101 tristate "Support isdn diversion services"
103 depends on ISDN && ISDN_I4L 102 depends on ISDN_I4L
104 help 103 help
105 This option allows you to use some supplementary diversion 104 This option allows you to use some supplementary diversion
106 services in conjunction with the HiSax driver on an EURO/DSS1 105 services in conjunction with the HiSax driver on an EURO/DSS1
@@ -120,13 +119,13 @@ config ISDN_DIVERSION
120endmenu 119endmenu
121 120
122comment "ISDN4Linux hardware drivers" 121comment "ISDN4Linux hardware drivers"
123 depends on NET && ISDN && ISDN_I4L 122 depends on ISDN_I4L
124 123
125source "drivers/isdn/hisax/Kconfig" 124source "drivers/isdn/hisax/Kconfig"
126 125
127 126
128menu "Active cards" 127menu "Active cards"
129 depends on NET && ISDN && ISDN_I4L!=n 128 depends on ISDN_I4L!=n
130 129
131source "drivers/isdn/icn/Kconfig" 130source "drivers/isdn/icn/Kconfig"
132 131
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index e8e37d826478..33fa28a8c199 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -1,12 +1,17 @@
1# 1#
2# KVM configuration 2# KVM configuration
3# 3#
4menu "Virtualization" 4menuconfig VIRTUALIZATION
5 bool "Virtualization"
5 depends on X86 6 depends on X86
7 default y
8
9if VIRTUALIZATION
6 10
7config KVM 11config KVM
8 tristate "Kernel-based Virtual Machine (KVM) support" 12 tristate "Kernel-based Virtual Machine (KVM) support"
9 depends on X86 && EXPERIMENTAL 13 depends on X86 && EXPERIMENTAL
14 depends on X86_CMPXCHG64 || 64BIT
10 ---help--- 15 ---help---
11 Support hosting fully virtualized guest machines using hardware 16 Support hosting fully virtualized guest machines using hardware
12 virtualization extensions. You will need a fairly recent 17 virtualization extensions. You will need a fairly recent
@@ -35,4 +40,4 @@ config KVM_AMD
35 Provides support for KVM on AMD processors equipped with the AMD-V 40 Provides support for KVM on AMD processors equipped with the AMD-V
36 (SVM) extensions. 41 (SVM) extensions.
37 42
38endmenu 43endif # VIRTUALIZATION
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 152312c1fafa..a7c5e6bee034 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -10,6 +10,8 @@
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/mutex.h> 11#include <linux/mutex.h>
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/signal.h>
14#include <linux/sched.h>
13#include <linux/mm.h> 15#include <linux/mm.h>
14#include <asm/signal.h> 16#include <asm/signal.h>
15 17
@@ -18,6 +20,7 @@
18#include <linux/kvm_para.h> 20#include <linux/kvm_para.h>
19 21
20#define CR0_PE_MASK (1ULL << 0) 22#define CR0_PE_MASK (1ULL << 0)
23#define CR0_MP_MASK (1ULL << 1)
21#define CR0_TS_MASK (1ULL << 3) 24#define CR0_TS_MASK (1ULL << 3)
22#define CR0_NE_MASK (1ULL << 5) 25#define CR0_NE_MASK (1ULL << 5)
23#define CR0_WP_MASK (1ULL << 16) 26#define CR0_WP_MASK (1ULL << 16)
@@ -42,7 +45,8 @@
42 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \ 45 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
43 | CR0_NW_MASK | CR0_CD_MASK) 46 | CR0_NW_MASK | CR0_CD_MASK)
44#define KVM_VM_CR0_ALWAYS_ON \ 47#define KVM_VM_CR0_ALWAYS_ON \
45 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK) 48 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
49 | CR0_MP_MASK)
46#define KVM_GUEST_CR4_MASK \ 50#define KVM_GUEST_CR4_MASK \
47 (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK) 51 (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
48#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK) 52#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
@@ -51,10 +55,10 @@
51#define INVALID_PAGE (~(hpa_t)0) 55#define INVALID_PAGE (~(hpa_t)0)
52#define UNMAPPED_GVA (~(gpa_t)0) 56#define UNMAPPED_GVA (~(gpa_t)0)
53 57
54#define KVM_MAX_VCPUS 1 58#define KVM_MAX_VCPUS 4
55#define KVM_ALIAS_SLOTS 4 59#define KVM_ALIAS_SLOTS 4
56#define KVM_MEMORY_SLOTS 4 60#define KVM_MEMORY_SLOTS 4
57#define KVM_NUM_MMU_PAGES 256 61#define KVM_NUM_MMU_PAGES 1024
58#define KVM_MIN_FREE_MMU_PAGES 5 62#define KVM_MIN_FREE_MMU_PAGES 5
59#define KVM_REFILL_PAGES 25 63#define KVM_REFILL_PAGES 25
60#define KVM_MAX_CPUID_ENTRIES 40 64#define KVM_MAX_CPUID_ENTRIES 40
@@ -80,6 +84,11 @@
80#define KVM_PIO_PAGE_OFFSET 1 84#define KVM_PIO_PAGE_OFFSET 1
81 85
82/* 86/*
87 * vcpu->requests bit members
88 */
89#define KVM_TLB_FLUSH 0
90
91/*
83 * Address types: 92 * Address types:
84 * 93 *
85 * gva - guest virtual address 94 * gva - guest virtual address
@@ -137,7 +146,7 @@ struct kvm_mmu_page {
137 gfn_t gfn; 146 gfn_t gfn;
138 union kvm_mmu_page_role role; 147 union kvm_mmu_page_role role;
139 148
140 hpa_t page_hpa; 149 u64 *spt;
141 unsigned long slot_bitmap; /* One bit set per slot which has memory 150 unsigned long slot_bitmap; /* One bit set per slot which has memory
142 * in this shadow page. 151 * in this shadow page.
143 */ 152 */
@@ -232,6 +241,7 @@ struct kvm_pio_request {
232 struct page *guest_pages[2]; 241 struct page *guest_pages[2];
233 unsigned guest_page_offset; 242 unsigned guest_page_offset;
234 int in; 243 int in;
244 int port;
235 int size; 245 int size;
236 int string; 246 int string;
237 int down; 247 int down;
@@ -252,8 +262,70 @@ struct kvm_stat {
252 u32 halt_exits; 262 u32 halt_exits;
253 u32 request_irq_exits; 263 u32 request_irq_exits;
254 u32 irq_exits; 264 u32 irq_exits;
265 u32 light_exits;
266 u32 efer_reload;
267};
268
269struct kvm_io_device {
270 void (*read)(struct kvm_io_device *this,
271 gpa_t addr,
272 int len,
273 void *val);
274 void (*write)(struct kvm_io_device *this,
275 gpa_t addr,
276 int len,
277 const void *val);
278 int (*in_range)(struct kvm_io_device *this, gpa_t addr);
279 void (*destructor)(struct kvm_io_device *this);
280
281 void *private;
282};
283
284static inline void kvm_iodevice_read(struct kvm_io_device *dev,
285 gpa_t addr,
286 int len,
287 void *val)
288{
289 dev->read(dev, addr, len, val);
290}
291
292static inline void kvm_iodevice_write(struct kvm_io_device *dev,
293 gpa_t addr,
294 int len,
295 const void *val)
296{
297 dev->write(dev, addr, len, val);
298}
299
300static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
301{
302 return dev->in_range(dev, addr);
303}
304
305static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
306{
307 if (dev->destructor)
308 dev->destructor(dev);
309}
310
311/*
312 * It would be nice to use something smarter than a linear search, TBD...
313 * Thankfully we dont expect many devices to register (famous last words :),
314 * so until then it will suffice. At least its abstracted so we can change
315 * in one place.
316 */
317struct kvm_io_bus {
318 int dev_count;
319#define NR_IOBUS_DEVS 6
320 struct kvm_io_device *devs[NR_IOBUS_DEVS];
255}; 321};
256 322
323void kvm_io_bus_init(struct kvm_io_bus *bus);
324void kvm_io_bus_destroy(struct kvm_io_bus *bus);
325struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
326void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
327 struct kvm_io_device *dev);
328
257struct kvm_vcpu { 329struct kvm_vcpu {
258 struct kvm *kvm; 330 struct kvm *kvm;
259 union { 331 union {
@@ -266,6 +338,8 @@ struct kvm_vcpu {
266 u64 host_tsc; 338 u64 host_tsc;
267 struct kvm_run *run; 339 struct kvm_run *run;
268 int interrupt_window_open; 340 int interrupt_window_open;
341 int guest_mode;
342 unsigned long requests;
269 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ 343 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
270#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long) 344#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
271 unsigned long irq_pending[NR_IRQ_WORDS]; 345 unsigned long irq_pending[NR_IRQ_WORDS];
@@ -285,15 +359,20 @@ struct kvm_vcpu {
285 u64 apic_base; 359 u64 apic_base;
286 u64 ia32_misc_enable_msr; 360 u64 ia32_misc_enable_msr;
287 int nmsrs; 361 int nmsrs;
362 int save_nmsrs;
363 int msr_offset_efer;
364#ifdef CONFIG_X86_64
365 int msr_offset_kernel_gs_base;
366#endif
288 struct vmx_msr_entry *guest_msrs; 367 struct vmx_msr_entry *guest_msrs;
289 struct vmx_msr_entry *host_msrs; 368 struct vmx_msr_entry *host_msrs;
290 369
291 struct list_head free_pages;
292 struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
293 struct kvm_mmu mmu; 370 struct kvm_mmu mmu;
294 371
295 struct kvm_mmu_memory_cache mmu_pte_chain_cache; 372 struct kvm_mmu_memory_cache mmu_pte_chain_cache;
296 struct kvm_mmu_memory_cache mmu_rmap_desc_cache; 373 struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
374 struct kvm_mmu_memory_cache mmu_page_cache;
375 struct kvm_mmu_memory_cache mmu_page_header_cache;
297 376
298 gfn_t last_pt_write_gfn; 377 gfn_t last_pt_write_gfn;
299 int last_pt_write_count; 378 int last_pt_write_count;
@@ -305,6 +384,11 @@ struct kvm_vcpu {
305 char *guest_fx_image; 384 char *guest_fx_image;
306 int fpu_active; 385 int fpu_active;
307 int guest_fpu_loaded; 386 int guest_fpu_loaded;
387 struct vmx_host_state {
388 int loaded;
389 u16 fs_sel, gs_sel, ldt_sel;
390 int fs_gs_ldt_reload_needed;
391 } vmx_host_state;
308 392
309 int mmio_needed; 393 int mmio_needed;
310 int mmio_read_completed; 394 int mmio_read_completed;
@@ -331,6 +415,7 @@ struct kvm_vcpu {
331 u32 ar; 415 u32 ar;
332 } tr, es, ds, fs, gs; 416 } tr, es, ds, fs, gs;
333 } rmode; 417 } rmode;
418 int halt_request; /* real mode on Intel only */
334 419
335 int cpuid_nent; 420 int cpuid_nent;
336 struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES]; 421 struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
@@ -362,12 +447,15 @@ struct kvm {
362 struct list_head active_mmu_pages; 447 struct list_head active_mmu_pages;
363 int n_free_mmu_pages; 448 int n_free_mmu_pages;
364 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; 449 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
450 int nvcpus;
365 struct kvm_vcpu vcpus[KVM_MAX_VCPUS]; 451 struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
366 int memory_config_version; 452 int memory_config_version;
367 int busy; 453 int busy;
368 unsigned long rmap_overflow; 454 unsigned long rmap_overflow;
369 struct list_head vm_list; 455 struct list_head vm_list;
370 struct file *filp; 456 struct file *filp;
457 struct kvm_io_bus mmio_bus;
458 struct kvm_io_bus pio_bus;
371}; 459};
372 460
373struct descriptor_table { 461struct descriptor_table {
@@ -488,6 +576,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
488 int size, unsigned long count, int string, int down, 576 int size, unsigned long count, int string, int down,
489 gva_t address, int rep, unsigned port); 577 gva_t address, int rep, unsigned port);
490void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); 578void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
579int kvm_emulate_halt(struct kvm_vcpu *vcpu);
491int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); 580int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
492int emulate_clts(struct kvm_vcpu *vcpu); 581int emulate_clts(struct kvm_vcpu *vcpu);
493int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, 582int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
@@ -511,6 +600,7 @@ void save_msrs(struct vmx_msr_entry *e, int n);
511void kvm_resched(struct kvm_vcpu *vcpu); 600void kvm_resched(struct kvm_vcpu *vcpu);
512void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); 601void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
513void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); 602void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
603void kvm_flush_remote_tlbs(struct kvm *kvm);
514 604
515int kvm_read_guest(struct kvm_vcpu *vcpu, 605int kvm_read_guest(struct kvm_vcpu *vcpu,
516 gva_t addr, 606 gva_t addr,
@@ -524,10 +614,12 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
524 614
525unsigned long segment_base(u16 selector); 615unsigned long segment_base(u16 selector);
526 616
527void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes); 617void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
528void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes); 618 const u8 *old, const u8 *new, int bytes);
529int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); 619int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
530void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); 620void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
621int kvm_mmu_load(struct kvm_vcpu *vcpu);
622void kvm_mmu_unload(struct kvm_vcpu *vcpu);
531 623
532int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run); 624int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
533 625
@@ -539,6 +631,14 @@ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
539 return vcpu->mmu.page_fault(vcpu, gva, error_code); 631 return vcpu->mmu.page_fault(vcpu, gva, error_code);
540} 632}
541 633
634static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
635{
636 if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
637 return 0;
638
639 return kvm_mmu_load(vcpu);
640}
641
542static inline int is_long_mode(struct kvm_vcpu *vcpu) 642static inline int is_long_mode(struct kvm_vcpu *vcpu)
543{ 643{
544#ifdef CONFIG_X86_64 644#ifdef CONFIG_X86_64
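
The kvm_io_device/kvm_io_bus additions to kvm.h above define a small polymorphic interface: a device supplies read, write and in_range callbacks (plus an optional destructor), and the bus is a fixed array of at most NR_IOBUS_DEVS entries that kvm_io_bus_find_dev() scans linearly, as the comment in the header admits. The standalone sketch below mirrors that shape with a made-up dummy device backing one 16-byte range; the structs are trimmed copies of the header's (the destructor is omitted), and all dummy_* names and addresses are invented.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef uint64_t gpa_t;

/* Trimmed-down analogue of kvm_io_device / kvm_io_bus from the hunk above. */
struct io_device {
	void (*read)(struct io_device *self, gpa_t addr, int len, void *val);
	void (*write)(struct io_device *self, gpa_t addr, int len, const void *val);
	int  (*in_range)(struct io_device *self, gpa_t addr);
	void *private_data;
};

#define NR_IOBUS_DEVS 6
struct io_bus {
	int dev_count;
	struct io_device *devs[NR_IOBUS_DEVS];
};

static struct io_device *bus_find_dev(struct io_bus *bus, gpa_t addr)
{
	for (int i = 0; i < bus->dev_count; i++)	/* linear search, as in the patch */
		if (bus->devs[i]->in_range(bus->devs[i], addr))
			return bus->devs[i];
	return NULL;
}

/* Invented device: 16 bytes of plain memory at an arbitrary guest address. */
#define DUMMY_BASE 0xfee00000ULL
static uint8_t dummy_store[16];

static int dummy_in_range(struct io_device *dev, gpa_t addr)
{
	return addr >= DUMMY_BASE && addr < DUMMY_BASE + sizeof(dummy_store);
}

static void dummy_read(struct io_device *dev, gpa_t addr, int len, void *val)
{
	memcpy(val, dummy_store + (addr - DUMMY_BASE), len);
}

static void dummy_write(struct io_device *dev, gpa_t addr, int len, const void *val)
{
	memcpy(dummy_store + (addr - DUMMY_BASE), val, len);
}

int main(void)
{
	struct io_device dev = { dummy_read, dummy_write, dummy_in_range, NULL };
	struct io_bus bus = { 0, { NULL } };
	uint32_t in = 0xabcd1234, out = 0;

	bus.devs[bus.dev_count++] = &dev;		/* kvm_io_bus_register_dev() */

	struct io_device *hit = bus_find_dev(&bus, DUMMY_BASE + 4);
	if (hit) {
		hit->write(hit, DUMMY_BASE + 4, sizeof(in), &in);
		hit->read(hit, DUMMY_BASE + 4, sizeof(out), &out);
	}
	printf("read back %#x\n", (unsigned)out);
	return 0;
}

In the kvm_main.c diff below, emulator_read_emulated(), emulator_write_emulated() and kvm_setup_pio() use exactly this lookup (through vcpu_find_mmio_dev()/vcpu_find_pio_dev()) to decide whether an access can be satisfied in the kernel or must exit to userspace.
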
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f1f07adb04e..1b206f197c6b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -16,34 +16,33 @@
16 */ 16 */
17 17
18#include "kvm.h" 18#include "kvm.h"
19#include "x86_emulate.h"
20#include "segment_descriptor.h"
19 21
20#include <linux/kvm.h> 22#include <linux/kvm.h>
21#include <linux/module.h> 23#include <linux/module.h>
22#include <linux/errno.h> 24#include <linux/errno.h>
23#include <linux/magic.h>
24#include <asm/processor.h>
25#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/gfp.h> 26#include <linux/gfp.h>
27#include <asm/msr.h>
28#include <linux/mm.h> 27#include <linux/mm.h>
29#include <linux/miscdevice.h> 28#include <linux/miscdevice.h>
30#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
31#include <asm/uaccess.h>
32#include <linux/reboot.h> 30#include <linux/reboot.h>
33#include <asm/io.h>
34#include <linux/debugfs.h> 31#include <linux/debugfs.h>
35#include <linux/highmem.h> 32#include <linux/highmem.h>
36#include <linux/file.h> 33#include <linux/file.h>
37#include <asm/desc.h>
38#include <linux/sysdev.h> 34#include <linux/sysdev.h>
39#include <linux/cpu.h> 35#include <linux/cpu.h>
40#include <linux/file.h>
41#include <linux/fs.h>
42#include <linux/mount.h>
43#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/cpumask.h>
38#include <linux/smp.h>
39#include <linux/anon_inodes.h>
44 40
45#include "x86_emulate.h" 41#include <asm/processor.h>
46#include "segment_descriptor.h" 42#include <asm/msr.h>
43#include <asm/io.h>
44#include <asm/uaccess.h>
45#include <asm/desc.h>
47 46
48MODULE_AUTHOR("Qumranet"); 47MODULE_AUTHOR("Qumranet");
49MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
@@ -51,8 +50,12 @@ MODULE_LICENSE("GPL");
51static DEFINE_SPINLOCK(kvm_lock); 50static DEFINE_SPINLOCK(kvm_lock);
52static LIST_HEAD(vm_list); 51static LIST_HEAD(vm_list);
53 52
53static cpumask_t cpus_hardware_enabled;
54
54struct kvm_arch_ops *kvm_arch_ops; 55struct kvm_arch_ops *kvm_arch_ops;
55 56
57static void hardware_disable(void *ignored);
58
56#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x) 59#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
57 60
58static struct kvm_stats_debugfs_item { 61static struct kvm_stats_debugfs_item {
@@ -72,13 +75,13 @@ static struct kvm_stats_debugfs_item {
72 { "halt_exits", STAT_OFFSET(halt_exits) }, 75 { "halt_exits", STAT_OFFSET(halt_exits) },
73 { "request_irq", STAT_OFFSET(request_irq_exits) }, 76 { "request_irq", STAT_OFFSET(request_irq_exits) },
74 { "irq_exits", STAT_OFFSET(irq_exits) }, 77 { "irq_exits", STAT_OFFSET(irq_exits) },
78 { "light_exits", STAT_OFFSET(light_exits) },
79 { "efer_reload", STAT_OFFSET(efer_reload) },
75 { NULL } 80 { NULL }
76}; 81};
77 82
78static struct dentry *debugfs_dir; 83static struct dentry *debugfs_dir;
79 84
80struct vfsmount *kvmfs_mnt;
81
82#define MAX_IO_MSRS 256 85#define MAX_IO_MSRS 256
83 86
84#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL 87#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
@@ -100,55 +103,6 @@ struct segment_descriptor_64 {
100static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 103static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
101 unsigned long arg); 104 unsigned long arg);
102 105
103static struct inode *kvmfs_inode(struct file_operations *fops)
104{
105 int error = -ENOMEM;
106 struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);
107
108 if (!inode)
109 goto eexit_1;
110
111 inode->i_fop = fops;
112
113 /*
114 * Mark the inode dirty from the very beginning,
115 * that way it will never be moved to the dirty
116 * list because mark_inode_dirty() will think
117 * that it already _is_ on the dirty list.
118 */
119 inode->i_state = I_DIRTY;
120 inode->i_mode = S_IRUSR | S_IWUSR;
121 inode->i_uid = current->fsuid;
122 inode->i_gid = current->fsgid;
123 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
124 return inode;
125
126eexit_1:
127 return ERR_PTR(error);
128}
129
130static struct file *kvmfs_file(struct inode *inode, void *private_data)
131{
132 struct file *file = get_empty_filp();
133
134 if (!file)
135 return ERR_PTR(-ENFILE);
136
137 file->f_path.mnt = mntget(kvmfs_mnt);
138 file->f_path.dentry = d_alloc_anon(inode);
139 if (!file->f_path.dentry)
140 return ERR_PTR(-ENOMEM);
141 file->f_mapping = inode->i_mapping;
142
143 file->f_pos = 0;
144 file->f_flags = O_RDWR;
145 file->f_op = inode->i_fop;
146 file->f_mode = FMODE_READ | FMODE_WRITE;
147 file->f_version = 0;
148 file->private_data = private_data;
149 return file;
150}
151
152unsigned long segment_base(u16 selector) 106unsigned long segment_base(u16 selector)
153{ 107{
154 struct descriptor_table gdt; 108 struct descriptor_table gdt;
@@ -307,6 +261,48 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
307 mutex_unlock(&vcpu->mutex); 261 mutex_unlock(&vcpu->mutex);
308} 262}
309 263
264static void ack_flush(void *_completed)
265{
266 atomic_t *completed = _completed;
267
268 atomic_inc(completed);
269}
270
271void kvm_flush_remote_tlbs(struct kvm *kvm)
272{
273 int i, cpu, needed;
274 cpumask_t cpus;
275 struct kvm_vcpu *vcpu;
276 atomic_t completed;
277
278 atomic_set(&completed, 0);
279 cpus_clear(cpus);
280 needed = 0;
281 for (i = 0; i < kvm->nvcpus; ++i) {
282 vcpu = &kvm->vcpus[i];
283 if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
284 continue;
285 cpu = vcpu->cpu;
286 if (cpu != -1 && cpu != raw_smp_processor_id())
287 if (!cpu_isset(cpu, cpus)) {
288 cpu_set(cpu, cpus);
289 ++needed;
290 }
291 }
292
293 /*
294 * We really want smp_call_function_mask() here. But that's not
295 * available, so ipi all cpus in parallel and wait for them
296 * to complete.
297 */
298 for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
299 smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
300 while (atomic_read(&completed) != needed) {
301 cpu_relax();
302 barrier();
303 }
304}
305
310static struct kvm *kvm_create_vm(void) 306static struct kvm *kvm_create_vm(void)
311{ 307{
312 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); 308 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
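
kvm_flush_remote_tlbs() above sets the KVM_TLB_FLUSH request bit on each vcpu (skipping vcpus that already had a flush pending), collects the set of physical CPUs currently running one of those vcpus other than the caller's own, IPIs each of them, and then spins until the acknowledgement counter reaches the number of CPUs it signalled. The sketch below is a userspace analogue of just that ack-counting rendezvous, using pthreads and a C11 atomic counter; the thread count is arbitrary and nothing here is kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

#define NTARGETS 3				/* stand-in for the cpus actually signalled */

static atomic_int completed;			/* plays the role of the on-stack atomic_t */

static void *target_cpu(void *arg)
{
	(void)arg;
	/* what ack_flush() does in IPI context: just acknowledge */
	atomic_fetch_add(&completed, 1);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTARGETS];
	int needed = 0;

	atomic_store(&completed, 0);
	for (int i = 0; i < NTARGETS; i++) {	/* "send one IPI per target cpu" */
		pthread_create(&tid[i], NULL, target_cpu, NULL);
		needed++;
	}

	/* initiator: busy-wait until every signalled cpu has acknowledged */
	while (atomic_load(&completed) != needed)
		sched_yield();			/* cpu_relax() stand-in */

	printf("all %d targets acknowledged the flush\n", needed);
	for (int i = 0; i < NTARGETS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

The test_and_set_bit() on KVM_TLB_FLUSH is what keeps the target count small: a vcpu that already has a flush pending, or that is not loaded on any CPU (cpu == -1), never adds an IPI target.
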
@@ -315,8 +311,13 @@ static struct kvm *kvm_create_vm(void)
315 if (!kvm) 311 if (!kvm)
316 return ERR_PTR(-ENOMEM); 312 return ERR_PTR(-ENOMEM);
317 313
314 kvm_io_bus_init(&kvm->pio_bus);
318 spin_lock_init(&kvm->lock); 315 spin_lock_init(&kvm->lock);
319 INIT_LIST_HEAD(&kvm->active_mmu_pages); 316 INIT_LIST_HEAD(&kvm->active_mmu_pages);
317 spin_lock(&kvm_lock);
318 list_add(&kvm->vm_list, &vm_list);
319 spin_unlock(&kvm_lock);
320 kvm_io_bus_init(&kvm->mmio_bus);
320 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 321 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
321 struct kvm_vcpu *vcpu = &kvm->vcpus[i]; 322 struct kvm_vcpu *vcpu = &kvm->vcpus[i];
322 323
@@ -324,10 +325,6 @@ static struct kvm *kvm_create_vm(void)
324 vcpu->cpu = -1; 325 vcpu->cpu = -1;
325 vcpu->kvm = kvm; 326 vcpu->kvm = kvm;
326 vcpu->mmu.root_hpa = INVALID_PAGE; 327 vcpu->mmu.root_hpa = INVALID_PAGE;
327 INIT_LIST_HEAD(&vcpu->free_pages);
328 spin_lock(&kvm_lock);
329 list_add(&kvm->vm_list, &vm_list);
330 spin_unlock(&kvm_lock);
331 } 328 }
332 return kvm; 329 return kvm;
333} 330}
@@ -380,6 +377,16 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
380 } 377 }
381} 378}
382 379
380static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
381{
382 if (!vcpu->vmcs)
383 return;
384
385 vcpu_load(vcpu);
386 kvm_mmu_unload(vcpu);
387 vcpu_put(vcpu);
388}
389
383static void kvm_free_vcpu(struct kvm_vcpu *vcpu) 390static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
384{ 391{
385 if (!vcpu->vmcs) 392 if (!vcpu->vmcs)
@@ -400,6 +407,11 @@ static void kvm_free_vcpus(struct kvm *kvm)
400{ 407{
401 unsigned int i; 408 unsigned int i;
402 409
410 /*
411 * Unpin any mmu pages first.
412 */
413 for (i = 0; i < KVM_MAX_VCPUS; ++i)
414 kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
403 for (i = 0; i < KVM_MAX_VCPUS; ++i) 415 for (i = 0; i < KVM_MAX_VCPUS; ++i)
404 kvm_free_vcpu(&kvm->vcpus[i]); 416 kvm_free_vcpu(&kvm->vcpus[i]);
405} 417}
@@ -414,6 +426,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
414 spin_lock(&kvm_lock); 426 spin_lock(&kvm_lock);
415 list_del(&kvm->vm_list); 427 list_del(&kvm->vm_list);
416 spin_unlock(&kvm_lock); 428 spin_unlock(&kvm_lock);
429 kvm_io_bus_destroy(&kvm->pio_bus);
430 kvm_io_bus_destroy(&kvm->mmio_bus);
417 kvm_free_vcpus(kvm); 431 kvm_free_vcpus(kvm);
418 kvm_free_physmem(kvm); 432 kvm_free_physmem(kvm);
419 kfree(kvm); 433 kfree(kvm);
@@ -969,7 +983,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page);
969void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 983void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
970{ 984{
971 int i; 985 int i;
972 struct kvm_memory_slot *memslot = NULL; 986 struct kvm_memory_slot *memslot;
973 unsigned long rel_gfn; 987 unsigned long rel_gfn;
974 988
975 for (i = 0; i < kvm->nmemslots; ++i) { 989 for (i = 0; i < kvm->nmemslots; ++i) {
@@ -978,7 +992,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
978 if (gfn >= memslot->base_gfn 992 if (gfn >= memslot->base_gfn
979 && gfn < memslot->base_gfn + memslot->npages) { 993 && gfn < memslot->base_gfn + memslot->npages) {
980 994
981 if (!memslot || !memslot->dirty_bitmap) 995 if (!memslot->dirty_bitmap)
982 return; 996 return;
983 997
984 rel_gfn = gfn - memslot->base_gfn; 998 rel_gfn = gfn - memslot->base_gfn;
@@ -1037,12 +1051,31 @@ static int emulator_write_std(unsigned long addr,
1037 return X86EMUL_UNHANDLEABLE; 1051 return X86EMUL_UNHANDLEABLE;
1038} 1052}
1039 1053
1054static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1055 gpa_t addr)
1056{
1057 /*
1058 * Note that its important to have this wrapper function because
1059 * in the very near future we will be checking for MMIOs against
1060 * the LAPIC as well as the general MMIO bus
1061 */
1062 return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
1063}
1064
1065static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
1066 gpa_t addr)
1067{
1068 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
1069}
1070
1040static int emulator_read_emulated(unsigned long addr, 1071static int emulator_read_emulated(unsigned long addr,
1041 void *val, 1072 void *val,
1042 unsigned int bytes, 1073 unsigned int bytes,
1043 struct x86_emulate_ctxt *ctxt) 1074 struct x86_emulate_ctxt *ctxt)
1044{ 1075{
1045 struct kvm_vcpu *vcpu = ctxt->vcpu; 1076 struct kvm_vcpu *vcpu = ctxt->vcpu;
1077 struct kvm_io_device *mmio_dev;
1078 gpa_t gpa;
1046 1079
1047 if (vcpu->mmio_read_completed) { 1080 if (vcpu->mmio_read_completed) {
1048 memcpy(val, vcpu->mmio_data, bytes); 1081 memcpy(val, vcpu->mmio_data, bytes);
@@ -1051,18 +1084,26 @@ static int emulator_read_emulated(unsigned long addr,
1051 } else if (emulator_read_std(addr, val, bytes, ctxt) 1084 } else if (emulator_read_std(addr, val, bytes, ctxt)
1052 == X86EMUL_CONTINUE) 1085 == X86EMUL_CONTINUE)
1053 return X86EMUL_CONTINUE; 1086 return X86EMUL_CONTINUE;
1054 else {
1055 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1056 1087
1057 if (gpa == UNMAPPED_GVA) 1088 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1058 return X86EMUL_PROPAGATE_FAULT; 1089 if (gpa == UNMAPPED_GVA)
1059 vcpu->mmio_needed = 1; 1090 return X86EMUL_PROPAGATE_FAULT;
1060 vcpu->mmio_phys_addr = gpa;
1061 vcpu->mmio_size = bytes;
1062 vcpu->mmio_is_write = 0;
1063 1091
1064 return X86EMUL_UNHANDLEABLE; 1092 /*
1093 * Is this MMIO handled locally?
1094 */
1095 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1096 if (mmio_dev) {
1097 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
1098 return X86EMUL_CONTINUE;
1065 } 1099 }
1100
1101 vcpu->mmio_needed = 1;
1102 vcpu->mmio_phys_addr = gpa;
1103 vcpu->mmio_size = bytes;
1104 vcpu->mmio_is_write = 0;
1105
1106 return X86EMUL_UNHANDLEABLE;
1066} 1107}
1067 1108
1068static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 1109static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1070,18 +1111,20 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1070{ 1111{
1071 struct page *page; 1112 struct page *page;
1072 void *virt; 1113 void *virt;
1114 unsigned offset = offset_in_page(gpa);
1073 1115
1074 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT)) 1116 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
1075 return 0; 1117 return 0;
1076 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); 1118 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1077 if (!page) 1119 if (!page)
1078 return 0; 1120 return 0;
1079 kvm_mmu_pre_write(vcpu, gpa, bytes);
1080 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); 1121 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
1081 virt = kmap_atomic(page, KM_USER0); 1122 virt = kmap_atomic(page, KM_USER0);
1082 memcpy(virt + offset_in_page(gpa), val, bytes); 1123 if (memcmp(virt + offset_in_page(gpa), val, bytes)) {
1124 kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
1125 memcpy(virt + offset_in_page(gpa), val, bytes);
1126 }
1083 kunmap_atomic(virt, KM_USER0); 1127 kunmap_atomic(virt, KM_USER0);
1084 kvm_mmu_post_write(vcpu, gpa, bytes);
1085 return 1; 1128 return 1;
1086} 1129}
1087 1130
@@ -1090,8 +1133,9 @@ static int emulator_write_emulated(unsigned long addr,
1090 unsigned int bytes, 1133 unsigned int bytes,
1091 struct x86_emulate_ctxt *ctxt) 1134 struct x86_emulate_ctxt *ctxt)
1092{ 1135{
1093 struct kvm_vcpu *vcpu = ctxt->vcpu; 1136 struct kvm_vcpu *vcpu = ctxt->vcpu;
1094 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1137 struct kvm_io_device *mmio_dev;
1138 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1095 1139
1096 if (gpa == UNMAPPED_GVA) { 1140 if (gpa == UNMAPPED_GVA) {
1097 kvm_arch_ops->inject_page_fault(vcpu, addr, 2); 1141 kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
@@ -1101,6 +1145,15 @@ static int emulator_write_emulated(unsigned long addr,
1101 if (emulator_write_phys(vcpu, gpa, val, bytes)) 1145 if (emulator_write_phys(vcpu, gpa, val, bytes))
1102 return X86EMUL_CONTINUE; 1146 return X86EMUL_CONTINUE;
1103 1147
1148 /*
1149 * Is this MMIO handled locally?
1150 */
1151 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1152 if (mmio_dev) {
1153 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
1154 return X86EMUL_CONTINUE;
1155 }
1156
1104 vcpu->mmio_needed = 1; 1157 vcpu->mmio_needed = 1;
1105 vcpu->mmio_phys_addr = gpa; 1158 vcpu->mmio_phys_addr = gpa;
1106 vcpu->mmio_size = bytes; 1159 vcpu->mmio_size = bytes;
@@ -1269,6 +1322,17 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1269} 1322}
1270EXPORT_SYMBOL_GPL(emulate_instruction); 1323EXPORT_SYMBOL_GPL(emulate_instruction);
1271 1324
1325int kvm_emulate_halt(struct kvm_vcpu *vcpu)
1326{
1327 if (vcpu->irq_summary)
1328 return 1;
1329
1330 vcpu->run->exit_reason = KVM_EXIT_HLT;
1331 ++vcpu->stat.halt_exits;
1332 return 0;
1333}
1334EXPORT_SYMBOL_GPL(kvm_emulate_halt);
1335
1272int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run) 1336int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
1273{ 1337{
1274 unsigned long nr, a0, a1, a2, a3, a4, a5, ret; 1338 unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
@@ -1469,6 +1533,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1469 case MSR_IA32_MC0_MISC+16: 1533 case MSR_IA32_MC0_MISC+16:
1470 case MSR_IA32_UCODE_REV: 1534 case MSR_IA32_UCODE_REV:
1471 case MSR_IA32_PERF_STATUS: 1535 case MSR_IA32_PERF_STATUS:
1536 case MSR_IA32_EBL_CR_POWERON:
1472 /* MTRR registers */ 1537 /* MTRR registers */
1473 case 0xfe: 1538 case 0xfe:
1474 case 0x200 ... 0x2ff: 1539 case 0x200 ... 0x2ff:
@@ -1727,6 +1792,20 @@ static int complete_pio(struct kvm_vcpu *vcpu)
1727 return 0; 1792 return 0;
1728} 1793}
1729 1794
1795void kernel_pio(struct kvm_io_device *pio_dev, struct kvm_vcpu *vcpu)
1796{
1797 /* TODO: String I/O for in kernel device */
1798
1799 if (vcpu->pio.in)
1800 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1801 vcpu->pio.size,
1802 vcpu->pio_data);
1803 else
1804 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1805 vcpu->pio.size,
1806 vcpu->pio_data);
1807}
1808
1730int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, 1809int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1731 int size, unsigned long count, int string, int down, 1810 int size, unsigned long count, int string, int down,
1732 gva_t address, int rep, unsigned port) 1811 gva_t address, int rep, unsigned port)
@@ -1735,6 +1814,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1735 int i; 1814 int i;
1736 int nr_pages = 1; 1815 int nr_pages = 1;
1737 struct page *page; 1816 struct page *page;
1817 struct kvm_io_device *pio_dev;
1738 1818
1739 vcpu->run->exit_reason = KVM_EXIT_IO; 1819 vcpu->run->exit_reason = KVM_EXIT_IO;
1740 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 1820 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -1746,17 +1826,27 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1746 vcpu->pio.cur_count = count; 1826 vcpu->pio.cur_count = count;
1747 vcpu->pio.size = size; 1827 vcpu->pio.size = size;
1748 vcpu->pio.in = in; 1828 vcpu->pio.in = in;
1829 vcpu->pio.port = port;
1749 vcpu->pio.string = string; 1830 vcpu->pio.string = string;
1750 vcpu->pio.down = down; 1831 vcpu->pio.down = down;
1751 vcpu->pio.guest_page_offset = offset_in_page(address); 1832 vcpu->pio.guest_page_offset = offset_in_page(address);
1752 vcpu->pio.rep = rep; 1833 vcpu->pio.rep = rep;
1753 1834
1835 pio_dev = vcpu_find_pio_dev(vcpu, port);
1754 if (!string) { 1836 if (!string) {
1755 kvm_arch_ops->cache_regs(vcpu); 1837 kvm_arch_ops->cache_regs(vcpu);
1756 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4); 1838 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1757 kvm_arch_ops->decache_regs(vcpu); 1839 kvm_arch_ops->decache_regs(vcpu);
1840 if (pio_dev) {
1841 kernel_pio(pio_dev, vcpu);
1842 complete_pio(vcpu);
1843 return 1;
1844 }
1758 return 0; 1845 return 0;
1759 } 1846 }
1847 /* TODO: String I/O for in kernel device */
1848 if (pio_dev)
1849 printk(KERN_ERR "kvm_setup_pio: no string io support\n");
1760 1850
1761 if (!count) { 1851 if (!count) {
1762 kvm_arch_ops->skip_emulated_instruction(vcpu); 1852 kvm_arch_ops->skip_emulated_instruction(vcpu);
@@ -2273,34 +2363,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2273 struct inode *inode; 2363 struct inode *inode;
2274 struct file *file; 2364 struct file *file;
2275 2365
2366 r = anon_inode_getfd(&fd, &inode, &file,
2367 "kvm-vcpu", &kvm_vcpu_fops, vcpu);
2368 if (r)
2369 return r;
2276 atomic_inc(&vcpu->kvm->filp->f_count); 2370 atomic_inc(&vcpu->kvm->filp->f_count);
2277 inode = kvmfs_inode(&kvm_vcpu_fops);
2278 if (IS_ERR(inode)) {
2279 r = PTR_ERR(inode);
2280 goto out1;
2281 }
2282
2283 file = kvmfs_file(inode, vcpu);
2284 if (IS_ERR(file)) {
2285 r = PTR_ERR(file);
2286 goto out2;
2287 }
2288
2289 r = get_unused_fd();
2290 if (r < 0)
2291 goto out3;
2292 fd = r;
2293 fd_install(fd, file);
2294
2295 return fd; 2371 return fd;
2296
2297out3:
2298 fput(file);
2299out2:
2300 iput(inode);
2301out1:
2302 fput(vcpu->kvm->filp);
2303 return r;
2304} 2372}
2305 2373
2306/* 2374/*
@@ -2363,6 +2431,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
2363 if (r < 0) 2431 if (r < 0)
2364 goto out_free_vcpus; 2432 goto out_free_vcpus;
2365 2433
2434 spin_lock(&kvm_lock);
2435 if (n >= kvm->nvcpus)
2436 kvm->nvcpus = n + 1;
2437 spin_unlock(&kvm_lock);
2438
2366 return r; 2439 return r;
2367 2440
2368out_free_vcpus: 2441out_free_vcpus:
@@ -2376,6 +2449,27 @@ out:
2376 return r; 2449 return r;
2377} 2450}
2378 2451
2452static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2453{
2454 u64 efer;
2455 int i;
2456 struct kvm_cpuid_entry *e, *entry;
2457
2458 rdmsrl(MSR_EFER, efer);
2459 entry = NULL;
2460 for (i = 0; i < vcpu->cpuid_nent; ++i) {
2461 e = &vcpu->cpuid_entries[i];
2462 if (e->function == 0x80000001) {
2463 entry = e;
2464 break;
2465 }
2466 }
2467 if (entry && (entry->edx & EFER_NX) && !(efer & EFER_NX)) {
2468 entry->edx &= ~(1 << 20);
2469 printk(KERN_INFO ": guest NX capability removed\n");
2470 }
2471}
2472
2379static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, 2473static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2380 struct kvm_cpuid *cpuid, 2474 struct kvm_cpuid *cpuid,
2381 struct kvm_cpuid_entry __user *entries) 2475 struct kvm_cpuid_entry __user *entries)
@@ -2390,6 +2484,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2390 cpuid->nent * sizeof(struct kvm_cpuid_entry))) 2484 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2391 goto out; 2485 goto out;
2392 vcpu->cpuid_nent = cpuid->nent; 2486 vcpu->cpuid_nent = cpuid->nent;
2487 cpuid_fix_nx_cap(vcpu);
2393 return 0; 2488 return 0;
2394 2489
2395out: 2490out:
@@ -2738,41 +2833,18 @@ static int kvm_dev_ioctl_create_vm(void)
2738 struct file *file; 2833 struct file *file;
2739 struct kvm *kvm; 2834 struct kvm *kvm;
2740 2835
2741 inode = kvmfs_inode(&kvm_vm_fops);
2742 if (IS_ERR(inode)) {
2743 r = PTR_ERR(inode);
2744 goto out1;
2745 }
2746
2747 kvm = kvm_create_vm(); 2836 kvm = kvm_create_vm();
2748 if (IS_ERR(kvm)) { 2837 if (IS_ERR(kvm))
2749 r = PTR_ERR(kvm); 2838 return PTR_ERR(kvm);
2750 goto out2; 2839 r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
2840 if (r) {
2841 kvm_destroy_vm(kvm);
2842 return r;
2751 } 2843 }
2752 2844
2753 file = kvmfs_file(inode, kvm);
2754 if (IS_ERR(file)) {
2755 r = PTR_ERR(file);
2756 goto out3;
2757 }
2758 kvm->filp = file; 2845 kvm->filp = file;
2759 2846
2760 r = get_unused_fd();
2761 if (r < 0)
2762 goto out4;
2763 fd = r;
2764 fd_install(fd, file);
2765
2766 return fd; 2847 return fd;
2767
2768out4:
2769 fput(file);
2770out3:
2771 kvm_destroy_vm(kvm);
2772out2:
2773 iput(inode);
2774out1:
2775 return r;
2776} 2848}
2777 2849
2778static long kvm_dev_ioctl(struct file *filp, 2850static long kvm_dev_ioctl(struct file *filp,
@@ -2862,7 +2934,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2862 * in vmx root mode. 2934 * in vmx root mode.
2863 */ 2935 */
2864 printk(KERN_INFO "kvm: exiting hardware virtualization\n"); 2936 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2865 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1); 2937 on_each_cpu(hardware_disable, NULL, 0, 1);
2866 } 2938 }
2867 return NOTIFY_OK; 2939 return NOTIFY_OK;
2868} 2940}
@@ -2905,33 +2977,88 @@ static void decache_vcpus_on_cpu(int cpu)
2905 spin_unlock(&kvm_lock); 2977 spin_unlock(&kvm_lock);
2906} 2978}
2907 2979
2980static void hardware_enable(void *junk)
2981{
2982 int cpu = raw_smp_processor_id();
2983
2984 if (cpu_isset(cpu, cpus_hardware_enabled))
2985 return;
2986 cpu_set(cpu, cpus_hardware_enabled);
2987 kvm_arch_ops->hardware_enable(NULL);
2988}
2989
2990static void hardware_disable(void *junk)
2991{
2992 int cpu = raw_smp_processor_id();
2993
2994 if (!cpu_isset(cpu, cpus_hardware_enabled))
2995 return;
2996 cpu_clear(cpu, cpus_hardware_enabled);
2997 decache_vcpus_on_cpu(cpu);
2998 kvm_arch_ops->hardware_disable(NULL);
2999}
3000
2908static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 3001static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2909 void *v) 3002 void *v)
2910{ 3003{
2911 int cpu = (long)v; 3004 int cpu = (long)v;
2912 3005
2913 switch (val) { 3006 switch (val) {
2914 case CPU_DOWN_PREPARE: 3007 case CPU_DYING:
2915 case CPU_DOWN_PREPARE_FROZEN: 3008 case CPU_DYING_FROZEN:
2916 case CPU_UP_CANCELED: 3009 case CPU_UP_CANCELED:
2917 case CPU_UP_CANCELED_FROZEN: 3010 case CPU_UP_CANCELED_FROZEN:
2918 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 3011 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2919 cpu); 3012 cpu);
2920 decache_vcpus_on_cpu(cpu); 3013 smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
2921 smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
2922 NULL, 0, 1);
2923 break; 3014 break;
2924 case CPU_ONLINE: 3015 case CPU_ONLINE:
2925 case CPU_ONLINE_FROZEN: 3016 case CPU_ONLINE_FROZEN:
2926 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 3017 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2927 cpu); 3018 cpu);
2928 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, 3019 smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
2929 NULL, 0, 1);
2930 break; 3020 break;
2931 } 3021 }
2932 return NOTIFY_OK; 3022 return NOTIFY_OK;
2933} 3023}
2934 3024
3025void kvm_io_bus_init(struct kvm_io_bus *bus)
3026{
3027 memset(bus, 0, sizeof(*bus));
3028}
3029
3030void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3031{
3032 int i;
3033
3034 for (i = 0; i < bus->dev_count; i++) {
3035 struct kvm_io_device *pos = bus->devs[i];
3036
3037 kvm_iodevice_destructor(pos);
3038 }
3039}
3040
3041struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
3042{
3043 int i;
3044
3045 for (i = 0; i < bus->dev_count; i++) {
3046 struct kvm_io_device *pos = bus->devs[i];
3047
3048 if (pos->in_range(pos, addr))
3049 return pos;
3050 }
3051
3052 return NULL;
3053}
3054
3055void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
3056{
3057 BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
3058
3059 bus->devs[bus->dev_count++] = dev;
3060}
3061
2935static struct notifier_block kvm_cpu_notifier = { 3062static struct notifier_block kvm_cpu_notifier = {
2936 .notifier_call = kvm_cpu_hotplug, 3063 .notifier_call = kvm_cpu_hotplug,
2937 .priority = 20, /* must be > scheduler priority */ 3064 .priority = 20, /* must be > scheduler priority */
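
hardware_enable()/hardware_disable() above wrap the arch hooks with the new cpus_hardware_enabled mask, so enabling or disabling virtualization on a CPU becomes idempotent and the hotplug notifier, kvm_suspend()/kvm_resume() and the reboot notifier can all call the same pair of helpers. The standalone sketch below models that guard with a plain bitmask; the cpu numbers and the arch_* bodies are placeholders.

#include <stdio.h>

static unsigned long cpus_enabled;		/* stands in for cpus_hardware_enabled */

static void arch_enable(int cpu)  { printf("enable  cpu%d\n", cpu); }
static void arch_disable(int cpu) { printf("disable cpu%d\n", cpu); }

static void hardware_enable(int cpu)
{
	if (cpus_enabled & (1UL << cpu))	/* already enabled: nothing to do */
		return;
	cpus_enabled |= 1UL << cpu;
	arch_enable(cpu);
}

static void hardware_disable(int cpu)
{
	if (!(cpus_enabled & (1UL << cpu)))	/* never enabled: nothing to do */
		return;
	cpus_enabled &= ~(1UL << cpu);
	arch_disable(cpu);
}

int main(void)
{
	hardware_enable(0);
	hardware_enable(0);			/* second call is a no-op */
	hardware_disable(0);
	hardware_disable(1);			/* disabling an untouched cpu is safe */
	return 0;
}

In the patch the disable side also runs decache_vcpus_on_cpu() before calling kvm_arch_ops->hardware_disable(), which is why kvm_suspend() and the CPU_DYING notifier case later in this diff shrink to a single hardware_disable() call.
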
@@ -2983,14 +3110,13 @@ static void kvm_exit_debug(void)
2983 3110
2984static int kvm_suspend(struct sys_device *dev, pm_message_t state) 3111static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2985{ 3112{
2986 decache_vcpus_on_cpu(raw_smp_processor_id()); 3113 hardware_disable(NULL);
2987 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
2988 return 0; 3114 return 0;
2989} 3115}
2990 3116
2991static int kvm_resume(struct sys_device *dev) 3117static int kvm_resume(struct sys_device *dev)
2992{ 3118{
2993 on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1); 3119 hardware_enable(NULL);
2994 return 0; 3120 return 0;
2995} 3121}
2996 3122
@@ -3007,18 +3133,6 @@ static struct sys_device kvm_sysdev = {
3007 3133
3008hpa_t bad_page_address; 3134hpa_t bad_page_address;
3009 3135
3010static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
3011 const char *dev_name, void *data, struct vfsmount *mnt)
3012{
3013 return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
3014}
3015
3016static struct file_system_type kvm_fs_type = {
3017 .name = "kvmfs",
3018 .get_sb = kvmfs_get_sb,
3019 .kill_sb = kill_anon_super,
3020};
3021
3022int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module) 3136int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
3023{ 3137{
3024 int r; 3138 int r;
@@ -3043,7 +3157,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
3043 if (r < 0) 3157 if (r < 0)
3044 goto out; 3158 goto out;
3045 3159
3046 on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1); 3160 on_each_cpu(hardware_enable, NULL, 0, 1);
3047 r = register_cpu_notifier(&kvm_cpu_notifier); 3161 r = register_cpu_notifier(&kvm_cpu_notifier);
3048 if (r) 3162 if (r)
3049 goto out_free_1; 3163 goto out_free_1;
@@ -3075,7 +3189,7 @@ out_free_2:
3075 unregister_reboot_notifier(&kvm_reboot_notifier); 3189 unregister_reboot_notifier(&kvm_reboot_notifier);
3076 unregister_cpu_notifier(&kvm_cpu_notifier); 3190 unregister_cpu_notifier(&kvm_cpu_notifier);
3077out_free_1: 3191out_free_1:
3078 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1); 3192 on_each_cpu(hardware_disable, NULL, 0, 1);
3079 kvm_arch_ops->hardware_unsetup(); 3193 kvm_arch_ops->hardware_unsetup();
3080out: 3194out:
3081 kvm_arch_ops = NULL; 3195 kvm_arch_ops = NULL;
@@ -3089,7 +3203,7 @@ void kvm_exit_arch(void)
3089 sysdev_class_unregister(&kvm_sysdev_class); 3203 sysdev_class_unregister(&kvm_sysdev_class);
3090 unregister_reboot_notifier(&kvm_reboot_notifier); 3204 unregister_reboot_notifier(&kvm_reboot_notifier);
3091 unregister_cpu_notifier(&kvm_cpu_notifier); 3205 unregister_cpu_notifier(&kvm_cpu_notifier);
3092 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1); 3206 on_each_cpu(hardware_disable, NULL, 0, 1);
3093 kvm_arch_ops->hardware_unsetup(); 3207 kvm_arch_ops->hardware_unsetup();
3094 kvm_arch_ops = NULL; 3208 kvm_arch_ops = NULL;
3095} 3209}
@@ -3103,14 +3217,6 @@ static __init int kvm_init(void)
3103 if (r) 3217 if (r)
3104 goto out4; 3218 goto out4;
3105 3219
3106 r = register_filesystem(&kvm_fs_type);
3107 if (r)
3108 goto out3;
3109
3110 kvmfs_mnt = kern_mount(&kvm_fs_type);
3111 r = PTR_ERR(kvmfs_mnt);
3112 if (IS_ERR(kvmfs_mnt))
3113 goto out2;
3114 kvm_init_debug(); 3220 kvm_init_debug();
3115 3221
3116 kvm_init_msr_list(); 3222 kvm_init_msr_list();
@@ -3127,10 +3233,6 @@ static __init int kvm_init(void)
3127 3233
3128out: 3234out:
3129 kvm_exit_debug(); 3235 kvm_exit_debug();
3130 mntput(kvmfs_mnt);
3131out2:
3132 unregister_filesystem(&kvm_fs_type);
3133out3:
3134 kvm_mmu_module_exit(); 3236 kvm_mmu_module_exit();
3135out4: 3237out4:
3136 return r; 3238 return r;
@@ -3140,8 +3242,6 @@ static __exit void kvm_exit(void)
3140{ 3242{
3141 kvm_exit_debug(); 3243 kvm_exit_debug();
3142 __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT)); 3244 __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
3143 mntput(kvmfs_mnt);
3144 unregister_filesystem(&kvm_fs_type);
3145 kvm_mmu_module_exit(); 3245 kvm_mmu_module_exit();
3146} 3246}
3147 3247
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e8e228118de9..b297a6b111ac 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -16,15 +16,18 @@
16 * the COPYING file in the top-level directory. 16 * the COPYING file in the top-level directory.
17 * 17 *
18 */ 18 */
19
20#include "vmx.h"
21#include "kvm.h"
22
19#include <linux/types.h> 23#include <linux/types.h>
20#include <linux/string.h> 24#include <linux/string.h>
21#include <asm/page.h>
22#include <linux/mm.h> 25#include <linux/mm.h>
23#include <linux/highmem.h> 26#include <linux/highmem.h>
24#include <linux/module.h> 27#include <linux/module.h>
25 28
26#include "vmx.h" 29#include <asm/page.h>
27#include "kvm.h" 30#include <asm/cmpxchg.h>
28 31
29#undef MMU_DEBUG 32#undef MMU_DEBUG
30 33
@@ -90,25 +93,11 @@ static int dbg = 1;
90#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT) 93#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
91 94
92 95
93#define PT32_PTE_COPY_MASK \
94 (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
95
96#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
97
98#define PT_FIRST_AVAIL_BITS_SHIFT 9 96#define PT_FIRST_AVAIL_BITS_SHIFT 9
99#define PT64_SECOND_AVAIL_BITS_SHIFT 52 97#define PT64_SECOND_AVAIL_BITS_SHIFT 52
100 98
101#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
102#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) 99#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
103 100
104#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
105#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
106
107#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
108#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
109
110#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
111
112#define VALID_PAGE(x) ((x) != INVALID_PAGE) 101#define VALID_PAGE(x) ((x) != INVALID_PAGE)
113 102
114#define PT64_LEVEL_BITS 9 103#define PT64_LEVEL_BITS 9
@@ -165,6 +154,8 @@ struct kvm_rmap_desc {
165 154
166static struct kmem_cache *pte_chain_cache; 155static struct kmem_cache *pte_chain_cache;
167static struct kmem_cache *rmap_desc_cache; 156static struct kmem_cache *rmap_desc_cache;
157static struct kmem_cache *mmu_page_cache;
158static struct kmem_cache *mmu_page_header_cache;
168 159
169static int is_write_protection(struct kvm_vcpu *vcpu) 160static int is_write_protection(struct kvm_vcpu *vcpu)
170{ 161{
@@ -202,6 +193,15 @@ static int is_rmap_pte(u64 pte)
202 == (PT_WRITABLE_MASK | PT_PRESENT_MASK); 193 == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
203} 194}
204 195
196static void set_shadow_pte(u64 *sptep, u64 spte)
197{
198#ifdef CONFIG_X86_64
199 set_64bit((unsigned long *)sptep, spte);
200#else
201 set_64bit((unsigned long long *)sptep, spte);
202#endif
203}
204
205static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 205static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
206 struct kmem_cache *base_cache, int min, 206 struct kmem_cache *base_cache, int min,
207 gfp_t gfp_flags) 207 gfp_t gfp_flags)
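
set_shadow_pte() above routes every shadow-PTE update through set_64bit(), so a 64-bit entry is never half-written on a 32-bit host while another CPU might be walking the same shadow page table; rmap_write_protect() now uses it to clear the writable bit, for example. The userspace illustration below makes the same point with a C11 atomic 64-bit store; it is an analogy, not the kernel helper, and the values are arbitrary.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* One shadow PTE; _Atomic guarantees the 8-byte store cannot be torn. */
static _Atomic uint64_t spte;

static void set_shadow_pte(_Atomic uint64_t *sptep, uint64_t val)
{
	atomic_store_explicit(sptep, val, memory_order_relaxed);
}

int main(void)
{
	uint64_t writable = 1ULL << 1;		/* PT_WRITABLE_MASK in the real code */

	set_shadow_pte(&spte, 0xfeed000ULL | writable | 1ULL);	/* present + writable */
	set_shadow_pte(&spte, atomic_load(&spte) & ~writable);	/* write-protect it */
	printf("spte = %#llx\n", (unsigned long long)atomic_load(&spte));
	return 0;
}

The #include <asm/cmpxchg.h> added at the top of mmu.c in this diff accompanies this helper, since set_64bit() is what set_shadow_pte() relies on.
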
@@ -235,6 +235,14 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
235 goto out; 235 goto out;
236 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache, 236 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
237 rmap_desc_cache, 1, gfp_flags); 237 rmap_desc_cache, 1, gfp_flags);
238 if (r)
239 goto out;
240 r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
241 mmu_page_cache, 4, gfp_flags);
242 if (r)
243 goto out;
244 r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
245 mmu_page_header_cache, 4, gfp_flags);
238out: 246out:
239 return r; 247 return r;
240} 248}
@@ -258,6 +266,8 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
258{ 266{
259 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache); 267 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
260 mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache); 268 mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
269 mmu_free_memory_cache(&vcpu->mmu_page_cache);
270 mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
261} 271}
262 272
263static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, 273static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -433,19 +443,18 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
433 BUG_ON(!(*spte & PT_WRITABLE_MASK)); 443 BUG_ON(!(*spte & PT_WRITABLE_MASK));
434 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); 444 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
435 rmap_remove(vcpu, spte); 445 rmap_remove(vcpu, spte);
436 kvm_arch_ops->tlb_flush(vcpu); 446 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
437 *spte &= ~(u64)PT_WRITABLE_MASK; 447 kvm_flush_remote_tlbs(vcpu->kvm);
438 } 448 }
439} 449}
440 450
441#ifdef MMU_DEBUG 451#ifdef MMU_DEBUG
-static int is_empty_shadow_page(hpa_t page_hpa)
+static int is_empty_shadow_page(u64 *spt)
 {
 	u64 *pos;
 	u64 *end;

-	for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
-	     pos != end; pos++)
+	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
 		if (*pos != 0) {
450 printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__, 459 printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
451 pos, *pos); 460 pos, *pos);
@@ -455,13 +464,13 @@ static int is_empty_shadow_page(hpa_t page_hpa)
455} 464}
456#endif 465#endif
457 466
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
+static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
+			      struct kvm_mmu_page *page_head)
 {
-	struct kvm_mmu_page *page_head = page_header(page_hpa);
-
-	ASSERT(is_empty_shadow_page(page_hpa));
-	page_head->page_hpa = page_hpa;
-	list_move(&page_head->link, &vcpu->free_pages);
+	ASSERT(is_empty_shadow_page(page_head->spt));
+	list_del(&page_head->link);
+	mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
+	mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
 	++vcpu->kvm->n_free_mmu_pages;
 }
467 476
@@ -475,12 +484,15 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
475{ 484{
476 struct kvm_mmu_page *page; 485 struct kvm_mmu_page *page;
477 486
-	if (list_empty(&vcpu->free_pages))
+	if (!vcpu->kvm->n_free_mmu_pages)
 		return NULL;

-	page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
-	list_move(&page->link, &vcpu->kvm->active_mmu_pages);
-	ASSERT(is_empty_shadow_page(page->page_hpa));
+	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+				      sizeof *page);
+	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	set_page_private(virt_to_page(page->spt), (unsigned long)page);
+	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+	ASSERT(is_empty_shadow_page(page->spt));
484 page->slot_bitmap = 0; 496 page->slot_bitmap = 0;
485 page->multimapped = 0; 497 page->multimapped = 0;
486 page->parent_pte = parent_pte; 498 page->parent_pte = parent_pte;
@@ -638,7 +650,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
638 u64 *pt; 650 u64 *pt;
639 u64 ent; 651 u64 ent;
640 652
-	pt = __va(page->page_hpa);
+	pt = page->spt;
642 654
643 if (page->role.level == PT_PAGE_TABLE_LEVEL) { 655 if (page->role.level == PT_PAGE_TABLE_LEVEL) {
644 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { 656 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -646,7 +658,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
646 rmap_remove(vcpu, &pt[i]); 658 rmap_remove(vcpu, &pt[i]);
647 pt[i] = 0; 659 pt[i] = 0;
648 } 660 }
-		kvm_arch_ops->tlb_flush(vcpu);
+		kvm_flush_remote_tlbs(vcpu->kvm);
650 return; 662 return;
651 } 663 }
652 664
@@ -659,6 +671,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
659 ent &= PT64_BASE_ADDR_MASK; 671 ent &= PT64_BASE_ADDR_MASK;
660 mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]); 672 mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
661 } 673 }
674 kvm_flush_remote_tlbs(vcpu->kvm);
662} 675}
663 676
664static void kvm_mmu_put_page(struct kvm_vcpu *vcpu, 677static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
@@ -685,12 +698,12 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
685 } 698 }
686 BUG_ON(!parent_pte); 699 BUG_ON(!parent_pte);
687 kvm_mmu_put_page(vcpu, page, parent_pte); 700 kvm_mmu_put_page(vcpu, page, parent_pte);
-		*parent_pte = 0;
+		set_shadow_pte(parent_pte, 0);
689 } 702 }
690 kvm_mmu_page_unlink_children(vcpu, page); 703 kvm_mmu_page_unlink_children(vcpu, page);
691 if (!page->root_count) { 704 if (!page->root_count) {
692 hlist_del(&page->hash_link); 705 hlist_del(&page->hash_link);
-		kvm_mmu_free_page(vcpu, page->page_hpa);
+		kvm_mmu_free_page(vcpu, page);
694 } else 707 } else
695 list_move(&page->link, &vcpu->kvm->active_mmu_pages); 708 list_move(&page->link, &vcpu->kvm->active_mmu_pages);
696} 709}
@@ -717,6 +730,17 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
717 return r; 730 return r;
718} 731}
719 732
733static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
734{
735 struct kvm_mmu_page *page;
736
737 while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
738 pgprintk("%s: zap %lx %x\n",
739 __FUNCTION__, gfn, page->role.word);
740 kvm_mmu_zap_page(vcpu, page);
741 }
742}
743
720static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa) 744static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
721{ 745{
722 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT)); 746 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
@@ -805,7 +829,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
805 return -ENOMEM; 829 return -ENOMEM;
806 } 830 }
807 831
-			table[index] = new_table->page_hpa | PT_PRESENT_MASK
+			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
809 | PT_WRITABLE_MASK | PT_USER_MASK; 833 | PT_WRITABLE_MASK | PT_USER_MASK;
810 } 834 }
811 table_addr = table[index] & PT64_BASE_ADDR_MASK; 835 table_addr = table[index] & PT64_BASE_ADDR_MASK;
@@ -817,11 +841,12 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
817 int i; 841 int i;
818 struct kvm_mmu_page *page; 842 struct kvm_mmu_page *page;
819 843
844 if (!VALID_PAGE(vcpu->mmu.root_hpa))
845 return;
820#ifdef CONFIG_X86_64 846#ifdef CONFIG_X86_64
821 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) { 847 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
822 hpa_t root = vcpu->mmu.root_hpa; 848 hpa_t root = vcpu->mmu.root_hpa;
823 849
824 ASSERT(VALID_PAGE(root));
825 page = page_header(root); 850 page = page_header(root);
826 --page->root_count; 851 --page->root_count;
827 vcpu->mmu.root_hpa = INVALID_PAGE; 852 vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -832,7 +857,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
832 hpa_t root = vcpu->mmu.pae_root[i]; 857 hpa_t root = vcpu->mmu.pae_root[i];
833 858
834 if (root) { 859 if (root) {
835 ASSERT(VALID_PAGE(root));
836 root &= PT64_BASE_ADDR_MASK; 860 root &= PT64_BASE_ADDR_MASK;
837 page = page_header(root); 861 page = page_header(root);
838 --page->root_count; 862 --page->root_count;
@@ -857,7 +881,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
857 ASSERT(!VALID_PAGE(root)); 881 ASSERT(!VALID_PAGE(root));
858 page = kvm_mmu_get_page(vcpu, root_gfn, 0, 882 page = kvm_mmu_get_page(vcpu, root_gfn, 0,
859 PT64_ROOT_LEVEL, 0, 0, NULL); 883 PT64_ROOT_LEVEL, 0, 0, NULL);
-		root = page->page_hpa;
+		root = __pa(page->spt);
861 ++page->root_count; 885 ++page->root_count;
862 vcpu->mmu.root_hpa = root; 886 vcpu->mmu.root_hpa = root;
863 return; 887 return;
@@ -878,7 +902,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
878 page = kvm_mmu_get_page(vcpu, root_gfn, i << 30, 902 page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
879 PT32_ROOT_LEVEL, !is_paging(vcpu), 903 PT32_ROOT_LEVEL, !is_paging(vcpu),
880 0, NULL); 904 0, NULL);
-		root = page->page_hpa;
+		root = __pa(page->spt);
882 ++page->root_count; 906 ++page->root_count;
883 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK; 907 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
884 } 908 }
@@ -928,9 +952,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
928 context->free = nonpaging_free; 952 context->free = nonpaging_free;
929 context->root_level = 0; 953 context->root_level = 0;
930 context->shadow_root_level = PT32E_ROOT_LEVEL; 954 context->shadow_root_level = PT32E_ROOT_LEVEL;
-	mmu_alloc_roots(vcpu);
-	ASSERT(VALID_PAGE(context->root_hpa));
-	kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
+	context->root_hpa = INVALID_PAGE;
934 return 0; 956 return 0;
935} 957}
936 958
@@ -944,59 +966,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
944{ 966{
945 pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3); 967 pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
946 mmu_free_roots(vcpu); 968 mmu_free_roots(vcpu);
947 if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
948 kvm_mmu_free_some_pages(vcpu);
949 mmu_alloc_roots(vcpu);
950 kvm_mmu_flush_tlb(vcpu);
951 kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
952}
953
954static inline void set_pte_common(struct kvm_vcpu *vcpu,
955 u64 *shadow_pte,
956 gpa_t gaddr,
957 int dirty,
958 u64 access_bits,
959 gfn_t gfn)
960{
961 hpa_t paddr;
962
963 *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
964 if (!dirty)
965 access_bits &= ~PT_WRITABLE_MASK;
966
967 paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
968
969 *shadow_pte |= access_bits;
970
971 if (is_error_hpa(paddr)) {
972 *shadow_pte |= gaddr;
973 *shadow_pte |= PT_SHADOW_IO_MARK;
974 *shadow_pte &= ~PT_PRESENT_MASK;
975 return;
976 }
977
978 *shadow_pte |= paddr;
979
980 if (access_bits & PT_WRITABLE_MASK) {
981 struct kvm_mmu_page *shadow;
982
983 shadow = kvm_mmu_lookup_page(vcpu, gfn);
984 if (shadow) {
985 pgprintk("%s: found shadow page for %lx, marking ro\n",
986 __FUNCTION__, gfn);
987 access_bits &= ~PT_WRITABLE_MASK;
988 if (is_writeble_pte(*shadow_pte)) {
989 *shadow_pte &= ~PT_WRITABLE_MASK;
990 kvm_arch_ops->tlb_flush(vcpu);
991 }
992 }
993 }
994
995 if (access_bits & PT_WRITABLE_MASK)
996 mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
997
998 page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
999 rmap_add(vcpu, shadow_pte);
1000} 969}
1001 970
1002static void inject_page_fault(struct kvm_vcpu *vcpu, 971static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -1006,23 +975,6 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
1006 kvm_arch_ops->inject_page_fault(vcpu, addr, err_code); 975 kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
1007} 976}
1008 977
1009static inline int fix_read_pf(u64 *shadow_ent)
1010{
1011 if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
1012 !(*shadow_ent & PT_USER_MASK)) {
1013 /*
1014 * If supervisor write protect is disabled, we shadow kernel
1015 * pages as user pages so we can trap the write access.
1016 */
1017 *shadow_ent |= PT_USER_MASK;
1018 *shadow_ent &= ~PT_WRITABLE_MASK;
1019
1020 return 1;
1021
1022 }
1023 return 0;
1024}
1025
1026static void paging_free(struct kvm_vcpu *vcpu) 978static void paging_free(struct kvm_vcpu *vcpu)
1027{ 979{
1028 nonpaging_free(vcpu); 980 nonpaging_free(vcpu);
@@ -1047,10 +999,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1047 context->free = paging_free; 999 context->free = paging_free;
1048 context->root_level = level; 1000 context->root_level = level;
1049 context->shadow_root_level = level; 1001 context->shadow_root_level = level;
-	mmu_alloc_roots(vcpu);
-	ASSERT(VALID_PAGE(context->root_hpa));
-	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
-		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+	context->root_hpa = INVALID_PAGE;
1054 return 0; 1003 return 0;
1055} 1004}
1056 1005
@@ -1069,10 +1018,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
1069 context->free = paging_free; 1018 context->free = paging_free;
1070 context->root_level = PT32_ROOT_LEVEL; 1019 context->root_level = PT32_ROOT_LEVEL;
1071 context->shadow_root_level = PT32E_ROOT_LEVEL; 1020 context->shadow_root_level = PT32E_ROOT_LEVEL;
-	mmu_alloc_roots(vcpu);
-	ASSERT(VALID_PAGE(context->root_hpa));
-	kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
-		    (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+	context->root_hpa = INVALID_PAGE;
1076 return 0; 1022 return 0;
1077} 1023}
1078 1024
@@ -1107,18 +1053,33 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1107 1053
1108int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) 1054int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1109{ 1055{
1056 destroy_kvm_mmu(vcpu);
1057 return init_kvm_mmu(vcpu);
1058}
1059
1060int kvm_mmu_load(struct kvm_vcpu *vcpu)
1061{
1110 int r; 1062 int r;
1111 1063
-	destroy_kvm_mmu(vcpu);
-	r = init_kvm_mmu(vcpu);
-	if (r < 0)
-		goto out;
+	spin_lock(&vcpu->kvm->lock);
1116 r = mmu_topup_memory_caches(vcpu); 1065 r = mmu_topup_memory_caches(vcpu);
1066 if (r)
1067 goto out;
1068 mmu_alloc_roots(vcpu);
1069 kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
1070 kvm_mmu_flush_tlb(vcpu);
1117out: 1071out:
1072 spin_unlock(&vcpu->kvm->lock);
1118 return r; 1073 return r;
1119} 1074}
1075EXPORT_SYMBOL_GPL(kvm_mmu_load);
1076
1077void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1078{
1079 mmu_free_roots(vcpu);
1080}
1120 1081
-static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1122 struct kvm_mmu_page *page, 1083 struct kvm_mmu_page *page,
1123 u64 *spte) 1084 u64 *spte)
1124{ 1085{
@@ -1135,9 +1096,25 @@ static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
1135 } 1096 }
1136 } 1097 }
1137 *spte = 0; 1098 *spte = 0;
1099 kvm_flush_remote_tlbs(vcpu->kvm);
1100}
1101
1102static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1103 struct kvm_mmu_page *page,
1104 u64 *spte,
1105 const void *new, int bytes)
1106{
1107 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1108 return;
1109
1110 if (page->role.glevels == PT32_ROOT_LEVEL)
1111 paging32_update_pte(vcpu, page, spte, new, bytes);
1112 else
1113 paging64_update_pte(vcpu, page, spte, new, bytes);
1138} 1114}
1139 1115
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+		       const u8 *old, const u8 *new, int bytes)
1141{ 1118{
1142 gfn_t gfn = gpa >> PAGE_SHIFT; 1119 gfn_t gfn = gpa >> PAGE_SHIFT;
1143 struct kvm_mmu_page *page; 1120 struct kvm_mmu_page *page;
@@ -1149,6 +1126,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1149 unsigned pte_size; 1126 unsigned pte_size;
1150 unsigned page_offset; 1127 unsigned page_offset;
1151 unsigned misaligned; 1128 unsigned misaligned;
1129 unsigned quadrant;
1152 int level; 1130 int level;
1153 int flooded = 0; 1131 int flooded = 0;
1154 int npte; 1132 int npte;
@@ -1169,6 +1147,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1169 continue; 1147 continue;
1170 pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8; 1148 pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1171 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); 1149 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1150 misaligned |= bytes < 4;
1172 if (misaligned || flooded) { 1151 if (misaligned || flooded) {
1173 /* 1152 /*
1174 * Misaligned accesses are too much trouble to fix 1153 * Misaligned accesses are too much trouble to fix
@@ -1200,21 +1179,20 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1200 page_offset <<= 1; 1179 page_offset <<= 1;
1201 npte = 2; 1180 npte = 2;
1202 } 1181 }
1182 quadrant = page_offset >> PAGE_SHIFT;
1203 page_offset &= ~PAGE_MASK; 1183 page_offset &= ~PAGE_MASK;
1184 if (quadrant != page->role.quadrant)
1185 continue;
1204 } 1186 }
-		spte = __va(page->page_hpa);
-		spte += page_offset / sizeof(*spte);
+		spte = &page->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
-			mmu_pre_write_zap_pte(vcpu, page, spte);
+			mmu_pte_write_zap_pte(vcpu, page, spte);
+			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
1209 ++spte; 1191 ++spte;
1210 } 1192 }
1211 } 1193 }
1212} 1194}
1213 1195
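The quadrant check added to kvm_mmu_pte_write() handles 32-bit (non-PAE) guests, whose 4-byte ptes are shadowed by 8-byte sptes: one guest page table holds 1024 entries, but a shadow page holds only 512, so each guest table is backed by two shadow pages distinguished by role.quadrant. A hypothetical helper (not in the patch) showing the same arithmetic the hunk performs:

/* For a write at byte offset 'offset' inside a 32-bit guest page table,
 * return which shadow page (quadrant) it lands in and the byte offset of
 * the corresponding 8-byte spte within that shadow page. */
static void gpte_to_shadow_slot(unsigned int offset,
				unsigned int *quadrant,
				unsigned int *spte_offset)
{
	unsigned int shadow = offset * 2;	/* 4-byte gpte -> 8-byte spte */

	*quadrant = shadow >> 12;		/* 0 or 1 */
	*spte_offset = shadow & 0xfff;
}

/* Example: offset 0x804 -> gpte index 513 -> quadrant 1, spte offset 0x8. */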
1214void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1215{
1216}
1217
1218int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) 1196int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1219{ 1197{
1220 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva); 1198 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
@@ -1243,13 +1221,6 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
1243 struct kvm_mmu_page, link); 1221 struct kvm_mmu_page, link);
1244 kvm_mmu_zap_page(vcpu, page); 1222 kvm_mmu_zap_page(vcpu, page);
1245 } 1223 }
1246 while (!list_empty(&vcpu->free_pages)) {
1247 page = list_entry(vcpu->free_pages.next,
1248 struct kvm_mmu_page, link);
1249 list_del(&page->link);
1250 __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
1251 page->page_hpa = INVALID_PAGE;
1252 }
1253 free_page((unsigned long)vcpu->mmu.pae_root); 1224 free_page((unsigned long)vcpu->mmu.pae_root);
1254} 1225}
1255 1226
@@ -1260,18 +1231,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1260 1231
1261 ASSERT(vcpu); 1232 ASSERT(vcpu);
1262 1233
-	for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
-		struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
-
-		INIT_LIST_HEAD(&page_header->link);
-		if ((page = alloc_page(GFP_KERNEL)) == NULL)
-			goto error_1;
-		set_page_private(page, (unsigned long)page_header);
-		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
-		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
-		list_add(&page_header->link, &vcpu->free_pages);
-		++vcpu->kvm->n_free_mmu_pages;
-	}
+	vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
1275 1235
1276 /* 1236 /*
1277 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. 1237 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
@@ -1296,7 +1256,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
1296{ 1256{
1297 ASSERT(vcpu); 1257 ASSERT(vcpu);
1298 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa)); 1258 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1299 ASSERT(list_empty(&vcpu->free_pages));
1300 1259
1301 return alloc_mmu_pages(vcpu); 1260 return alloc_mmu_pages(vcpu);
1302} 1261}
@@ -1305,7 +1264,6 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1305{ 1264{
1306 ASSERT(vcpu); 1265 ASSERT(vcpu);
1307 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa)); 1266 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1308 ASSERT(!list_empty(&vcpu->free_pages));
1309 1267
1310 return init_kvm_mmu(vcpu); 1268 return init_kvm_mmu(vcpu);
1311} 1269}
@@ -1331,7 +1289,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
1331 if (!test_bit(slot, &page->slot_bitmap)) 1289 if (!test_bit(slot, &page->slot_bitmap))
1332 continue; 1290 continue;
1333 1291
-		pt = __va(page->page_hpa);
+		pt = page->spt;
1335 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) 1293 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1336 /* avoid RMW */ 1294 /* avoid RMW */
1337 if (pt[i] & PT_WRITABLE_MASK) { 1295 if (pt[i] & PT_WRITABLE_MASK) {
@@ -1354,7 +1312,7 @@ void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
1354 } 1312 }
1355 1313
1356 mmu_free_memory_caches(vcpu); 1314 mmu_free_memory_caches(vcpu);
-	kvm_arch_ops->tlb_flush(vcpu);
+	kvm_flush_remote_tlbs(vcpu->kvm);
1358 init_kvm_mmu(vcpu); 1316 init_kvm_mmu(vcpu);
1359} 1317}
1360 1318
@@ -1364,6 +1322,10 @@ void kvm_mmu_module_exit(void)
1364 kmem_cache_destroy(pte_chain_cache); 1322 kmem_cache_destroy(pte_chain_cache);
1365 if (rmap_desc_cache) 1323 if (rmap_desc_cache)
1366 kmem_cache_destroy(rmap_desc_cache); 1324 kmem_cache_destroy(rmap_desc_cache);
1325 if (mmu_page_cache)
1326 kmem_cache_destroy(mmu_page_cache);
1327 if (mmu_page_header_cache)
1328 kmem_cache_destroy(mmu_page_header_cache);
1367} 1329}
1368 1330
1369int kvm_mmu_module_init(void) 1331int kvm_mmu_module_init(void)
@@ -1379,6 +1341,18 @@ int kvm_mmu_module_init(void)
1379 if (!rmap_desc_cache) 1341 if (!rmap_desc_cache)
1380 goto nomem; 1342 goto nomem;
1381 1343
1344 mmu_page_cache = kmem_cache_create("kvm_mmu_page",
1345 PAGE_SIZE,
1346 PAGE_SIZE, 0, NULL, NULL);
1347 if (!mmu_page_cache)
1348 goto nomem;
1349
1350 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1351 sizeof(struct kvm_mmu_page),
1352 0, 0, NULL, NULL);
1353 if (!mmu_page_header_cache)
1354 goto nomem;
1355
1382 return 0; 1356 return 0;
1383 1357
1384nomem: 1358nomem:
@@ -1482,7 +1456,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
1482 int i; 1456 int i;
1483 1457
1484 list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) { 1458 list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-		u64 *pt = __va(page->page_hpa);
+		u64 *pt = page->spt;
1486 1460
1487 if (page->role.level != PT_PAGE_TABLE_LEVEL) 1461 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1488 continue; 1462 continue;
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 73ffbffb1097..a7c5cb0319ea 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -31,7 +31,6 @@
31 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) 31 #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) 33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
34 #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
35 #ifdef CONFIG_X86_64 34 #ifdef CONFIG_X86_64
36 #define PT_MAX_FULL_LEVELS 4 35 #define PT_MAX_FULL_LEVELS 4
37 #else 36 #else
@@ -46,7 +45,6 @@
46 #define PT_INDEX(addr, level) PT32_INDEX(addr, level) 45 #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
47 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 46 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
48 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) 47 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
49 #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
50 #define PT_MAX_FULL_LEVELS 2 48 #define PT_MAX_FULL_LEVELS 2
51#else 49#else
52 #error Invalid PTTYPE value 50 #error Invalid PTTYPE value
@@ -192,40 +190,143 @@ static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
192 mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]); 190 mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
193} 191}
194 192
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
-			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
+				  u64 *shadow_pte,
+				  gpa_t gaddr,
+				  pt_element_t *gpte,
+				  u64 access_bits,
+				  int user_fault,
+				  int write_fault,
+				  int *ptwrite,
+				  struct guest_walker *walker,
+				  gfn_t gfn)
 {
-	ASSERT(*shadow_pte == 0);
-	access_bits &= guest_pte;
-	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
-	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
-		       guest_pte & PT_DIRTY_MASK, access_bits, gfn);
+	hpa_t paddr;
+	int dirty = *gpte & PT_DIRTY_MASK;
+	u64 spte = *shadow_pte;
+	int was_rmapped = is_rmap_pte(spte);
+
209 pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
210 " user_fault %d gfn %lx\n",
211 __FUNCTION__, spte, (u64)*gpte, access_bits,
212 write_fault, user_fault, gfn);
213
214 if (write_fault && !dirty) {
215 *gpte |= PT_DIRTY_MASK;
216 dirty = 1;
217 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
218 }
219
220 spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
221 spte |= *gpte & PT64_NX_MASK;
222 if (!dirty)
223 access_bits &= ~PT_WRITABLE_MASK;
224
225 paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
226
227 spte |= PT_PRESENT_MASK;
228 if (access_bits & PT_USER_MASK)
229 spte |= PT_USER_MASK;
230
231 if (is_error_hpa(paddr)) {
232 spte |= gaddr;
233 spte |= PT_SHADOW_IO_MARK;
234 spte &= ~PT_PRESENT_MASK;
235 set_shadow_pte(shadow_pte, spte);
236 return;
237 }
238
239 spte |= paddr;
240
241 if ((access_bits & PT_WRITABLE_MASK)
242 || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
243 struct kvm_mmu_page *shadow;
244
245 spte |= PT_WRITABLE_MASK;
246 if (user_fault) {
247 mmu_unshadow(vcpu, gfn);
248 goto unshadowed;
249 }
250
251 shadow = kvm_mmu_lookup_page(vcpu, gfn);
252 if (shadow) {
253 pgprintk("%s: found shadow page for %lx, marking ro\n",
254 __FUNCTION__, gfn);
255 access_bits &= ~PT_WRITABLE_MASK;
256 if (is_writeble_pte(spte)) {
257 spte &= ~PT_WRITABLE_MASK;
258 kvm_arch_ops->tlb_flush(vcpu);
259 }
260 if (write_fault)
261 *ptwrite = 1;
262 }
263 }
264
265unshadowed:
266
267 if (access_bits & PT_WRITABLE_MASK)
268 mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
269
270 set_shadow_pte(shadow_pte, spte);
271 page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
272 if (!was_rmapped)
273 rmap_add(vcpu, shadow_pte);
203} 274}
204 275
-static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
-			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
+			   u64 *shadow_pte, u64 access_bits,
278 int user_fault, int write_fault, int *ptwrite,
279 struct guest_walker *walker, gfn_t gfn)
280{
281 access_bits &= *gpte;
282 FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
283 gpte, access_bits, user_fault, write_fault,
284 ptwrite, walker, gfn);
285}
286
287static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
288 u64 *spte, const void *pte, int bytes)
289{
290 pt_element_t gpte;
291
292 if (bytes < sizeof(pt_element_t))
293 return;
294 gpte = *(const pt_element_t *)pte;
295 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
296 return;
297 pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
298 FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
299 0, NULL, NULL,
300 (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
301}
302
303static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
304 u64 *shadow_pte, u64 access_bits,
305 int user_fault, int write_fault, int *ptwrite,
306 struct guest_walker *walker, gfn_t gfn)
207{ 307{
208 gpa_t gaddr; 308 gpa_t gaddr;
209 309
-	ASSERT(*shadow_pte == 0);
-	access_bits &= guest_pde;
+	access_bits &= *gpde;
212 gaddr = (gpa_t)gfn << PAGE_SHIFT; 311 gaddr = (gpa_t)gfn << PAGE_SHIFT;
213 if (PTTYPE == 32 && is_cpuid_PSE36()) 312 if (PTTYPE == 32 && is_cpuid_PSE36())
-		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
+		gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
215 (32 - PT32_DIR_PSE36_SHIFT); 314 (32 - PT32_DIR_PSE36_SHIFT);
-	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
-	set_pte_common(vcpu, shadow_pte, gaddr,
-		       guest_pde & PT_DIRTY_MASK, access_bits, gfn);
+	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
+			      gpde, access_bits, user_fault, write_fault,
+			      ptwrite, walker, gfn);
219} 318}
220 319
221/* 320/*
222 * Fetch a shadow pte for a specific level in the paging hierarchy. 321 * Fetch a shadow pte for a specific level in the paging hierarchy.
223 */ 322 */
224static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, 323static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *walker)
+			 struct guest_walker *walker,
+			 int user_fault, int write_fault, int *ptwrite)
226{ 326{
227 hpa_t shadow_addr; 327 hpa_t shadow_addr;
228 int level; 328 int level;
329 u64 *shadow_ent;
229 u64 *prev_shadow_ent = NULL; 330 u64 *prev_shadow_ent = NULL;
230 pt_element_t *guest_ent = walker->ptep; 331 pt_element_t *guest_ent = walker->ptep;
231 332
@@ -242,37 +343,23 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
242 343
243 for (; ; level--) { 344 for (; ; level--) {
244 u32 index = SHADOW_PT_INDEX(addr, level); 345 u32 index = SHADOW_PT_INDEX(addr, level);
245 u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
246 struct kvm_mmu_page *shadow_page; 346 struct kvm_mmu_page *shadow_page;
247 u64 shadow_pte; 347 u64 shadow_pte;
248 int metaphysical; 348 int metaphysical;
249 gfn_t table_gfn; 349 gfn_t table_gfn;
250 unsigned hugepage_access = 0; 350 unsigned hugepage_access = 0;
251 351
352 shadow_ent = ((u64 *)__va(shadow_addr)) + index;
252 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) { 353 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
253 if (level == PT_PAGE_TABLE_LEVEL) 354 if (level == PT_PAGE_TABLE_LEVEL)
-				return shadow_ent;
+				break;
255 shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK; 356 shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
256 prev_shadow_ent = shadow_ent; 357 prev_shadow_ent = shadow_ent;
257 continue; 358 continue;
258 } 359 }
259 360
-		if (level == PT_PAGE_TABLE_LEVEL) {
-
+		if (level == PT_PAGE_TABLE_LEVEL)
+			break;
262 if (walker->level == PT_DIRECTORY_LEVEL) {
263 if (prev_shadow_ent)
264 *prev_shadow_ent |= PT_SHADOW_PS_MARK;
265 FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
266 walker->inherited_ar,
267 walker->gfn);
268 } else {
269 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
270 FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
271 walker->inherited_ar,
272 walker->gfn);
273 }
274 return shadow_ent;
275 }
276 363
277 if (level - 1 == PT_PAGE_TABLE_LEVEL 364 if (level - 1 == PT_PAGE_TABLE_LEVEL
278 && walker->level == PT_DIRECTORY_LEVEL) { 365 && walker->level == PT_DIRECTORY_LEVEL) {
@@ -289,90 +376,24 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
289 shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, 376 shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
290 metaphysical, hugepage_access, 377 metaphysical, hugepage_access,
291 shadow_ent); 378 shadow_ent);
-		shadow_addr = shadow_page->page_hpa;
+		shadow_addr = __pa(shadow_page->spt);
293 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK 380 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
294 | PT_WRITABLE_MASK | PT_USER_MASK; 381 | PT_WRITABLE_MASK | PT_USER_MASK;
295 *shadow_ent = shadow_pte; 382 *shadow_ent = shadow_pte;
296 prev_shadow_ent = shadow_ent; 383 prev_shadow_ent = shadow_ent;
297 } 384 }
298}
299 385
300/* 386 if (walker->level == PT_DIRECTORY_LEVEL) {
301 * The guest faulted for write. We need to 387 FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
302 * 388 walker->inherited_ar, user_fault, write_fault,
303 * - check write permissions 389 ptwrite, walker, walker->gfn);
304 * - update the guest pte dirty bit 390 } else {
305 * - update our own dirty page tracking structures 391 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
306 */ 392 FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
307static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu, 393 walker->inherited_ar, user_fault, write_fault,
308 u64 *shadow_ent, 394 ptwrite, walker, walker->gfn);
309 struct guest_walker *walker,
310 gva_t addr,
311 int user,
312 int *write_pt)
313{
314 pt_element_t *guest_ent;
315 int writable_shadow;
316 gfn_t gfn;
317 struct kvm_mmu_page *page;
318
319 if (is_writeble_pte(*shadow_ent))
320 return !user || (*shadow_ent & PT_USER_MASK);
321
322 writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
323 if (user) {
324 /*
325 * User mode access. Fail if it's a kernel page or a read-only
326 * page.
327 */
328 if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
329 return 0;
330 ASSERT(*shadow_ent & PT_USER_MASK);
331 } else
332 /*
333 * Kernel mode access. Fail if it's a read-only page and
334 * supervisor write protection is enabled.
335 */
336 if (!writable_shadow) {
337 if (is_write_protection(vcpu))
338 return 0;
339 *shadow_ent &= ~PT_USER_MASK;
340 }
341
342 guest_ent = walker->ptep;
343
344 if (!is_present_pte(*guest_ent)) {
345 *shadow_ent = 0;
346 return 0;
347 } 395 }
348 396 return shadow_ent;
349 gfn = walker->gfn;
350
351 if (user) {
352 /*
353 * Usermode page faults won't be for page table updates.
354 */
355 while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
356 pgprintk("%s: zap %lx %x\n",
357 __FUNCTION__, gfn, page->role.word);
358 kvm_mmu_zap_page(vcpu, page);
359 }
360 } else if (kvm_mmu_lookup_page(vcpu, gfn)) {
361 pgprintk("%s: found shadow page for %lx, marking ro\n",
362 __FUNCTION__, gfn);
363 mark_page_dirty(vcpu->kvm, gfn);
364 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
365 *guest_ent |= PT_DIRTY_MASK;
366 *write_pt = 1;
367 return 0;
368 }
369 mark_page_dirty(vcpu->kvm, gfn);
370 *shadow_ent |= PT_WRITABLE_MASK;
371 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
372 *guest_ent |= PT_DIRTY_MASK;
373 rmap_add(vcpu, shadow_ent);
374
375 return 1;
376} 397}
377 398
378/* 399/*
@@ -397,7 +418,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
397 int fetch_fault = error_code & PFERR_FETCH_MASK; 418 int fetch_fault = error_code & PFERR_FETCH_MASK;
398 struct guest_walker walker; 419 struct guest_walker walker;
399 u64 *shadow_pte; 420 u64 *shadow_pte;
400 int fixed;
401 int write_pt = 0; 421 int write_pt = 0;
402 int r; 422 int r;
403 423
@@ -421,27 +441,20 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
421 pgprintk("%s: guest page fault\n", __FUNCTION__); 441 pgprintk("%s: guest page fault\n", __FUNCTION__);
422 inject_page_fault(vcpu, addr, walker.error_code); 442 inject_page_fault(vcpu, addr, walker.error_code);
423 FNAME(release_walker)(&walker); 443 FNAME(release_walker)(&walker);
444 vcpu->last_pt_write_count = 0; /* reset fork detector */
424 return 0; 445 return 0;
425 } 446 }
426 447
-	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
-	pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
-		 shadow_pte, *shadow_pte);
-
+	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+				  &write_pt);
+	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+		 shadow_pte, *shadow_pte, write_pt);
431 /*
432 * Update the shadow pte.
433 */
434 if (write_fault)
435 fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
436 user_fault, &write_pt);
437 else
438 fixed = fix_read_pf(shadow_pte);
439
440 pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
441 shadow_pte, *shadow_pte);
442 452
443 FNAME(release_walker)(&walker); 453 FNAME(release_walker)(&walker);
444 454
455 if (!write_pt)
456 vcpu->last_pt_write_count = 0; /* reset fork detector */
457
445 /* 458 /*
446 * mmio: emulate if accessible, otherwise its a guest fault. 459 * mmio: emulate if accessible, otherwise its a guest fault.
447 */ 460 */
@@ -478,7 +491,5 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
478#undef PT_INDEX 491#undef PT_INDEX
479#undef SHADOW_PT_INDEX 492#undef SHADOW_PT_INDEX
480#undef PT_LEVEL_MASK 493#undef PT_LEVEL_MASK
481#undef PT_PTE_COPY_MASK
482#undef PT_NON_PTE_COPY_MASK
483#undef PT_DIR_BASE_ADDR_MASK 494#undef PT_DIR_BASE_ADDR_MASK
484#undef PT_MAX_FULL_LEVELS 495#undef PT_MAX_FULL_LEVELS
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fa17d6d4f0cb..bc818cc126e3 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -14,16 +14,17 @@
14 * 14 *
15 */ 15 */
16 16
17#include "kvm_svm.h"
18#include "x86_emulate.h"
19
17#include <linux/module.h> 20#include <linux/module.h>
18#include <linux/kernel.h> 21#include <linux/kernel.h>
19#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
20#include <linux/highmem.h> 23#include <linux/highmem.h>
21#include <linux/profile.h> 24#include <linux/profile.h>
22#include <linux/sched.h> 25#include <linux/sched.h>
23#include <asm/desc.h>
24 26
25#include "kvm_svm.h" 27#include <asm/desc.h>
26#include "x86_emulate.h"
27 28
28MODULE_AUTHOR("Qumranet"); 29MODULE_AUTHOR("Qumranet");
29MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
@@ -378,7 +379,7 @@ static __init int svm_hardware_setup(void)
378 int cpu; 379 int cpu;
379 struct page *iopm_pages; 380 struct page *iopm_pages;
380 struct page *msrpm_pages; 381 struct page *msrpm_pages;
-	void *msrpm_va;
+	void *iopm_va, *msrpm_va;
382 int r; 383 int r;
383 384
384 kvm_emulator_want_group7_invlpg(); 385 kvm_emulator_want_group7_invlpg();
@@ -387,8 +388,10 @@ static __init int svm_hardware_setup(void)
387 388
388 if (!iopm_pages) 389 if (!iopm_pages)
389 return -ENOMEM; 390 return -ENOMEM;
-	memset(page_address(iopm_pages), 0xff,
-					PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+
+	iopm_va = page_address(iopm_pages);
+	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
392 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; 395 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
393 396
394 397
@@ -579,7 +582,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
579 goto out2; 582 goto out2;
580 583
581 vcpu->svm->vmcb = page_address(page); 584 vcpu->svm->vmcb = page_address(page);
-	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+	clear_page(vcpu->svm->vmcb);
583 vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; 586 vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
584 vcpu->svm->asid_generation = 0; 587 vcpu->svm->asid_generation = 0;
585 memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs)); 588 memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
@@ -587,9 +590,9 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
587 590
588 fx_init(vcpu); 591 fx_init(vcpu);
589 vcpu->fpu_active = 1; 592 vcpu->fpu_active = 1;
-	vcpu->apic_base = 0xfee00000 |
-			/*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
-			MSR_IA32_APICBASE_ENABLE;
+	vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+	if (vcpu == &vcpu->kvm->vcpus[0])
+		vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
593 596
594 return 0; 597 return 0;
595 598
@@ -955,7 +958,7 @@ static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
955 * VMCB is undefined after a SHUTDOWN intercept 958 * VMCB is undefined after a SHUTDOWN intercept
956 * so reinitialize it. 959 * so reinitialize it.
957 */ 960 */
-	memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+	clear_page(vcpu->svm->vmcb);
959 init_vmcb(vcpu->svm->vmcb); 962 init_vmcb(vcpu->svm->vmcb);
960 963
961 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; 964 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
@@ -1113,12 +1116,7 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1113{ 1116{
1114 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1; 1117 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
1115 skip_emulated_instruction(vcpu); 1118 skip_emulated_instruction(vcpu);
-	if (vcpu->irq_summary)
-		return 1;
-
-	kvm_run->exit_reason = KVM_EXIT_HLT;
-	++vcpu->stat.halt_exits;
-	return 0;
+	return kvm_emulate_halt(vcpu);
1122} 1120}
1123 1121
1124static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1122static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1473,6 +1471,11 @@ static void load_db_regs(unsigned long *db_regs)
1473 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3])); 1471 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
1474} 1472}
1475 1473
1474static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1475{
1476 force_new_asid(vcpu);
1477}
1478
1476static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1479static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1477{ 1480{
1478 u16 fs_selector; 1481 u16 fs_selector;
@@ -1481,11 +1484,20 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1481 int r; 1484 int r;
1482 1485
1483again: 1486again:
1487 r = kvm_mmu_reload(vcpu);
1488 if (unlikely(r))
1489 return r;
1490
1484 if (!vcpu->mmio_read_completed) 1491 if (!vcpu->mmio_read_completed)
1485 do_interrupt_requests(vcpu, kvm_run); 1492 do_interrupt_requests(vcpu, kvm_run);
1486 1493
1487 clgi(); 1494 clgi();
1488 1495
1496 vcpu->guest_mode = 1;
1497 if (vcpu->requests)
1498 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
1499 svm_flush_tlb(vcpu);
1500
1489 pre_svm_run(vcpu); 1501 pre_svm_run(vcpu);
1490 1502
1491 save_host_msrs(vcpu); 1503 save_host_msrs(vcpu);
@@ -1617,6 +1629,8 @@ again:
1617#endif 1629#endif
1618 : "cc", "memory" ); 1630 : "cc", "memory" );
1619 1631
1632 vcpu->guest_mode = 0;
1633
1620 if (vcpu->fpu_active) { 1634 if (vcpu->fpu_active) {
1621 fx_save(vcpu->guest_fx_image); 1635 fx_save(vcpu->guest_fx_image);
1622 fx_restore(vcpu->host_fx_image); 1636 fx_restore(vcpu->host_fx_image);
@@ -1681,11 +1695,6 @@ again:
1681 return r; 1695 return r;
1682} 1696}
1683 1697
1684static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1685{
1686 force_new_asid(vcpu);
1687}
1688
1689static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) 1698static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
1690{ 1699{
1691 vcpu->svm->vmcb->save.cr3 = root; 1700 vcpu->svm->vmcb->save.cr3 = root;
@@ -1727,6 +1736,12 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
1727 1736
1728static int is_disabled(void) 1737static int is_disabled(void)
1729{ 1738{
1739 u64 vm_cr;
1740
1741 rdmsrl(MSR_VM_CR, vm_cr);
1742 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
1743 return 1;
1744
1730 return 0; 1745 return 0;
1731} 1746}
1732 1747
diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
index 5e93814400ce..3b1b0f35b6cb 100644
--- a/drivers/kvm/svm.h
+++ b/drivers/kvm/svm.h
@@ -175,8 +175,11 @@ struct __attribute__ ((__packed__)) vmcb {
175#define SVM_CPUID_FUNC 0x8000000a 175#define SVM_CPUID_FUNC 0x8000000a
176 176
177#define MSR_EFER_SVME_MASK (1ULL << 12) 177#define MSR_EFER_SVME_MASK (1ULL << 12)
178#define MSR_VM_CR 0xc0010114
178#define MSR_VM_HSAVE_PA 0xc0010117ULL 179#define MSR_VM_HSAVE_PA 0xc0010117ULL
179 180
181#define SVM_VM_CR_SVM_DISABLE 4
182
180#define SVM_SELECTOR_S_SHIFT 4 183#define SVM_SELECTOR_S_SHIFT 4
181#define SVM_SELECTOR_DPL_SHIFT 5 184#define SVM_SELECTOR_DPL_SHIFT 5
182#define SVM_SELECTOR_P_SHIFT 7 185#define SVM_SELECTOR_P_SHIFT 7
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index c1ac106ace8c..80628f69916d 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -17,28 +17,35 @@
17 17
18#include "kvm.h" 18#include "kvm.h"
19#include "vmx.h" 19#include "vmx.h"
20#include "segment_descriptor.h"
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/mm.h> 24#include <linux/mm.h>
23#include <linux/highmem.h> 25#include <linux/highmem.h>
24#include <linux/profile.h> 26#include <linux/profile.h>
25#include <linux/sched.h> 27#include <linux/sched.h>
28
26#include <asm/io.h> 29#include <asm/io.h>
27#include <asm/desc.h> 30#include <asm/desc.h>
28 31
29#include "segment_descriptor.h"
30
31MODULE_AUTHOR("Qumranet"); 32MODULE_AUTHOR("Qumranet");
32MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
33 34
35static int init_rmode_tss(struct kvm *kvm);
36
34static DEFINE_PER_CPU(struct vmcs *, vmxarea); 37static DEFINE_PER_CPU(struct vmcs *, vmxarea);
35static DEFINE_PER_CPU(struct vmcs *, current_vmcs); 38static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
36 39
40static struct page *vmx_io_bitmap_a;
41static struct page *vmx_io_bitmap_b;
42
37#ifdef CONFIG_X86_64 43#ifdef CONFIG_X86_64
38#define HOST_IS_64 1 44#define HOST_IS_64 1
39#else 45#else
40#define HOST_IS_64 0 46#define HOST_IS_64 0
41#endif 47#endif
48#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
42 49
43static struct vmcs_descriptor { 50static struct vmcs_descriptor {
44 int size; 51 int size;
@@ -82,18 +89,17 @@ static const u32 vmx_msr_index[] = {
82}; 89};
83#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) 90#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
84 91
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
+static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+{
+	return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+}
+
+static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+{
+	int efer_offset = vcpu->msr_offset_efer;
+	return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
+		msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+}
97 103
98static inline int is_page_fault(u32 intr_info) 104static inline int is_page_fault(u32 intr_info)
99{ 105{
@@ -115,13 +121,23 @@ static inline int is_external_interrupt(u32 intr_info)
115 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); 121 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
116} 122}
117 123
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
119{ 125{
120 int i; 126 int i;
121 127
122 for (i = 0; i < vcpu->nmsrs; ++i) 128 for (i = 0; i < vcpu->nmsrs; ++i)
123 if (vcpu->guest_msrs[i].index == msr) 129 if (vcpu->guest_msrs[i].index == msr)
-			return &vcpu->guest_msrs[i];
+			return i;
131 return -1;
132}
133
134static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
135{
136 int i;
137
138 i = __find_msr_index(vcpu, msr);
139 if (i >= 0)
140 return &vcpu->guest_msrs[i];
125 return NULL; 141 return NULL;
126} 142}
127 143
@@ -147,6 +163,7 @@ static void __vcpu_clear(void *arg)
147 vmcs_clear(vcpu->vmcs); 163 vmcs_clear(vcpu->vmcs);
148 if (per_cpu(current_vmcs, cpu) == vcpu->vmcs) 164 if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
149 per_cpu(current_vmcs, cpu) = NULL; 165 per_cpu(current_vmcs, cpu) = NULL;
166 rdtscll(vcpu->host_tsc);
150} 167}
151 168
152static void vcpu_clear(struct kvm_vcpu *vcpu) 169static void vcpu_clear(struct kvm_vcpu *vcpu)
@@ -234,6 +251,127 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
234 vmcs_writel(field, vmcs_readl(field) | mask); 251 vmcs_writel(field, vmcs_readl(field) | mask);
235} 252}
236 253
254static void update_exception_bitmap(struct kvm_vcpu *vcpu)
255{
256 u32 eb;
257
258 eb = 1u << PF_VECTOR;
259 if (!vcpu->fpu_active)
260 eb |= 1u << NM_VECTOR;
261 if (vcpu->guest_debug.enabled)
262 eb |= 1u << 1;
263 if (vcpu->rmode.active)
264 eb = ~0;
265 vmcs_write32(EXCEPTION_BITMAP, eb);
266}
267
268static void reload_tss(void)
269{
270#ifndef CONFIG_X86_64
271
272 /*
273 * VT restores TR but not its size. Useless.
274 */
275 struct descriptor_table gdt;
276 struct segment_descriptor *descs;
277
278 get_gdt(&gdt);
279 descs = (void *)gdt.base;
280 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
281 load_TR_desc();
282#endif
283}
284
285static void load_transition_efer(struct kvm_vcpu *vcpu)
286{
287 u64 trans_efer;
288 int efer_offset = vcpu->msr_offset_efer;
289
290 trans_efer = vcpu->host_msrs[efer_offset].data;
291 trans_efer &= ~EFER_SAVE_RESTORE_BITS;
292 trans_efer |= msr_efer_save_restore_bits(
293 vcpu->guest_msrs[efer_offset]);
294 wrmsrl(MSR_EFER, trans_efer);
295 vcpu->stat.efer_reload++;
296}
297
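load_transition_efer() and msr_efer_need_save_restore() hinge on EFER_SAVE_RESTORE_BITS covering only EFER.SCE: if the guest and host already agree on that bit, EFER is left alone across VM entry and exit, saving an MSR write on the hot path. A small sketch of the bit manipulation, assuming plain u64 values rather than the vmx_msr_entry arrays:

#include <stdint.h>

#define EFER_SCE_BIT (1ULL << 0)	/* syscall enable, the only bit switched here */

/* Host EFER with the guest's view of the SCE bit merged in. */
static uint64_t transition_efer(uint64_t host_efer, uint64_t guest_efer)
{
	return (host_efer & ~EFER_SCE_BIT) | (guest_efer & EFER_SCE_BIT);
}

/* EFER only needs to be reloaded around the guest if the two differ
 * in the switched bit. */
static int efer_needs_switch(uint64_t host_efer, uint64_t guest_efer)
{
	return (host_efer ^ guest_efer) & EFER_SCE_BIT;
}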
298static void vmx_save_host_state(struct kvm_vcpu *vcpu)
299{
300 struct vmx_host_state *hs = &vcpu->vmx_host_state;
301
302 if (hs->loaded)
303 return;
304
305 hs->loaded = 1;
306 /*
307 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
308 * allow segment selectors with cpl > 0 or ti == 1.
309 */
310 hs->ldt_sel = read_ldt();
311 hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
312 hs->fs_sel = read_fs();
313 if (!(hs->fs_sel & 7))
314 vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
315 else {
316 vmcs_write16(HOST_FS_SELECTOR, 0);
317 hs->fs_gs_ldt_reload_needed = 1;
318 }
319 hs->gs_sel = read_gs();
320 if (!(hs->gs_sel & 7))
321 vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
322 else {
323 vmcs_write16(HOST_GS_SELECTOR, 0);
324 hs->fs_gs_ldt_reload_needed = 1;
325 }
326
327#ifdef CONFIG_X86_64
328 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
329 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
330#else
331 vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
332 vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
333#endif
334
335#ifdef CONFIG_X86_64
336 if (is_long_mode(vcpu)) {
337 save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
338 }
339#endif
340 load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
341 if (msr_efer_need_save_restore(vcpu))
342 load_transition_efer(vcpu);
343}
344
345static void vmx_load_host_state(struct kvm_vcpu *vcpu)
346{
347 struct vmx_host_state *hs = &vcpu->vmx_host_state;
348
349 if (!hs->loaded)
350 return;
351
352 hs->loaded = 0;
353 if (hs->fs_gs_ldt_reload_needed) {
354 load_ldt(hs->ldt_sel);
355 load_fs(hs->fs_sel);
356 /*
357 * If we have to reload gs, we must take care to
358 * preserve our gs base.
359 */
360 local_irq_disable();
361 load_gs(hs->gs_sel);
362#ifdef CONFIG_X86_64
363 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
364#endif
365 local_irq_enable();
366
367 reload_tss();
368 }
369 save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
370 load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
371 if (msr_efer_need_save_restore(vcpu))
372 load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
373}
374
237/* 375/*
238 * Switches to specified vcpu, until a matching vcpu_put(), but assumes 376 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
239 * vcpu mutex is already taken. 377 * vcpu mutex is already taken.
@@ -242,6 +380,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
242{ 380{
243 u64 phys_addr = __pa(vcpu->vmcs); 381 u64 phys_addr = __pa(vcpu->vmcs);
244 int cpu; 382 int cpu;
383 u64 tsc_this, delta;
245 384
246 cpu = get_cpu(); 385 cpu = get_cpu();
247 386
@@ -275,15 +414,43 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
275 414
276 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); 415 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
277 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ 416 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
417
418 /*
419 * Make sure the time stamp counter is monotonous.
420 */
421 rdtscll(tsc_this);
422 delta = vcpu->host_tsc - tsc_this;
423 vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
278 } 424 }
279} 425}
280 426
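The TSC adjustment added to vmx_vcpu_load() keeps the guest-visible TSC monotonic when a vcpu migrates to a CPU whose TSC lags the one it last ran on: the guest reads host_tsc + TSC_OFFSET, so adding the observed shortfall to the offset makes the new CPU continue from at least the old value. A worked sketch of the arithmetic, with illustrative numbers:

#include <stdint.h>

/* Guest TSC as the hardware computes it on VMX. */
static uint64_t guest_tsc(uint64_t host_tsc, uint64_t tsc_offset)
{
	return host_tsc + tsc_offset;
}

/* On migration: the old CPU stopped at host_tsc = 1000 with offset 500
 * (the guest saw 1500).  The new CPU's TSC reads only 800, so
 * delta = 200 and the offset becomes 700 -- the guest next reads
 * 800 + 700 = 1500 and never sees time go backwards. */
static uint64_t adjust_offset(uint64_t old_host_tsc, uint64_t new_host_tsc,
			      uint64_t tsc_offset)
{
	uint64_t delta = old_host_tsc - new_host_tsc;

	return tsc_offset + delta;
}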
281static void vmx_vcpu_put(struct kvm_vcpu *vcpu) 427static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
282{ 428{
429 vmx_load_host_state(vcpu);
283 kvm_put_guest_fpu(vcpu); 430 kvm_put_guest_fpu(vcpu);
284 put_cpu(); 431 put_cpu();
285} 432}
286 433
434static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
435{
436 if (vcpu->fpu_active)
437 return;
438 vcpu->fpu_active = 1;
439 vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
440 if (vcpu->cr0 & CR0_TS_MASK)
441 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
442 update_exception_bitmap(vcpu);
443}
444
445static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
446{
447 if (!vcpu->fpu_active)
448 return;
449 vcpu->fpu_active = 0;
450 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
451 update_exception_bitmap(vcpu);
452}
453
287static void vmx_vcpu_decache(struct kvm_vcpu *vcpu) 454static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
288{ 455{
289 vcpu_clear(vcpu); 456 vcpu_clear(vcpu);
@@ -332,41 +499,61 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
332} 499}
333 500
334/* 501/*
502 * Swap MSR entry in host/guest MSR entry array.
503 */
504void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
505{
506 struct vmx_msr_entry tmp;
507 tmp = vcpu->guest_msrs[to];
508 vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
509 vcpu->guest_msrs[from] = tmp;
510 tmp = vcpu->host_msrs[to];
511 vcpu->host_msrs[to] = vcpu->host_msrs[from];
512 vcpu->host_msrs[from] = tmp;
513}
514
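move_msr_up() is a plain element swap applied to the guest and host MSR arrays in lockstep; setup_msrs() below uses it to compact the MSRs that really must be switched on every exit into the first save_nmsrs slots, so the save/load loops can stop early. A hedged sketch of that usage pattern, with a simplified entry type and a hypothetical predicate:

struct msr_entry {
	unsigned int index;
	unsigned long long data;
};

/* Move every entry satisfying need_switch() to the front of the array,
 * returning how many such entries there are (the 'save_nmsrs' count). */
static int compact_switched_msrs(struct msr_entry *msrs, int nmsrs,
				 int (*need_switch)(unsigned int index))
{
	int i, save_nmsrs = 0;

	for (i = 0; i < nmsrs; ++i)
		if (need_switch(msrs[i].index)) {
			struct msr_entry tmp = msrs[save_nmsrs];

			msrs[save_nmsrs] = msrs[i];
			msrs[i] = tmp;
			++save_nmsrs;
		}
	return save_nmsrs;
}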
515/*
335 * Set up the vmcs to automatically save and restore system 516 * Set up the vmcs to automatically save and restore system
336 * msrs. Don't touch the 64-bit msrs if the guest is in legacy 517 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
337 * mode, as fiddling with msrs is very expensive. 518 * mode, as fiddling with msrs is very expensive.
338 */ 519 */
339static void setup_msrs(struct kvm_vcpu *vcpu) 520static void setup_msrs(struct kvm_vcpu *vcpu)
340{ 521{
-	int nr_skip, nr_good_msrs;
-
-	if (is_long_mode(vcpu))
-		nr_skip = NR_BAD_MSRS;
-	else
-		nr_skip = NR_64BIT_MSRS;
-	nr_good_msrs = vcpu->nmsrs - nr_skip;
+	int save_nmsrs;
348 523
-	/*
-	 * MSR_K6_STAR is only needed on long mode guests, and only
-	 * if efer.sce is enabled.
-	 */
-	if (find_msr_entry(vcpu, MSR_K6_STAR)) {
-		--nr_good_msrs;
+	save_nmsrs = 0;
355#ifdef CONFIG_X86_64 525#ifdef CONFIG_X86_64
-		if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
-			++nr_good_msrs;
-#endif
+	if (is_long_mode(vcpu)) {
+		int index;
+
529 index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
530 if (index >= 0)
531 move_msr_up(vcpu, index, save_nmsrs++);
532 index = __find_msr_index(vcpu, MSR_LSTAR);
533 if (index >= 0)
534 move_msr_up(vcpu, index, save_nmsrs++);
535 index = __find_msr_index(vcpu, MSR_CSTAR);
536 if (index >= 0)
537 move_msr_up(vcpu, index, save_nmsrs++);
538 index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
539 if (index >= 0)
540 move_msr_up(vcpu, index, save_nmsrs++);
541 /*
542 * MSR_K6_STAR is only needed on long mode guests, and only
543 * if efer.sce is enabled.
544 */
545 index = __find_msr_index(vcpu, MSR_K6_STAR);
546 if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
547 move_msr_up(vcpu, index, save_nmsrs++);
359 } 548 }
549#endif
550 vcpu->save_nmsrs = save_nmsrs;
360 551
-	vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
-	vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
-	vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->host_msrs + nr_skip));
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+#ifdef CONFIG_X86_64
+	vcpu->msr_offset_kernel_gs_base =
+		__find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+#endif
+	vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
370} 557}
371 558
372/* 559/*
@@ -394,23 +581,6 @@ static void guest_write_tsc(u64 guest_tsc)
394 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); 581 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
395} 582}
396 583
397static void reload_tss(void)
398{
399#ifndef CONFIG_X86_64
400
401 /*
402 * VT restores TR but not its size. Useless.
403 */
404 struct descriptor_table gdt;
405 struct segment_descriptor *descs;
406
407 get_gdt(&gdt);
408 descs = (void *)gdt.base;
409 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
410 load_TR_desc();
411#endif
412}
413
414/* 584/*
415 * Reads an msr value (of 'msr_index') into 'pdata'. 585 * Reads an msr value (of 'msr_index') into 'pdata'.
416 * Returns 0 on success, non-0 otherwise. 586 * Returns 0 on success, non-0 otherwise.
@@ -470,10 +640,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
470static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 640static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
471{ 641{
472 struct vmx_msr_entry *msr; 642 struct vmx_msr_entry *msr;
643 int ret = 0;
644
473 switch (msr_index) { 645 switch (msr_index) {
474#ifdef CONFIG_X86_64 646#ifdef CONFIG_X86_64
475 case MSR_EFER: 647 case MSR_EFER:
-		return kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_index, data);
649 if (vcpu->vmx_host_state.loaded)
650 load_transition_efer(vcpu);
651 break;
477 case MSR_FS_BASE: 652 case MSR_FS_BASE:
478 vmcs_writel(GUEST_FS_BASE, data); 653 vmcs_writel(GUEST_FS_BASE, data);
479 break; 654 break;
@@ -497,14 +672,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
497 msr = find_msr_entry(vcpu, msr_index); 672 msr = find_msr_entry(vcpu, msr_index);
498 if (msr) { 673 if (msr) {
499 msr->data = data; 674 msr->data = data;
675 if (vcpu->vmx_host_state.loaded)
676 load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
500 break; 677 break;
501 } 678 }
-		return kvm_set_msr_common(vcpu, msr_index, data);
-		msr->data = data;
-		break;
+		ret = kvm_set_msr_common(vcpu, msr_index, data);
505 } 680 }
506 681
-	return 0;
+	return ret;
508} 683}
509 684
510/* 685/*
@@ -530,10 +705,8 @@ static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
530static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) 705static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
531{ 706{
532 unsigned long dr7 = 0x400; 707 unsigned long dr7 = 0x400;
533 u32 exception_bitmap;
534 int old_singlestep; 708 int old_singlestep;
535 709
536 exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
537 old_singlestep = vcpu->guest_debug.singlestep; 710 old_singlestep = vcpu->guest_debug.singlestep;
538 711
539 vcpu->guest_debug.enabled = dbg->enabled; 712 vcpu->guest_debug.enabled = dbg->enabled;
@@ -549,13 +722,9 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
549 dr7 |= 0 << (i*4+16); /* execution breakpoint */ 722 dr7 |= 0 << (i*4+16); /* execution breakpoint */
550 } 723 }
551 724
552 exception_bitmap |= (1u << 1); /* Trap debug exceptions */
553
554 vcpu->guest_debug.singlestep = dbg->singlestep; 725 vcpu->guest_debug.singlestep = dbg->singlestep;
-	} else {
-		exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
+	} else
 		vcpu->guest_debug.singlestep = 0;
-	}
559 728
560 if (old_singlestep && !vcpu->guest_debug.singlestep) { 729 if (old_singlestep && !vcpu->guest_debug.singlestep) {
561 unsigned long flags; 730 unsigned long flags;
@@ -565,7 +734,7 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
565 vmcs_writel(GUEST_RFLAGS, flags); 734 vmcs_writel(GUEST_RFLAGS, flags);
566 } 735 }
567 736
-	vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
+	update_exception_bitmap(vcpu);
569 vmcs_writel(GUEST_DR7, dr7); 738 vmcs_writel(GUEST_DR7, dr7);
570 739
571 return 0; 740 return 0;
@@ -679,14 +848,6 @@ static __exit void hardware_unsetup(void)
679 free_kvm_area(); 848 free_kvm_area();
680} 849}
681 850
682static void update_exception_bitmap(struct kvm_vcpu *vcpu)
683{
684 if (vcpu->rmode.active)
685 vmcs_write32(EXCEPTION_BITMAP, ~0);
686 else
687 vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
688}
689
690static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save) 851static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
691{ 852{
692 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 853 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -793,6 +954,8 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
793 fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds); 954 fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
794 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs); 955 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
795 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs); 956 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
957
958 init_rmode_tss(vcpu->kvm);
796} 959}
797 960
798#ifdef CONFIG_X86_64 961#ifdef CONFIG_X86_64
@@ -837,6 +1000,8 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
837 1000
838static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1001static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
839{ 1002{
1003 vmx_fpu_deactivate(vcpu);
1004
840 if (vcpu->rmode.active && (cr0 & CR0_PE_MASK)) 1005 if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
841 enter_pmode(vcpu); 1006 enter_pmode(vcpu);
842 1007
@@ -852,26 +1017,20 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
852 } 1017 }
853#endif 1018#endif
854 1019
855 if (!(cr0 & CR0_TS_MASK)) {
856 vcpu->fpu_active = 1;
857 vmcs_clear_bits(EXCEPTION_BITMAP, CR0_TS_MASK);
858 }
859
860 vmcs_writel(CR0_READ_SHADOW, cr0); 1020 vmcs_writel(CR0_READ_SHADOW, cr0);
861 vmcs_writel(GUEST_CR0, 1021 vmcs_writel(GUEST_CR0,
862 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON); 1022 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
863 vcpu->cr0 = cr0; 1023 vcpu->cr0 = cr0;
1024
1025 if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
1026 vmx_fpu_activate(vcpu);
864} 1027}
865 1028
866static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 1029static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
867{ 1030{
868 vmcs_writel(GUEST_CR3, cr3); 1031 vmcs_writel(GUEST_CR3, cr3);
869 1032 if (vcpu->cr0 & CR0_PE_MASK)
870 if (!(vcpu->cr0 & CR0_TS_MASK)) { 1033 vmx_fpu_deactivate(vcpu);
871 vcpu->fpu_active = 0;
872 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
873 vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
874 }
875} 1034}
876 1035
877static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1036static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
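The hunks above replace the open-coded CR0.TS and #NM exception-bitmap juggling with vmx_fpu_activate()/vmx_fpu_deactivate(), whose bodies fall outside the context shown here. A minimal sketch, reconstructed from the deleted lines and therefore only an approximation of the real helpers:

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	/* Let the guest touch the FPU without faulting. */
	vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
	/* No need to intercept #NM any more. */
	vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	/* Force a #NM on the guest's next FPU use ... */
	vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
	/* ... and intercept it so KVM can swap FPU state back in. */
	vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
}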
@@ -937,23 +1096,11 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
937 var->unusable = (ar >> 16) & 1; 1096 var->unusable = (ar >> 16) & 1;
938} 1097}
939 1098
940static void vmx_set_segment(struct kvm_vcpu *vcpu, 1099static u32 vmx_segment_access_rights(struct kvm_segment *var)
941 struct kvm_segment *var, int seg)
942{ 1100{
943 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
944 u32 ar; 1101 u32 ar;
945 1102
946 vmcs_writel(sf->base, var->base); 1103 if (var->unusable)
947 vmcs_write32(sf->limit, var->limit);
948 vmcs_write16(sf->selector, var->selector);
949 if (vcpu->rmode.active && var->s) {
950 /*
951 * Hack real-mode segments into vm86 compatibility.
952 */
953 if (var->base == 0xffff0000 && var->selector == 0xf000)
954 vmcs_writel(sf->base, 0xf0000);
955 ar = 0xf3;
956 } else if (var->unusable)
957 ar = 1 << 16; 1104 ar = 1 << 16;
958 else { 1105 else {
959 ar = var->type & 15; 1106 ar = var->type & 15;
@@ -967,6 +1114,35 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
967 } 1114 }
968 if (ar == 0) /* a 0 value means unusable */ 1115 if (ar == 0) /* a 0 value means unusable */
969 ar = AR_UNUSABLE_MASK; 1116 ar = AR_UNUSABLE_MASK;
1117
1118 return ar;
1119}
1120
1121static void vmx_set_segment(struct kvm_vcpu *vcpu,
1122 struct kvm_segment *var, int seg)
1123{
1124 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1125 u32 ar;
1126
1127 if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
1128 vcpu->rmode.tr.selector = var->selector;
1129 vcpu->rmode.tr.base = var->base;
1130 vcpu->rmode.tr.limit = var->limit;
1131 vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
1132 return;
1133 }
1134 vmcs_writel(sf->base, var->base);
1135 vmcs_write32(sf->limit, var->limit);
1136 vmcs_write16(sf->selector, var->selector);
1137 if (vcpu->rmode.active && var->s) {
1138 /*
1139 * Hack real-mode segments into vm86 compatibility.
1140 */
1141 if (var->base == 0xffff0000 && var->selector == 0xf000)
1142 vmcs_writel(sf->base, 0xf0000);
1143 ar = 0xf3;
1144 } else
1145 ar = vmx_segment_access_rights(var);
970 vmcs_write32(sf->ar_bytes, ar); 1146 vmcs_write32(sf->ar_bytes, ar);
971} 1147}
972 1148
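The new vmx_segment_access_rights() helper packs the kvm_segment fields into the VMX access-rights word once, so the normal path and the cached real-mode TR path share it. The bit positions below assume the standard VMX segment AR layout (type in bits 0-3, S=4, DPL=5-6, P=7, AVL=12, L=13, D/B=14, G=15, unusable=16); this little userspace check is illustrative, not kernel code:

#include <stdint.h>
#include <stdio.h>

struct seg {
	uint8_t type, s, dpl, present, avl, l, db, g, unusable;
};

/* Same packing as vmx_segment_access_rights(), minus the kernel types. */
static uint32_t access_rights(const struct seg *v)
{
	if (v->unusable)
		return 1u << 16;
	return (v->type & 15) | (uint32_t)(v->s & 1) << 4 |
	       (uint32_t)(v->dpl & 3) << 5 | (uint32_t)(v->present & 1) << 7 |
	       (uint32_t)(v->avl & 1) << 12 | (uint32_t)(v->l & 1) << 13 |
	       (uint32_t)(v->db & 1) << 14 | (uint32_t)(v->g & 1) << 15;
}

int main(void)
{
	/* Flat 32-bit read/write data segment: expect the familiar 0xc093. */
	struct seg data32 = { .type = 3, .s = 1, .present = 1, .db = 1, .g = 1 };

	printf("AR = %#x\n", (unsigned int)access_rights(&data32));
	return 0;
}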
@@ -1018,16 +1194,16 @@ static int init_rmode_tss(struct kvm* kvm)
1018 } 1194 }
1019 1195
1020 page = kmap_atomic(p1, KM_USER0); 1196 page = kmap_atomic(p1, KM_USER0);
1021 memset(page, 0, PAGE_SIZE); 1197 clear_page(page);
1022 *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 1198 *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
1023 kunmap_atomic(page, KM_USER0); 1199 kunmap_atomic(page, KM_USER0);
1024 1200
1025 page = kmap_atomic(p2, KM_USER0); 1201 page = kmap_atomic(p2, KM_USER0);
1026 memset(page, 0, PAGE_SIZE); 1202 clear_page(page);
1027 kunmap_atomic(page, KM_USER0); 1203 kunmap_atomic(page, KM_USER0);
1028 1204
1029 page = kmap_atomic(p3, KM_USER0); 1205 page = kmap_atomic(p3, KM_USER0);
1030 memset(page, 0, PAGE_SIZE); 1206 clear_page(page);
1031 *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0; 1207 *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
1032 kunmap_atomic(page, KM_USER0); 1208 kunmap_atomic(page, KM_USER0);
1033 1209
@@ -1066,7 +1242,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1066 struct descriptor_table dt; 1242 struct descriptor_table dt;
1067 int i; 1243 int i;
1068 int ret = 0; 1244 int ret = 0;
1069 extern asmlinkage void kvm_vmx_return(void); 1245 unsigned long kvm_vmx_return;
1070 1246
1071 if (!init_rmode_tss(vcpu->kvm)) { 1247 if (!init_rmode_tss(vcpu->kvm)) {
1072 ret = -ENOMEM; 1248 ret = -ENOMEM;
@@ -1076,9 +1252,9 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1076 memset(vcpu->regs, 0, sizeof(vcpu->regs)); 1252 memset(vcpu->regs, 0, sizeof(vcpu->regs));
1077 vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val(); 1253 vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
1078 vcpu->cr8 = 0; 1254 vcpu->cr8 = 0;
1079 vcpu->apic_base = 0xfee00000 | 1255 vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1080 /*for vcpu 0*/ MSR_IA32_APICBASE_BSP | 1256 if (vcpu == &vcpu->kvm->vcpus[0])
1081 MSR_IA32_APICBASE_ENABLE; 1257 vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
1082 1258
1083 fx_init(vcpu); 1259 fx_init(vcpu);
1084 1260
@@ -1129,8 +1305,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1129 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); 1305 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
1130 1306
1131 /* I/O */ 1307 /* I/O */
1132 vmcs_write64(IO_BITMAP_A, 0); 1308 vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
1133 vmcs_write64(IO_BITMAP_B, 0); 1309 vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
1134 1310
1135 guest_write_tsc(0); 1311 guest_write_tsc(0);
1136 1312
@@ -1150,12 +1326,11 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1150 CPU_BASED_HLT_EXITING /* 20.6.2 */ 1326 CPU_BASED_HLT_EXITING /* 20.6.2 */
1151 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */ 1327 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
1152 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */ 1328 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
1153 | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */ 1329 | CPU_BASED_ACTIVATE_IO_BITMAP /* 20.6.2 */
1154 | CPU_BASED_MOV_DR_EXITING 1330 | CPU_BASED_MOV_DR_EXITING
1155 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */ 1331 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
1156 ); 1332 );
1157 1333
1158 vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
1159 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 1334 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
1160 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 1335 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
1161 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ 1336 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
@@ -1185,8 +1360,11 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1185 get_idt(&dt); 1360 get_idt(&dt);
1186 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ 1361 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
1187 1362
1188 1363 asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
1189 vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */ 1364 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
1365 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1366 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
1367 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
1190 1368
1191 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk); 1369 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
1192 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs); 1370 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
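Replacing the exported kvm_vmx_return symbol with the local label .Lkvm_vmx_return means the host RIP has to be fetched through an asm immediate instead of an ordinary C address-of. A stripped-down illustration of the same trick, x86 only; build with gcc -no-pie, since like the kernel it relies on an absolute (non-PIC) relocation:

#include <stdio.h>

int main(void)
{
	unsigned long addr;

	/* Load the address of a purely local assembler label into a C
	 * variable, the way vmx_vcpu_setup() now loads .Lkvm_vmx_return
	 * into HOST_RIP. */
	asm ("mov $.Llocal_target, %0 \n\t"
	     "jmp .Lskip \n"
	     ".Llocal_target: \n\t"
	     "nop \n"
	     ".Lskip:"
	     : "=r"(addr));

	printf("label address: %#lx\n", addr);
	return 0;
}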
@@ -1210,10 +1388,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1210 vcpu->host_msrs[j].reserved = 0; 1388 vcpu->host_msrs[j].reserved = 0;
1211 vcpu->host_msrs[j].data = data; 1389 vcpu->host_msrs[j].data = data;
1212 vcpu->guest_msrs[j] = vcpu->host_msrs[j]; 1390 vcpu->guest_msrs[j] = vcpu->host_msrs[j];
1213#ifdef CONFIG_X86_64
1214 if (index == MSR_KERNEL_GS_BASE)
1215 msr_offset_kernel_gs_base = j;
1216#endif
1217 ++vcpu->nmsrs; 1391 ++vcpu->nmsrs;
1218 } 1392 }
1219 1393
@@ -1241,6 +1415,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1241#ifdef CONFIG_X86_64 1415#ifdef CONFIG_X86_64
1242 vmx_set_efer(vcpu, 0); 1416 vmx_set_efer(vcpu, 0);
1243#endif 1417#endif
1418 vmx_fpu_activate(vcpu);
1419 update_exception_bitmap(vcpu);
1244 1420
1245 return 0; 1421 return 0;
1246 1422
@@ -1365,7 +1541,11 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1365 if (!vcpu->rmode.active) 1541 if (!vcpu->rmode.active)
1366 return 0; 1542 return 0;
1367 1543
1368 if (vec == GP_VECTOR && err_code == 0) 1544 /*
1545 * Instructions with the address-size override prefix (opcode 0x67)
1546 * cause a #SS fault with error code 0 in VM86 mode.
1547 */
1548 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
1369 if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE) 1549 if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
1370 return 1; 1550 return 1;
1371 return 0; 1551 return 0;
@@ -1400,10 +1580,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1400 } 1580 }
1401 1581
1402 if (is_no_device(intr_info)) { 1582 if (is_no_device(intr_info)) {
1403 vcpu->fpu_active = 1; 1583 vmx_fpu_activate(vcpu);
1404 vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
1405 if (!(vcpu->cr0 & CR0_TS_MASK))
1406 vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
1407 return 1; 1584 return 1;
1408 } 1585 }
1409 1586
@@ -1445,8 +1622,13 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1445 1622
1446 if (vcpu->rmode.active && 1623 if (vcpu->rmode.active &&
1447 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, 1624 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
1448 error_code)) 1625 error_code)) {
1626 if (vcpu->halt_request) {
1627 vcpu->halt_request = 0;
1628 return kvm_emulate_halt(vcpu);
1629 }
1449 return 1; 1630 return 1;
1631 }
1450 1632
1451 if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) { 1633 if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
1452 kvm_run->exit_reason = KVM_EXIT_DEBUG; 1634 kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -1595,11 +1777,10 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1595 break; 1777 break;
1596 case 2: /* clts */ 1778 case 2: /* clts */
1597 vcpu_load_rsp_rip(vcpu); 1779 vcpu_load_rsp_rip(vcpu);
1598 vcpu->fpu_active = 1; 1780 vmx_fpu_deactivate(vcpu);
1599 vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
1600 vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
1601 vcpu->cr0 &= ~CR0_TS_MASK; 1781 vcpu->cr0 &= ~CR0_TS_MASK;
1602 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0); 1782 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
1783 vmx_fpu_activate(vcpu);
1603 skip_emulated_instruction(vcpu); 1784 skip_emulated_instruction(vcpu);
1604 return 1; 1785 return 1;
1605 case 1: /*mov from cr*/ 1786 case 1: /*mov from cr*/
@@ -1734,12 +1915,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1734static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1915static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1735{ 1916{
1736 skip_emulated_instruction(vcpu); 1917 skip_emulated_instruction(vcpu);
1737 if (vcpu->irq_summary) 1918 return kvm_emulate_halt(vcpu);
1738 return 1;
1739
1740 kvm_run->exit_reason = KVM_EXIT_HLT;
1741 ++vcpu->stat.halt_exits;
1742 return 0;
1743} 1919}
1744 1920
1745static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1921static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
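handle_halt() and the new halt_request path in handle_exception() both funnel into kvm_emulate_halt(), which lives in kvm_main.c and is not part of this diff. Judging purely from the open-coded logic deleted here, it presumably amounts to the sketch below; the vcpu->run field name is an assumption, since the old code used the kvm_run argument directly:

/* Reconstruction, not the actual kvm_main.c implementation. */
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (vcpu->irq_summary)
		return 1;			/* interrupt pending: keep the vcpu running */
	vcpu->run->exit_reason = KVM_EXIT_HLT;	/* assumed field name */
	return 0;				/* drop back to userspace */
}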
@@ -1770,7 +1946,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1770}; 1946};
1771 1947
1772static const int kvm_vmx_max_exit_handlers = 1948static const int kvm_vmx_max_exit_handlers =
1773 sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers); 1949 ARRAY_SIZE(kvm_vmx_exit_handlers);
1774 1950
1775/* 1951/*
1776 * The guest has exited. See if we can fix it or if we need userspace 1952 * The guest has exited. See if we can fix it or if we need userspace
@@ -1810,61 +1986,44 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1810 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)); 1986 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1811} 1987}
1812 1988
1989static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1990{
1991}
1992
1813static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1993static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1814{ 1994{
1815 u8 fail; 1995 u8 fail;
1816 u16 fs_sel, gs_sel, ldt_sel;
1817 int fs_gs_ldt_reload_needed;
1818 int r; 1996 int r;
1819 1997
1820again: 1998preempted:
1821 /* 1999 if (vcpu->guest_debug.enabled)
1822 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not 2000 kvm_guest_debug_pre(vcpu);
1823 * allow segment selectors with cpl > 0 or ti == 1.
1824 */
1825 fs_sel = read_fs();
1826 gs_sel = read_gs();
1827 ldt_sel = read_ldt();
1828 fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
1829 if (!fs_gs_ldt_reload_needed) {
1830 vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1831 vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1832 } else {
1833 vmcs_write16(HOST_FS_SELECTOR, 0);
1834 vmcs_write16(HOST_GS_SELECTOR, 0);
1835 }
1836
1837#ifdef CONFIG_X86_64
1838 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1839 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1840#else
1841 vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
1842 vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
1843#endif
1844 2001
2002again:
1845 if (!vcpu->mmio_read_completed) 2003 if (!vcpu->mmio_read_completed)
1846 do_interrupt_requests(vcpu, kvm_run); 2004 do_interrupt_requests(vcpu, kvm_run);
1847 2005
1848 if (vcpu->guest_debug.enabled) 2006 vmx_save_host_state(vcpu);
1849 kvm_guest_debug_pre(vcpu);
1850
1851 kvm_load_guest_fpu(vcpu); 2007 kvm_load_guest_fpu(vcpu);
1852 2008
2009 r = kvm_mmu_reload(vcpu);
2010 if (unlikely(r))
2011 goto out;
2012
1853 /* 2013 /*
1854 * Loading guest fpu may have cleared host cr0.ts 2014 * Loading guest fpu may have cleared host cr0.ts
1855 */ 2015 */
1856 vmcs_writel(HOST_CR0, read_cr0()); 2016 vmcs_writel(HOST_CR0, read_cr0());
1857 2017
1858#ifdef CONFIG_X86_64 2018 local_irq_disable();
1859 if (is_long_mode(vcpu)) { 2019
1860 save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1); 2020 vcpu->guest_mode = 1;
1861 load_msrs(vcpu->guest_msrs, NR_BAD_MSRS); 2021 if (vcpu->requests)
1862 } 2022 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
1863#endif 2023 vmx_flush_tlb(vcpu);
1864 2024
1865 asm ( 2025 asm (
1866 /* Store host registers */ 2026 /* Store host registers */
1867 "pushf \n\t"
1868#ifdef CONFIG_X86_64 2027#ifdef CONFIG_X86_64
1869 "push %%rax; push %%rbx; push %%rdx;" 2028 "push %%rax; push %%rbx; push %%rdx;"
1870 "push %%rsi; push %%rdi; push %%rbp;" 2029 "push %%rsi; push %%rdi; push %%rbp;"
@@ -1909,12 +2068,11 @@ again:
1909 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */ 2068 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
1910#endif 2069#endif
1911 /* Enter guest mode */ 2070 /* Enter guest mode */
1912 "jne launched \n\t" 2071 "jne .Llaunched \n\t"
1913 ASM_VMX_VMLAUNCH "\n\t" 2072 ASM_VMX_VMLAUNCH "\n\t"
1914 "jmp kvm_vmx_return \n\t" 2073 "jmp .Lkvm_vmx_return \n\t"
1915 "launched: " ASM_VMX_VMRESUME "\n\t" 2074 ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
1916 ".globl kvm_vmx_return \n\t" 2075 ".Lkvm_vmx_return: "
1917 "kvm_vmx_return: "
1918 /* Save guest registers, load host registers, keep flags */ 2076 /* Save guest registers, load host registers, keep flags */
1919#ifdef CONFIG_X86_64 2077#ifdef CONFIG_X86_64
1920 "xchg %3, (%%rsp) \n\t" 2078 "xchg %3, (%%rsp) \n\t"
@@ -1957,7 +2115,6 @@ again:
1957 "pop %%ecx; popa \n\t" 2115 "pop %%ecx; popa \n\t"
1958#endif 2116#endif
1959 "setbe %0 \n\t" 2117 "setbe %0 \n\t"
1960 "popf \n\t"
1961 : "=q" (fail) 2118 : "=q" (fail)
1962 : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP), 2119 : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
1963 "c"(vcpu), 2120 "c"(vcpu),
@@ -1981,84 +2138,61 @@ again:
1981 [cr2]"i"(offsetof(struct kvm_vcpu, cr2)) 2138 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
1982 : "cc", "memory" ); 2139 : "cc", "memory" );
1983 2140
1984 /* 2141 vcpu->guest_mode = 0;
1985 * Reload segment selectors ASAP. (it's needed for a functional 2142 local_irq_enable();
1986 * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
1987 * relies on having 0 in %gs for the CPU PDA to work.)
1988 */
1989 if (fs_gs_ldt_reload_needed) {
1990 load_ldt(ldt_sel);
1991 load_fs(fs_sel);
1992 /*
1993 * If we have to reload gs, we must take care to
1994 * preserve our gs base.
1995 */
1996 local_irq_disable();
1997 load_gs(gs_sel);
1998#ifdef CONFIG_X86_64
1999 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
2000#endif
2001 local_irq_enable();
2002 2143
2003 reload_tss();
2004 }
2005 ++vcpu->stat.exits; 2144 ++vcpu->stat.exits;
2006 2145
2007#ifdef CONFIG_X86_64
2008 if (is_long_mode(vcpu)) {
2009 save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
2010 load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
2011 }
2012#endif
2013
2014 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; 2146 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
2015 2147
2016 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 2148 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
2017 2149
2018 if (fail) { 2150 if (unlikely(fail)) {
2019 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; 2151 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2020 kvm_run->fail_entry.hardware_entry_failure_reason 2152 kvm_run->fail_entry.hardware_entry_failure_reason
2021 = vmcs_read32(VM_INSTRUCTION_ERROR); 2153 = vmcs_read32(VM_INSTRUCTION_ERROR);
2022 r = 0; 2154 r = 0;
2023 } else { 2155 goto out;
2024 /* 2156 }
2025 * Profile KVM exit RIPs: 2157 /*
2026 */ 2158 * Profile KVM exit RIPs:
2027 if (unlikely(prof_on == KVM_PROFILING)) 2159 */
2028 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP)); 2160 if (unlikely(prof_on == KVM_PROFILING))
2029 2161 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
2030 vcpu->launched = 1; 2162
2031 r = kvm_handle_exit(kvm_run, vcpu); 2163 vcpu->launched = 1;
2032 if (r > 0) { 2164 r = kvm_handle_exit(kvm_run, vcpu);
2033 /* Give the scheduler a chance to reschedule. */ 2165 if (r > 0) {
2034 if (signal_pending(current)) { 2166 /* Give the scheduler a chance to reschedule. */
2035 ++vcpu->stat.signal_exits; 2167 if (signal_pending(current)) {
2036 post_kvm_run_save(vcpu, kvm_run); 2168 r = -EINTR;
2037 kvm_run->exit_reason = KVM_EXIT_INTR; 2169 kvm_run->exit_reason = KVM_EXIT_INTR;
2038 return -EINTR; 2170 ++vcpu->stat.signal_exits;
2039 } 2171 goto out;
2040 2172 }
2041 if (dm_request_for_irq_injection(vcpu, kvm_run)) { 2173
2042 ++vcpu->stat.request_irq_exits; 2174 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2043 post_kvm_run_save(vcpu, kvm_run); 2175 r = -EINTR;
2044 kvm_run->exit_reason = KVM_EXIT_INTR; 2176 kvm_run->exit_reason = KVM_EXIT_INTR;
2045 return -EINTR; 2177 ++vcpu->stat.request_irq_exits;
2046 } 2178 goto out;
2047 2179 }
2048 kvm_resched(vcpu); 2180 if (!need_resched()) {
2181 ++vcpu->stat.light_exits;
2049 goto again; 2182 goto again;
2050 } 2183 }
2051 } 2184 }
2052 2185
2186out:
2187 if (r > 0) {
2188 kvm_resched(vcpu);
2189 goto preempted;
2190 }
2191
2053 post_kvm_run_save(vcpu, kvm_run); 2192 post_kvm_run_save(vcpu, kvm_run);
2054 return r; 2193 return r;
2055} 2194}
2056 2195
2057static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
2058{
2059 vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
2060}
2061
2062static void vmx_inject_page_fault(struct kvm_vcpu *vcpu, 2196static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
2063 unsigned long addr, 2197 unsigned long addr,
2064 u32 err_code) 2198 u32 err_code)
@@ -2122,7 +2256,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
2122 vmcs_clear(vmcs); 2256 vmcs_clear(vmcs);
2123 vcpu->vmcs = vmcs; 2257 vcpu->vmcs = vmcs;
2124 vcpu->launched = 0; 2258 vcpu->launched = 0;
2125 vcpu->fpu_active = 1;
2126 2259
2127 return 0; 2260 return 0;
2128 2261
@@ -2188,11 +2321,50 @@ static struct kvm_arch_ops vmx_arch_ops = {
2188 2321
2189static int __init vmx_init(void) 2322static int __init vmx_init(void)
2190{ 2323{
2191 return kvm_init_arch(&vmx_arch_ops, THIS_MODULE); 2324 void *iova;
2325 int r;
2326
2327 vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2328 if (!vmx_io_bitmap_a)
2329 return -ENOMEM;
2330
2331 vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2332 if (!vmx_io_bitmap_b) {
2333 r = -ENOMEM;
2334 goto out;
2335 }
2336
2337 /*
2338 * Allow direct access to the PC debug port (it is often used for I/O
2339 * delays, but the vmexits simply slow things down).
2340 */
2341 iova = kmap(vmx_io_bitmap_a);
2342 memset(iova, 0xff, PAGE_SIZE);
2343 clear_bit(0x80, iova);
2344 kunmap(vmx_io_bitmap_a);
2345
2346 iova = kmap(vmx_io_bitmap_b);
2347 memset(iova, 0xff, PAGE_SIZE);
2348 kunmap(vmx_io_bitmap_b);
2349
2350 r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
2351 if (r)
2352 goto out1;
2353
2354 return 0;
2355
2356out1:
2357 __free_page(vmx_io_bitmap_b);
2358out:
2359 __free_page(vmx_io_bitmap_a);
2360 return r;
2192} 2361}
2193 2362
2194static void __exit vmx_exit(void) 2363static void __exit vmx_exit(void)
2195{ 2364{
2365 __free_page(vmx_io_bitmap_b);
2366 __free_page(vmx_io_bitmap_a);
2367
2196 kvm_exit_arch(); 2368 kvm_exit_arch();
2197} 2369}
2198 2370
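vmx_init() now backs IO_BITMAP_A/B with real pages: every set bit means "exit on this port", and the single cleared bit lets the guest hit the PC debug port 0x80 directly. On little-endian x86 the byte/bit arithmetic that the kernel's clear_bit() performs boils down to the following standalone illustration (not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IO_BITMAP_BYTES 4096	/* one page: bitmap A covers ports 0x0000-0x7fff */

int main(void)
{
	uint8_t bitmap_a[IO_BITMAP_BYTES];
	unsigned int port = 0x80;

	/* Trap everything... */
	memset(bitmap_a, 0xff, sizeof(bitmap_a));
	/* ...then punch a hole for the debug port, as clear_bit(0x80, iova) does. */
	bitmap_a[port / 8] &= (uint8_t)~(1u << (port % 8));

	printf("port %#x -> byte %u bit %u: %s\n", port, port / 8, port % 8,
	       (bitmap_a[port / 8] >> (port % 8)) & 1 ? "vmexit" : "direct access");
	return 0;
}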
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 7ade09086aa5..f60012d62610 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -98,8 +98,11 @@ static u8 opcode_table[256] = {
98 0, 0, 0, 0, 98 0, 0, 0, 0,
99 /* 0x40 - 0x4F */ 99 /* 0x40 - 0x4F */
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 /* 0x50 - 0x5F */ 101 /* 0x50 - 0x57 */
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 102 0, 0, 0, 0, 0, 0, 0, 0,
103 /* 0x58 - 0x5F */
104 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
105 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
103 /* 0x60 - 0x6F */ 106 /* 0x60 - 0x6F */
104 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ , 107 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -128,9 +131,9 @@ static u8 opcode_table[256] = {
128 /* 0xB0 - 0xBF */ 131 /* 0xB0 - 0xBF */
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 /* 0xC0 - 0xC7 */ 133 /* 0xC0 - 0xC7 */
131 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 0, 0, 134 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
132 0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov, 135 0, ImplicitOps, 0, 0,
133 DstMem | SrcImm | ModRM | Mov, 136 ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
134 /* 0xC8 - 0xCF */ 137 /* 0xC8 - 0xCF */
135 0, 0, 0, 0, 0, 0, 0, 0, 138 0, 0, 0, 0, 0, 0, 0, 0,
136 /* 0xD0 - 0xD7 */ 139 /* 0xD0 - 0xD7 */
@@ -143,7 +146,8 @@ static u8 opcode_table[256] = {
143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
144 /* 0xF0 - 0xF7 */ 147 /* 0xF0 - 0xF7 */
145 0, 0, 0, 0, 148 0, 0, 0, 0,
146 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 149 ImplicitOps, 0,
150 ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
147 /* 0xF8 - 0xFF */ 151 /* 0xF8 - 0xFF */
148 0, 0, 0, 0, 152 0, 0, 0, 0,
149 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM 153 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
@@ -152,7 +156,7 @@ static u8 opcode_table[256] = {
152static u16 twobyte_table[256] = { 156static u16 twobyte_table[256] = {
153 /* 0x00 - 0x0F */ 157 /* 0x00 - 0x0F */
154 0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0, 158 0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
155 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 159 0, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
156 /* 0x10 - 0x1F */ 160 /* 0x10 - 0x1F */
157 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 161 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
158 /* 0x20 - 0x2F */ 162 /* 0x20 - 0x2F */
@@ -481,6 +485,7 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
481 int mode = ctxt->mode; 485 int mode = ctxt->mode;
482 unsigned long modrm_ea; 486 unsigned long modrm_ea;
483 int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0; 487 int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0;
488 int no_wb = 0;
484 489
485 /* Shadow copy of register state. Committed on successful emulation. */ 490 /* Shadow copy of register state. Committed on successful emulation. */
486 unsigned long _regs[NR_VCPU_REGS]; 491 unsigned long _regs[NR_VCPU_REGS];
@@ -1047,7 +1052,7 @@ done_prefixes:
1047 _regs[VCPU_REGS_RSP]), 1052 _regs[VCPU_REGS_RSP]),
1048 &dst.val, dst.bytes, ctxt)) != 0) 1053 &dst.val, dst.bytes, ctxt)) != 0)
1049 goto done; 1054 goto done;
1050 dst.val = dst.orig_val; /* skanky: disable writeback */ 1055 no_wb = 1;
1051 break; 1056 break;
1052 default: 1057 default:
1053 goto cannot_emulate; 1058 goto cannot_emulate;
@@ -1056,7 +1061,7 @@ done_prefixes:
1056 } 1061 }
1057 1062
1058writeback: 1063writeback:
1059 if ((d & Mov) || (dst.orig_val != dst.val)) { 1064 if (!no_wb) {
1060 switch (dst.type) { 1065 switch (dst.type) {
1061 case OP_REG: 1066 case OP_REG:
1062 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ 1067 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
@@ -1149,6 +1154,23 @@ special_insn:
1149 case 0xae ... 0xaf: /* scas */ 1154 case 0xae ... 0xaf: /* scas */
1150 DPRINTF("Urk! I don't handle SCAS.\n"); 1155 DPRINTF("Urk! I don't handle SCAS.\n");
1151 goto cannot_emulate; 1156 goto cannot_emulate;
1157 case 0xf4: /* hlt */
1158 ctxt->vcpu->halt_request = 1;
1159 goto done;
1160 case 0xc3: /* ret */
1161 dst.ptr = &_eip;
1162 goto pop_instruction;
1163 case 0x58 ... 0x5f: /* pop reg */
1164 dst.ptr = (unsigned long *)&_regs[b & 0x7];
1165
1166pop_instruction:
1167 if ((rc = ops->read_std(register_address(ctxt->ss_base,
1168 _regs[VCPU_REGS_RSP]), dst.ptr, op_bytes, ctxt)) != 0)
1169 goto done;
1170
1171 register_address_increment(_regs[VCPU_REGS_RSP], op_bytes);
1172 no_wb = 1; /* Disable writeback. */
1173 break;
1152 } 1174 }
1153 goto writeback; 1175 goto writeback;
1154 1176
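The shared pop_instruction path added above is the whole story for both "pop reg" (0x58-0x5f) and "ret" (0xc3): read op_bytes from the guest stack into the destination, then advance RSP. A toy userspace model of that sequence; the names and the flat little-endian stack are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t stack[64];	/* stand-in for guest stack memory */
static uint64_t rsp = 48;

/* Mirrors pop_instruction: read_std() from [ss:rsp], then bump rsp. */
static void emulate_pop(uint64_t *dst, unsigned int op_bytes)
{
	uint64_t val = 0;

	memcpy(&val, &stack[rsp], op_bytes);
	rsp += op_bytes;
	*dst = val;
}

int main(void)
{
	uint64_t rbx = 0, rip = 0;
	uint64_t pushed = 0x12345678, retaddr = 0xdeadbeef;

	memcpy(&stack[48], &pushed, 8);
	memcpy(&stack[56], &retaddr, 8);

	emulate_pop(&rbx, 8);	/* 0x5b: pop %rbx */
	emulate_pop(&rip, 8);	/* 0xc3: ret pops straight into RIP */

	printf("rbx=%#llx rip=%#llx rsp=%llu\n",
	       (unsigned long long)rbx, (unsigned long long)rip,
	       (unsigned long long)rsp);
	return 0;
}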
@@ -1302,8 +1324,10 @@ twobyte_insn:
1302 1324
1303twobyte_special_insn: 1325twobyte_special_insn:
1304 /* Disable writeback. */ 1326 /* Disable writeback. */
1305 dst.orig_val = dst.val; 1327 no_wb = 1;
1306 switch (b) { 1328 switch (b) {
1329 case 0x09: /* wbinvd */
1330 break;
1307 case 0x0d: /* GrpP (prefetch) */ 1331 case 0x0d: /* GrpP (prefetch) */
1308 case 0x18: /* Grp16 (prefetch/nop) */ 1332 case 0x18: /* Grp16 (prefetch/nop) */
1309 break; 1333 break;
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index bd55e6ab99fc..f25685b9b7cf 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -335,6 +335,7 @@ static int monitor_task(void *arg)
335{ 335{
336 struct thermostat* th = arg; 336 struct thermostat* th = arg;
337 337
338 set_freezable();
338 while(!kthread_should_stop()) { 339 while(!kthread_should_stop()) {
339 try_to_freeze(); 340 try_to_freeze();
340 msleep_interruptible(2000); 341 msleep_interruptible(2000);
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 4fcb245ba184..e18d265d5d33 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -92,6 +92,7 @@ static int wf_thread_func(void *data)
92 92
93 DBG("wf: thread started\n"); 93 DBG("wf: thread started\n");
94 94
95 set_freezable();
95 while(!kthread_should_stop()) { 96 while(!kthread_should_stop()) {
96 if (time_after_eq(jiffies, next)) { 97 if (time_after_eq(jiffies, next)) {
97 wf_notify(WF_EVENT_TICK, NULL); 98 wf_notify(WF_EVENT_TICK, NULL);
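These set_freezable() additions (here and in the media drivers further down) all follow one pattern: kernel threads are no longer freezable by default, so a thread that should park itself across suspend must opt in explicitly and keep polling try_to_freeze() in its loop. The shape of such a thread, sketched for a hypothetical driver:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

/* Hypothetical monitor thread showing the opt-in freezer pattern. */
static int example_monitor_thread(void *arg)
{
	set_freezable();		/* opt in to the freezer */
	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here during suspend */
		msleep_interruptible(2000);
		/* ... poll hardware, update driver state ... */
	}
	return 0;
}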
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 64bf3a81db93..531d4d17d011 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -2,19 +2,17 @@
2# Block device driver configuration 2# Block device driver configuration
3# 3#
4 4
5if BLOCK 5menuconfig MD
6
7menu "Multi-device support (RAID and LVM)"
8
9config MD
10 bool "Multiple devices driver support (RAID and LVM)" 6 bool "Multiple devices driver support (RAID and LVM)"
7 depends on BLOCK
11 help 8 help
12 Support multiple physical spindles through a single logical device. 9 Support multiple physical spindles through a single logical device.
13 Required for RAID and logical volume management. 10 Required for RAID and logical volume management.
14 11
12if MD
13
15config BLK_DEV_MD 14config BLK_DEV_MD
16 tristate "RAID support" 15 tristate "RAID support"
17 depends on MD
18 ---help--- 16 ---help---
19 This driver lets you combine several hard disk partitions into one 17 This driver lets you combine several hard disk partitions into one
20 logical block device. This can be used to simply append one 18 logical block device. This can be used to simply append one
@@ -191,7 +189,6 @@ config MD_FAULTY
191 189
192config BLK_DEV_DM 190config BLK_DEV_DM
193 tristate "Device mapper support" 191 tristate "Device mapper support"
194 depends on MD
195 ---help--- 192 ---help---
196 Device-mapper is a low level volume manager. It works by allowing 193 Device-mapper is a low level volume manager. It works by allowing
197 people to specify mappings for ranges of logical sectors. Various 194 people to specify mappings for ranges of logical sectors. Various
@@ -279,6 +276,4 @@ config DM_DELAY
279 276
280 If unsure, say N. 277 If unsure, say N.
281 278
282endmenu 279endif # MD
283
284endif
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9620d452d030..927cb34c4805 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -268,6 +268,31 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
268 if (page->index == bitmap->file_pages-1) 268 if (page->index == bitmap->file_pages-1)
269 size = roundup(bitmap->last_page_size, 269 size = roundup(bitmap->last_page_size,
270 bdev_hardsect_size(rdev->bdev)); 270 bdev_hardsect_size(rdev->bdev));
271 /* Just make sure we aren't corrupting data or
272 * metadata
273 */
274 if (bitmap->offset < 0) {
275 /* DATA BITMAP METADATA */
276 if (bitmap->offset
277 + page->index * (PAGE_SIZE/512)
278 + size/512 > 0)
279 /* bitmap runs in to metadata */
280 return -EINVAL;
281 if (rdev->data_offset + mddev->size*2
282 > rdev->sb_offset*2 + bitmap->offset)
283 /* data runs in to bitmap */
284 return -EINVAL;
285 } else if (rdev->sb_offset*2 < rdev->data_offset) {
286 /* METADATA BITMAP DATA */
287 if (rdev->sb_offset*2
288 + bitmap->offset
289 + page->index*(PAGE_SIZE/512) + size/512
290 > rdev->data_offset)
291 /* bitmap runs in to data */
292 return -EINVAL;
293 } else {
294 /* DATA METADATA BITMAP - no problems */
295 }
271 md_super_write(mddev, rdev, 296 md_super_write(mddev, rdev,
272 (rdev->sb_offset<<1) + bitmap->offset 297 (rdev->sb_offset<<1) + bitmap->offset
273 + page->index * (PAGE_SIZE/512), 298 + page->index * (PAGE_SIZE/512),
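The three on-disk layouts being checked above are easier to see with concrete sector numbers. Everything below is in 512-byte sectors except sb_offset, which md keeps in 1K units (hence the *2, matching the sb_offset<<1 used when the write is issued); the sample figures are made up:

#include <stdio.h>

struct layout {
	long long sb_offset_kb;		/* rdev->sb_offset, 1K units */
	long long data_offset;		/* rdev->data_offset, sectors */
	long long bitmap_offset;	/* bitmap->offset, sectors, relative to sb */
	long long data_sectors;		/* mddev->size*2 */
};

/* Re-statement of the new write_sb_page() checks for one bitmap page. */
static int bitmap_page_fits(const struct layout *l, long long index,
			    long long write_sectors)
{
	long long page_start = index * 8;	/* PAGE_SIZE/512 */

	if (l->bitmap_offset < 0) {
		/* DATA | BITMAP | METADATA */
		if (l->bitmap_offset + page_start + write_sectors > 0)
			return 0;	/* bitmap runs into metadata */
		if (l->data_offset + l->data_sectors >
		    l->sb_offset_kb * 2 + l->bitmap_offset)
			return 0;	/* data runs into bitmap */
	} else if (l->sb_offset_kb * 2 < l->data_offset) {
		/* METADATA | BITMAP | DATA */
		if (l->sb_offset_kb * 2 + l->bitmap_offset +
		    page_start + write_sectors > l->data_offset)
			return 0;	/* bitmap runs into data */
	}
	/* DATA | METADATA | BITMAP: nothing to check */
	return 1;
}

int main(void)
{
	struct layout l = { .sb_offset_kb = 8, .data_offset = 272,
			    .bitmap_offset = 8, .data_sectors = 200000 };

	printf("bitmap page 0: %s\n",
	       bitmap_page_fits(&l, 0, 8) ? "fits" : "would overlap");
	return 0;
}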
@@ -280,32 +305,38 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
280 return 0; 305 return 0;
281} 306}
282 307
308static void bitmap_file_kick(struct bitmap *bitmap);
283/* 309/*
284 * write out a page to a file 310 * write out a page to a file
285 */ 311 */
286static int write_page(struct bitmap *bitmap, struct page *page, int wait) 312static void write_page(struct bitmap *bitmap, struct page *page, int wait)
287{ 313{
288 struct buffer_head *bh; 314 struct buffer_head *bh;
289 315
290 if (bitmap->file == NULL) 316 if (bitmap->file == NULL) {
291 return write_sb_page(bitmap, page, wait); 317 switch (write_sb_page(bitmap, page, wait)) {
318 case -EINVAL:
319 bitmap->flags |= BITMAP_WRITE_ERROR;
320 }
321 } else {
292 322
293 bh = page_buffers(page); 323 bh = page_buffers(page);
294 324
295 while (bh && bh->b_blocknr) { 325 while (bh && bh->b_blocknr) {
296 atomic_inc(&bitmap->pending_writes); 326 atomic_inc(&bitmap->pending_writes);
297 set_buffer_locked(bh); 327 set_buffer_locked(bh);
298 set_buffer_mapped(bh); 328 set_buffer_mapped(bh);
299 submit_bh(WRITE, bh); 329 submit_bh(WRITE, bh);
300 bh = bh->b_this_page; 330 bh = bh->b_this_page;
301 } 331 }
302 332
303 if (wait) { 333 if (wait) {
304 wait_event(bitmap->write_wait, 334 wait_event(bitmap->write_wait,
305 atomic_read(&bitmap->pending_writes)==0); 335 atomic_read(&bitmap->pending_writes)==0);
306 return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0; 336 }
307 } 337 }
308 return 0; 338 if (bitmap->flags & BITMAP_WRITE_ERROR)
339 bitmap_file_kick(bitmap);
309} 340}
310 341
311static void end_bitmap_write(struct buffer_head *bh, int uptodate) 342static void end_bitmap_write(struct buffer_head *bh, int uptodate)
@@ -425,17 +456,17 @@ out:
425 */ 456 */
426 457
427/* update the event counter and sync the superblock to disk */ 458/* update the event counter and sync the superblock to disk */
428int bitmap_update_sb(struct bitmap *bitmap) 459void bitmap_update_sb(struct bitmap *bitmap)
429{ 460{
430 bitmap_super_t *sb; 461 bitmap_super_t *sb;
431 unsigned long flags; 462 unsigned long flags;
432 463
433 if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ 464 if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
434 return 0; 465 return;
435 spin_lock_irqsave(&bitmap->lock, flags); 466 spin_lock_irqsave(&bitmap->lock, flags);
436 if (!bitmap->sb_page) { /* no superblock */ 467 if (!bitmap->sb_page) { /* no superblock */
437 spin_unlock_irqrestore(&bitmap->lock, flags); 468 spin_unlock_irqrestore(&bitmap->lock, flags);
438 return 0; 469 return;
439 } 470 }
440 spin_unlock_irqrestore(&bitmap->lock, flags); 471 spin_unlock_irqrestore(&bitmap->lock, flags);
441 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); 472 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -443,7 +474,7 @@ int bitmap_update_sb(struct bitmap *bitmap)
443 if (!bitmap->mddev->degraded) 474 if (!bitmap->mddev->degraded)
444 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 475 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
445 kunmap_atomic(sb, KM_USER0); 476 kunmap_atomic(sb, KM_USER0);
446 return write_page(bitmap, bitmap->sb_page, 1); 477 write_page(bitmap, bitmap->sb_page, 1);
447} 478}
448 479
449/* print out the bitmap file superblock */ 480/* print out the bitmap file superblock */
@@ -572,20 +603,22 @@ enum bitmap_mask_op {
572 MASK_UNSET 603 MASK_UNSET
573}; 604};
574 605
575/* record the state of the bitmap in the superblock */ 606/* record the state of the bitmap in the superblock. Return the old value */
576static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, 607static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
577 enum bitmap_mask_op op) 608 enum bitmap_mask_op op)
578{ 609{
579 bitmap_super_t *sb; 610 bitmap_super_t *sb;
580 unsigned long flags; 611 unsigned long flags;
612 int old;
581 613
582 spin_lock_irqsave(&bitmap->lock, flags); 614 spin_lock_irqsave(&bitmap->lock, flags);
583 if (!bitmap->sb_page) { /* can't set the state */ 615 if (!bitmap->sb_page) { /* can't set the state */
584 spin_unlock_irqrestore(&bitmap->lock, flags); 616 spin_unlock_irqrestore(&bitmap->lock, flags);
585 return; 617 return 0;
586 } 618 }
587 spin_unlock_irqrestore(&bitmap->lock, flags); 619 spin_unlock_irqrestore(&bitmap->lock, flags);
588 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); 620 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
621 old = le32_to_cpu(sb->state) & bits;
589 switch (op) { 622 switch (op) {
590 case MASK_SET: sb->state |= cpu_to_le32(bits); 623 case MASK_SET: sb->state |= cpu_to_le32(bits);
591 break; 624 break;
@@ -594,6 +627,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
594 default: BUG(); 627 default: BUG();
595 } 628 }
596 kunmap_atomic(sb, KM_USER0); 629 kunmap_atomic(sb, KM_USER0);
630 return old;
597} 631}
598 632
599/* 633/*
@@ -687,18 +721,23 @@ static void bitmap_file_kick(struct bitmap *bitmap)
687{ 721{
688 char *path, *ptr = NULL; 722 char *path, *ptr = NULL;
689 723
690 bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET); 724 if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
691 bitmap_update_sb(bitmap); 725 bitmap_update_sb(bitmap);
692 726
693 if (bitmap->file) { 727 if (bitmap->file) {
694 path = kmalloc(PAGE_SIZE, GFP_KERNEL); 728 path = kmalloc(PAGE_SIZE, GFP_KERNEL);
695 if (path) 729 if (path)
696 ptr = file_path(bitmap->file, path, PAGE_SIZE); 730 ptr = file_path(bitmap->file, path, PAGE_SIZE);
697 731
698 printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n", 732 printk(KERN_ALERT
699 bmname(bitmap), ptr ? ptr : ""); 733 "%s: kicking failed bitmap file %s from array!\n",
734 bmname(bitmap), ptr ? ptr : "");
700 735
701 kfree(path); 736 kfree(path);
737 } else
738 printk(KERN_ALERT
739 "%s: disabling internal bitmap due to errors\n",
740 bmname(bitmap));
702 } 741 }
703 742
704 bitmap_file_put(bitmap); 743 bitmap_file_put(bitmap);
@@ -769,16 +808,15 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
769/* this gets called when the md device is ready to unplug its underlying 808/* this gets called when the md device is ready to unplug its underlying
770 * (slave) device queues -- before we let any writes go down, we need to 809 * (slave) device queues -- before we let any writes go down, we need to
771 * sync the dirty pages of the bitmap file to disk */ 810 * sync the dirty pages of the bitmap file to disk */
772int bitmap_unplug(struct bitmap *bitmap) 811void bitmap_unplug(struct bitmap *bitmap)
773{ 812{
774 unsigned long i, flags; 813 unsigned long i, flags;
775 int dirty, need_write; 814 int dirty, need_write;
776 struct page *page; 815 struct page *page;
777 int wait = 0; 816 int wait = 0;
778 int err;
779 817
780 if (!bitmap) 818 if (!bitmap)
781 return 0; 819 return;
782 820
783 /* look at each page to see if there are any set bits that need to be 821 /* look at each page to see if there are any set bits that need to be
784 * flushed out to disk */ 822 * flushed out to disk */
@@ -786,7 +824,7 @@ int bitmap_unplug(struct bitmap *bitmap)
786 spin_lock_irqsave(&bitmap->lock, flags); 824 spin_lock_irqsave(&bitmap->lock, flags);
787 if (!bitmap->filemap) { 825 if (!bitmap->filemap) {
788 spin_unlock_irqrestore(&bitmap->lock, flags); 826 spin_unlock_irqrestore(&bitmap->lock, flags);
789 return 0; 827 return;
790 } 828 }
791 page = bitmap->filemap[i]; 829 page = bitmap->filemap[i];
792 dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); 830 dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@@ -798,7 +836,7 @@ int bitmap_unplug(struct bitmap *bitmap)
798 spin_unlock_irqrestore(&bitmap->lock, flags); 836 spin_unlock_irqrestore(&bitmap->lock, flags);
799 837
800 if (dirty | need_write) 838 if (dirty | need_write)
801 err = write_page(bitmap, page, 0); 839 write_page(bitmap, page, 0);
802 } 840 }
803 if (wait) { /* if any writes were performed, we need to wait on them */ 841 if (wait) { /* if any writes were performed, we need to wait on them */
804 if (bitmap->file) 842 if (bitmap->file)
@@ -809,7 +847,6 @@ int bitmap_unplug(struct bitmap *bitmap)
809 } 847 }
810 if (bitmap->flags & BITMAP_WRITE_ERROR) 848 if (bitmap->flags & BITMAP_WRITE_ERROR)
811 bitmap_file_kick(bitmap); 849 bitmap_file_kick(bitmap);
812 return 0;
813} 850}
814 851
815static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); 852static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
@@ -858,21 +895,21 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
858 bmname(bitmap), 895 bmname(bitmap),
859 (unsigned long) i_size_read(file->f_mapping->host), 896 (unsigned long) i_size_read(file->f_mapping->host),
860 bytes + sizeof(bitmap_super_t)); 897 bytes + sizeof(bitmap_super_t));
861 goto out; 898 goto err;
862 } 899 }
863 900
864 ret = -ENOMEM; 901 ret = -ENOMEM;
865 902
866 bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); 903 bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
867 if (!bitmap->filemap) 904 if (!bitmap->filemap)
868 goto out; 905 goto err;
869 906
870 /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */ 907 /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
871 bitmap->filemap_attr = kzalloc( 908 bitmap->filemap_attr = kzalloc(
872 roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), 909 roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
873 GFP_KERNEL); 910 GFP_KERNEL);
874 if (!bitmap->filemap_attr) 911 if (!bitmap->filemap_attr)
875 goto out; 912 goto err;
876 913
877 oldindex = ~0L; 914 oldindex = ~0L;
878 915
@@ -905,7 +942,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
905 } 942 }
906 if (IS_ERR(page)) { /* read error */ 943 if (IS_ERR(page)) { /* read error */
907 ret = PTR_ERR(page); 944 ret = PTR_ERR(page);
908 goto out; 945 goto err;
909 } 946 }
910 947
911 oldindex = index; 948 oldindex = index;
@@ -920,11 +957,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
920 memset(paddr + offset, 0xff, 957 memset(paddr + offset, 0xff,
921 PAGE_SIZE - offset); 958 PAGE_SIZE - offset);
922 kunmap_atomic(paddr, KM_USER0); 959 kunmap_atomic(paddr, KM_USER0);
923 ret = write_page(bitmap, page, 1); 960 write_page(bitmap, page, 1);
924 if (ret) { 961
962 ret = -EIO;
963 if (bitmap->flags & BITMAP_WRITE_ERROR) {
925 /* release, page not in filemap yet */ 964 /* release, page not in filemap yet */
926 put_page(page); 965 put_page(page);
927 goto out; 966 goto err;
928 } 967 }
929 } 968 }
930 969
@@ -956,11 +995,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
956 md_wakeup_thread(bitmap->mddev->thread); 995 md_wakeup_thread(bitmap->mddev->thread);
957 } 996 }
958 997
959out:
960 printk(KERN_INFO "%s: bitmap initialized from disk: " 998 printk(KERN_INFO "%s: bitmap initialized from disk: "
961 "read %lu/%lu pages, set %lu bits, status: %d\n", 999 "read %lu/%lu pages, set %lu bits\n",
962 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret); 1000 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
1001
1002 return 0;
963 1003
1004 err:
1005 printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
1006 bmname(bitmap), ret);
964 return ret; 1007 return ret;
965} 1008}
966 1009
@@ -997,19 +1040,18 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
997 * out to disk 1040 * out to disk
998 */ 1041 */
999 1042
1000int bitmap_daemon_work(struct bitmap *bitmap) 1043void bitmap_daemon_work(struct bitmap *bitmap)
1001{ 1044{
1002 unsigned long j; 1045 unsigned long j;
1003 unsigned long flags; 1046 unsigned long flags;
1004 struct page *page = NULL, *lastpage = NULL; 1047 struct page *page = NULL, *lastpage = NULL;
1005 int err = 0;
1006 int blocks; 1048 int blocks;
1007 void *paddr; 1049 void *paddr;
1008 1050
1009 if (bitmap == NULL) 1051 if (bitmap == NULL)
1010 return 0; 1052 return;
1011 if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) 1053 if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
1012 return 0; 1054 return;
1013 bitmap->daemon_lastrun = jiffies; 1055 bitmap->daemon_lastrun = jiffies;
1014 1056
1015 for (j = 0; j < bitmap->chunks; j++) { 1057 for (j = 0; j < bitmap->chunks; j++) {
@@ -1032,14 +1074,8 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1032 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); 1074 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1033 1075
1034 spin_unlock_irqrestore(&bitmap->lock, flags); 1076 spin_unlock_irqrestore(&bitmap->lock, flags);
1035 if (need_write) { 1077 if (need_write)
1036 switch (write_page(bitmap, page, 0)) { 1078 write_page(bitmap, page, 0);
1037 case 0:
1038 break;
1039 default:
1040 bitmap_file_kick(bitmap);
1041 }
1042 }
1043 continue; 1079 continue;
1044 } 1080 }
1045 1081
@@ -1048,13 +1084,11 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1048 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { 1084 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1049 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1085 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1050 spin_unlock_irqrestore(&bitmap->lock, flags); 1086 spin_unlock_irqrestore(&bitmap->lock, flags);
1051 err = write_page(bitmap, lastpage, 0); 1087 write_page(bitmap, lastpage, 0);
1052 } else { 1088 } else {
1053 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1089 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1054 spin_unlock_irqrestore(&bitmap->lock, flags); 1090 spin_unlock_irqrestore(&bitmap->lock, flags);
1055 } 1091 }
1056 if (err)
1057 bitmap_file_kick(bitmap);
1058 } else 1092 } else
1059 spin_unlock_irqrestore(&bitmap->lock, flags); 1093 spin_unlock_irqrestore(&bitmap->lock, flags);
1060 lastpage = page; 1094 lastpage = page;
@@ -1097,14 +1131,13 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1097 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { 1131 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1098 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1132 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1099 spin_unlock_irqrestore(&bitmap->lock, flags); 1133 spin_unlock_irqrestore(&bitmap->lock, flags);
1100 err = write_page(bitmap, lastpage, 0); 1134 write_page(bitmap, lastpage, 0);
1101 } else { 1135 } else {
1102 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1136 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1103 spin_unlock_irqrestore(&bitmap->lock, flags); 1137 spin_unlock_irqrestore(&bitmap->lock, flags);
1104 } 1138 }
1105 } 1139 }
1106 1140
1107 return err;
1108} 1141}
1109 1142
1110static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, 1143static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1517,7 +1550,9 @@ int bitmap_create(mddev_t *mddev)
1517 1550
1518 mddev->thread->timeout = bitmap->daemon_sleep * HZ; 1551 mddev->thread->timeout = bitmap->daemon_sleep * HZ;
1519 1552
1520 return bitmap_update_sb(bitmap); 1553 bitmap_update_sb(bitmap);
1554
1555 return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
1521 1556
1522 error: 1557 error:
1523 bitmap_free(bitmap); 1558 bitmap_free(bitmap);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f4f7d35561ab..846614e676c6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -161,9 +161,7 @@ static void local_exit(void)
161{ 161{
162 kmem_cache_destroy(_tio_cache); 162 kmem_cache_destroy(_tio_cache);
163 kmem_cache_destroy(_io_cache); 163 kmem_cache_destroy(_io_cache);
164 164 unregister_blkdev(_major, _name);
165 if (unregister_blkdev(_major, _name) < 0)
166 DMERR("unregister_blkdev failed");
167 165
168 _major = 0; 166 _major = 0;
169 167
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 33beaa7da085..65ddc887dfd7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1640,7 +1640,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
1640 1640
1641static void md_update_sb(mddev_t * mddev, int force_change) 1641static void md_update_sb(mddev_t * mddev, int force_change)
1642{ 1642{
1643 int err;
1644 struct list_head *tmp; 1643 struct list_head *tmp;
1645 mdk_rdev_t *rdev; 1644 mdk_rdev_t *rdev;
1646 int sync_req; 1645 int sync_req;
@@ -1727,7 +1726,7 @@ repeat:
1727 "md: updating %s RAID superblock on device (in sync %d)\n", 1726 "md: updating %s RAID superblock on device (in sync %d)\n",
1728 mdname(mddev),mddev->in_sync); 1727 mdname(mddev),mddev->in_sync);
1729 1728
1730 err = bitmap_update_sb(mddev->bitmap); 1729 bitmap_update_sb(mddev->bitmap);
1731 ITERATE_RDEV(mddev,rdev,tmp) { 1730 ITERATE_RDEV(mddev,rdev,tmp) {
1732 char b[BDEVNAME_SIZE]; 1731 char b[BDEVNAME_SIZE];
1733 dprintk(KERN_INFO "md: "); 1732 dprintk(KERN_INFO "md: ");
@@ -2073,9 +2072,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
2073 err = super_types[super_format]. 2072 err = super_types[super_format].
2074 load_super(rdev, NULL, super_minor); 2073 load_super(rdev, NULL, super_minor);
2075 if (err == -EINVAL) { 2074 if (err == -EINVAL) {
2076 printk(KERN_WARNING 2075 printk(KERN_WARNING
2077 "md: %s has invalid sb, not importing!\n", 2076 "md: %s does not have a valid v%d.%d "
2078 bdevname(rdev->bdev,b)); 2077 "superblock, not importing!\n",
2078 bdevname(rdev->bdev,b),
2079 super_format, super_minor);
2079 goto abort_free; 2080 goto abort_free;
2080 } 2081 }
2081 if (err < 0) { 2082 if (err < 0) {
@@ -3174,13 +3175,33 @@ static int do_md_run(mddev_t * mddev)
3174 * Drop all container device buffers, from now on 3175 * Drop all container device buffers, from now on
3175 * the only valid external interface is through the md 3176 * the only valid external interface is through the md
3176 * device. 3177 * device.
3177 * Also find largest hardsector size
3178 */ 3178 */
3179 ITERATE_RDEV(mddev,rdev,tmp) { 3179 ITERATE_RDEV(mddev,rdev,tmp) {
3180 if (test_bit(Faulty, &rdev->flags)) 3180 if (test_bit(Faulty, &rdev->flags))
3181 continue; 3181 continue;
3182 sync_blockdev(rdev->bdev); 3182 sync_blockdev(rdev->bdev);
3183 invalidate_bdev(rdev->bdev); 3183 invalidate_bdev(rdev->bdev);
3184
3185 /* perform some consistency tests on the device.
3186 * We don't want the data to overlap the metadata;
3187 * internal bitmap issues are handled elsewhere.
3188 */
3189 if (rdev->data_offset < rdev->sb_offset) {
3190 if (mddev->size &&
3191 rdev->data_offset + mddev->size*2
3192 > rdev->sb_offset*2) {
3193 printk("md: %s: data overlaps metadata\n",
3194 mdname(mddev));
3195 return -EINVAL;
3196 }
3197 } else {
3198 if (rdev->sb_offset*2 + rdev->sb_size/512
3199 > rdev->data_offset) {
3200 printk("md: %s: metadata overlaps data\n",
3201 mdname(mddev));
3202 return -EINVAL;
3203 }
3204 }
3184 } 3205 }
3185 3206
3186 md_probe(mddev->unit, NULL, NULL); 3207 md_probe(mddev->unit, NULL, NULL);
@@ -4642,7 +4663,6 @@ static int md_thread(void * arg)
4642 * many dirty RAID5 blocks. 4663 * many dirty RAID5 blocks.
4643 */ 4664 */
4644 4665
4645 current->flags |= PF_NOFREEZE;
4646 allow_signal(SIGKILL); 4666 allow_signal(SIGKILL);
4647 while (!kthread_should_stop()) { 4667 while (!kthread_should_stop()) {
4648 4668
@@ -5090,7 +5110,7 @@ static int is_mddev_idle(mddev_t *mddev)
5090 mdk_rdev_t * rdev; 5110 mdk_rdev_t * rdev;
5091 struct list_head *tmp; 5111 struct list_head *tmp;
5092 int idle; 5112 int idle;
5093 unsigned long curr_events; 5113 long curr_events;
5094 5114
5095 idle = 1; 5115 idle = 1;
5096 ITERATE_RDEV(mddev,rdev,tmp) { 5116 ITERATE_RDEV(mddev,rdev,tmp) {
@@ -5098,20 +5118,29 @@ static int is_mddev_idle(mddev_t *mddev)
5098 curr_events = disk_stat_read(disk, sectors[0]) + 5118 curr_events = disk_stat_read(disk, sectors[0]) +
5099 disk_stat_read(disk, sectors[1]) - 5119 disk_stat_read(disk, sectors[1]) -
5100 atomic_read(&disk->sync_io); 5120 atomic_read(&disk->sync_io);
5101 /* The difference between curr_events and last_events 5121 /* sync IO will cause sync_io to increase before the disk_stats
5102 * will be affected by any new non-sync IO (making 5122 * as sync_io is counted when a request starts, and
5103 * curr_events bigger) and any difference in the amount of 5123 * disk_stats is counted when it completes.
5104 * in-flight syncio (making current_events bigger or smaller) 5124 * So resync activity will cause curr_events to be smaller than
5105 * The amount in-flight is currently limited to 5125 * when there was no such activity.
5106 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6 5126 * non-sync IO will cause disk_stat to increase without
5107 * which is at most 4096 sectors. 5127 * increasing sync_io so curr_events will (eventually)
5108 * These numbers are fairly fragile and should be made 5128 * be larger than it was before. Once it becomes
5109 * more robust, probably by enforcing the 5129 * substantially larger, the test below will cause
5110 * 'window size' that md_do_sync sort-of uses. 5130 * the array to appear non-idle, and resync will slow
5131 * down.
5132 * If there is a lot of outstanding resync activity when
5133 * we set last_event to curr_events, then all that activity
5134 * completing might cause the array to appear non-idle
5135 * and resync will be slowed down even though there might
5136 * not have been non-resync activity. This will only
5137 * happen once though. 'last_events' will soon reflect
5138 * the state where there is little or no outstanding
5139 * resync requests, and further resync activity will
5140 * always make curr_events less than last_events.
5111 * 5141 *
5112 * Note: the following is an unsigned comparison.
5113 */ 5142 */
5114 if ((long)curr_events - (long)rdev->last_events > 4096) { 5143 if (curr_events - rdev->last_events > 4096) {
5115 rdev->last_events = curr_events; 5144 rdev->last_events = curr_events;
5116 idle = 0; 5145 idle = 0;
5117 } 5146 }
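The core of the is_mddev_idle() change is making curr_events a signed long: as the new comment explains, in-flight resync can legitimately make it smaller than last_events, and the delta must stay a small negative number rather than wrapping. A two-line demonstration of the difference:

#include <stdio.h>

int main(void)
{
	long last = 1000, curr = 900;	/* resync in flight: counter dipped */

	/* Signed arithmetic, as md now uses: -100, far below the 4096
	 * threshold, so the array still counts as idle. */
	printf("signed delta:   %ld\n", curr - last);

	/* The same dip through unsigned arithmetic wraps to a huge value
	 * and would wrongly make the array look busy. */
	printf("unsigned delta: %lu\n", (unsigned long)curr - (unsigned long)last);
	return 0;
}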
@@ -5772,7 +5801,7 @@ static void autostart_arrays(int part)
5772 for (i = 0; i < dev_cnt; i++) { 5801 for (i = 0; i < dev_cnt; i++) {
5773 dev_t dev = detected_devices[i]; 5802 dev_t dev = detected_devices[i];
5774 5803
5775 rdev = md_import_device(dev,0, 0); 5804 rdev = md_import_device(dev,0, 90);
5776 if (IS_ERR(rdev)) 5805 if (IS_ERR(rdev))
5777 continue; 5806 continue;
5778 5807
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 46677d7d9980..00c78b77b13d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1526,8 +1526,7 @@ static void raid1d(mddev_t *mddev)
1526 blk_remove_plug(mddev->queue); 1526 blk_remove_plug(mddev->queue);
1527 spin_unlock_irqrestore(&conf->device_lock, flags); 1527 spin_unlock_irqrestore(&conf->device_lock, flags);
1528 /* flush any pending bitmap writes to disk before proceeding w/ I/O */ 1528 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1529 if (bitmap_unplug(mddev->bitmap) != 0) 1529 bitmap_unplug(mddev->bitmap);
1530 printk("%s: bitmap file write failed!\n", mdname(mddev));
1531 1530
1532 while (bio) { /* submit pending writes */ 1531 while (bio) { /* submit pending writes */
1533 struct bio *next = bio->bi_next; 1532 struct bio *next = bio->bi_next;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9eb66c1b523b..a95ada1cfac4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1510,8 +1510,7 @@ static void raid10d(mddev_t *mddev)
1510 blk_remove_plug(mddev->queue); 1510 blk_remove_plug(mddev->queue);
1511 spin_unlock_irqrestore(&conf->device_lock, flags); 1511 spin_unlock_irqrestore(&conf->device_lock, flags);
1512 /* flush any pending bitmap writes to disk before proceeding w/ I/O */ 1512 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1513 if (bitmap_unplug(mddev->bitmap) != 0) 1513 bitmap_unplug(mddev->bitmap);
1514 printk("%s: bitmap file write failed!\n", mdname(mddev));
1515 1514
1516 while (bio) { /* submit pending writes */ 1515 while (bio) { /* submit pending writes */
1517 struct bio *next = bio->bi_next; 1516 struct bio *next = bio->bi_next;
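Both the raid1d() and raid10d() hunks drop the same return-value check, evidently because bitmap_unplug() no longer returns an error in this series and records write failures itself rather than making every caller duplicate the warning. A self-contained toy analogue of that design choice (not the kernel API) is sketched below.

#include <stdio.h>

/* Toy analogue: a flush helper that records failures in a flag instead
 * of returning an error, so callers no longer have to check and report. */
struct toy_bitmap {
	int flush_error;
};

static void toy_bitmap_unplug(struct toy_bitmap *bm, int io_ok)
{
	if (!io_ok) {
		bm->flush_error = 1;
		fprintf(stderr, "bitmap: flush of pending writes failed\n");
	}
}

int main(void)
{
	struct toy_bitmap bm = { 0 };

	toy_bitmap_unplug(&bm, 1);   /* caller just calls it and moves on */
	toy_bitmap_unplug(&bm, 0);
	printf("error recorded: %d\n", bm.flush_error);
	return 0;
}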
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index f4e4ca2dcade..b6c7f6610ec5 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -523,6 +523,7 @@ static int dvb_frontend_thread(void *data)
523 523
524 dvb_frontend_init(fe); 524 dvb_frontend_init(fe);
525 525
526 set_freezable();
526 while (1) { 527 while (1) {
527 up(&fepriv->sem); /* is locked when we enter the thread... */ 528 up(&fepriv->sem); /* is locked when we enter the thread... */
528restart: 529restart:
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 259ea08e784f..1cc2d286a1cb 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -906,6 +906,7 @@ int cx88_audio_thread(void *data)
906 u32 mode = 0; 906 u32 mode = 0;
907 907
908 dprintk("cx88: tvaudio thread started\n"); 908 dprintk("cx88: tvaudio thread started\n");
909 set_freezable();
909 for (;;) { 910 for (;;) {
910 msleep_interruptible(1000); 911 msleep_interruptible(1000);
911 if (kthread_should_stop()) 912 if (kthread_should_stop())
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index e1821eb82fb5..d5ee2629121e 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/freezer.h>
26#include <linux/videodev.h> 27#include <linux/videodev.h>
27#include <linux/videodev2.h> 28#include <linux/videodev2.h>
28#include <media/v4l2-common.h> 29#include <media/v4l2-common.h>
@@ -468,6 +469,7 @@ int msp3400c_thread(void *data)
468 469
469 470
470 v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n"); 471 v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
472 set_freezable();
471 for (;;) { 473 for (;;) {
472 v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n"); 474 v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
473 msp_sleep(state, -1); 475 msp_sleep(state, -1);
@@ -646,7 +648,7 @@ int msp3410d_thread(void *data)
646 int val, i, std, count; 648 int val, i, std, count;
647 649
648 v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n"); 650 v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
649 651 set_freezable();
650 for (;;) { 652 for (;;) {
651 v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n"); 653 v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
652 msp_sleep(state,-1); 654 msp_sleep(state,-1);
@@ -940,7 +942,7 @@ int msp34xxg_thread(void *data)
940 int val, i; 942 int val, i;
941 943
942 v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n"); 944 v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
943 945 set_freezable();
944 for (;;) { 946 for (;;) {
945 v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n"); 947 v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
946 msp_sleep(state, -1); 948 msp_sleep(state, -1);
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index c9bf9dbc2ea3..9da338dc4f3b 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -271,7 +271,7 @@ static int chip_thread(void *data)
271 struct CHIPDESC *desc = chiplist + chip->type; 271 struct CHIPDESC *desc = chiplist + chip->type;
272 272
273 v4l_dbg(1, debug, &chip->c, "%s: thread started\n", chip->c.name); 273 v4l_dbg(1, debug, &chip->c, "%s: thread started\n", chip->c.name);
274 274 set_freezable();
275 for (;;) { 275 for (;;) {
276 set_current_state(TASK_INTERRUPTIBLE); 276 set_current_state(TASK_INTERRUPTIBLE);
277 if (!kthread_should_stop()) 277 if (!kthread_should_stop())
diff --git a/drivers/media/video/video-buf-dvb.c b/drivers/media/video/video-buf-dvb.c
index fcc5467e7636..e617925ba31e 100644
--- a/drivers/media/video/video-buf-dvb.c
+++ b/drivers/media/video/video-buf-dvb.c
@@ -47,6 +47,7 @@ static int videobuf_dvb_thread(void *data)
47 int err; 47 int err;
48 48
49 dprintk("dvb thread started\n"); 49 dprintk("dvb thread started\n");
50 set_freezable();
50 videobuf_read_start(&dvb->dvbq); 51 videobuf_read_start(&dvb->dvbq);
51 52
52 for (;;) { 53 for (;;) {
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index f7e1d1910374..3ef4d0159c33 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -573,6 +573,7 @@ static int vivi_thread(void *data)
573 dprintk(1,"thread started\n"); 573 dprintk(1,"thread started\n");
574 574
575 mod_timer(&dma_q->timeout, jiffies+BUFFER_TIMEOUT); 575 mod_timer(&dma_q->timeout, jiffies+BUFFER_TIMEOUT);
576 set_freezable();
576 577
577 for (;;) { 578 for (;;) {
578 vivi_sleep(dma_q); 579 vivi_sleep(dma_q);
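The set_freezable() calls added across the media hunks above reflect that kernel threads in this series are not freezable by default; a thread that should park across suspend/resume opts in explicitly and then polls try_to_freeze() in its loop. A minimal sketch of the resulting shape, with a hypothetical worker name and assuming the standard kthread/freezer helpers:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

/* Hypothetical worker illustrating the pattern the hunks above add:
 * opt in to freezing once, then give the freezer a chance on every
 * pass through the loop. */
static int my_thread(void *data)
{
	set_freezable();

	while (!kthread_should_stop()) {
		msleep_interruptible(1000);
		try_to_freeze();
		/* ... periodic work goes here ... */
	}
	return 0;
}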
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
index 8abe45e49ad7..ce62d8bfe1c8 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/message/i2o/debug.c
@@ -24,7 +24,7 @@ void i2o_report_status(const char *severity, const char *str,
24 if (cmd == I2O_CMD_UTIL_EVT_REGISTER) 24 if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
25 return; // No status in this reply 25 return; // No status in this reply
26 26
27 printk(KERN_DEBUG "%s%s: ", severity, str); 27 printk("%s%s: ", severity, str);
28 28
29 if (cmd < 0x1F) // Utility cmd 29 if (cmd < 0x1F) // Utility cmd
30 i2o_report_util_cmd(cmd); 30 i2o_report_util_cmd(cmd);
@@ -32,7 +32,7 @@ void i2o_report_status(const char *severity, const char *str,
32 else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd 32 else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
33 i2o_report_exec_cmd(cmd); 33 i2o_report_exec_cmd(cmd);
34 else 34 else
35 printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); // Other cmds 35 printk("Cmd = %0#2x, ", cmd); // Other cmds
36 36
37 if (msg[0] & MSG_FAIL) { 37 if (msg[0] & MSG_FAIL) {
38 i2o_report_fail_status(req_status, msg); 38 i2o_report_fail_status(req_status, msg);
@@ -44,7 +44,7 @@ void i2o_report_status(const char *severity, const char *str,
44 if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF)) 44 if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
45 i2o_report_common_dsc(detailed_status); 45 i2o_report_common_dsc(detailed_status);
46 else 46 else
47 printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n", 47 printk(" / DetailedStatus = %0#4x.\n",
48 detailed_status); 48 detailed_status);
49} 49}
50 50
@@ -89,10 +89,10 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
89 }; 89 };
90 90
91 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE) 91 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
92 printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n", 92 printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
93 req_status); 93 req_status);
94 else 94 else
95 printk(KERN_DEBUG "TRANSPORT_%s.\n", 95 printk("TRANSPORT_%s.\n",
96 FAIL_STATUS[req_status & 0x0F]); 96 FAIL_STATUS[req_status & 0x0F]);
97 97
98 /* Dump some details */ 98 /* Dump some details */
@@ -104,7 +104,7 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
104 printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n", 104 printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
105 msg[5] >> 16, msg[5] & 0xFFF); 105 msg[5] >> 16, msg[5] & 0xFFF);
106 106
107 printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF); 107 printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF);
108 if (msg[4] & (1 << 16)) 108 if (msg[4] & (1 << 16))
109 printk(KERN_DEBUG "(FormatError), " 109 printk(KERN_DEBUG "(FormatError), "
110 "this msg can never be delivered/processed.\n"); 110 "this msg can never be delivered/processed.\n");
@@ -142,9 +142,9 @@ static void i2o_report_common_status(u8 req_status)
142 }; 142 };
143 143
144 if (req_status >= ARRAY_SIZE(REPLY_STATUS)) 144 if (req_status >= ARRAY_SIZE(REPLY_STATUS))
145 printk(KERN_DEBUG "RequestStatus = %0#2x", req_status); 145 printk("RequestStatus = %0#2x", req_status);
146 else 146 else
147 printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]); 147 printk("%s", REPLY_STATUS[req_status]);
148} 148}
149 149
150/* 150/*
@@ -187,10 +187,10 @@ static void i2o_report_common_dsc(u16 detailed_status)
187 }; 187 };
188 188
189 if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE) 189 if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
190 printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n", 190 printk(" / DetailedStatus = %0#4x.\n",
191 detailed_status); 191 detailed_status);
192 else 192 else
193 printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]); 193 printk(" / %s.\n", COMMON_DSC[detailed_status]);
194} 194}
195 195
196/* 196/*
@@ -200,49 +200,49 @@ static void i2o_report_util_cmd(u8 cmd)
200{ 200{
201 switch (cmd) { 201 switch (cmd) {
202 case I2O_CMD_UTIL_NOP: 202 case I2O_CMD_UTIL_NOP:
203 printk(KERN_DEBUG "UTIL_NOP, "); 203 printk("UTIL_NOP, ");
204 break; 204 break;
205 case I2O_CMD_UTIL_ABORT: 205 case I2O_CMD_UTIL_ABORT:
206 printk(KERN_DEBUG "UTIL_ABORT, "); 206 printk("UTIL_ABORT, ");
207 break; 207 break;
208 case I2O_CMD_UTIL_CLAIM: 208 case I2O_CMD_UTIL_CLAIM:
209 printk(KERN_DEBUG "UTIL_CLAIM, "); 209 printk("UTIL_CLAIM, ");
210 break; 210 break;
211 case I2O_CMD_UTIL_RELEASE: 211 case I2O_CMD_UTIL_RELEASE:
212 printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, "); 212 printk("UTIL_CLAIM_RELEASE, ");
213 break; 213 break;
214 case I2O_CMD_UTIL_CONFIG_DIALOG: 214 case I2O_CMD_UTIL_CONFIG_DIALOG:
215 printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, "); 215 printk("UTIL_CONFIG_DIALOG, ");
216 break; 216 break;
217 case I2O_CMD_UTIL_DEVICE_RESERVE: 217 case I2O_CMD_UTIL_DEVICE_RESERVE:
218 printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, "); 218 printk("UTIL_DEVICE_RESERVE, ");
219 break; 219 break;
220 case I2O_CMD_UTIL_DEVICE_RELEASE: 220 case I2O_CMD_UTIL_DEVICE_RELEASE:
221 printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, "); 221 printk("UTIL_DEVICE_RELEASE, ");
222 break; 222 break;
223 case I2O_CMD_UTIL_EVT_ACK: 223 case I2O_CMD_UTIL_EVT_ACK:
224 printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, "); 224 printk("UTIL_EVENT_ACKNOWLEDGE, ");
225 break; 225 break;
226 case I2O_CMD_UTIL_EVT_REGISTER: 226 case I2O_CMD_UTIL_EVT_REGISTER:
227 printk(KERN_DEBUG "UTIL_EVENT_REGISTER, "); 227 printk("UTIL_EVENT_REGISTER, ");
228 break; 228 break;
229 case I2O_CMD_UTIL_LOCK: 229 case I2O_CMD_UTIL_LOCK:
230 printk(KERN_DEBUG "UTIL_LOCK, "); 230 printk("UTIL_LOCK, ");
231 break; 231 break;
232 case I2O_CMD_UTIL_LOCK_RELEASE: 232 case I2O_CMD_UTIL_LOCK_RELEASE:
233 printk(KERN_DEBUG "UTIL_LOCK_RELEASE, "); 233 printk("UTIL_LOCK_RELEASE, ");
234 break; 234 break;
235 case I2O_CMD_UTIL_PARAMS_GET: 235 case I2O_CMD_UTIL_PARAMS_GET:
236 printk(KERN_DEBUG "UTIL_PARAMS_GET, "); 236 printk("UTIL_PARAMS_GET, ");
237 break; 237 break;
238 case I2O_CMD_UTIL_PARAMS_SET: 238 case I2O_CMD_UTIL_PARAMS_SET:
239 printk(KERN_DEBUG "UTIL_PARAMS_SET, "); 239 printk("UTIL_PARAMS_SET, ");
240 break; 240 break;
241 case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY: 241 case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
242 printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, "); 242 printk("UTIL_REPLY_FAULT_NOTIFY, ");
243 break; 243 break;
244 default: 244 default:
245 printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); 245 printk("Cmd = %0#2x, ", cmd);
246 } 246 }
247} 247}
248 248
@@ -253,106 +253,106 @@ static void i2o_report_exec_cmd(u8 cmd)
253{ 253{
254 switch (cmd) { 254 switch (cmd) {
255 case I2O_CMD_ADAPTER_ASSIGN: 255 case I2O_CMD_ADAPTER_ASSIGN:
256 printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, "); 256 printk("EXEC_ADAPTER_ASSIGN, ");
257 break; 257 break;
258 case I2O_CMD_ADAPTER_READ: 258 case I2O_CMD_ADAPTER_READ:
259 printk(KERN_DEBUG "EXEC_ADAPTER_READ, "); 259 printk("EXEC_ADAPTER_READ, ");
260 break; 260 break;
261 case I2O_CMD_ADAPTER_RELEASE: 261 case I2O_CMD_ADAPTER_RELEASE:
262 printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, "); 262 printk("EXEC_ADAPTER_RELEASE, ");
263 break; 263 break;
264 case I2O_CMD_BIOS_INFO_SET: 264 case I2O_CMD_BIOS_INFO_SET:
265 printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, "); 265 printk("EXEC_BIOS_INFO_SET, ");
266 break; 266 break;
267 case I2O_CMD_BOOT_DEVICE_SET: 267 case I2O_CMD_BOOT_DEVICE_SET:
268 printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, "); 268 printk("EXEC_BOOT_DEVICE_SET, ");
269 break; 269 break;
270 case I2O_CMD_CONFIG_VALIDATE: 270 case I2O_CMD_CONFIG_VALIDATE:
271 printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, "); 271 printk("EXEC_CONFIG_VALIDATE, ");
272 break; 272 break;
273 case I2O_CMD_CONN_SETUP: 273 case I2O_CMD_CONN_SETUP:
274 printk(KERN_DEBUG "EXEC_CONN_SETUP, "); 274 printk("EXEC_CONN_SETUP, ");
275 break; 275 break;
276 case I2O_CMD_DDM_DESTROY: 276 case I2O_CMD_DDM_DESTROY:
277 printk(KERN_DEBUG "EXEC_DDM_DESTROY, "); 277 printk("EXEC_DDM_DESTROY, ");
278 break; 278 break;
279 case I2O_CMD_DDM_ENABLE: 279 case I2O_CMD_DDM_ENABLE:
280 printk(KERN_DEBUG "EXEC_DDM_ENABLE, "); 280 printk("EXEC_DDM_ENABLE, ");
281 break; 281 break;
282 case I2O_CMD_DDM_QUIESCE: 282 case I2O_CMD_DDM_QUIESCE:
283 printk(KERN_DEBUG "EXEC_DDM_QUIESCE, "); 283 printk("EXEC_DDM_QUIESCE, ");
284 break; 284 break;
285 case I2O_CMD_DDM_RESET: 285 case I2O_CMD_DDM_RESET:
286 printk(KERN_DEBUG "EXEC_DDM_RESET, "); 286 printk("EXEC_DDM_RESET, ");
287 break; 287 break;
288 case I2O_CMD_DDM_SUSPEND: 288 case I2O_CMD_DDM_SUSPEND:
289 printk(KERN_DEBUG "EXEC_DDM_SUSPEND, "); 289 printk("EXEC_DDM_SUSPEND, ");
290 break; 290 break;
291 case I2O_CMD_DEVICE_ASSIGN: 291 case I2O_CMD_DEVICE_ASSIGN:
292 printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, "); 292 printk("EXEC_DEVICE_ASSIGN, ");
293 break; 293 break;
294 case I2O_CMD_DEVICE_RELEASE: 294 case I2O_CMD_DEVICE_RELEASE:
295 printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, "); 295 printk("EXEC_DEVICE_RELEASE, ");
296 break; 296 break;
297 case I2O_CMD_HRT_GET: 297 case I2O_CMD_HRT_GET:
298 printk(KERN_DEBUG "EXEC_HRT_GET, "); 298 printk("EXEC_HRT_GET, ");
299 break; 299 break;
300 case I2O_CMD_ADAPTER_CLEAR: 300 case I2O_CMD_ADAPTER_CLEAR:
301 printk(KERN_DEBUG "EXEC_IOP_CLEAR, "); 301 printk("EXEC_IOP_CLEAR, ");
302 break; 302 break;
303 case I2O_CMD_ADAPTER_CONNECT: 303 case I2O_CMD_ADAPTER_CONNECT:
304 printk(KERN_DEBUG "EXEC_IOP_CONNECT, "); 304 printk("EXEC_IOP_CONNECT, ");
305 break; 305 break;
306 case I2O_CMD_ADAPTER_RESET: 306 case I2O_CMD_ADAPTER_RESET:
307 printk(KERN_DEBUG "EXEC_IOP_RESET, "); 307 printk("EXEC_IOP_RESET, ");
308 break; 308 break;
309 case I2O_CMD_LCT_NOTIFY: 309 case I2O_CMD_LCT_NOTIFY:
310 printk(KERN_DEBUG "EXEC_LCT_NOTIFY, "); 310 printk("EXEC_LCT_NOTIFY, ");
311 break; 311 break;
312 case I2O_CMD_OUTBOUND_INIT: 312 case I2O_CMD_OUTBOUND_INIT:
313 printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, "); 313 printk("EXEC_OUTBOUND_INIT, ");
314 break; 314 break;
315 case I2O_CMD_PATH_ENABLE: 315 case I2O_CMD_PATH_ENABLE:
316 printk(KERN_DEBUG "EXEC_PATH_ENABLE, "); 316 printk("EXEC_PATH_ENABLE, ");
317 break; 317 break;
318 case I2O_CMD_PATH_QUIESCE: 318 case I2O_CMD_PATH_QUIESCE:
319 printk(KERN_DEBUG "EXEC_PATH_QUIESCE, "); 319 printk("EXEC_PATH_QUIESCE, ");
320 break; 320 break;
321 case I2O_CMD_PATH_RESET: 321 case I2O_CMD_PATH_RESET:
322 printk(KERN_DEBUG "EXEC_PATH_RESET, "); 322 printk("EXEC_PATH_RESET, ");
323 break; 323 break;
324 case I2O_CMD_STATIC_MF_CREATE: 324 case I2O_CMD_STATIC_MF_CREATE:
325 printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, "); 325 printk("EXEC_STATIC_MF_CREATE, ");
326 break; 326 break;
327 case I2O_CMD_STATIC_MF_RELEASE: 327 case I2O_CMD_STATIC_MF_RELEASE:
328 printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, "); 328 printk("EXEC_STATIC_MF_RELEASE, ");
329 break; 329 break;
330 case I2O_CMD_STATUS_GET: 330 case I2O_CMD_STATUS_GET:
331 printk(KERN_DEBUG "EXEC_STATUS_GET, "); 331 printk("EXEC_STATUS_GET, ");
332 break; 332 break;
333 case I2O_CMD_SW_DOWNLOAD: 333 case I2O_CMD_SW_DOWNLOAD:
334 printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, "); 334 printk("EXEC_SW_DOWNLOAD, ");
335 break; 335 break;
336 case I2O_CMD_SW_UPLOAD: 336 case I2O_CMD_SW_UPLOAD:
337 printk(KERN_DEBUG "EXEC_SW_UPLOAD, "); 337 printk("EXEC_SW_UPLOAD, ");
338 break; 338 break;
339 case I2O_CMD_SW_REMOVE: 339 case I2O_CMD_SW_REMOVE:
340 printk(KERN_DEBUG "EXEC_SW_REMOVE, "); 340 printk("EXEC_SW_REMOVE, ");
341 break; 341 break;
342 case I2O_CMD_SYS_ENABLE: 342 case I2O_CMD_SYS_ENABLE:
343 printk(KERN_DEBUG "EXEC_SYS_ENABLE, "); 343 printk("EXEC_SYS_ENABLE, ");
344 break; 344 break;
345 case I2O_CMD_SYS_MODIFY: 345 case I2O_CMD_SYS_MODIFY:
346 printk(KERN_DEBUG "EXEC_SYS_MODIFY, "); 346 printk("EXEC_SYS_MODIFY, ");
347 break; 347 break;
348 case I2O_CMD_SYS_QUIESCE: 348 case I2O_CMD_SYS_QUIESCE:
349 printk(KERN_DEBUG "EXEC_SYS_QUIESCE, "); 349 printk("EXEC_SYS_QUIESCE, ");
350 break; 350 break;
351 case I2O_CMD_SYS_TAB_SET: 351 case I2O_CMD_SYS_TAB_SET:
352 printk(KERN_DEBUG "EXEC_SYS_TAB_SET, "); 352 printk("EXEC_SYS_TAB_SET, ");
353 break; 353 break;
354 default: 354 default:
355 printk(KERN_DEBUG "Cmd = %#02x, ", cmd); 355 printk("Cmd = %#02x, ", cmd);
356 } 356 }
357} 357}
358 358
@@ -361,28 +361,28 @@ void i2o_debug_state(struct i2o_controller *c)
361 printk(KERN_INFO "%s: State = ", c->name); 361 printk(KERN_INFO "%s: State = ", c->name);
362 switch (((i2o_status_block *) c->status_block.virt)->iop_state) { 362 switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
363 case 0x01: 363 case 0x01:
364 printk(KERN_DEBUG "INIT\n"); 364 printk("INIT\n");
365 break; 365 break;
366 case 0x02: 366 case 0x02:
367 printk(KERN_DEBUG "RESET\n"); 367 printk("RESET\n");
368 break; 368 break;
369 case 0x04: 369 case 0x04:
370 printk(KERN_DEBUG "HOLD\n"); 370 printk("HOLD\n");
371 break; 371 break;
372 case 0x05: 372 case 0x05:
373 printk(KERN_DEBUG "READY\n"); 373 printk("READY\n");
374 break; 374 break;
375 case 0x08: 375 case 0x08:
376 printk(KERN_DEBUG "OPERATIONAL\n"); 376 printk("OPERATIONAL\n");
377 break; 377 break;
378 case 0x10: 378 case 0x10:
379 printk(KERN_DEBUG "FAILED\n"); 379 printk("FAILED\n");
380 break; 380 break;
381 case 0x11: 381 case 0x11:
382 printk(KERN_DEBUG "FAULTED\n"); 382 printk("FAULTED\n");
383 break; 383 break;
384 default: 384 default:
385 printk(KERN_DEBUG "%x (unknown !!)\n", 385 printk("%x (unknown !!)\n",
386 ((i2o_status_block *) c->status_block.virt)->iop_state); 386 ((i2o_status_block *) c->status_block.virt)->iop_state);
387 } 387 }
388}; 388};
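The debug.c hunk removes KERN_DEBUG from printk() calls that only continue a line started by an earlier, levelled printk(); mid-line fragments should not carry their own level. A small, hypothetical illustration of that convention (later kernels spell the continuation explicitly with KERN_CONT/pr_cont()):

#include <linux/kernel.h>

/* Hypothetical example of the convention applied in debug.c above:
 * the first fragment of a line carries the level, continuation
 * fragments do not. */
static void report_state(int state)
{
	printk(KERN_INFO "controller: state = ");   /* starts the line */
	if (state == 0x08)
		printk("OPERATIONAL\n");            /* continues it */
	else
		printk("%x (unknown)\n", state);
}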
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index c13b9321e7ab..8c83ee3b0920 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -131,8 +131,10 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
131 int rc = 0; 131 int rc = 0;
132 132
133 wait = i2o_exec_wait_alloc(); 133 wait = i2o_exec_wait_alloc();
134 if (!wait) 134 if (!wait) {
135 i2o_msg_nop(c, msg);
135 return -ENOMEM; 136 return -ENOMEM;
137 }
136 138
137 if (tcntxt == 0xffffffff) 139 if (tcntxt == 0xffffffff)
138 tcntxt = 0x80000000; 140 tcntxt = 0x80000000;
@@ -337,6 +339,8 @@ static int i2o_exec_probe(struct device *dev)
337 rc = device_create_file(dev, &dev_attr_product_id); 339 rc = device_create_file(dev, &dev_attr_product_id);
338 if (rc) goto err_vid; 340 if (rc) goto err_vid;
339 341
342 i2o_dev->iop->exec = i2o_dev;
343
340 return 0; 344 return 0;
341 345
342err_vid: 346err_vid:
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b17c4b2bc9ef..64a52bd7544a 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -215,7 +215,7 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
215 struct i2o_message *msg; 215 struct i2o_message *msg;
216 216
217 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); 217 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
218 if (IS_ERR(msg) == I2O_QUEUE_EMPTY) 218 if (IS_ERR(msg))
219 return PTR_ERR(msg); 219 return PTR_ERR(msg);
220 220
221 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); 221 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
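The i2o_block fix replaces a comparison of IS_ERR()'s truth value against an error constant, which could never match, with a plain IS_ERR() test. A minimal sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom, using a hypothetical getter in place of i2o_msg_get_wait():

#include <linux/err.h>
#include <linux/errno.h>

struct example_msg;	/* opaque stand-in for a message frame */

/* Hypothetical getter following the ERR_PTR convention: on failure it
 * returns an errno encoded as a pointer rather than NULL. */
static struct example_msg *example_get_msg(int queue_empty)
{
	if (queue_empty)
		return ERR_PTR(-ETIMEDOUT);
	return NULL;	/* would be a real frame in the driver */
}

static int example_caller(void)
{
	struct example_msg *msg = example_get_msg(1);

	/* IS_ERR() evaluates to 0 or 1, so comparing it against an error
	 * constant (as the old code did) can never match; test it directly
	 * and recover the errno with PTR_ERR(). */
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	return 0;
}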
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 8ba275a12773..84e046e94f5f 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -554,8 +554,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
554 return -ENXIO; 554 return -ENXIO;
555 } 555 }
556 556
557 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
558
559 sb = c->status_block.virt; 557 sb = c->status_block.virt;
560 558
561 if (get_user(size, &user_msg[0])) { 559 if (get_user(size, &user_msg[0])) {
@@ -573,24 +571,30 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
573 571
574 size <<= 2; // Convert to bytes 572 size <<= 2; // Convert to bytes
575 573
574 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
575 if (IS_ERR(msg))
576 return PTR_ERR(msg);
577
578 rcode = -EFAULT;
576 /* Copy in the user's I2O command */ 579 /* Copy in the user's I2O command */
577 if (copy_from_user(msg, user_msg, size)) { 580 if (copy_from_user(msg, user_msg, size)) {
578 osm_warn("unable to copy user message\n"); 581 osm_warn("unable to copy user message\n");
579 return -EFAULT; 582 goto out;
580 } 583 }
581 i2o_dump_message(msg); 584 i2o_dump_message(msg);
582 585
583 if (get_user(reply_size, &user_reply[0]) < 0) 586 if (get_user(reply_size, &user_reply[0]) < 0)
584 return -EFAULT; 587 goto out;
585 588
586 reply_size >>= 16; 589 reply_size >>= 16;
587 reply_size <<= 2; 590 reply_size <<= 2;
588 591
592 rcode = -ENOMEM;
589 reply = kzalloc(reply_size, GFP_KERNEL); 593 reply = kzalloc(reply_size, GFP_KERNEL);
590 if (!reply) { 594 if (!reply) {
591 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 595 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
592 c->name); 596 c->name);
593 return -ENOMEM; 597 goto out;
594 } 598 }
595 599
596 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 600 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -661,13 +665,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
661 } 665 }
662 666
663 rcode = i2o_msg_post_wait(c, msg, 60); 667 rcode = i2o_msg_post_wait(c, msg, 60);
668 msg = NULL;
664 if (rcode) { 669 if (rcode) {
665 reply[4] = ((u32) rcode) << 24; 670 reply[4] = ((u32) rcode) << 24;
666 goto sg_list_cleanup; 671 goto sg_list_cleanup;
667 } 672 }
668 673
669 if (sg_offset) { 674 if (sg_offset) {
670 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; 675 u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
671 /* Copy back the Scatter Gather buffers back to user space */ 676 /* Copy back the Scatter Gather buffers back to user space */
672 u32 j; 677 u32 j;
673 // TODO 64bit fix 678 // TODO 64bit fix
@@ -675,7 +680,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
675 int sg_size; 680 int sg_size;
676 681
677 // re-acquire the original message to handle correctly the sg copy operation 682 // re-acquire the original message to handle correctly the sg copy operation
678 memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); 683 memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
679 // get user msg size in u32s 684 // get user msg size in u32s
680 if (get_user(size, &user_msg[0])) { 685 if (get_user(size, &user_msg[0])) {
681 rcode = -EFAULT; 686 rcode = -EFAULT;
@@ -684,7 +689,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
684 size = size >> 16; 689 size = size >> 16;
685 size *= 4; 690 size *= 4;
686 /* Copy in the user's I2O command */ 691 /* Copy in the user's I2O command */
687 if (copy_from_user(msg, user_msg, size)) { 692 if (copy_from_user(rmsg, user_msg, size)) {
688 rcode = -EFAULT; 693 rcode = -EFAULT;
689 goto sg_list_cleanup; 694 goto sg_list_cleanup;
690 } 695 }
@@ -692,7 +697,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
692 (size - sg_offset * 4) / sizeof(struct sg_simple_element); 697 (size - sg_offset * 4) / sizeof(struct sg_simple_element);
693 698
694 // TODO 64bit fix 699 // TODO 64bit fix
695 sg = (struct sg_simple_element *)(msg + sg_offset); 700 sg = (struct sg_simple_element *)(rmsg + sg_offset);
696 for (j = 0; j < sg_count; j++) { 701 for (j = 0; j < sg_count; j++) {
697 /* Copy out the SG list to user's buffer if necessary */ 702 /* Copy out the SG list to user's buffer if necessary */
698 if (! 703 if (!
@@ -714,7 +719,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
714 } 719 }
715 } 720 }
716 721
717 sg_list_cleanup: 722sg_list_cleanup:
718 /* Copy back the reply to user space */ 723 /* Copy back the reply to user space */
719 if (reply_size) { 724 if (reply_size) {
720 // we wrote our own values for context - now restore the user supplied ones 725 // we wrote our own values for context - now restore the user supplied ones
@@ -723,7 +728,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
723 "%s: Could not copy message context FROM user\n", 728 "%s: Could not copy message context FROM user\n",
724 c->name); 729 c->name);
725 rcode = -EFAULT; 730 rcode = -EFAULT;
726 goto sg_list_cleanup;
727 } 731 }
728 if (copy_to_user(user_reply, reply, reply_size)) { 732 if (copy_to_user(user_reply, reply, reply_size)) {
729 printk(KERN_WARNING 733 printk(KERN_WARNING
@@ -731,12 +735,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
731 rcode = -EFAULT; 735 rcode = -EFAULT;
732 } 736 }
733 } 737 }
734
735 for (i = 0; i < sg_index; i++) 738 for (i = 0; i < sg_index; i++)
736 i2o_dma_free(&c->pdev->dev, &sg_list[i]); 739 i2o_dma_free(&c->pdev->dev, &sg_list[i]);
737 740
738 cleanup: 741cleanup:
739 kfree(reply); 742 kfree(reply);
743out:
744 if (msg)
745 i2o_msg_nop(c, msg);
740 return rcode; 746 return rcode;
741} 747}
742 748
@@ -793,8 +799,6 @@ static int i2o_cfg_passthru(unsigned long arg)
793 return -ENXIO; 799 return -ENXIO;
794 } 800 }
795 801
796 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
797
798 sb = c->status_block.virt; 802 sb = c->status_block.virt;
799 803
800 if (get_user(size, &user_msg[0])) 804 if (get_user(size, &user_msg[0]))
@@ -810,12 +814,17 @@ static int i2o_cfg_passthru(unsigned long arg)
810 814
811 size <<= 2; // Convert to bytes 815 size <<= 2; // Convert to bytes
812 816
817 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
818 if (IS_ERR(msg))
819 return PTR_ERR(msg);
820
821 rcode = -EFAULT;
813 /* Copy in the user's I2O command */ 822 /* Copy in the user's I2O command */
814 if (copy_from_user(msg, user_msg, size)) 823 if (copy_from_user(msg, user_msg, size))
815 return -EFAULT; 824 goto out;
816 825
817 if (get_user(reply_size, &user_reply[0]) < 0) 826 if (get_user(reply_size, &user_reply[0]) < 0)
818 return -EFAULT; 827 goto out;
819 828
820 reply_size >>= 16; 829 reply_size >>= 16;
821 reply_size <<= 2; 830 reply_size <<= 2;
@@ -824,7 +833,8 @@ static int i2o_cfg_passthru(unsigned long arg)
824 if (!reply) { 833 if (!reply) {
825 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 834 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
826 c->name); 835 c->name);
827 return -ENOMEM; 836 rcode = -ENOMEM;
837 goto out;
828 } 838 }
829 839
830 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 840 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -891,13 +901,14 @@ static int i2o_cfg_passthru(unsigned long arg)
891 } 901 }
892 902
893 rcode = i2o_msg_post_wait(c, msg, 60); 903 rcode = i2o_msg_post_wait(c, msg, 60);
904 msg = NULL;
894 if (rcode) { 905 if (rcode) {
895 reply[4] = ((u32) rcode) << 24; 906 reply[4] = ((u32) rcode) << 24;
896 goto sg_list_cleanup; 907 goto sg_list_cleanup;
897 } 908 }
898 909
899 if (sg_offset) { 910 if (sg_offset) {
900 u32 msg[128]; 911 u32 rmsg[128];
901 /* Copy back the Scatter Gather buffers back to user space */ 912 /* Copy back the Scatter Gather buffers back to user space */
902 u32 j; 913 u32 j;
903 // TODO 64bit fix 914 // TODO 64bit fix
@@ -905,7 +916,7 @@ static int i2o_cfg_passthru(unsigned long arg)
905 int sg_size; 916 int sg_size;
906 917
907 // re-acquire the original message to handle correctly the sg copy operation 918 // re-acquire the original message to handle correctly the sg copy operation
908 memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); 919 memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
909 // get user msg size in u32s 920 // get user msg size in u32s
910 if (get_user(size, &user_msg[0])) { 921 if (get_user(size, &user_msg[0])) {
911 rcode = -EFAULT; 922 rcode = -EFAULT;
@@ -914,7 +925,7 @@ static int i2o_cfg_passthru(unsigned long arg)
914 size = size >> 16; 925 size = size >> 16;
915 size *= 4; 926 size *= 4;
916 /* Copy in the user's I2O command */ 927 /* Copy in the user's I2O command */
917 if (copy_from_user(msg, user_msg, size)) { 928 if (copy_from_user(rmsg, user_msg, size)) {
918 rcode = -EFAULT; 929 rcode = -EFAULT;
919 goto sg_list_cleanup; 930 goto sg_list_cleanup;
920 } 931 }
@@ -922,7 +933,7 @@ static int i2o_cfg_passthru(unsigned long arg)
922 (size - sg_offset * 4) / sizeof(struct sg_simple_element); 933 (size - sg_offset * 4) / sizeof(struct sg_simple_element);
923 934
924 // TODO 64bit fix 935 // TODO 64bit fix
925 sg = (struct sg_simple_element *)(msg + sg_offset); 936 sg = (struct sg_simple_element *)(rmsg + sg_offset);
926 for (j = 0; j < sg_count; j++) { 937 for (j = 0; j < sg_count; j++) {
927 /* Copy out the SG list to user's buffer if necessary */ 938 /* Copy out the SG list to user's buffer if necessary */
928 if (! 939 if (!
@@ -944,7 +955,7 @@ static int i2o_cfg_passthru(unsigned long arg)
944 } 955 }
945 } 956 }
946 957
947 sg_list_cleanup: 958sg_list_cleanup:
948 /* Copy back the reply to user space */ 959 /* Copy back the reply to user space */
949 if (reply_size) { 960 if (reply_size) {
950 // we wrote our own values for context - now restore the user supplied ones 961 // we wrote our own values for context - now restore the user supplied ones
@@ -964,8 +975,11 @@ static int i2o_cfg_passthru(unsigned long arg)
964 for (i = 0; i < sg_index; i++) 975 for (i = 0; i < sg_index; i++)
965 kfree(sg_list[i]); 976 kfree(sg_list[i]);
966 977
967 cleanup: 978cleanup:
968 kfree(reply); 979 kfree(reply);
980out:
981 if (msg)
982 i2o_msg_nop(c, msg);
969 return rcode; 983 return rcode;
970} 984}
971#endif 985#endif
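The passthru paths above now allocate the message only after the user-supplied sizes are validated, set msg = NULL once i2o_msg_post_wait() has consumed the frame, rename the shadowing on-stack buffer to rmsg, and route every early exit through out:, where a frame that is still held is returned with i2o_msg_nop(). A compact, self-contained sketch of that "hold the resource until it is handed off, otherwise cancel it at the single exit" pattern, with made-up helper names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up helpers standing in for i2o_msg_get_wait() /
 * i2o_msg_post_wait() / i2o_msg_nop(). */
static char *acquire_frame(void)   { return malloc(64); }
static int   post_frame(char *f)   { free(f); return 0; }  /* consumes f */
static void  cancel_frame(char *f) { free(f); }

static int passthru_like(const char *user_cmd)
{
	char *frame = NULL;
	char *reply = NULL;
	int rc;

	frame = acquire_frame();
	if (!frame)
		return -1;

	rc = -2;                        /* -EFAULT-style default */
	if (!user_cmd)
		goto out;               /* frame still ours: cancel it */
	strncpy(frame, user_cmd, 63);
	frame[63] = '\0';

	rc = -3;                        /* -ENOMEM-style default */
	reply = malloc(128);
	if (!reply)
		goto out;

	rc = post_frame(frame);
	frame = NULL;                   /* ownership handed off: don't cancel */

out:
	free(reply);
	if (frame)
		cancel_frame(frame);
	return rc;
}

int main(void)
{
	printf("%d\n", passthru_like("hello"));
	printf("%d\n", passthru_like(NULL));
	return 0;
}

Clearing the pointer at the hand-off is what lets one exit label serve both the early-error and the success paths without double-freeing.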
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 38e815a2e871..fdbaa776f249 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -209,6 +209,7 @@ static int ucb1x00_thread(void *_ts)
209 DECLARE_WAITQUEUE(wait, tsk); 209 DECLARE_WAITQUEUE(wait, tsk);
210 int valid = 0; 210 int valid = 0;
211 211
212 set_freezable();
212 add_wait_queue(&ts->irq_wait, &wait); 213 add_wait_queue(&ts->irq_wait, &wait);
213 while (!kthread_should_stop()) { 214 while (!kthread_should_stop()) {
214 unsigned int x, y, p; 215 unsigned int x, y, p;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a92b8728b90c..1d516f24ba53 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -10,7 +10,7 @@ if MISC_DEVICES
10 10
11config IBM_ASM 11config IBM_ASM
12 tristate "Device driver for IBM RSA service processor" 12 tristate "Device driver for IBM RSA service processor"
13 depends on X86 && PCI && EXPERIMENTAL 13 depends on X86 && PCI && INPUT && EXPERIMENTAL
14 ---help--- 14 ---help---
15 This option enables device driver support for in-band access to the 15 This option enables device driver support for in-band access to the
16 IBM RSA (Condor) service processor in eServer xSeries systems. 16 IBM RSA (Condor) service processor in eServer xSeries systems.
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 07a085ccbd5b..b5df347c81b9 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
@@ -72,7 +72,7 @@ struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_s
72static void free_command(struct kobject *kobj) 72static void free_command(struct kobject *kobj)
73{ 73{
74 struct command *cmd = to_command(kobj); 74 struct command *cmd = to_command(kobj);
75 75
76 list_del(&cmd->queue_node); 76 list_del(&cmd->queue_node);
77 atomic_dec(&command_count); 77 atomic_dec(&command_count);
78 dbg("command count: %d\n", atomic_read(&command_count)); 78 dbg("command count: %d\n", atomic_read(&command_count));
@@ -113,14 +113,14 @@ static inline void do_exec_command(struct service_processor *sp)
113 exec_next_command(sp); 113 exec_next_command(sp);
114 } 114 }
115} 115}
116 116
117/** 117/**
118 * exec_command 118 * exec_command
119 * send a command to a service processor 119 * send a command to a service processor
120 * Commands are executed sequentially. One command (sp->current_command) 120 * Commands are executed sequentially. One command (sp->current_command)
121 * is sent to the service processor. Once the interrupt handler gets a 121 * is sent to the service processor. Once the interrupt handler gets a
122 * message of type command_response, the message is copied into 122 * message of type command_response, the message is copied into
123 * the current commands buffer, 123 * the current commands buffer,
124 */ 124 */
125void ibmasm_exec_command(struct service_processor *sp, struct command *cmd) 125void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
126{ 126{
@@ -160,7 +160,7 @@ static void exec_next_command(struct service_processor *sp)
160 } 160 }
161} 161}
162 162
163/** 163/**
164 * Sleep until a command has failed or a response has been received 164 * Sleep until a command has failed or a response has been received
165 * and the command status been updated by the interrupt handler. 165 * and the command status been updated by the interrupt handler.
166 * (see receive_response). 166 * (see receive_response).
@@ -182,8 +182,8 @@ void ibmasm_receive_command_response(struct service_processor *sp, void *respons
182{ 182{
183 struct command *cmd = sp->current_command; 183 struct command *cmd = sp->current_command;
184 184
185 if (!sp->current_command) 185 if (!sp->current_command)
186 return; 186 return;
187 187
188 memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size)); 188 memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
189 cmd->status = IBMASM_CMD_COMPLETE; 189 cmd->status = IBMASM_CMD_COMPLETE;
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 13c52f866e2e..3dd2dfb8da17 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
@@ -44,11 +44,11 @@ void ibmasm_receive_message(struct service_processor *sp, void *message, int mes
44 size = message_size; 44 size = message_size;
45 45
46 switch (header->type) { 46 switch (header->type) {
47 case sp_event: 47 case sp_event:
48 ibmasm_receive_event(sp, message, size); 48 ibmasm_receive_event(sp, message, size);
49 break; 49 break;
50 case sp_command_response: 50 case sp_command_response:
51 ibmasm_receive_command_response(sp, message, size); 51 ibmasm_receive_command_response(sp, message, size);
52 break; 52 break;
53 case sp_heartbeat: 53 case sp_heartbeat:
54 ibmasm_receive_heartbeat(sp, message, size); 54 ibmasm_receive_heartbeat(sp, message, size);
@@ -95,7 +95,7 @@ int ibmasm_send_driver_vpd(struct service_processor *sp)
95 strcat(vpd_data, IBMASM_DRIVER_VPD); 95 strcat(vpd_data, IBMASM_DRIVER_VPD);
96 vpd_data[10] = 0; 96 vpd_data[10] = 0;
97 vpd_data[15] = 0; 97 vpd_data[15] = 0;
98 98
99 ibmasm_exec_command(sp, command); 99 ibmasm_exec_command(sp, command);
100 ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL); 100 ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
101 101
@@ -118,7 +118,7 @@ struct os_state_command {
118 * During driver init this function is called with os state "up". 118 * During driver init this function is called with os state "up".
119 * This causes the service processor to start sending heartbeats the 119 * This causes the service processor to start sending heartbeats the
120 * driver. 120 * driver.
121 * During driver exit the function is called with os state "down", 121 * During driver exit the function is called with os state "down",
122 * causing the service processor to stop the heartbeats. 122 * causing the service processor to stop the heartbeats.
123 */ 123 */
124int ibmasm_send_os_state(struct service_processor *sp, int os_state) 124int ibmasm_send_os_state(struct service_processor *sp, int os_state)
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 2d21c2741b6a..6cbba1afef35 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index fe1e819235a4..fda6a4d3bf23 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
@@ -51,7 +51,7 @@ static void wake_up_event_readers(struct service_processor *sp)
51 * event readers. 51 * event readers.
52 * There is no reader marker in the buffer, therefore readers are 52 * There is no reader marker in the buffer, therefore readers are
53 * responsible for keeping up with the writer, or they will loose events. 53 * responsible for keeping up with the writer, or they will loose events.
54 */ 54 */
55void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size) 55void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size)
56{ 56{
57 struct event_buffer *buffer = sp->event_buffer; 57 struct event_buffer *buffer = sp->event_buffer;
@@ -77,13 +77,13 @@ void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int
77 77
78static inline int event_available(struct event_buffer *b, struct event_reader *r) 78static inline int event_available(struct event_buffer *b, struct event_reader *r)
79{ 79{
80 return (r->next_serial_number < b->next_serial_number); 80 return (r->next_serial_number < b->next_serial_number);
81} 81}
82 82
83/** 83/**
84 * get_next_event 84 * get_next_event
85 * Called by event readers (initiated from user space through the file 85 * Called by event readers (initiated from user space through the file
86 * system). 86 * system).
87 * Sleeps until a new event is available. 87 * Sleeps until a new event is available.
88 */ 88 */
89int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader) 89int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader)
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 7fd7a43e38de..3036e785b3e4 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index 958c957a5e75..bf2c738d2b72 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
@@ -26,9 +26,9 @@ struct i2o_header {
26 u8 version; 26 u8 version;
27 u8 message_flags; 27 u8 message_flags;
28 u16 message_size; 28 u16 message_size;
29 u8 target; 29 u8 target;
30 u8 initiator_and_target; 30 u8 initiator_and_target;
31 u8 initiator; 31 u8 initiator;
32 u8 function; 32 u8 function;
33 u32 initiator_context; 33 u32 initiator_context;
34}; 34};
@@ -64,12 +64,12 @@ static inline unsigned short outgoing_message_size(unsigned int data_size)
64 size = sizeof(struct i2o_header) + data_size; 64 size = sizeof(struct i2o_header) + data_size;
65 65
66 i2o_size = size / sizeof(u32); 66 i2o_size = size / sizeof(u32);
67 67
68 if (size % sizeof(u32)) 68 if (size % sizeof(u32))
69 i2o_size++; 69 i2o_size++;
70 70
71 return i2o_size; 71 return i2o_size;
72} 72}
73 73
74static inline u32 incoming_data_size(struct i2o_message *i2o_message) 74static inline u32 incoming_data_size(struct i2o_message *i2o_message)
75{ 75{
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 48d5abebfc30..de860bc6d3f5 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
@@ -58,8 +58,8 @@ static inline char *get_timestamp(char *buf)
58 return buf; 58 return buf;
59} 59}
60 60
61#define IBMASM_CMD_PENDING 0 61#define IBMASM_CMD_PENDING 0
62#define IBMASM_CMD_COMPLETE 1 62#define IBMASM_CMD_COMPLETE 1
63#define IBMASM_CMD_FAILED 2 63#define IBMASM_CMD_FAILED 2
64 64
65#define IBMASM_CMD_TIMEOUT_NORMAL 45 65#define IBMASM_CMD_TIMEOUT_NORMAL 45
@@ -163,55 +163,55 @@ struct service_processor {
163}; 163};
164 164
165/* command processing */ 165/* command processing */
166extern struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size); 166struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
167extern void ibmasm_exec_command(struct service_processor *sp, struct command *cmd); 167void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
168extern void ibmasm_wait_for_response(struct command *cmd, int timeout); 168void ibmasm_wait_for_response(struct command *cmd, int timeout);
169extern void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size); 169void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size);
170 170
171/* event processing */ 171/* event processing */
172extern int ibmasm_event_buffer_init(struct service_processor *sp); 172int ibmasm_event_buffer_init(struct service_processor *sp);
173extern void ibmasm_event_buffer_exit(struct service_processor *sp); 173void ibmasm_event_buffer_exit(struct service_processor *sp);
174extern void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size); 174void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size);
175extern void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader); 175void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
176extern void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader); 176void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
177extern int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader); 177int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
178extern void ibmasm_cancel_next_event(struct event_reader *reader); 178void ibmasm_cancel_next_event(struct event_reader *reader);
179 179
180/* heartbeat - from SP to OS */ 180/* heartbeat - from SP to OS */
181extern void ibmasm_register_panic_notifier(void); 181void ibmasm_register_panic_notifier(void);
182extern void ibmasm_unregister_panic_notifier(void); 182void ibmasm_unregister_panic_notifier(void);
183extern int ibmasm_heartbeat_init(struct service_processor *sp); 183int ibmasm_heartbeat_init(struct service_processor *sp);
184extern void ibmasm_heartbeat_exit(struct service_processor *sp); 184void ibmasm_heartbeat_exit(struct service_processor *sp);
185extern void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size); 185void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size);
186 186
187/* reverse heartbeat - from OS to SP */ 187/* reverse heartbeat - from OS to SP */
188extern void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb); 188void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
189extern int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb); 189int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
190extern void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb); 190void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
191 191
192/* dot commands */ 192/* dot commands */
193extern void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size); 193void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
194extern int ibmasm_send_driver_vpd(struct service_processor *sp); 194int ibmasm_send_driver_vpd(struct service_processor *sp);
195extern int ibmasm_send_os_state(struct service_processor *sp, int os_state); 195int ibmasm_send_os_state(struct service_processor *sp, int os_state);
196 196
197/* low level message processing */ 197/* low level message processing */
198extern int ibmasm_send_i2o_message(struct service_processor *sp); 198int ibmasm_send_i2o_message(struct service_processor *sp);
199extern irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id); 199irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id);
200 200
201/* remote console */ 201/* remote console */
202extern void ibmasm_handle_mouse_interrupt(struct service_processor *sp); 202void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
203extern int ibmasm_init_remote_input_dev(struct service_processor *sp); 203int ibmasm_init_remote_input_dev(struct service_processor *sp);
204extern void ibmasm_free_remote_input_dev(struct service_processor *sp); 204void ibmasm_free_remote_input_dev(struct service_processor *sp);
205 205
206/* file system */ 206/* file system */
207extern int ibmasmfs_register(void); 207int ibmasmfs_register(void);
208extern void ibmasmfs_unregister(void); 208void ibmasmfs_unregister(void);
209extern void ibmasmfs_add_sp(struct service_processor *sp); 209void ibmasmfs_add_sp(struct service_processor *sp);
210 210
211/* uart */ 211/* uart */
212#ifdef CONFIG_SERIAL_8250 212#ifdef CONFIG_SERIAL_8250
213extern void ibmasm_register_uart(struct service_processor *sp); 213void ibmasm_register_uart(struct service_processor *sp);
214extern void ibmasm_unregister_uart(struct service_processor *sp); 214void ibmasm_unregister_uart(struct service_processor *sp);
215#else 215#else
216#define ibmasm_register_uart(sp) do { } while(0) 216#define ibmasm_register_uart(sp) do { } while(0)
217#define ibmasm_unregister_uart(sp) do { } while(0) 217#define ibmasm_unregister_uart(sp) do { } while(0)
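The ibmasm.h hunk drops extern from the function prototypes; for function declarations extern is implicit, so the change is purely cosmetic. Taking one prototype from the header as an example:

/* Both declarations are identical to the compiler: external linkage is
 * the default for functions, so the extern keyword adds nothing. */
extern void ibmasm_register_panic_notifier(void);
void ibmasm_register_panic_notifier(void);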
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c436d3de8b8b..eb7b073734b8 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,12 +17,12 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
24/* 24/*
25 * Parts of this code are based on an article by Jonathan Corbet 25 * Parts of this code are based on an article by Jonathan Corbet
26 * that appeared in Linux Weekly News. 26 * that appeared in Linux Weekly News.
27 */ 27 */
28 28
@@ -55,22 +55,22 @@
55 * For each service processor the following files are created: 55 * For each service processor the following files are created:
56 * 56 *
57 * command: execute dot commands 57 * command: execute dot commands
58 * write: execute a dot command on the service processor 58 * write: execute a dot command on the service processor
59 * read: return the result of a previously executed dot command 59 * read: return the result of a previously executed dot command
60 * 60 *
61 * events: listen for service processor events 61 * events: listen for service processor events
62 * read: sleep (interruptible) until an event occurs 62 * read: sleep (interruptible) until an event occurs
63 * write: wakeup sleeping event listener 63 * write: wakeup sleeping event listener
64 * 64 *
65 * reverse_heartbeat: send a heartbeat to the service processor 65 * reverse_heartbeat: send a heartbeat to the service processor
66 * read: sleep (interruptible) until the reverse heartbeat fails 66 * read: sleep (interruptible) until the reverse heartbeat fails
67 * write: wakeup sleeping heartbeat listener 67 * write: wakeup sleeping heartbeat listener
68 * 68 *
69 * remote_video/width 69 * remote_video/width
70 * remote_video/height 70 * remote_video/height
71 * remote_video/width: control remote display settings 71 * remote_video/width: control remote display settings
72 * write: set value 72 * write: set value
73 * read: read value 73 * read: read value
74 */ 74 */
75 75
76#include <linux/fs.h> 76#include <linux/fs.h>
@@ -155,7 +155,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
155 155
156static struct dentry *ibmasmfs_create_file (struct super_block *sb, 156static struct dentry *ibmasmfs_create_file (struct super_block *sb,
157 struct dentry *parent, 157 struct dentry *parent,
158 const char *name, 158 const char *name,
159 const struct file_operations *fops, 159 const struct file_operations *fops,
160 void *data, 160 void *data,
161 int mode) 161 int mode)
@@ -261,7 +261,7 @@ static int command_file_close(struct inode *inode, struct file *file)
261 struct ibmasmfs_command_data *command_data = file->private_data; 261 struct ibmasmfs_command_data *command_data = file->private_data;
262 262
263 if (command_data->command) 263 if (command_data->command)
264 command_put(command_data->command); 264 command_put(command_data->command);
265 265
266 kfree(command_data); 266 kfree(command_data);
267 return 0; 267 return 0;
@@ -348,7 +348,7 @@ static ssize_t command_file_write(struct file *file, const char __user *ubuff, s
348static int event_file_open(struct inode *inode, struct file *file) 348static int event_file_open(struct inode *inode, struct file *file)
349{ 349{
350 struct ibmasmfs_event_data *event_data; 350 struct ibmasmfs_event_data *event_data;
351 struct service_processor *sp; 351 struct service_processor *sp;
352 352
353 if (!inode->i_private) 353 if (!inode->i_private)
354 return -ENODEV; 354 return -ENODEV;
@@ -573,7 +573,7 @@ static ssize_t remote_settings_file_write(struct file *file, const char __user *
573 kfree(buff); 573 kfree(buff);
574 return -EFAULT; 574 return -EFAULT;
575 } 575 }
576 576
577 value = simple_strtoul(buff, NULL, 10); 577 value = simple_strtoul(buff, NULL, 10);
578 writel(value, address); 578 writel(value, address);
579 kfree(buff); 579 kfree(buff);
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index a3c589b7cbfa..4b2398e27fd5 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index e5ed59c589aa..766766523a60 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
@@ -48,9 +48,9 @@
48#define INTR_CONTROL_REGISTER 0x13A4 48#define INTR_CONTROL_REGISTER 0x13A4
49 49
50#define SCOUT_COM_A_BASE 0x0000 50#define SCOUT_COM_A_BASE 0x0000
51#define SCOUT_COM_B_BASE 0x0100 51#define SCOUT_COM_B_BASE 0x0100
52#define SCOUT_COM_C_BASE 0x0200 52#define SCOUT_COM_C_BASE 0x0200
53#define SCOUT_COM_D_BASE 0x0300 53#define SCOUT_COM_D_BASE 0x0300
54 54
55static inline int sp_interrupt_pending(void __iomem *base_address) 55static inline int sp_interrupt_pending(void __iomem *base_address)
56{ 56{
@@ -86,12 +86,12 @@ static inline void disable_sp_interrupts(void __iomem *base_address)
86 86
87static inline void enable_uart_interrupts(void __iomem *base_address) 87static inline void enable_uart_interrupts(void __iomem *base_address)
88{ 88{
89 ibmasm_enable_interrupts(base_address, UART_INTR_MASK); 89 ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
90} 90}
91 91
92static inline void disable_uart_interrupts(void __iomem *base_address) 92static inline void disable_uart_interrupts(void __iomem *base_address)
93{ 93{
94 ibmasm_disable_interrupts(base_address, UART_INTR_MASK); 94 ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
95} 95}
96 96
97#define valid_mfa(mfa) ( (mfa) != NO_MFAS_AVAILABLE ) 97#define valid_mfa(mfa) ( (mfa) != NO_MFAS_AVAILABLE )
@@ -111,7 +111,7 @@ static inline u32 get_mfa_outbound(void __iomem *base_address)
111 111
112static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa) 112static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa)
113{ 113{
114 writel(mfa, base_address + OUTBOUND_QUEUE_PORT); 114 writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
115} 115}
116 116
117static inline u32 get_mfa_inbound(void __iomem *base_address) 117static inline u32 get_mfa_inbound(void __iomem *base_address)
@@ -126,7 +126,7 @@ static inline u32 get_mfa_inbound(void __iomem *base_address)
126 126
127static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa) 127static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa)
128{ 128{
129 writel(mfa, base_address + INBOUND_QUEUE_PORT); 129 writel(mfa, base_address + INBOUND_QUEUE_PORT);
130} 130}
131 131
132static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa) 132static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa)
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 2f3bddfab937..fb03a853fac4 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,9 +18,9 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 * This driver is based on code originally written by Pete Reynolds 23 * This driver is based on code originally written by Pete Reynolds
24 * and others. 24 * and others.
25 * 25 *
26 */ 26 */
@@ -30,13 +30,13 @@
30 * 30 *
31 * 1) When loaded it sends a message to the service processor, 31 * 1) When loaded it sends a message to the service processor,
32 * indicating that an OS is * running. This causes the service processor 32 * indicating that an OS is * running. This causes the service processor
33 * to send periodic heartbeats to the OS. 33 * to send periodic heartbeats to the OS.
34 * 34 *
35 * 2) Answers the periodic heartbeats sent by the service processor. 35 * 2) Answers the periodic heartbeats sent by the service processor.
36 * Failure to do so would result in system reboot. 36 * Failure to do so would result in system reboot.
37 * 37 *
38 * 3) Acts as a pass through for dot commands sent from user applications. 38 * 3) Acts as a pass through for dot commands sent from user applications.
39 * The interface for this is the ibmasmfs file system. 39 * The interface for this is the ibmasmfs file system.
40 * 40 *
41 * 4) Allows user applications to register for event notification. Events 41 * 4) Allows user applications to register for event notification. Events
42 * are sent to the driver through interrupts. They can be read from user 42 * are sent to the driver through interrupts. They can be read from user
@@ -105,7 +105,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
105 } 105 }
106 106
107 sp->irq = pdev->irq; 107 sp->irq = pdev->irq;
108 sp->base_address = ioremap(pci_resource_start(pdev, 0), 108 sp->base_address = ioremap(pci_resource_start(pdev, 0),
109 pci_resource_len(pdev, 0)); 109 pci_resource_len(pdev, 0));
110 if (sp->base_address == 0) { 110 if (sp->base_address == 0) {
111 dev_err(sp->dev, "Failed to ioremap pci memory\n"); 111 dev_err(sp->dev, "Failed to ioremap pci memory\n");
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index f8fdb2d5417e..bec9e2c44bef 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
16 * 16 *
17 * Copyright (C) IBM Corporation, 2004 17 * Copyright (C) IBM Corporation, 2004
18 * 18 *
19 * Author: Max Asböck <amax@us.ibm.com> 19 * Author: Max Asböck <amax@us.ibm.com>
20 * 20 *
21 */ 21 */
22 22
@@ -36,10 +36,10 @@ static struct {
36 unsigned char command[3]; 36 unsigned char command[3];
37} rhb_dot_cmd = { 37} rhb_dot_cmd = {
38 .header = { 38 .header = {
39 .type = sp_read, 39 .type = sp_read,
40 .command_size = 3, 40 .command_size = 3,
41 .data_size = 0, 41 .data_size = 0,
42 .status = 0 42 .status = 0
43 }, 43 },
44 .command = { 4, 3, 6 } 44 .command = { 4, 3, 6 }
45}; 45};
@@ -76,9 +76,9 @@ int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_
76 if (cmd->status != IBMASM_CMD_COMPLETE) 76 if (cmd->status != IBMASM_CMD_COMPLETE)
77 times_failed++; 77 times_failed++;
78 78
79 wait_event_interruptible_timeout(rhb->wait, 79 wait_event_interruptible_timeout(rhb->wait,
80 rhb->stopped, 80 rhb->stopped,
81 REVERSE_HEARTBEAT_TIMEOUT * HZ); 81 REVERSE_HEARTBEAT_TIMEOUT * HZ);
82 82
83 if (signal_pending(current) || rhb->stopped) { 83 if (signal_pending(current) || rhb->stopped) {
84 result = -EINTR; 84 result = -EINTR;
diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c
index a40fda6c402c..0550ce075fc4 100644
--- a/drivers/misc/ibmasm/remote.c
+++ b/drivers/misc/ibmasm/remote.c
@@ -28,11 +28,10 @@
28#include "ibmasm.h" 28#include "ibmasm.h"
29#include "remote.h" 29#include "remote.h"
30 30
31static int xmax = 1600; 31#define MOUSE_X_MAX 1600
32static int ymax = 1200; 32#define MOUSE_Y_MAX 1200
33 33
34 34static const unsigned short xlate_high[XLATE_SIZE] = {
35static unsigned short xlate_high[XLATE_SIZE] = {
36 [KEY_SYM_ENTER & 0xff] = KEY_ENTER, 35 [KEY_SYM_ENTER & 0xff] = KEY_ENTER,
37 [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH, 36 [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH,
38 [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK, 37 [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK,
@@ -81,7 +80,8 @@ static unsigned short xlate_high[XLATE_SIZE] = {
81 [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK, 80 [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK,
82 [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK, 81 [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK,
83}; 82};
84static unsigned short xlate[XLATE_SIZE] = { 83
84static const unsigned short xlate[XLATE_SIZE] = {
85 [NO_KEYCODE] = KEY_RESERVED, 85 [NO_KEYCODE] = KEY_RESERVED,
86 [KEY_SYM_SPACE] = KEY_SPACE, 86 [KEY_SYM_SPACE] = KEY_SPACE,
87 [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE, 87 [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE,
@@ -133,19 +133,16 @@ static unsigned short xlate[XLATE_SIZE] = {
133 [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z, 133 [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z,
134}; 134};
135 135
136static char remote_mouse_name[] = "ibmasm RSA I remote mouse";
137static char remote_keybd_name[] = "ibmasm RSA I remote keyboard";
138
139static void print_input(struct remote_input *input) 136static void print_input(struct remote_input *input)
140{ 137{
141 if (input->type == INPUT_TYPE_MOUSE) { 138 if (input->type == INPUT_TYPE_MOUSE) {
142 unsigned char buttons = input->mouse_buttons; 139 unsigned char buttons = input->mouse_buttons;
143 dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n", 140 dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n",
144 input->data.mouse.x, input->data.mouse.y, 141 input->data.mouse.x, input->data.mouse.y,
145 (buttons)?" -- buttons:":"", 142 (buttons) ? " -- buttons:" : "",
146 (buttons & REMOTE_BUTTON_LEFT)?"left ":"", 143 (buttons & REMOTE_BUTTON_LEFT) ? "left " : "",
147 (buttons & REMOTE_BUTTON_MIDDLE)?"middle ":"", 144 (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "",
148 (buttons & REMOTE_BUTTON_RIGHT)?"right":"" 145 (buttons & REMOTE_BUTTON_RIGHT) ? "right" : ""
149 ); 146 );
150 } else { 147 } else {
151 dbg("remote keypress (code, flag, down):" 148 dbg("remote keypress (code, flag, down):"
@@ -180,7 +177,7 @@ static void send_keyboard_event(struct input_dev *dev,
180 key = xlate_high[code & 0xff]; 177 key = xlate_high[code & 0xff];
181 else 178 else
182 key = xlate[code]; 179 key = xlate[code];
183 input_report_key(dev, key, (input->data.keyboard.key_down) ? 1 : 0); 180 input_report_key(dev, key, input->data.keyboard.key_down);
184 input_sync(dev); 181 input_sync(dev);
185} 182}
186 183
@@ -228,20 +225,22 @@ int ibmasm_init_remote_input_dev(struct service_processor *sp)
228 mouse_dev->id.vendor = pdev->vendor; 225 mouse_dev->id.vendor = pdev->vendor;
229 mouse_dev->id.product = pdev->device; 226 mouse_dev->id.product = pdev->device;
230 mouse_dev->id.version = 1; 227 mouse_dev->id.version = 1;
228 mouse_dev->dev.parent = sp->dev;
231 mouse_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); 229 mouse_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
232 mouse_dev->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) | 230 mouse_dev->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) |
233 BIT(BTN_RIGHT) | BIT(BTN_MIDDLE); 231 BIT(BTN_RIGHT) | BIT(BTN_MIDDLE);
234 set_bit(BTN_TOUCH, mouse_dev->keybit); 232 set_bit(BTN_TOUCH, mouse_dev->keybit);
235 mouse_dev->name = remote_mouse_name; 233 mouse_dev->name = "ibmasm RSA I remote mouse";
236 input_set_abs_params(mouse_dev, ABS_X, 0, xmax, 0, 0); 234 input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0);
237 input_set_abs_params(mouse_dev, ABS_Y, 0, ymax, 0, 0); 235 input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0);
238 236
239 mouse_dev->id.bustype = BUS_PCI; 237 keybd_dev->id.bustype = BUS_PCI;
240 keybd_dev->id.vendor = pdev->vendor; 238 keybd_dev->id.vendor = pdev->vendor;
241 keybd_dev->id.product = pdev->device; 239 keybd_dev->id.product = pdev->device;
242 mouse_dev->id.version = 2; 240 keybd_dev->id.version = 2;
241 keybd_dev->dev.parent = sp->dev;
243 keybd_dev->evbit[0] = BIT(EV_KEY); 242 keybd_dev->evbit[0] = BIT(EV_KEY);
244 keybd_dev->name = remote_keybd_name; 243 keybd_dev->name = "ibmasm RSA I remote keyboard";
245 244
246 for (i = 0; i < XLATE_SIZE; i++) { 245 for (i = 0; i < XLATE_SIZE; i++) {
247 if (xlate_high[i]) 246 if (xlate_high[i])
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index b7076a8442d2..72acf5af7a2a 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 * Originally written by Pete Reynolds 23 * Originally written by Pete Reynolds
24 */ 24 */
@@ -73,7 +73,7 @@ struct keyboard_input {
73 73
74 74
75 75
76struct remote_input { 76struct remote_input {
77 union { 77 union {
78 struct mouse_input mouse; 78 struct mouse_input mouse;
79 struct keyboard_input keyboard; 79 struct keyboard_input keyboard;
@@ -85,7 +85,7 @@ struct remote_input {
85 unsigned char pad3; 85 unsigned char pad3;
86}; 86};
87 87
88#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA) 88#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA)
89#define display_width(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX) 89#define display_width(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX)
90#define display_height(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY) 90#define display_height(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY)
91#define display_depth(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS) 91#define display_depth(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS)
@@ -93,7 +93,7 @@ struct remote_input {
93#define vnc_status(sp) (mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS) 93#define vnc_status(sp) (mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS)
94#define isr_control(sp) (mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL) 94#define isr_control(sp) (mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
95 95
96#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS) 96#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
97#define clear_mouse_interrupt(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS) 97#define clear_mouse_interrupt(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
98#define enable_mouse_interrupts(sp) writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL) 98#define enable_mouse_interrupts(sp) writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
99#define disable_mouse_interrupts(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL) 99#define disable_mouse_interrupts(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 9783caf49696..93baa350d698 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4fb2089dc690..b53dac8d1b69 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -11,6 +11,7 @@
11 */ 11 */
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/blkdev.h> 13#include <linux/blkdev.h>
14#include <linux/freezer.h>
14#include <linux/kthread.h> 15#include <linux/kthread.h>
15 16
16#include <linux/mmc/card.h> 17#include <linux/mmc/card.h>
@@ -44,11 +45,7 @@ static int mmc_queue_thread(void *d)
44 struct mmc_queue *mq = d; 45 struct mmc_queue *mq = d;
45 struct request_queue *q = mq->queue; 46 struct request_queue *q = mq->queue;
46 47
47 /* 48 current->flags |= PF_MEMALLOC;
48 * Set iothread to ensure that we aren't put to sleep by
49 * the process freezing. We handle suspension ourselves.
50 */
51 current->flags |= PF_MEMALLOC|PF_NOFREEZE;
52 49
53 down(&mq->thread_sem); 50 down(&mq->thread_sem);
54 do { 51 do {
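Editor's note: the mmc, mtd, ubi and wireless hunks in this series all make the same change, dropping PF_NOFREEZE and opting the kernel thread into the freezer explicitly. A minimal sketch of the resulting pattern (my_thread_fn() and the work placeholder are illustrative, not code from the patch):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Sketch of a freezable kernel thread after this series. */
static int my_thread_fn(void *data)
{
	set_freezable();			/* opt in to the freezer */

	while (!kthread_should_stop()) {
		try_to_freeze();		/* park here across suspend/resume */

		/* ... perform one unit of work, then sleep ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}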
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 51bc7e2f1f22..ef89780eb9d6 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -16,6 +16,7 @@
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/blkdev.h> 17#include <linux/blkdev.h>
18#include <linux/blkpg.h> 18#include <linux/blkpg.h>
19#include <linux/freezer.h>
19#include <linux/spinlock.h> 20#include <linux/spinlock.h>
20#include <linux/hdreg.h> 21#include <linux/hdreg.h>
21#include <linux/init.h> 22#include <linux/init.h>
@@ -80,7 +81,7 @@ static int mtd_blktrans_thread(void *arg)
80 struct request_queue *rq = tr->blkcore_priv->rq; 81 struct request_queue *rq = tr->blkcore_priv->rq;
81 82
82 /* we might get involved when memory gets low, so use PF_MEMALLOC */ 83 /* we might get involved when memory gets low, so use PF_MEMALLOC */
83 current->flags |= PF_MEMALLOC | PF_NOFREEZE; 84 current->flags |= PF_MEMALLOC;
84 85
85 spin_lock_irq(rq->queue_lock); 86 spin_lock_irq(rq->queue_lock);
86 while (!kthread_should_stop()) { 87 while (!kthread_should_stop()) {
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 74002945b71b..7c6b223b3f8a 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -368,7 +368,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
368 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); 368 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
369 struct ubi_vid_hdr *vid_hdr; 369 struct ubi_vid_hdr *vid_hdr;
370 struct ubi_volume *vol = ubi->volumes[idx]; 370 struct ubi_volume *vol = ubi->volumes[idx];
371 uint32_t crc, crc1; 371 uint32_t uninitialized_var(crc);
372 372
373 err = leb_read_lock(ubi, vol_id, lnum); 373 err = leb_read_lock(ubi, vol_id, lnum);
374 if (err) 374 if (err)
@@ -451,7 +451,7 @@ retry:
451 } 451 }
452 452
453 if (check) { 453 if (check) {
454 crc1 = crc32(UBI_CRC32_INIT, buf, len); 454 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
455 if (crc1 != crc) { 455 if (crc1 != crc) {
456 ubi_warn("CRC error: calculated %#08x, must be %#08x", 456 ubi_warn("CRC error: calculated %#08x, must be %#08x",
457 crc1, crc); 457 crc1, crc);
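Editor's note: this hunk (and the similar r8169 and smctr hunks below) uses uninitialized_var() purely to silence a false "may be used uninitialized" warning; the value is still only read on paths that set it. A rough sketch of the idea, where the macro body shown is the gcc definition assumed from the kernel headers of this period and demo() is illustrative:

/* Assumed gcc definition from <linux/compiler-gcc.h> of this era. */
#define uninitialized_var(x) x = x

static unsigned int demo(int check)
{
	unsigned int uninitialized_var(crc);	/* expands to: crc = crc; */

	if (check)
		crc = 0xdeadbeef;		/* the only path that writes crc */

	return check ? crc : 0;			/* crc is never read uninitialized */
}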
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9ecaf77eca9e..ab2174a56bc2 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1346,6 +1346,7 @@ static int ubi_thread(void *u)
1346 ubi_msg("background thread \"%s\" started, PID %d", 1346 ubi_msg("background thread \"%s\" started, PID %d",
1347 ubi->bgt_name, current->pid); 1347 ubi->bgt_name, current->pid);
1348 1348
1349 set_freezable();
1349 for (;;) { 1350 for (;;) {
1350 int err; 1351 int err;
1351 1352
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 4a18b881ae9a..fd1e156f1747 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -75,6 +75,7 @@
75#include <linux/compiler.h> 75#include <linux/compiler.h>
76#include <linux/delay.h> 76#include <linux/delay.h>
77#include <linux/mii.h> 77#include <linux/mii.h>
78#include <linux/interrupt.h>
78#include <net/checksum.h> 79#include <net/checksum.h>
79 80
80#include <asm/atomic.h> 81#include <asm/atomic.h>
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9afa47edfc58..3c54014acece 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -2292,10 +2292,15 @@ static int eepro100_resume(struct pci_dev *pdev)
2292 struct net_device *dev = pci_get_drvdata (pdev); 2292 struct net_device *dev = pci_get_drvdata (pdev);
2293 struct speedo_private *sp = netdev_priv(dev); 2293 struct speedo_private *sp = netdev_priv(dev);
2294 void __iomem *ioaddr = sp->regs; 2294 void __iomem *ioaddr = sp->regs;
2295 int rc;
2295 2296
2296 pci_set_power_state(pdev, PCI_D0); 2297 pci_set_power_state(pdev, PCI_D0);
2297 pci_restore_state(pdev); 2298 pci_restore_state(pdev);
2298 pci_enable_device(pdev); 2299
2300 rc = pci_enable_device(pdev);
2301 if (rc)
2302 return rc;
2303
2299 pci_set_master(pdev); 2304 pci_set_master(pdev);
2300 2305
2301 if (!netif_running(dev)) 2306 if (!netif_running(dev))
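Editor's note: both network resume hunks (eepro100 here, ne2k-pci below) follow the same shape; pci_enable_device() can fail on resume, so its return value has to be propagated instead of ignored. A condensed sketch of that pattern, with my_resume() as a placeholder name:

static int my_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);	/* may fail, e.g. if resources are gone */
	if (rc)
		return rc;		/* propagate instead of silently continuing */

	pci_set_master(pdev);
	/* ... reinitialize and netif_device_attach() as the driver requires ... */
	return 0;
}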
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 3450051ae56b..6bb48ba80964 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -671,7 +671,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \
671#define NATSEMI_CREATE_FILE(_dev, _name) \ 671#define NATSEMI_CREATE_FILE(_dev, _name) \
672 device_create_file(&_dev->dev, &dev_attr_##_name) 672 device_create_file(&_dev->dev, &dev_attr_##_name)
673#define NATSEMI_REMOVE_FILE(_dev, _name) \ 673#define NATSEMI_REMOVE_FILE(_dev, _name) \
674 device_create_file(&_dev->dev, &dev_attr_##_name) 674 device_remove_file(&_dev->dev, &dev_attr_##_name)
675 675
676NATSEMI_ATTR(dspcfg_workaround); 676NATSEMI_ATTR(dspcfg_workaround);
677 677
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 995c0a5d4066..cfdeaf7aa163 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -669,10 +669,15 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
669static int ne2k_pci_resume (struct pci_dev *pdev) 669static int ne2k_pci_resume (struct pci_dev *pdev)
670{ 670{
671 struct net_device *dev = pci_get_drvdata (pdev); 671 struct net_device *dev = pci_get_drvdata (pdev);
672 int rc;
672 673
673 pci_set_power_state(pdev, 0); 674 pci_set_power_state(pdev, 0);
674 pci_restore_state(pdev); 675 pci_restore_state(pdev);
675 pci_enable_device(pdev); 676
677 rc = pci_enable_device(pdev);
678 if (rc)
679 return rc;
680
676 NS8390_init(dev, 1); 681 NS8390_init(dev, 1);
677 netif_device_attach(dev); 682 netif_device_attach(dev);
678 683
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 982a9010c7a9..bb6896ae3151 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2338,7 +2338,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
2338{ 2338{
2339 struct skb_shared_info *info = skb_shinfo(skb); 2339 struct skb_shared_info *info = skb_shinfo(skb);
2340 unsigned int cur_frag, entry; 2340 unsigned int cur_frag, entry;
2341 struct TxDesc *txd; 2341 struct TxDesc * uninitialized_var(txd);
2342 2342
2343 entry = tp->cur_tx; 2343 entry = tp->cur_tx;
2344 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { 2344 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 58d7e5d452fa..f83bb5cb0d3d 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3692,7 +3692,6 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3692 __u16 rcode, correlator; 3692 __u16 rcode, correlator;
3693 int err = 0; 3693 int err = 0;
3694 __u8 xframe = 1; 3694 __u8 xframe = 1;
3695 __u16 tx_fstatus;
3696 3695
3697 rmf->vl = SWAP_BYTES(rmf->vl); 3696 rmf->vl = SWAP_BYTES(rmf->vl);
3698 if(rx_status & FCB_RX_STATUS_DA_MATCHED) 3697 if(rx_status & FCB_RX_STATUS_DA_MATCHED)
@@ -3783,7 +3782,9 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3783 } 3782 }
3784 break; 3783 break;
3785 3784
3786 case TX_FORWARD: 3785 case TX_FORWARD: {
3786 __u16 uninitialized_var(tx_fstatus);
3787
3787 if((rcode = smctr_rcv_tx_forward(dev, rmf)) 3788 if((rcode = smctr_rcv_tx_forward(dev, rmf))
3788 != POSITIVE_ACK) 3789 != POSITIVE_ACK)
3789 { 3790 {
@@ -3811,6 +3812,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3811 } 3812 }
3812 } 3813 }
3813 break; 3814 break;
3815 }
3814 3816
3815 /* Received MAC Frames Processed by CRS/REM/RPS. */ 3817 /* Received MAC Frames Processed by CRS/REM/RPS. */
3816 case RSP: 3818 case RSP:
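Editor's note: the smctr change scopes tx_fstatus into the TX_FORWARD arm instead of declaring it, unused on every other path, at function scope, which also quiets the bogus "uninitialized" warning. The idiom in isolation (handle(), op and the zero initializer are placeholders, not code from the patch):

static void handle(int op)
{
	switch (op) {
	case TX_FORWARD: {		/* braces open a scope local to this arm */
		__u16 tx_fstatus = 0;	/* lives only inside the braces */

		/* ... fetch and forward tx_fstatus here ... */
		break;
	}
	default:
		break;
	}
}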
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index ec1c556a47ca..5d8c78ee2cd9 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -2833,6 +2833,8 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
2833 int br, tc; 2833 int br, tc;
2834 int br_pwr, error; 2834 int br_pwr, error;
2835 2835
2836 *br_io = 0;
2837
2836 if (rate == 0) 2838 if (rate == 0)
2837 return (0); 2839 return (0);
2838 2840
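Editor's note: clock_rate_calc() reports part of its result through br_io, but previously returned early for rate == 0 without touching it, leaving the caller with an indeterminate value. Initializing the output parameter up front closes that hole; in general terms (calc() is a placeholder):

static int calc(unsigned int rate, int *br_io)
{
	*br_io = 0;	/* defined result on every path, including the early return */

	if (rate == 0)
		return 0;

	/* ... otherwise compute the real divisor into *br_io ... */
	return 1;
}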
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 35eded7ffb2d..1cc18e787a65 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -595,8 +595,8 @@ recv_frame( struct net_device *dev )
595 595
596 u32 crc = CRC32_INITIAL; 596 u32 crc = CRC32_INITIAL;
597 597
598 unsigned framelen, frameno, ack; 598 unsigned framelen = 0, frameno, ack;
599 unsigned is_first, frame_ok; 599 unsigned is_first, frame_ok = 0;
600 600
601 if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) { 601 if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
602 frame_ok = framelen > 4 602 frame_ok = framelen > 4
@@ -604,8 +604,7 @@ recv_frame( struct net_device *dev )
604 : skip_tail( ioaddr, framelen, crc ); 604 : skip_tail( ioaddr, framelen, crc );
605 if( frame_ok ) 605 if( frame_ok )
606 interpret_ack( dev, ack ); 606 interpret_ack( dev, ack );
607 } else 607 }
608 frame_ok = 0;
609 608
610 outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 ); 609 outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
611 if( frame_ok ) { 610 if( frame_ok ) {
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 1c54908fdc4c..ee1cc14db389 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -3086,7 +3086,8 @@ static int airo_thread(void *data) {
3086 struct net_device *dev = data; 3086 struct net_device *dev = data;
3087 struct airo_info *ai = dev->priv; 3087 struct airo_info *ai = dev->priv;
3088 int locked; 3088 int locked;
3089 3089
3090 set_freezable();
3090 while(1) { 3091 while(1) {
3091 /* make swsusp happy with our thread */ 3092 /* make swsusp happy with our thread */
3092 try_to_freeze(); 3093 try_to_freeze();
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4a59306a3f05..9f366242c392 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -613,6 +613,7 @@ static int wlan_service_main_thread(void *data)
613 613
614 init_waitqueue_entry(&wait, current); 614 init_waitqueue_entry(&wait, current);
615 615
616 set_freezable();
616 for (;;) { 617 for (;;) {
617 lbs_deb_thread( "main-thread 111: intcounter=%d " 618 lbs_deb_thread( "main-thread 111: intcounter=%d "
618 "currenttxskb=%p dnld_sent=%d\n", 619 "currenttxskb=%p dnld_sent=%d\n",
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index a68b3b3761a2..a728a7cd2fc8 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/dma-mapping.h>
19#include <linux/ioport.h> 20#include <linux/ioport.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 50cad3a59a6c..7c93a108f9b8 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -651,6 +651,7 @@ static int pccardd(void *__skt)
651 add_wait_queue(&skt->thread_wait, &wait); 651 add_wait_queue(&skt->thread_wait, &wait);
652 complete(&skt->thread_done); 652 complete(&skt->thread_done);
653 653
654 set_freezable();
654 for (;;) { 655 for (;;) {
655 unsigned long flags; 656 unsigned long flags;
656 unsigned int events; 657 unsigned int events;
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 3b40f9623cc9..3c45142c40b2 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -113,7 +113,7 @@ MODULE_LICENSE("Dual MPL/GPL");
113#define CONFIG_PCMCIA_SLOT_B 113#define CONFIG_PCMCIA_SLOT_B
114#endif 114#endif
115 115
116#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */ 116#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
117 117
118#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B) 118#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B)
119 119
@@ -146,9 +146,9 @@ MODULE_LICENSE("Dual MPL/GPL");
146 146
147/* ------------------------------------------------------------------------- */ 147/* ------------------------------------------------------------------------- */
148 148
149#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */ 149#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */
150#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */ 150#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */
151#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */ 151#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */
152/* ------------------------------------------------------------------------- */ 152/* ------------------------------------------------------------------------- */
153 153
154static int pcmcia_schlvl; 154static int pcmcia_schlvl;
@@ -169,8 +169,8 @@ static u32 *m8xx_pgcrx[2];
169 */ 169 */
170 170
171struct pcmcia_win { 171struct pcmcia_win {
172 u32 br; 172 u32 br;
173 u32 or; 173 u32 or;
174}; 174};
175 175
176/* 176/*
@@ -214,7 +214,7 @@ struct pcmcia_win {
214 214
215/* we keep one lookup table per socket to check flags */ 215/* we keep one lookup table per socket to check flags */
216 216
217#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */ 217#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */
218 218
219struct event_table { 219struct event_table {
220 u32 regbit; 220 u32 regbit;
@@ -224,8 +224,8 @@ struct event_table {
224static const char driver_name[] = "m8xx-pcmcia"; 224static const char driver_name[] = "m8xx-pcmcia";
225 225
226struct socket_info { 226struct socket_info {
227 void (*handler)(void *info, u32 events); 227 void (*handler) (void *info, u32 events);
228 void *info; 228 void *info;
229 229
230 u32 slot; 230 u32 slot;
231 pcmconf8xx_t *pcmcia; 231 pcmconf8xx_t *pcmcia;
@@ -234,7 +234,7 @@ struct socket_info {
234 234
235 socket_state_t state; 235 socket_state_t state;
236 struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO]; 236 struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO];
237 struct pccard_io_map io_win[PCMCIA_IO_WIN_NO]; 237 struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
238 struct event_table events[PCMCIA_EVENTS_MAX]; 238 struct event_table events[PCMCIA_EVENTS_MAX];
239 struct pcmcia_socket socket; 239 struct pcmcia_socket socket;
240}; 240};
@@ -248,8 +248,7 @@ static struct socket_info socket[PCMCIA_SOCKETS_NO];
248 248
249#define M8XX_SIZES_NO 32 249#define M8XX_SIZES_NO 32
250 250
251static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = 251static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = {
252{
253 0x00000001, 0x00000002, 0x00000008, 0x00000004, 252 0x00000001, 0x00000002, 0x00000008, 0x00000004,
254 0x00000080, 0x00000040, 0x00000010, 0x00000020, 253 0x00000080, 0x00000040, 0x00000010, 0x00000020,
255 0x00008000, 0x00004000, 0x00001000, 0x00002000, 254 0x00008000, 0x00004000, 0x00001000, 0x00002000,
@@ -265,7 +264,7 @@ static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] =
265 264
266static irqreturn_t m8xx_interrupt(int irq, void *dev); 265static irqreturn_t m8xx_interrupt(int irq, void *dev);
267 266
268#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ 267#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
269 268
270/* ------------------------------------------------------------------------- */ 269/* ------------------------------------------------------------------------- */
271/* board specific stuff: */ 270/* board specific stuff: */
@@ -289,8 +288,9 @@ static int voltage_set(int slot, int vcc, int vpp)
289{ 288{
290 u32 reg = 0; 289 u32 reg = 0;
291 290
292 switch(vcc) { 291 switch (vcc) {
293 case 0: break; 292 case 0:
293 break;
294 case 33: 294 case 33:
295 reg |= BCSR1_PCVCTL4; 295 reg |= BCSR1_PCVCTL4;
296 break; 296 break;
@@ -301,11 +301,12 @@ static int voltage_set(int slot, int vcc, int vpp)
301 return 1; 301 return 1;
302 } 302 }
303 303
304 switch(vpp) { 304 switch (vpp) {
305 case 0: break; 305 case 0:
306 break;
306 case 33: 307 case 33:
307 case 50: 308 case 50:
308 if(vcc == vpp) 309 if (vcc == vpp)
309 reg |= BCSR1_PCVCTL6; 310 reg |= BCSR1_PCVCTL6;
310 else 311 else
311 return 1; 312 return 1;
@@ -316,25 +317,29 @@ static int voltage_set(int slot, int vcc, int vpp)
316 return 1; 317 return 1;
317 } 318 }
318 319
319 if(!((vcc == 50) || (vcc == 0))) 320 if (!((vcc == 50) || (vcc == 0)))
320 return 1; 321 return 1;
321 322
322 /* first, turn off all power */ 323 /* first, turn off all power */
323 324
324 out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | BCSR1_PCVCTL5 | BCSR1_PCVCTL6 | BCSR1_PCVCTL7)); 325 out_be32(((u32 *) RPX_CSR_ADDR),
326 in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
327 BCSR1_PCVCTL5 |
328 BCSR1_PCVCTL6 |
329 BCSR1_PCVCTL7));
325 330
326 /* enable new powersettings */ 331 /* enable new powersettings */
327 332
328 out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) | reg); 333 out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
329 334
330 return 0; 335 return 0;
331} 336}
332 337
333#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V 338#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
334#define hardware_enable(_slot_) /* No hardware to enable */ 339#define hardware_enable(_slot_) /* No hardware to enable */
335#define hardware_disable(_slot_) /* No hardware to disable */ 340#define hardware_disable(_slot_) /* No hardware to disable */
336 341
337#endif /* CONFIG_RPXCLASSIC */ 342#endif /* CONFIG_RPXCLASSIC */
338 343
339/* FADS Boards from Motorola */ 344/* FADS Boards from Motorola */
340 345
@@ -346,43 +351,45 @@ static int voltage_set(int slot, int vcc, int vpp)
346{ 351{
347 u32 reg = 0; 352 u32 reg = 0;
348 353
349 switch(vcc) { 354 switch (vcc) {
350 case 0: 355 case 0:
351 break; 356 break;
352 case 33: 357 case 33:
353 reg |= BCSR1_PCCVCC0; 358 reg |= BCSR1_PCCVCC0;
354 break; 359 break;
355 case 50: 360 case 50:
356 reg |= BCSR1_PCCVCC1; 361 reg |= BCSR1_PCCVCC1;
357 break; 362 break;
358 default: 363 default:
359 return 1; 364 return 1;
360 } 365 }
361 366
362 switch(vpp) { 367 switch (vpp) {
363 case 0: 368 case 0:
364 break; 369 break;
365 case 33: 370 case 33:
366 case 50: 371 case 50:
367 if(vcc == vpp) 372 if (vcc == vpp)
368 reg |= BCSR1_PCCVPP1; 373 reg |= BCSR1_PCCVPP1;
369 else 374 else
370 return 1;
371 break;
372 case 120:
373 if ((vcc == 33) || (vcc == 50))
374 reg |= BCSR1_PCCVPP0;
375 else
376 return 1;
377 default:
378 return 1; 375 return 1;
376 break;
377 case 120:
378 if ((vcc == 33) || (vcc == 50))
379 reg |= BCSR1_PCCVPP0;
380 else
381 return 1;
382 default:
383 return 1;
379 } 384 }
380 385
381 /* first, turn off all power */ 386 /* first, turn off all power */
382 out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK)); 387 out_be32((u32 *) BCSR1,
388 in_be32((u32 *) BCSR1) & ~(BCSR1_PCCVCC_MASK |
389 BCSR1_PCCVPP_MASK));
383 390
384 /* enable new powersettings */ 391 /* enable new powersettings */
385 out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | reg); 392 out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | reg);
386 393
387 return 0; 394 return 0;
388} 395}
@@ -391,12 +398,12 @@ static int voltage_set(int slot, int vcc, int vpp)
391 398
392static void hardware_enable(int slot) 399static void hardware_enable(int slot)
393{ 400{
394 out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~BCSR1_PCCEN); 401 out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~BCSR1_PCCEN);
395} 402}
396 403
397static void hardware_disable(int slot) 404static void hardware_disable(int slot)
398{ 405{
399 out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | BCSR1_PCCEN); 406 out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | BCSR1_PCCEN);
400} 407}
401 408
402#endif 409#endif
@@ -410,7 +417,7 @@ static void hardware_disable(int slot)
410 417
411static inline void hardware_enable(int slot) 418static inline void hardware_enable(int slot)
412{ 419{
413 m8xx_pcmcia_ops.hw_ctrl(slot, 1); 420 m8xx_pcmcia_ops.hw_ctrl(slot, 1);
414} 421}
415 422
416static inline void hardware_disable(int slot) 423static inline void hardware_disable(int slot)
@@ -436,52 +443,53 @@ static int voltage_set(int slot, int vcc, int vpp)
436{ 443{
437 u8 reg = 0; 444 u8 reg = 0;
438 445
439 switch(vcc) { 446 switch (vcc) {
440 case 0: 447 case 0:
441 break; 448 break;
442 case 33: 449 case 33:
443 reg |= CSR2_VCC_33; 450 reg |= CSR2_VCC_33;
444 break; 451 break;
445 case 50: 452 case 50:
446 reg |= CSR2_VCC_50; 453 reg |= CSR2_VCC_50;
447 break; 454 break;
448 default: 455 default:
449 return 1; 456 return 1;
450 } 457 }
451 458
452 switch(vpp) { 459 switch (vpp) {
453 case 0: 460 case 0:
454 break; 461 break;
455 case 33: 462 case 33:
456 case 50: 463 case 50:
457 if(vcc == vpp) 464 if (vcc == vpp)
458 reg |= CSR2_VPP_VCC; 465 reg |= CSR2_VPP_VCC;
459 else 466 else
460 return 1; 467 return 1;
461 break; 468 break;
462 case 120: 469 case 120:
463 if ((vcc == 33) || (vcc == 50)) 470 if ((vcc == 33) || (vcc == 50))
464 reg |= CSR2_VPP_12; 471 reg |= CSR2_VPP_12;
465 else 472 else
466 return 1;
467 default:
468 return 1; 473 return 1;
474 default:
475 return 1;
469 } 476 }
470 477
471 /* first, turn off all power */ 478 /* first, turn off all power */
472 out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK)); 479 out_8((u8 *) MBX_CSR2_ADDR,
480 in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
473 481
474 /* enable new powersettings */ 482 /* enable new powersettings */
475 out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) | reg); 483 out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
476 484
477 return 0; 485 return 0;
478} 486}
479 487
480#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V 488#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
481#define hardware_enable(_slot_) /* No hardware to enable */ 489#define hardware_enable(_slot_) /* No hardware to enable */
482#define hardware_disable(_slot_) /* No hardware to disable */ 490#define hardware_disable(_slot_) /* No hardware to disable */
483 491
484#endif /* CONFIG_MBX */ 492#endif /* CONFIG_MBX */
485 493
486#if defined(CONFIG_PRxK) 494#if defined(CONFIG_PRxK)
487#include <asm/cpld.h> 495#include <asm/cpld.h>
@@ -495,43 +503,46 @@ static int voltage_set(int slot, int vcc, int vpp)
495 u8 regread; 503 u8 regread;
496 cpld_regs *ccpld = get_cpld(); 504 cpld_regs *ccpld = get_cpld();
497 505
498 switch(vcc) { 506 switch (vcc) {
499 case 0: 507 case 0:
500 break; 508 break;
501 case 33: 509 case 33:
502 reg |= PCMCIA_VCC_33; 510 reg |= PCMCIA_VCC_33;
503 break; 511 break;
504 case 50: 512 case 50:
505 reg |= PCMCIA_VCC_50; 513 reg |= PCMCIA_VCC_50;
506 break; 514 break;
507 default: 515 default:
508 return 1; 516 return 1;
509 } 517 }
510 518
511 switch(vpp) { 519 switch (vpp) {
512 case 0: 520 case 0:
513 break; 521 break;
514 case 33: 522 case 33:
515 case 50: 523 case 50:
516 if(vcc == vpp) 524 if (vcc == vpp)
517 reg |= PCMCIA_VPP_VCC; 525 reg |= PCMCIA_VPP_VCC;
518 else 526 else
519 return 1;
520 break;
521 case 120:
522 if ((vcc == 33) || (vcc == 50))
523 reg |= PCMCIA_VPP_12;
524 else
525 return 1;
526 default:
527 return 1; 527 return 1;
528 break;
529 case 120:
530 if ((vcc == 33) || (vcc == 50))
531 reg |= PCMCIA_VPP_12;
532 else
533 return 1;
534 default:
535 return 1;
528 } 536 }
529 537
530 reg = reg >> (slot << 2); 538 reg = reg >> (slot << 2);
531 regread = in_8(&ccpld->fpga_pc_ctl); 539 regread = in_8(&ccpld->fpga_pc_ctl);
532 if (reg != (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) { 540 if (reg !=
541 (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
533 /* enable new powersettings */ 542 /* enable new powersettings */
534 regread = regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)); 543 regread =
544 regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >>
545 (slot << 2));
535 out_8(&ccpld->fpga_pc_ctl, reg | regread); 546 out_8(&ccpld->fpga_pc_ctl, reg | regread);
536 msleep(100); 547 msleep(100);
537 } 548 }
@@ -540,10 +551,10 @@ static int voltage_set(int slot, int vcc, int vpp)
540} 551}
541 552
542#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV 553#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV
543#define hardware_enable(_slot_) /* No hardware to enable */ 554#define hardware_enable(_slot_) /* No hardware to enable */
544#define hardware_disable(_slot_) /* No hardware to disable */ 555#define hardware_disable(_slot_) /* No hardware to disable */
545 556
546#endif /* CONFIG_PRxK */ 557#endif /* CONFIG_PRxK */
547 558
548static u32 pending_events[PCMCIA_SOCKETS_NO]; 559static u32 pending_events[PCMCIA_SOCKETS_NO];
549static DEFINE_SPINLOCK(pending_event_lock); 560static DEFINE_SPINLOCK(pending_event_lock);
@@ -553,7 +564,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
553 struct socket_info *s; 564 struct socket_info *s;
554 struct event_table *e; 565 struct event_table *e;
555 unsigned int i, events, pscr, pipr, per; 566 unsigned int i, events, pscr, pipr, per;
556 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 567 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
557 568
558 dprintk("Interrupt!\n"); 569 dprintk("Interrupt!\n");
559 /* get interrupt sources */ 570 /* get interrupt sources */
@@ -562,16 +573,16 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
562 pipr = in_be32(&pcmcia->pcmc_pipr); 573 pipr = in_be32(&pcmcia->pcmc_pipr);
563 per = in_be32(&pcmcia->pcmc_per); 574 per = in_be32(&pcmcia->pcmc_per);
564 575
565 for(i = 0; i < PCMCIA_SOCKETS_NO; i++) { 576 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
566 s = &socket[i]; 577 s = &socket[i];
567 e = &s->events[0]; 578 e = &s->events[0];
568 events = 0; 579 events = 0;
569 580
570 while(e->regbit) { 581 while (e->regbit) {
571 if(pscr & e->regbit) 582 if (pscr & e->regbit)
572 events |= e->eventbit; 583 events |= e->eventbit;
573 584
574 e++; 585 e++;
575 } 586 }
576 587
577 /* 588 /*
@@ -579,13 +590,11 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
579 * not too nice done, 590 * not too nice done,
580 * we depend on that CD2 is the bit to the left of CD1... 591 * we depend on that CD2 is the bit to the left of CD1...
581 */ 592 */
582 if(events & SS_DETECT) 593 if (events & SS_DETECT)
583 if(((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^ 594 if (((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
584 (pipr & M8XX_PCMCIA_CD1(i))) 595 (pipr & M8XX_PCMCIA_CD1(i))) {
585 {
586 events &= ~SS_DETECT; 596 events &= ~SS_DETECT;
587 } 597 }
588
589#ifdef PCMCIA_GLITCHY_CD 598#ifdef PCMCIA_GLITCHY_CD
590 /* 599 /*
591 * I've experienced CD problems with my ADS board. 600 * I've experienced CD problems with my ADS board.
@@ -593,24 +602,23 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
593 * real change of Card detection. 602 * real change of Card detection.
594 */ 603 */
595 604
596 if((events & SS_DETECT) && 605 if ((events & SS_DETECT) &&
597 ((pipr & 606 ((pipr &
598 (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) && 607 (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
599 (s->state.Vcc | s->state.Vpp)) { 608 (s->state.Vcc | s->state.Vpp)) {
600 events &= ~SS_DETECT; 609 events &= ~SS_DETECT;
601 /*printk( "CD glitch workaround - CD = 0x%08x!\n", 610 /*printk( "CD glitch workaround - CD = 0x%08x!\n",
602 (pipr & (M8XX_PCMCIA_CD2(i) 611 (pipr & (M8XX_PCMCIA_CD2(i)
603 | M8XX_PCMCIA_CD1(i))));*/ 612 | M8XX_PCMCIA_CD1(i)))); */
604 } 613 }
605#endif 614#endif
606 615
607 /* call the handler */ 616 /* call the handler */
608 617
609 dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, " 618 dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, "
610 "pipr = 0x%08x\n", 619 "pipr = 0x%08x\n", i, events, pscr, pipr);
611 i, events, pscr, pipr);
612 620
613 if(events) { 621 if (events) {
614 spin_lock(&pending_event_lock); 622 spin_lock(&pending_event_lock);
615 pending_events[i] |= events; 623 pending_events[i] |= events;
616 spin_unlock(&pending_event_lock); 624 spin_unlock(&pending_event_lock);
@@ -643,11 +651,11 @@ static u32 m8xx_get_graycode(u32 size)
643{ 651{
644 u32 k; 652 u32 k;
645 653
646 for(k = 0; k < M8XX_SIZES_NO; k++) 654 for (k = 0; k < M8XX_SIZES_NO; k++)
647 if(m8xx_size_to_gray[k] == size) 655 if (m8xx_size_to_gray[k] == size)
648 break; 656 break;
649 657
650 if((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1)) 658 if ((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
651 k = -1; 659 k = -1;
652 660
653 return k; 661 return k;
@@ -657,7 +665,7 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
657{ 665{
658 u32 reg, clocks, psst, psl, psht; 666 u32 reg, clocks, psst, psl, psht;
659 667
660 if(!ns) { 668 if (!ns) {
661 669
662 /* 670 /*
663 * We get called with IO maps setup to 0ns 671 * We get called with IO maps setup to 0ns
@@ -665,10 +673,10 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
665 * They should be 255ns. 673 * They should be 255ns.
666 */ 674 */
667 675
668 if(is_io) 676 if (is_io)
669 ns = 255; 677 ns = 255;
670 else 678 else
671 ns = 100; /* fast memory if 0 */ 679 ns = 100; /* fast memory if 0 */
672 } 680 }
673 681
674 /* 682 /*
@@ -679,23 +687,23 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
679 687
680/* how we want to adjust the timing - in percent */ 688/* how we want to adjust the timing - in percent */
681 689
682#define ADJ 180 /* 80 % longer accesstime - to be sure */ 690#define ADJ 180 /* 80 % longer accesstime - to be sure */
683 691
684 clocks = ((bus_freq / 1000) * ns) / 1000; 692 clocks = ((bus_freq / 1000) * ns) / 1000;
685 clocks = (clocks * ADJ) / (100*1000); 693 clocks = (clocks * ADJ) / (100 * 1000);
686 if(clocks >= PCMCIA_BMT_LIMIT) { 694 if (clocks >= PCMCIA_BMT_LIMIT) {
687 printk( "Max access time limit reached\n"); 695 printk("Max access time limit reached\n");
688 clocks = PCMCIA_BMT_LIMIT-1; 696 clocks = PCMCIA_BMT_LIMIT - 1;
689 } 697 }
690 698
691 psst = clocks / 7; /* setup time */ 699 psst = clocks / 7; /* setup time */
692 psht = clocks / 7; /* hold time */ 700 psht = clocks / 7; /* hold time */
693 psl = (clocks * 5) / 7; /* strobe length */ 701 psl = (clocks * 5) / 7; /* strobe length */
694 702
695 psst += clocks - (psst + psht + psl); 703 psst += clocks - (psst + psht + psl);
696 704
697 reg = psst << 12; 705 reg = psst << 12;
698 reg |= psl << 7; 706 reg |= psl << 7;
699 reg |= psht << 16; 707 reg |= psht << 16;
700 708
701 return reg; 709 return reg;
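Editor's note, as a worked illustration of the timing math above (the 50 MHz bus clock is an assumed example value, not one from the patch): with bus_freq = 50,000,000 and ns = 255, clocks = ((50,000,000 / 1000) * 255) / 1000 = 12,750, then (12,750 * 180) / (100 * 1000) = 22, comfortably under PCMCIA_BMT_LIMIT (60). From that, psst = psht = 22 / 7 = 3 and psl = (22 * 5) / 7 = 15; the leftover clock is folded back into psst, giving 4 + 3 + 15 = 22.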
@@ -710,8 +718,8 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
710 718
711 pipr = in_be32(&pcmcia->pcmc_pipr); 719 pipr = in_be32(&pcmcia->pcmc_pipr);
712 720
713 *value = ((pipr & (M8XX_PCMCIA_CD1(lsock) 721 *value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
714 | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0; 722 | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
715 *value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0; 723 *value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0;
716 724
717 if (s->state.flags & SS_IOCARD) 725 if (s->state.flags & SS_IOCARD)
@@ -795,16 +803,16 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
795 /* read out VS1 and VS2 */ 803 /* read out VS1 and VS2 */
796 804
797 reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock)) 805 reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock))
798 >> M8XX_PCMCIA_VS_SHIFT(lsock); 806 >> M8XX_PCMCIA_VS_SHIFT(lsock);
799 807
800 if(socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) { 808 if (socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
801 switch(reg) { 809 switch (reg) {
802 case 1: 810 case 1:
803 *value |= SS_3VCARD; 811 *value |= SS_3VCARD;
804 break; /* GND, NC - 3.3V only */ 812 break; /* GND, NC - 3.3V only */
805 case 2: 813 case 2:
806 *value |= SS_XVCARD; 814 *value |= SS_XVCARD;
807 break; /* NC. GND - x.xV only */ 815 break; /* NC. GND - x.xV only */
808 }; 816 };
809 } 817 }
810 818
@@ -812,7 +820,7 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
812 return 0; 820 return 0;
813} 821}
814 822
815static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state) 823static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
816{ 824{
817 int lsock = container_of(sock, struct socket_info, socket)->slot; 825 int lsock = container_of(sock, struct socket_info, socket)->slot;
818 struct socket_info *s = &socket[lsock]; 826 struct socket_info *s = &socket[lsock];
@@ -821,20 +829,20 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
821 unsigned long flags; 829 unsigned long flags;
822 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 830 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
823 831
824 dprintk( "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 832 dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
825 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags, 833 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
826 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 834 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
827 835
828 /* First, set voltage - bail out if invalid */ 836 /* First, set voltage - bail out if invalid */
829 if(voltage_set(lsock, state->Vcc, state->Vpp)) 837 if (voltage_set(lsock, state->Vcc, state->Vpp))
830 return -EINVAL; 838 return -EINVAL;
831 839
832
833 /* Take care of reset... */ 840 /* Take care of reset... */
834 if(state->flags & SS_RESET) 841 if (state->flags & SS_RESET)
835 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */ 842 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */
836 else 843 else
837 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET); 844 out_be32(M8XX_PGCRX(lsock),
845 in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
838 846
839 /* ... and output enable. */ 847 /* ... and output enable. */
840 848
@@ -846,10 +854,11 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
846 no pullups are present -> the cards act weird. 854 no pullups are present -> the cards act weird.
847 So right now the buffers are enabled if the power is on. */ 855 So right now the buffers are enabled if the power is on. */
848 856
849 if(state->Vcc || state->Vpp) 857 if (state->Vcc || state->Vpp)
850 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */ 858 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
851 else 859 else
852 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE); 860 out_be32(M8XX_PGCRX(lsock),
861 in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
853 862
854 /* 863 /*
855 * We'd better turn off interrupts before 864 * We'd better turn off interrupts before
@@ -866,17 +875,17 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
866 e = &s->events[0]; 875 e = &s->events[0];
867 reg = 0; 876 reg = 0;
868 877
869 if(state->csc_mask & SS_DETECT) { 878 if (state->csc_mask & SS_DETECT) {
870 e->eventbit = SS_DETECT; 879 e->eventbit = SS_DETECT;
871 reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock) 880 reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock)
872 | M8XX_PCMCIA_CD1(lsock)); 881 | M8XX_PCMCIA_CD1(lsock));
873 e++; 882 e++;
874 } 883 }
875 if(state->flags & SS_IOCARD) { 884 if (state->flags & SS_IOCARD) {
876 /* 885 /*
877 * I/O card 886 * I/O card
878 */ 887 */
879 if(state->csc_mask & SS_STSCHG) { 888 if (state->csc_mask & SS_STSCHG) {
880 e->eventbit = SS_STSCHG; 889 e->eventbit = SS_STSCHG;
881 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock); 890 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
882 e++; 891 e++;
@@ -884,9 +893,10 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
884 /* 893 /*
885 * If io_irq is non-zero we should enable irq. 894 * If io_irq is non-zero we should enable irq.
886 */ 895 */
887 if(state->io_irq) { 896 if (state->io_irq) {
888 out_be32(M8XX_PGCRX(lsock), 897 out_be32(M8XX_PGCRX(lsock),
889 in_be32(M8XX_PGCRX(lsock)) | mk_int_int_mask(s->hwirq) << 24); 898 in_be32(M8XX_PGCRX(lsock)) |
899 mk_int_int_mask(s->hwirq) << 24);
890 /* 900 /*
891 * Strange thing here: 901 * Strange thing here:
892 * The manual does not tell us which interrupt 902 * The manual does not tell us which interrupt
@@ -897,33 +907,32 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
897 * have to be cleared in PSCR in the interrupt handler. 907 * have to be cleared in PSCR in the interrupt handler.
898 */ 908 */
899 reg |= M8XX_PCMCIA_RDY_L(lsock); 909 reg |= M8XX_PCMCIA_RDY_L(lsock);
900 } 910 } else
901 else 911 out_be32(M8XX_PGCRX(lsock),
902 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff); 912 in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
903 } 913 } else {
904 else {
905 /* 914 /*
906 * Memory card 915 * Memory card
907 */ 916 */
908 if(state->csc_mask & SS_BATDEAD) { 917 if (state->csc_mask & SS_BATDEAD) {
909 e->eventbit = SS_BATDEAD; 918 e->eventbit = SS_BATDEAD;
910 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock); 919 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
911 e++; 920 e++;
912 } 921 }
913 if(state->csc_mask & SS_BATWARN) { 922 if (state->csc_mask & SS_BATWARN) {
914 e->eventbit = SS_BATWARN; 923 e->eventbit = SS_BATWARN;
915 reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock); 924 reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock);
916 e++; 925 e++;
917 } 926 }
918 /* What should I trigger on - low/high,raise,fall? */ 927 /* What should I trigger on - low/high,raise,fall? */
919 if(state->csc_mask & SS_READY) { 928 if (state->csc_mask & SS_READY) {
920 e->eventbit = SS_READY; 929 e->eventbit = SS_READY;
921 reg |= e->regbit = 0; //?? 930 reg |= e->regbit = 0; //??
922 e++; 931 e++;
923 } 932 }
924 } 933 }
925 934
926 e->regbit = 0; /* terminate list */ 935 e->regbit = 0; /* terminate list */
927 936
928 /* 937 /*
929 * Clear the status changed . 938 * Clear the status changed .
@@ -940,7 +949,9 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
940 * Ones will enable the interrupt. 949 * Ones will enable the interrupt.
941 */ 950 */
942 951
943 reg |= in_be32(&pcmcia->pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); 952 reg |=
953 in_be32(&pcmcia->
954 pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
944 out_be32(&pcmcia->pcmc_per, reg); 955 out_be32(&pcmcia->pcmc_per, reg);
945 956
946 spin_unlock_irqrestore(&events_lock, flags); 957 spin_unlock_irqrestore(&events_lock, flags);
@@ -961,67 +972,66 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
961 unsigned int reg, winnr; 972 unsigned int reg, winnr;
962 pcmconf8xx_t *pcmcia = s->pcmcia; 973 pcmconf8xx_t *pcmcia = s->pcmcia;
963 974
964
965#define M8XX_SIZE (io->stop - io->start + 1) 975#define M8XX_SIZE (io->stop - io->start + 1)
966#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) 976#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
967 977
968 dprintk( "SetIOMap(%d, %d, %#2.2x, %d ns, " 978 dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, "
969 "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags, 979 "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
970 io->speed, io->start, io->stop); 980 io->speed, io->start, io->stop);
971 981
972 if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff) 982 if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff)
973 || (io->stop > 0xffff) || (io->stop < io->start)) 983 || (io->stop > 0xffff) || (io->stop < io->start))
974 return -EINVAL; 984 return -EINVAL;
975 985
976 if((reg = m8xx_get_graycode(M8XX_SIZE)) == -1) 986 if ((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
977 return -EINVAL; 987 return -EINVAL;
978 988
979 if(io->flags & MAP_ACTIVE) { 989 if (io->flags & MAP_ACTIVE) {
980 990
981 dprintk( "io->flags & MAP_ACTIVE\n"); 991 dprintk("io->flags & MAP_ACTIVE\n");
982 992
983 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) 993 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
984 + (lsock * PCMCIA_IO_WIN_NO) + io->map; 994 + (lsock * PCMCIA_IO_WIN_NO) + io->map;
985 995
986 /* setup registers */ 996 /* setup registers */
987 997
988 w = (void *) &pcmcia->pcmc_pbr0; 998 w = (void *)&pcmcia->pcmc_pbr0;
989 w += winnr; 999 w += winnr;
990 1000
991 out_be32(&w->or, 0); /* turn off window first */ 1001 out_be32(&w->or, 0); /* turn off window first */
992 out_be32(&w->br, M8XX_BASE); 1002 out_be32(&w->br, M8XX_BASE);
993 1003
994 reg <<= 27; 1004 reg <<= 27;
995 reg |= M8XX_PCMCIA_POR_IO |(lsock << 2); 1005 reg |= M8XX_PCMCIA_POR_IO | (lsock << 2);
996 1006
997 reg |= m8xx_get_speed(io->speed, 1, s->bus_freq); 1007 reg |= m8xx_get_speed(io->speed, 1, s->bus_freq);
998 1008
999 if(io->flags & MAP_WRPROT) 1009 if (io->flags & MAP_WRPROT)
1000 reg |= M8XX_PCMCIA_POR_WRPROT; 1010 reg |= M8XX_PCMCIA_POR_WRPROT;
1001 1011
1002 /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ))*/ 1012 /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ)) */
1003 if(io->flags & MAP_16BIT) 1013 if (io->flags & MAP_16BIT)
1004 reg |= M8XX_PCMCIA_POR_16BIT; 1014 reg |= M8XX_PCMCIA_POR_16BIT;
1005 1015
1006 if(io->flags & MAP_ACTIVE) 1016 if (io->flags & MAP_ACTIVE)
1007 reg |= M8XX_PCMCIA_POR_VALID; 1017 reg |= M8XX_PCMCIA_POR_VALID;
1008 1018
1009 out_be32(&w->or, reg); 1019 out_be32(&w->or, reg);
1010 1020
1011 dprintk("Socket %u: Mapped io window %u at %#8.8x, " 1021 dprintk("Socket %u: Mapped io window %u at %#8.8x, "
1012 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); 1022 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
1013 } else { 1023 } else {
1014 /* shutdown IO window */ 1024 /* shutdown IO window */
1015 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) 1025 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
1016 + (lsock * PCMCIA_IO_WIN_NO) + io->map; 1026 + (lsock * PCMCIA_IO_WIN_NO) + io->map;
1017 1027
1018 /* setup registers */ 1028 /* setup registers */
1019 1029
1020 w = (void *) &pcmcia->pcmc_pbr0; 1030 w = (void *)&pcmcia->pcmc_pbr0;
1021 w += winnr; 1031 w += winnr;
1022 1032
1023 out_be32(&w->or, 0); /* turn off window */ 1033 out_be32(&w->or, 0); /* turn off window */
1024 out_be32(&w->br, 0); /* turn off base address */ 1034 out_be32(&w->br, 0); /* turn off base address */
1025 1035
1026 dprintk("Socket %u: Unmapped io window %u at %#8.8x, " 1036 dprintk("Socket %u: Unmapped io window %u at %#8.8x, "
1027 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); 1037 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
@@ -1029,15 +1039,14 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
1029 1039
1030 /* copy the struct and modify the copy */ 1040 /* copy the struct and modify the copy */
1031 s->io_win[io->map] = *io; 1041 s->io_win[io->map] = *io;
1032 s->io_win[io->map].flags &= (MAP_WRPROT 1042 s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
1033 | MAP_16BIT
1034 | MAP_ACTIVE);
1035 dprintk("SetIOMap exit\n"); 1043 dprintk("SetIOMap exit\n");
1036 1044
1037 return 0; 1045 return 0;
1038} 1046}
1039 1047
1040static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) 1048static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1049 struct pccard_mem_map *mem)
1041{ 1050{
1042 int lsock = container_of(sock, struct socket_info, socket)->slot; 1051 int lsock = container_of(sock, struct socket_info, socket)->slot;
1043 struct socket_info *s = &socket[lsock]; 1052 struct socket_info *s = &socket[lsock];
@@ -1046,19 +1055,19 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
1046 unsigned int reg, winnr; 1055 unsigned int reg, winnr;
1047 pcmconf8xx_t *pcmcia = s->pcmcia; 1056 pcmconf8xx_t *pcmcia = s->pcmcia;
1048 1057
1049 dprintk( "SetMemMap(%d, %d, %#2.2x, %d ns, " 1058 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
1050 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags, 1059 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
1051 mem->speed, mem->static_start, mem->card_start); 1060 mem->speed, mem->static_start, mem->card_start);
1052 1061
1053 if ((mem->map >= PCMCIA_MEM_WIN_NO) 1062 if ((mem->map >= PCMCIA_MEM_WIN_NO)
1054// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE) 1063// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
1055 || (mem->card_start >= 0x04000000) 1064 || (mem->card_start >= 0x04000000)
1056 || (mem->static_start & 0xfff) /* 4KByte resolution */ 1065 || (mem->static_start & 0xfff) /* 4KByte resolution */
1057 || (mem->card_start & 0xfff)) 1066 ||(mem->card_start & 0xfff))
1058 return -EINVAL; 1067 return -EINVAL;
1059 1068
1060 if((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) { 1069 if ((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
1061 printk( "Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE); 1070 printk("Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
1062 return -EINVAL; 1071 return -EINVAL;
1063 } 1072 }
1064 reg <<= 27; 1073 reg <<= 27;
@@ -1067,50 +1076,47 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
1067 1076
1068 /* Setup the window in the pcmcia controller */ 1077 /* Setup the window in the pcmcia controller */
1069 1078
1070 w = (void *) &pcmcia->pcmc_pbr0; 1079 w = (void *)&pcmcia->pcmc_pbr0;
1071 w += winnr; 1080 w += winnr;
1072 1081
1073 reg |= lsock << 2; 1082 reg |= lsock << 2;
1074 1083
1075 reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq); 1084 reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq);
1076 1085
1077 if(mem->flags & MAP_ATTRIB) 1086 if (mem->flags & MAP_ATTRIB)
1078 reg |= M8XX_PCMCIA_POR_ATTRMEM; 1087 reg |= M8XX_PCMCIA_POR_ATTRMEM;
1079 1088
1080 if(mem->flags & MAP_WRPROT) 1089 if (mem->flags & MAP_WRPROT)
1081 reg |= M8XX_PCMCIA_POR_WRPROT; 1090 reg |= M8XX_PCMCIA_POR_WRPROT;
1082 1091
1083 if(mem->flags & MAP_16BIT) 1092 if (mem->flags & MAP_16BIT)
1084 reg |= M8XX_PCMCIA_POR_16BIT; 1093 reg |= M8XX_PCMCIA_POR_16BIT;
1085 1094
1086 if(mem->flags & MAP_ACTIVE) 1095 if (mem->flags & MAP_ACTIVE)
1087 reg |= M8XX_PCMCIA_POR_VALID; 1096 reg |= M8XX_PCMCIA_POR_VALID;
1088 1097
1089 out_be32(&w->or, reg); 1098 out_be32(&w->or, reg);
1090 1099
1091 dprintk("Socket %u: Mapped memory window %u at %#8.8x, " 1100 dprintk("Socket %u: Mapped memory window %u at %#8.8x, "
1092 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or); 1101 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
1093 1102
1094 if(mem->flags & MAP_ACTIVE) { 1103 if (mem->flags & MAP_ACTIVE) {
1095 /* get the new base address */ 1104 /* get the new base address */
1096 mem->static_start = PCMCIA_MEM_WIN_BASE + 1105 mem->static_start = PCMCIA_MEM_WIN_BASE +
1097 (PCMCIA_MEM_WIN_SIZE * winnr) 1106 (PCMCIA_MEM_WIN_SIZE * winnr)
1098 + mem->card_start; 1107 + mem->card_start;
1099 } 1108 }
1100 1109
1101 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " 1110 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
1102 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags, 1111 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
1103 mem->speed, mem->static_start, mem->card_start); 1112 mem->speed, mem->static_start, mem->card_start);
1104 1113
1105 /* copy the struct and modify the copy */ 1114 /* copy the struct and modify the copy */
1106 1115
1107 old = &s->mem_win[mem->map]; 1116 old = &s->mem_win[mem->map];
1108 1117
1109 *old = *mem; 1118 *old = *mem;
1110 old->flags &= (MAP_ATTRIB 1119 old->flags &= (MAP_ATTRIB | MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
1111 | MAP_WRPROT
1112 | MAP_16BIT
1113 | MAP_ACTIVE);
1114 1120
1115 return 0; 1121 return 0;
1116} 1122}
@@ -1121,7 +1127,7 @@ static int m8xx_sock_init(struct pcmcia_socket *sock)
1121 pccard_io_map io = { 0, 0, 0, 0, 1 }; 1127 pccard_io_map io = { 0, 0, 0, 0, 1 };
1122 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 }; 1128 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
1123 1129
1124 dprintk( "sock_init(%d)\n", s); 1130 dprintk("sock_init(%d)\n", s);
1125 1131
1126 m8xx_set_socket(sock, &dead_socket); 1132 m8xx_set_socket(sock, &dead_socket);
1127 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) { 1133 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
@@ -1143,7 +1149,7 @@ static int m8xx_sock_suspend(struct pcmcia_socket *sock)
1143} 1149}
1144 1150
1145static struct pccard_operations m8xx_services = { 1151static struct pccard_operations m8xx_services = {
1146 .init = m8xx_sock_init, 1152 .init = m8xx_sock_init,
1147 .suspend = m8xx_sock_suspend, 1153 .suspend = m8xx_sock_suspend,
1148 .get_status = m8xx_get_status, 1154 .get_status = m8xx_get_status,
1149 .set_socket = m8xx_set_socket, 1155 .set_socket = m8xx_set_socket,
@@ -1151,7 +1157,8 @@ static struct pccard_operations m8xx_services = {
1151 .set_mem_map = m8xx_set_mem_map, 1157 .set_mem_map = m8xx_set_mem_map,
1152}; 1158};
1153 1159
1154static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id *match) 1160static int __init m8xx_probe(struct of_device *ofdev,
1161 const struct of_device_id *match)
1155{ 1162{
1156 struct pcmcia_win *w; 1163 struct pcmcia_win *w;
1157 unsigned int i, m, hwirq; 1164 unsigned int i, m, hwirq;
@@ -1162,49 +1169,50 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
1162 pcmcia_info("%s\n", version); 1169 pcmcia_info("%s\n", version);
1163 1170
1164 pcmcia = of_iomap(np, 0); 1171 pcmcia = of_iomap(np, 0);
1165 if(pcmcia == NULL) 1172 if (pcmcia == NULL)
1166 return -EINVAL; 1173 return -EINVAL;
1167 1174
1168 pcmcia_schlvl = irq_of_parse_and_map(np, 0); 1175 pcmcia_schlvl = irq_of_parse_and_map(np, 0);
1169 hwirq = irq_map[pcmcia_schlvl].hwirq; 1176 hwirq = irq_map[pcmcia_schlvl].hwirq;
1170 if (pcmcia_schlvl < 0) 1177 if (pcmcia_schlvl < 0)
1171 return -EINVAL; 1178 return -EINVAL;
1172 1179
1173 m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra; 1180 m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra;
1174 m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb; 1181 m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb;
1175 1182
1176
1177 pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG 1183 pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG
1178 " with IRQ %u (%d). \n", pcmcia_schlvl, hwirq); 1184 " with IRQ %u (%d). \n", pcmcia_schlvl, hwirq);
1179 1185
1180 /* Configure Status change interrupt */ 1186 /* Configure Status change interrupt */
1181 1187
1182 if(request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED, 1188 if (request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED,
1183 driver_name, socket)) { 1189 driver_name, socket)) {
1184 pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n", 1190 pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
1185 pcmcia_schlvl); 1191 pcmcia_schlvl);
1186 return -1; 1192 return -1;
1187 } 1193 }
1188 1194
1189 w = (void *) &pcmcia->pcmc_pbr0; 1195 w = (void *)&pcmcia->pcmc_pbr0;
1190 1196
1191 out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1)); 1197 out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
1192 clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); 1198 clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
1193 1199
1194 /* connect interrupt and disable CxOE */ 1200 /* connect interrupt and disable CxOE */
1195 1201
1196 out_be32(M8XX_PGCRX(0), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); 1202 out_be32(M8XX_PGCRX(0),
1197 out_be32(M8XX_PGCRX(1), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); 1203 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
1204 out_be32(M8XX_PGCRX(1),
1205 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
1198 1206
1199	/* initialize the fixed memory windows */	1207	/* initialize the fixed memory windows */
1200 1208
1201 for(i = 0; i < PCMCIA_SOCKETS_NO; i++){ 1209 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
1202 for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { 1210 for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
1203 out_be32(&w->br, PCMCIA_MEM_WIN_BASE + 1211 out_be32(&w->br, PCMCIA_MEM_WIN_BASE +
1204 (PCMCIA_MEM_WIN_SIZE 1212 (PCMCIA_MEM_WIN_SIZE
1205 * (m + i * PCMCIA_MEM_WIN_NO))); 1213 * (m + i * PCMCIA_MEM_WIN_NO)));
1206 1214
1207 out_be32(&w->or, 0); /* set to not valid */ 1215 out_be32(&w->or, 0); /* set to not valid */
1208 1216
1209 w++; 1217 w++;
1210 } 1218 }
@@ -1218,10 +1226,11 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
1218 hardware_enable(0); 1226 hardware_enable(0);
1219 hardware_enable(1); 1227 hardware_enable(1);
1220 1228
1221 for (i = 0 ; i < PCMCIA_SOCKETS_NO; i++) { 1229 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
1222 socket[i].slot = i; 1230 socket[i].slot = i;
1223 socket[i].socket.owner = THIS_MODULE; 1231 socket[i].socket.owner = THIS_MODULE;
1224 socket[i].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP; 1232 socket[i].socket.features =
1233 SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
1225 socket[i].socket.irq_mask = 0x000; 1234 socket[i].socket.irq_mask = 0x000;
1226 socket[i].socket.map_size = 0x1000; 1235 socket[i].socket.map_size = 0x1000;
1227 socket[i].socket.io_offset = 0; 1236 socket[i].socket.io_offset = 0;
@@ -1234,7 +1243,6 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
1234 socket[i].bus_freq = ppc_proc_freq; 1243 socket[i].bus_freq = ppc_proc_freq;
1235 socket[i].hwirq = hwirq; 1244 socket[i].hwirq = hwirq;
1236 1245
1237
1238 } 1246 }
1239 1247
1240 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { 1248 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
@@ -1246,25 +1254,25 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
1246 return 0; 1254 return 0;
1247} 1255}
1248 1256
1249static int m8xx_remove(struct of_device* ofdev) 1257static int m8xx_remove(struct of_device *ofdev)
1250{ 1258{
1251 u32 m, i; 1259 u32 m, i;
1252 struct pcmcia_win *w; 1260 struct pcmcia_win *w;
1253 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 1261 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
1254 1262
1255 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { 1263 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
1256 w = (void *) &pcmcia->pcmc_pbr0; 1264 w = (void *)&pcmcia->pcmc_pbr0;
1257 1265
1258 out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i)); 1266 out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i));
1259 out_be32(&pcmcia->pcmc_per, 1267 out_be32(&pcmcia->pcmc_per,
1260 in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i)); 1268 in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i));
1261 1269
1262 /* turn off interrupt and disable CxOE */ 1270 /* turn off interrupt and disable CxOE */
1263 out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE); 1271 out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
1264 1272
1265 /* turn off memory windows */ 1273 /* turn off memory windows */
1266 for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { 1274 for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
1267 out_be32(&w->or, 0); /* set to not valid */ 1275 out_be32(&w->or, 0); /* set to not valid */
1268 w++; 1276 w++;
1269 } 1277 }
1270 1278
@@ -1299,21 +1307,21 @@ static int m8xx_resume(struct platform_device *pdev)
1299 1307
1300static struct of_device_id m8xx_pcmcia_match[] = { 1308static struct of_device_id m8xx_pcmcia_match[] = {
1301 { 1309 {
1302 .type = "pcmcia", 1310 .type = "pcmcia",
1303 .compatible = "fsl,pq-pcmcia", 1311 .compatible = "fsl,pq-pcmcia",
1304 }, 1312 },
1305 {}, 1313 {},
1306}; 1314};
1307 1315
1308MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match); 1316MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
1309 1317
1310static struct of_platform_driver m8xx_pcmcia_driver = { 1318static struct of_platform_driver m8xx_pcmcia_driver = {
1311 .name = (char *) driver_name, 1319 .name = (char *)driver_name,
1312 .match_table = m8xx_pcmcia_match, 1320 .match_table = m8xx_pcmcia_match,
1313 .probe = m8xx_probe, 1321 .probe = m8xx_probe,
1314 .remove = m8xx_remove, 1322 .remove = m8xx_remove,
1315 .suspend = m8xx_suspend, 1323 .suspend = m8xx_suspend,
1316 .resume = m8xx_resume, 1324 .resume = m8xx_resume,
1317}; 1325};
1318 1326
1319static int __init m8xx_init(void) 1327static int __init m8xx_init(void)
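The m8xx_set_mem_map() hunk above reports an active mapping's static_start as the fixed window base, plus one window size times the window's index, plus the card offset, matching the fixed-window layout programmed in the m8xx_probe() loop further down. A minimal sketch of that arithmetic follows; the constants and the winnr formula are placeholders chosen for illustration, not the driver's actual PCMCIA_MEM_WIN_* values.

#include <stdio.h>

/* Placeholder values; the real PCMCIA_MEM_WIN_* constants are board-specific. */
#define WIN_BASE	0xe0000000u
#define WIN_SIZE	0x04000000u
#define WIN_PER_SOCKET	5u

/* Window index is assumed to be socket * windows-per-socket + map,
 * mirroring the fixed-window setup loop in m8xx_probe(). */
static unsigned int static_start(unsigned int socket, unsigned int map,
				 unsigned int card_start)
{
	unsigned int winnr = socket * WIN_PER_SOCKET + map;

	return WIN_BASE + WIN_SIZE * winnr + card_start;
}

int main(void)
{
	/* socket 1, window 2, card offset 0x1000 -> 0xfc001000 with these values */
	printf("static_start = %#x\n", static_start(1, 2, 0x1000));
	return 0;
}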
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 3a201b77b963..03baf1c64a2e 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -160,6 +160,7 @@ static int pnp_dock_thread(void * unused)
160{ 160{
161 static struct pnp_docking_station_info now; 161 static struct pnp_docking_station_info now;
162 int docked = -1, d = 0; 162 int docked = -1, d = 0;
163 set_freezable();
163 while (!unloading) 164 while (!unloading)
164 { 165 {
165 int status; 166 int status;
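The pnpbios hunk above only marks the docking-station poll thread as freezable. For context, such a kernel thread normally pairs set_freezable() with a try_to_freeze() call inside its polling loop; the sketch below is a generic illustration of that pattern with invented names, not the pnpbios code itself.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

/* Generic freezable poll thread (illustrative only). */
static int example_poll_thread(void *unused)
{
	set_freezable();			/* opt in to the freezer */

	while (!kthread_should_stop()) {
		try_to_freeze();		/* park here across suspend/resume */
		/* ... poll the hardware ... */
		msleep_interruptible(2000);
	}
	return 0;
}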
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 83b071b6ece4..cea401feb0f3 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -10,7 +10,6 @@ config RTC_LIB
10 10
11config RTC_CLASS 11config RTC_CLASS
12 tristate "RTC class" 12 tristate "RTC class"
13 depends on EXPERIMENTAL
14 default n 13 default n
15 select RTC_LIB 14 select RTC_LIB
16 help 15 help
@@ -119,7 +118,7 @@ config RTC_DRV_TEST
119 will be called rtc-test. 118 will be called rtc-test.
120 119
121comment "I2C RTC drivers" 120comment "I2C RTC drivers"
122 depends on RTC_CLASS 121 depends on RTC_CLASS && I2C
123 122
124config RTC_DRV_DS1307 123config RTC_DRV_DS1307
125 tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00" 124 tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00"
@@ -160,11 +159,11 @@ config RTC_DRV_MAX6900
160 will be called rtc-max6900. 159 will be called rtc-max6900.
161 160
162config RTC_DRV_RS5C372 161config RTC_DRV_RS5C372
163 tristate "Ricoh RS5C372A/B" 162 tristate "Ricoh RS5C372A/B, RV5C386, RV5C387A"
164 depends on RTC_CLASS && I2C 163 depends on RTC_CLASS && I2C
165 help 164 help
166 If you say yes here you get support for the 165 If you say yes here you get support for the
167 Ricoh RS5C372A and RS5C372B RTC chips. 166 Ricoh RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips.
168 167
169 This driver can also be built as a module. If so, the module 168 This driver can also be built as a module. If so, the module
170 will be called rtc-rs5c372. 169 will be called rtc-rs5c372.
@@ -213,12 +212,40 @@ config RTC_DRV_PCF8583
213 This driver can also be built as a module. If so, the module 212 This driver can also be built as a module. If so, the module
214 will be called rtc-pcf8583. 213 will be called rtc-pcf8583.
215 214
215config RTC_DRV_M41T80
216 tristate "ST M41T80 series RTC"
217 depends on RTC_CLASS && I2C
218 help
219 If you say Y here you will get support for the
220 ST M41T80 RTC chips series. Currently following chips are
221 supported: M41T80, M41T81, M41T82, M41T83, M41ST84, M41ST85
222 and M41ST87.
223
224 This driver can also be built as a module. If so, the module
225 will be called rtc-m41t80.
226
227config RTC_DRV_M41T80_WDT
228 bool "ST M41T80 series RTC watchdog timer"
229 depends on RTC_DRV_M41T80
230 help
231 If you say Y here you will get support for the
232	  watchdog timer in the ST M41T80 RTC chip series.
233
234config RTC_DRV_TWL92330
235 boolean "TI TWL92330/Menelaus"
236 depends on RTC_CLASS && I2C && MENELAUS
237 help
238 If you say yes here you get support for the RTC on the
239	  TWL92330 "Menelaus" power management chip, used with OMAP2
240 platforms. The support is integrated with the rest of
241	  the Menelaus driver; it's not a separate module.
242
216comment "SPI RTC drivers" 243comment "SPI RTC drivers"
217 depends on RTC_CLASS 244 depends on RTC_CLASS && SPI_MASTER
218 245
219config RTC_DRV_RS5C348 246config RTC_DRV_RS5C348
220 tristate "Ricoh RS5C348A/B" 247 tristate "Ricoh RS5C348A/B"
221 depends on RTC_CLASS && SPI 248 depends on RTC_CLASS && SPI_MASTER
222 help 249 help
223 If you say yes here you get support for the 250 If you say yes here you get support for the
224 Ricoh RS5C348A and RS5C348B RTC chips. 251 Ricoh RS5C348A and RS5C348B RTC chips.
@@ -228,7 +255,7 @@ config RTC_DRV_RS5C348
228 255
229config RTC_DRV_MAX6902 256config RTC_DRV_MAX6902
230 tristate "Maxim 6902" 257 tristate "Maxim 6902"
231 depends on RTC_CLASS && SPI 258 depends on RTC_CLASS && SPI_MASTER
232 help 259 help
233 If you say yes here you will get support for the 260 If you say yes here you will get support for the
234 Maxim MAX6902 SPI RTC chip. 261 Maxim MAX6902 SPI RTC chip.
@@ -262,6 +289,12 @@ config RTC_DRV_CMOS
262 This driver can also be built as a module. If so, the module 289 This driver can also be built as a module. If so, the module
263 will be called rtc-cmos. 290 will be called rtc-cmos.
264 291
292config RTC_DRV_DS1216
293 tristate "Dallas DS1216"
294 depends on RTC_CLASS && SNI_RM
295 help
296 If you say yes here you get support for the Dallas DS1216 RTC chips.
297
265config RTC_DRV_DS1553 298config RTC_DRV_DS1553
266 tristate "Dallas DS1553" 299 tristate "Dallas DS1553"
267 depends on RTC_CLASS 300 depends on RTC_CLASS
@@ -292,6 +325,16 @@ config RTC_DRV_M48T86
292 This driver can also be built as a module. If so, the module 325 This driver can also be built as a module. If so, the module
293 will be called rtc-m48t86. 326 will be called rtc-m48t86.
294 327
328config RTC_DRV_M48T59
329 tristate "ST M48T59"
330 depends on RTC_CLASS
331 help
332 If you say Y here you will get support for the
333 ST M48T59 RTC chip.
334
335	  This driver can also be built as a module. If so, the module
336 will be called "rtc-m48t59".
337
295config RTC_DRV_V3020 338config RTC_DRV_V3020
296 tristate "EM Microelectronic V3020" 339 tristate "EM Microelectronic V3020"
297 depends on RTC_CLASS 340 depends on RTC_CLASS
@@ -379,6 +422,13 @@ config RTC_DRV_PL031
379 To compile this driver as a module, choose M here: the 422 To compile this driver as a module, choose M here: the
380 module will be called rtc-pl031. 423 module will be called rtc-pl031.
381 424
425config RTC_DRV_AT32AP700X
426 tristate "AT32AP700X series RTC"
427 depends on RTC_CLASS && PLATFORM_AT32AP
428 help
429 Driver for the internal RTC (Realtime Clock) on Atmel AVR32
430 AT32AP700x family processors.
431
382config RTC_DRV_AT91RM9200 432config RTC_DRV_AT91RM9200
383 tristate "AT91RM9200" 433 tristate "AT91RM9200"
384 depends on RTC_CLASS && ARCH_AT91RM9200 434 depends on RTC_CLASS && ARCH_AT91RM9200
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index a1afbc236073..3109af9a1651 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
19obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o 19obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
20obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o 20obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
21obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 21obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
22obj-$(CONFIG_RTC_DRV_AT32AP700X) += rtc-at32ap700x.o
22obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o 23obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
23obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o 24obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
24obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o 25obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
28obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o 29obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
29obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o 30obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
30obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o 31obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
32obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
31obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o 33obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
32obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o 34obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
33obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o 35obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
@@ -41,3 +43,5 @@ obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
41obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o 43obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
42obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o 44obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
43obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o 45obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
46obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
47obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
new file mode 100644
index 000000000000..2999214ca534
--- /dev/null
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -0,0 +1,317 @@
1/*
2 * An RTC driver for the AVR32 AT32AP700x processor series.
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/platform_device.h>
14#include <linux/rtc.h>
15#include <linux/io.h>
16
17/*
18 * This is a bare-bones RTC. It runs during most system sleep states, but has
19 * no battery backup and gets reset during system restart. It must be
20 * initialized from an external clock (network, I2C, etc) before it can be of
21 * much use.
22 *
23 * The alarm functionality is limited by the hardware, not supporting
24 * periodic interrupts.
25 */
26
27#define RTC_CTRL 0x00
28#define RTC_CTRL_EN 0
29#define RTC_CTRL_PCLR 1
30#define RTC_CTRL_TOPEN 2
31#define RTC_CTRL_PSEL 8
32
33#define RTC_VAL 0x04
34
35#define RTC_TOP 0x08
36
37#define RTC_IER 0x10
38#define RTC_IER_TOPI 0
39
40#define RTC_IDR 0x14
41#define RTC_IDR_TOPI 0
42
43#define RTC_IMR 0x18
44#define RTC_IMR_TOPI 0
45
46#define RTC_ISR 0x1c
47#define RTC_ISR_TOPI 0
48
49#define RTC_ICR 0x20
50#define RTC_ICR_TOPI 0
51
52#define RTC_BIT(name) (1 << RTC_##name)
53#define RTC_BF(name, value) ((value) << RTC_##name)
54
55#define rtc_readl(dev, reg) \
56 __raw_readl((dev)->regs + RTC_##reg)
57#define rtc_writel(dev, reg, value) \
58 __raw_writel((value), (dev)->regs + RTC_##reg)
59
60struct rtc_at32ap700x {
61 struct rtc_device *rtc;
62 void __iomem *regs;
63 unsigned long alarm_time;
64 unsigned long irq;
65 /* Protect against concurrent register access. */
66 spinlock_t lock;
67};
68
69static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm)
70{
71 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
72 unsigned long now;
73
74 now = rtc_readl(rtc, VAL);
75 rtc_time_to_tm(now, tm);
76
77 return 0;
78}
79
80static int at32_rtc_settime(struct device *dev, struct rtc_time *tm)
81{
82 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
83 unsigned long now;
84 int ret;
85
86 ret = rtc_tm_to_time(tm, &now);
87 if (ret == 0)
88 rtc_writel(rtc, VAL, now);
89
90 return ret;
91}
92
93static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
94{
95 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
96
97 rtc_time_to_tm(rtc->alarm_time, &alrm->time);
98 alrm->pending = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0;
99
100 return 0;
101}
102
103static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
104{
105 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
106 unsigned long rtc_unix_time;
107 unsigned long alarm_unix_time;
108 int ret;
109
110 rtc_unix_time = rtc_readl(rtc, VAL);
111
112 ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time);
113 if (ret)
114 return ret;
115
116 if (alarm_unix_time < rtc_unix_time)
117 return -EINVAL;
118
119 spin_lock_irq(&rtc->lock);
120 rtc->alarm_time = alarm_unix_time;
121 rtc_writel(rtc, TOP, rtc->alarm_time);
122 if (alrm->pending)
123 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
124 | RTC_BIT(CTRL_TOPEN));
125 else
126 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
127 & ~RTC_BIT(CTRL_TOPEN));
128 spin_unlock_irq(&rtc->lock);
129
130 return ret;
131}
132
133static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
134 unsigned long arg)
135{
136 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
137 int ret = 0;
138
139 spin_lock_irq(&rtc->lock);
140
141 switch (cmd) {
142 case RTC_AIE_ON:
143 if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
144 ret = -EINVAL;
145 break;
146 }
147 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
148 | RTC_BIT(CTRL_TOPEN));
149 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
150 rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
151 break;
152 case RTC_AIE_OFF:
153 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
154 & ~RTC_BIT(CTRL_TOPEN));
155 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
156 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
157 break;
158 default:
159 ret = -ENOIOCTLCMD;
160 break;
161 }
162
163 spin_unlock_irq(&rtc->lock);
164
165 return ret;
166}
167
168static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
169{
170 struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id;
171 unsigned long isr = rtc_readl(rtc, ISR);
172 unsigned long events = 0;
173 int ret = IRQ_NONE;
174
175 spin_lock(&rtc->lock);
176
177 if (isr & RTC_BIT(ISR_TOPI)) {
178 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
179 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
180 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
181 & ~RTC_BIT(CTRL_TOPEN));
182 rtc_writel(rtc, VAL, rtc->alarm_time);
183 events = RTC_AF | RTC_IRQF;
184 rtc_update_irq(rtc->rtc, 1, events);
185 ret = IRQ_HANDLED;
186 }
187
188 spin_unlock(&rtc->lock);
189
190 return ret;
191}
192
193static struct rtc_class_ops at32_rtc_ops = {
194 .ioctl = at32_rtc_ioctl,
195 .read_time = at32_rtc_readtime,
196 .set_time = at32_rtc_settime,
197 .read_alarm = at32_rtc_readalarm,
198 .set_alarm = at32_rtc_setalarm,
199};
200
201static int __init at32_rtc_probe(struct platform_device *pdev)
202{
203 struct resource *regs;
204 struct rtc_at32ap700x *rtc;
205 int irq = -1;
206 int ret;
207
208 rtc = kzalloc(sizeof(struct rtc_at32ap700x), GFP_KERNEL);
209 if (!rtc) {
210 dev_dbg(&pdev->dev, "out of memory\n");
211 return -ENOMEM;
212 }
213
214 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
215 if (!regs) {
216 dev_dbg(&pdev->dev, "no mmio resource defined\n");
217 ret = -ENXIO;
218 goto out;
219 }
220
221 irq = platform_get_irq(pdev, 0);
222 if (irq < 0) {
223 dev_dbg(&pdev->dev, "could not get irq\n");
224 ret = -ENXIO;
225 goto out;
226 }
227
228 ret = request_irq(irq, at32_rtc_interrupt, IRQF_SHARED, "rtc", rtc);
229 if (ret) {
230 dev_dbg(&pdev->dev, "could not request irq %d\n", irq);
231 goto out;
232 }
233
234 rtc->irq = irq;
235 rtc->regs = ioremap(regs->start, regs->end - regs->start + 1);
236 if (!rtc->regs) {
237 ret = -ENOMEM;
238 dev_dbg(&pdev->dev, "could not map I/O memory\n");
239 goto out_free_irq;
240 }
241 spin_lock_init(&rtc->lock);
242
243 /*
244 * Maybe init RTC: count from zero at 1 Hz, disable wrap irq.
245 *
246 * Do not reset VAL register, as it can hold an old time
247 * from last JTAG reset.
248 */
249 if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) {
250 rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR));
251 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
252 rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe)
253 | RTC_BIT(CTRL_EN));
254 }
255
256 rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
257 &at32_rtc_ops, THIS_MODULE);
258 if (IS_ERR(rtc->rtc)) {
259 dev_dbg(&pdev->dev, "could not register rtc device\n");
260 ret = PTR_ERR(rtc->rtc);
261 goto out_iounmap;
262 }
263
264 platform_set_drvdata(pdev, rtc);
265
266 dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
267 (unsigned long)rtc->regs, rtc->irq);
268
269 return 0;
270
271out_iounmap:
272 iounmap(rtc->regs);
273out_free_irq:
274 free_irq(irq, rtc);
275out:
276 kfree(rtc);
277 return ret;
278}
279
280static int __exit at32_rtc_remove(struct platform_device *pdev)
281{
282 struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev);
283
284 free_irq(rtc->irq, rtc);
285 iounmap(rtc->regs);
286 rtc_device_unregister(rtc->rtc);
287 kfree(rtc);
288 platform_set_drvdata(pdev, NULL);
289
290 return 0;
291}
292
293MODULE_ALIAS("at32ap700x_rtc");
294
295static struct platform_driver at32_rtc_driver = {
296 .remove = __exit_p(at32_rtc_remove),
297 .driver = {
298 .name = "at32ap700x_rtc",
299 .owner = THIS_MODULE,
300 },
301};
302
303static int __init at32_rtc_init(void)
304{
305 return platform_driver_probe(&at32_rtc_driver, at32_rtc_probe);
306}
307module_init(at32_rtc_init);
308
309static void __exit at32_rtc_exit(void)
310{
311 platform_driver_unregister(&at32_rtc_driver);
312}
313module_exit(at32_rtc_exit);
314
315MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
316MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x");
317MODULE_LICENSE("GPL");
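The new AT32AP700x driver above is reached through the standard RTC character-device interface, so its at32_rtc_readtime() and ioctl handlers are exercised with the usual linux/rtc.h ioctls. A small user-space sketch follows; the /dev/rtc0 path is an assumption about how the class device ends up named on a given board.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);	/* assumed device node */

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) == 0)	/* serviced by at32_rtc_readtime() */
		printf("%04d-%02d-%02d %02d:%02d:%02d UTC\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
	else
		perror("RTC_RD_TIME");
	close(fd);
	return 0;
}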
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index f4e5f0040ff7..304535942de2 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -341,6 +341,8 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
341 case RTC_IRQP_READ: 341 case RTC_IRQP_READ:
342 if (ops->irq_set_freq) 342 if (ops->irq_set_freq)
343 err = put_user(rtc->irq_freq, (unsigned long __user *)uarg); 343 err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
344 else
345 err = -ENOTTY;
344 break; 346 break;
345 347
346 case RTC_IRQP_SET: 348 case RTC_IRQP_SET:
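With the rtc-dev change above, RTC_IRQP_READ now reports -ENOTTY when the underlying driver has no irq_set_freq handler, rather than returning without filling in a frequency. A hedged user-space sketch of handling that case:

#include <stdio.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

/* fd is an already-open RTC character device. */
static void report_irq_freq(int fd)
{
	unsigned long freq;

	if (ioctl(fd, RTC_IRQP_READ, &freq) == 0)
		printf("periodic irq frequency: %lu Hz\n", freq);
	else if (errno == ENOTTY)
		printf("periodic irq frequency not supported by this RTC\n");
	else
		perror("RTC_IRQP_READ");
}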
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
new file mode 100644
index 000000000000..83efb88f8f23
--- /dev/null
+++ b/drivers/rtc/rtc-ds1216.c
@@ -0,0 +1,226 @@
1/*
2 * Dallas DS1216 RTC driver
3 *
4 * Copyright (c) 2007 Thomas Bogendoerfer
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/rtc.h>
10#include <linux/platform_device.h>
11#include <linux/bcd.h>
12
13#define DRV_VERSION "0.1"
14
15struct ds1216_regs {
16 u8 tsec;
17 u8 sec;
18 u8 min;
19 u8 hour;
20 u8 wday;
21 u8 mday;
22 u8 month;
23 u8 year;
24};
25
26#define DS1216_HOUR_1224 (1 << 7)
27#define DS1216_HOUR_AMPM (1 << 5)
28
29struct ds1216_priv {
30 struct rtc_device *rtc;
31 void __iomem *ioaddr;
32 size_t size;
33 unsigned long baseaddr;
34};
35
36static const u8 magic[] = {
37 0xc5, 0x3a, 0xa3, 0x5c, 0xc5, 0x3a, 0xa3, 0x5c
38};
39
40/*
41 * Read the 64 bits we'd like to have - it's a series
42 * of 64 bits showing up in the LSB of the base register.
43 *
44 */
45static void ds1216_read(u8 __iomem *ioaddr, u8 *buf)
46{
47 unsigned char c;
48 int i, j;
49
50 for (i = 0; i < 8; i++) {
51 c = 0;
52 for (j = 0; j < 8; j++)
53 c |= (readb(ioaddr) & 0x1) << j;
54 buf[i] = c;
55 }
56}
57
58static void ds1216_write(u8 __iomem *ioaddr, const u8 *buf)
59{
60 unsigned char c;
61 int i, j;
62
63 for (i = 0; i < 8; i++) {
64 c = buf[i];
65 for (j = 0; j < 8; j++) {
66 writeb(c, ioaddr);
67 c = c >> 1;
68 }
69 }
70}
71
72static void ds1216_switch_ds_to_clock(u8 __iomem *ioaddr)
73{
74 /* Reset magic pointer */
75 readb(ioaddr);
76 /* Write 64 bit magic to DS1216 */
77 ds1216_write(ioaddr, magic);
78}
79
80static int ds1216_rtc_read_time(struct device *dev, struct rtc_time *tm)
81{
82 struct platform_device *pdev = to_platform_device(dev);
83 struct ds1216_priv *priv = platform_get_drvdata(pdev);
84 struct ds1216_regs regs;
85
86 ds1216_switch_ds_to_clock(priv->ioaddr);
87 ds1216_read(priv->ioaddr, (u8 *)&regs);
88
89 tm->tm_sec = BCD2BIN(regs.sec);
90 tm->tm_min = BCD2BIN(regs.min);
91 if (regs.hour & DS1216_HOUR_1224) {
92 /* AM/PM mode */
93 tm->tm_hour = BCD2BIN(regs.hour & 0x1f);
94 if (regs.hour & DS1216_HOUR_AMPM)
95 tm->tm_hour += 12;
96 } else
97 tm->tm_hour = BCD2BIN(regs.hour & 0x3f);
98 tm->tm_wday = (regs.wday & 7) - 1;
99 tm->tm_mday = BCD2BIN(regs.mday & 0x3f);
100 tm->tm_mon = BCD2BIN(regs.month & 0x1f);
101 tm->tm_year = BCD2BIN(regs.year);
102 if (tm->tm_year < 70)
103 tm->tm_year += 100;
104 return 0;
105}
106
107static int ds1216_rtc_set_time(struct device *dev, struct rtc_time *tm)
108{
109 struct platform_device *pdev = to_platform_device(dev);
110 struct ds1216_priv *priv = platform_get_drvdata(pdev);
111 struct ds1216_regs regs;
112
113 ds1216_switch_ds_to_clock(priv->ioaddr);
114 ds1216_read(priv->ioaddr, (u8 *)&regs);
115
116 regs.tsec = 0; /* clear 0.1 and 0.01 seconds */
117 regs.sec = BIN2BCD(tm->tm_sec);
118 regs.min = BIN2BCD(tm->tm_min);
119 regs.hour &= DS1216_HOUR_1224;
120 if (regs.hour && tm->tm_hour > 12) {
121 regs.hour |= DS1216_HOUR_AMPM;
122 tm->tm_hour -= 12;
123 }
124 regs.hour |= BIN2BCD(tm->tm_hour);
125 regs.wday &= ~7;
126 regs.wday |= tm->tm_wday;
127 regs.mday = BIN2BCD(tm->tm_mday);
128 regs.month = BIN2BCD(tm->tm_mon);
129 regs.year = BIN2BCD(tm->tm_year % 100);
130
131 ds1216_switch_ds_to_clock(priv->ioaddr);
132 ds1216_write(priv->ioaddr, (u8 *)&regs);
133 return 0;
134}
135
136static const struct rtc_class_ops ds1216_rtc_ops = {
137 .read_time = ds1216_rtc_read_time,
138 .set_time = ds1216_rtc_set_time,
139};
140
141static int __devinit ds1216_rtc_probe(struct platform_device *pdev)
142{
143 struct rtc_device *rtc;
144 struct resource *res;
145 struct ds1216_priv *priv;
146 int ret = 0;
147 u8 dummy[8];
148
149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
150 if (!res)
151 return -ENODEV;
152 priv = kzalloc(sizeof *priv, GFP_KERNEL);
153 if (!priv)
154 return -ENOMEM;
155 priv->size = res->end - res->start + 1;
156 if (!request_mem_region(res->start, priv->size, pdev->name)) {
157 ret = -EBUSY;
158 goto out;
159 }
160 priv->baseaddr = res->start;
161 priv->ioaddr = ioremap(priv->baseaddr, priv->size);
162 if (!priv->ioaddr) {
163 ret = -ENOMEM;
164 goto out;
165 }
166 rtc = rtc_device_register("ds1216", &pdev->dev,
167 &ds1216_rtc_ops, THIS_MODULE);
168 if (IS_ERR(rtc)) {
169 ret = PTR_ERR(rtc);
170 goto out;
171 }
172 priv->rtc = rtc;
173 platform_set_drvdata(pdev, priv);
174
175 /* dummy read to get clock into a known state */
176 ds1216_read(priv->ioaddr, dummy);
177 return 0;
178
179out:
180 if (priv->rtc)
181 rtc_device_unregister(priv->rtc);
182 if (priv->ioaddr)
183 iounmap(priv->ioaddr);
184 if (priv->baseaddr)
185 release_mem_region(priv->baseaddr, priv->size);
186 kfree(priv);
187 return ret;
188}
189
190static int __devexit ds1216_rtc_remove(struct platform_device *pdev)
191{
192 struct ds1216_priv *priv = platform_get_drvdata(pdev);
193
194 rtc_device_unregister(priv->rtc);
195 iounmap(priv->ioaddr);
196 release_mem_region(priv->baseaddr, priv->size);
197 kfree(priv);
198 return 0;
199}
200
201static struct platform_driver ds1216_rtc_platform_driver = {
202 .driver = {
203 .name = "rtc-ds1216",
204 .owner = THIS_MODULE,
205 },
206 .probe = ds1216_rtc_probe,
207 .remove = __devexit_p(ds1216_rtc_remove),
208};
209
210static int __init ds1216_rtc_init(void)
211{
212 return platform_driver_register(&ds1216_rtc_platform_driver);
213}
214
215static void __exit ds1216_rtc_exit(void)
216{
217 platform_driver_unregister(&ds1216_rtc_platform_driver);
218}
219
220MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
221MODULE_DESCRIPTION("DS1216 RTC driver");
222MODULE_LICENSE("GPL");
223MODULE_VERSION(DRV_VERSION);
224
225module_init(ds1216_rtc_init);
226module_exit(ds1216_rtc_exit);
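The ds1216 driver above, like the other RTC drivers in this series, keeps calendar fields in BCD and converts them with the BCD2BIN()/BIN2BCD() helpers from linux/bcd.h. As a quick reference, an open-coded equivalent of those helpers (each decimal digit occupies one nibble of the register byte):

#include <stdio.h>

static unsigned bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;	/* 0x59 -> 59 */
}

static unsigned char bin2bcd(unsigned val)
{
	return ((val / 10) << 4) | (val % 10);	/* 59 -> 0x59 */
}

int main(void)
{
	printf("0x59 -> %u, 59 -> %#x\n", bcd2bin(0x59), bin2bcd(59));
	return 0;
}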
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 3f0f7b8fa813..5158a625671f 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -24,29 +24,29 @@
24 * setting the date and time), Linux can ignore the non-clock features. 24 * setting the date and time), Linux can ignore the non-clock features.
25 * That's a natural job for a factory or repair bench. 25 * That's a natural job for a factory or repair bench.
26 * 26 *
27 * If the I2C "force" mechanism is used, we assume the chip is a ds1337. 27 * This is currently a simple no-alarms driver. If your board has the
28 * (Much better would be board-specific tables of I2C devices, along with 28 * alarm irq wired up on a ds1337 or ds1339, and you want to use that,
29 * the platform_data drivers would use to sort such issues out.) 29 * then look at the rtc-rs5c372 driver for code to steal...
30 */ 30 */
31enum ds_type { 31enum ds_type {
32 unknown = 0, 32 ds_1307,
33 ds_1307, /* or ds1338, ... */ 33 ds_1337,
34 ds_1337, /* or ds1339, ... */ 34 ds_1338,
35 ds_1340, /* or st m41t00, ... */ 35 ds_1339,
36 ds_1340,
37 m41t00,
36 // rs5c372 too? different address... 38 // rs5c372 too? different address...
37}; 39};
38 40
39static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
40
41I2C_CLIENT_INSMOD;
42
43
44 41
45/* RTC registers don't differ much, except for the century flag */ 42/* RTC registers don't differ much, except for the century flag */
46#define DS1307_REG_SECS 0x00 /* 00-59 */ 43#define DS1307_REG_SECS 0x00 /* 00-59 */
47# define DS1307_BIT_CH 0x80 44# define DS1307_BIT_CH 0x80
45# define DS1340_BIT_nEOSC 0x80
48#define DS1307_REG_MIN 0x01 /* 00-59 */ 46#define DS1307_REG_MIN 0x01 /* 00-59 */
49#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */ 47#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
48# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
49# define DS1307_BIT_PM 0x20 /* in REG_HOUR */
50# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */ 50# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
51# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */ 51# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
52#define DS1307_REG_WDAY 0x03 /* 01-07 */ 52#define DS1307_REG_WDAY 0x03 /* 01-07 */
@@ -56,11 +56,12 @@ I2C_CLIENT_INSMOD;
56#define DS1307_REG_YEAR 0x06 /* 00-99 */ 56#define DS1307_REG_YEAR 0x06 /* 00-99 */
57 57
58/* Other registers (control, status, alarms, trickle charge, NVRAM, etc) 58/* Other registers (control, status, alarms, trickle charge, NVRAM, etc)
59 * start at 7, and they differ a lot. Only control and status matter for RTC; 59 * start at 7, and they differ a LOT. Only control and status matter for
60 * be careful using them. 60 * basic RTC date and time functionality; be careful using them.
61 */ 61 */
62#define DS1307_REG_CONTROL 0x07 62#define DS1307_REG_CONTROL 0x07 /* or ds1338 */
63# define DS1307_BIT_OUT 0x80 63# define DS1307_BIT_OUT 0x80
64# define DS1338_BIT_OSF 0x20
64# define DS1307_BIT_SQWE 0x10 65# define DS1307_BIT_SQWE 0x10
65# define DS1307_BIT_RS1 0x02 66# define DS1307_BIT_RS1 0x02
66# define DS1307_BIT_RS0 0x01 67# define DS1307_BIT_RS0 0x01
@@ -71,6 +72,13 @@ I2C_CLIENT_INSMOD;
71# define DS1337_BIT_INTCN 0x04 72# define DS1337_BIT_INTCN 0x04
72# define DS1337_BIT_A2IE 0x02 73# define DS1337_BIT_A2IE 0x02
73# define DS1337_BIT_A1IE 0x01 74# define DS1337_BIT_A1IE 0x01
75#define DS1340_REG_CONTROL 0x07
76# define DS1340_BIT_OUT 0x80
77# define DS1340_BIT_FT 0x40
78# define DS1340_BIT_CALIB_SIGN 0x20
79# define DS1340_M_CALIBRATION 0x1f
80#define DS1340_REG_FLAG 0x09
81# define DS1340_BIT_OSF 0x80
74#define DS1337_REG_STATUS 0x0f 82#define DS1337_REG_STATUS 0x0f
75# define DS1337_BIT_OSF 0x80 83# define DS1337_BIT_OSF 0x80
76# define DS1337_BIT_A2I 0x02 84# define DS1337_BIT_A2I 0x02
@@ -84,21 +92,63 @@ struct ds1307 {
84 u8 regs[8]; 92 u8 regs[8];
85 enum ds_type type; 93 enum ds_type type;
86 struct i2c_msg msg[2]; 94 struct i2c_msg msg[2];
87 struct i2c_client client; 95 struct i2c_client *client;
96 struct i2c_client dev;
88 struct rtc_device *rtc; 97 struct rtc_device *rtc;
89}; 98};
90 99
100struct chip_desc {
101 char name[9];
102 unsigned nvram56:1;
103 unsigned alarm:1;
104 enum ds_type type;
105};
106
107static const struct chip_desc chips[] = { {
108 .name = "ds1307",
109 .type = ds_1307,
110 .nvram56 = 1,
111}, {
112 .name = "ds1337",
113 .type = ds_1337,
114 .alarm = 1,
115}, {
116 .name = "ds1338",
117 .type = ds_1338,
118 .nvram56 = 1,
119}, {
120 .name = "ds1339",
121 .type = ds_1339,
122 .alarm = 1,
123}, {
124 .name = "ds1340",
125 .type = ds_1340,
126}, {
127 .name = "m41t00",
128 .type = m41t00,
129}, };
130
131static inline const struct chip_desc *find_chip(const char *s)
132{
133 unsigned i;
134
135 for (i = 0; i < ARRAY_SIZE(chips); i++)
136 if (strnicmp(s, chips[i].name, sizeof chips[i].name) == 0)
137 return &chips[i];
138 return NULL;
139}
91 140
92static int ds1307_get_time(struct device *dev, struct rtc_time *t) 141static int ds1307_get_time(struct device *dev, struct rtc_time *t)
93{ 142{
94 struct ds1307 *ds1307 = dev_get_drvdata(dev); 143 struct ds1307 *ds1307 = dev_get_drvdata(dev);
95 int tmp; 144 int tmp;
96 145
97 /* read the RTC registers all at once */ 146 /* read the RTC date and time registers all at once */
98 ds1307->msg[1].flags = I2C_M_RD; 147 ds1307->msg[1].flags = I2C_M_RD;
99 ds1307->msg[1].len = 7; 148 ds1307->msg[1].len = 7;
100 149
101 tmp = i2c_transfer(ds1307->client.adapter, ds1307->msg, 2); 150 tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
151 ds1307->msg, 2);
102 if (tmp != 2) { 152 if (tmp != 2) {
103 dev_err(dev, "%s error %d\n", "read", tmp); 153 dev_err(dev, "%s error %d\n", "read", tmp);
104 return -EIO; 154 return -EIO;
@@ -129,7 +179,8 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
129 t->tm_hour, t->tm_mday, 179 t->tm_hour, t->tm_mday,
130 t->tm_mon, t->tm_year, t->tm_wday); 180 t->tm_mon, t->tm_year, t->tm_wday);
131 181
132 return 0; 182 /* initial clock setting can be undefined */
183 return rtc_valid_tm(t);
133} 184}
134 185
135static int ds1307_set_time(struct device *dev, struct rtc_time *t) 186static int ds1307_set_time(struct device *dev, struct rtc_time *t)
@@ -157,11 +208,18 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
157 tmp = t->tm_year - 100; 208 tmp = t->tm_year - 100;
158 buf[DS1307_REG_YEAR] = BIN2BCD(tmp); 209 buf[DS1307_REG_YEAR] = BIN2BCD(tmp);
159 210
160 if (ds1307->type == ds_1337) 211 switch (ds1307->type) {
212 case ds_1337:
213 case ds_1339:
161 buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; 214 buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
162 else if (ds1307->type == ds_1340) 215 break;
216 case ds_1340:
163 buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN 217 buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
164 | DS1340_BIT_CENTURY; 218 | DS1340_BIT_CENTURY;
219 break;
220 default:
221 break;
222 }
165 223
166 ds1307->msg[1].flags = 0; 224 ds1307->msg[1].flags = 0;
167 ds1307->msg[1].len = 8; 225 ds1307->msg[1].len = 8;
@@ -170,7 +228,8 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
170 "write", buf[0], buf[1], buf[2], buf[3], 228 "write", buf[0], buf[1], buf[2], buf[3],
171 buf[4], buf[5], buf[6]); 229 buf[4], buf[5], buf[6]);
172 230
173 result = i2c_transfer(ds1307->client.adapter, &ds1307->msg[1], 1); 231 result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
232 &ds1307->msg[1], 1);
174 if (result != 1) { 233 if (result != 1) {
175 dev_err(dev, "%s error %d\n", "write", tmp); 234 dev_err(dev, "%s error %d\n", "write", tmp);
176 return -EIO; 235 return -EIO;
@@ -185,25 +244,29 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
185 244
186static struct i2c_driver ds1307_driver; 245static struct i2c_driver ds1307_driver;
187 246
188static int __devinit 247static int __devinit ds1307_probe(struct i2c_client *client)
189ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
190{ 248{
191 struct ds1307 *ds1307; 249 struct ds1307 *ds1307;
192 int err = -ENODEV; 250 int err = -ENODEV;
193 struct i2c_client *client;
194 int tmp; 251 int tmp;
195 252 const struct chip_desc *chip;
196 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) { 253 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
197 err = -ENOMEM; 254
198 goto exit; 255 chip = find_chip(client->name);
256 if (!chip) {
257 dev_err(&client->dev, "unknown chip type '%s'\n",
258 client->name);
259 return -ENODEV;
199 } 260 }
200 261
201 client = &ds1307->client; 262 if (!i2c_check_functionality(adapter,
202 client->addr = address; 263 I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
203 client->adapter = adapter; 264 return -EIO;
204 client->driver = &ds1307_driver; 265
205 client->flags = 0; 266 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
267 return -ENOMEM;
206 268
269 ds1307->client = client;
207 i2c_set_clientdata(client, ds1307); 270 i2c_set_clientdata(client, ds1307);
208 271
209 ds1307->msg[0].addr = client->addr; 272 ds1307->msg[0].addr = client->addr;
@@ -216,14 +279,16 @@ ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
216 ds1307->msg[1].len = sizeof(ds1307->regs); 279 ds1307->msg[1].len = sizeof(ds1307->regs);
217 ds1307->msg[1].buf = ds1307->regs; 280 ds1307->msg[1].buf = ds1307->regs;
218 281
219 /* HACK: "force" implies "needs ds1337-style-oscillator setup" */ 282 ds1307->type = chip->type;
220 if (kind >= 0) {
221 ds1307->type = ds_1337;
222 283
284 switch (ds1307->type) {
285 case ds_1337:
286 case ds_1339:
223 ds1307->reg_addr = DS1337_REG_CONTROL; 287 ds1307->reg_addr = DS1337_REG_CONTROL;
224 ds1307->msg[1].len = 2; 288 ds1307->msg[1].len = 2;
225 289
226 tmp = i2c_transfer(client->adapter, ds1307->msg, 2); 290 /* get registers that the "rtc" read below won't read... */
291 tmp = i2c_transfer(adapter, ds1307->msg, 2);
227 if (tmp != 2) { 292 if (tmp != 2) {
228 pr_debug("read error %d\n", tmp); 293 pr_debug("read error %d\n", tmp);
229 err = -EIO; 294 err = -EIO;
@@ -233,19 +298,26 @@ ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
233 ds1307->reg_addr = 0; 298 ds1307->reg_addr = 0;
234 ds1307->msg[1].len = sizeof(ds1307->regs); 299 ds1307->msg[1].len = sizeof(ds1307->regs);
235 300
236 /* oscillator is off; need to turn it on */ 301 /* oscillator off? turn it on, so clock can tick. */
237 if ((ds1307->regs[0] & DS1337_BIT_nEOSC) 302 if (ds1307->regs[0] & DS1337_BIT_nEOSC)
238 || (ds1307->regs[1] & DS1337_BIT_OSF)) { 303 i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL,
239 printk(KERN_ERR "no ds1337 oscillator code\n"); 304 ds1307->regs[0] & ~DS1337_BIT_nEOSC);
240 goto exit_free; 305
306 /* oscillator fault? clear flag, and warn */
307 if (ds1307->regs[1] & DS1337_BIT_OSF) {
308 i2c_smbus_write_byte_data(client, DS1337_REG_STATUS,
309 ds1307->regs[1] & ~DS1337_BIT_OSF);
310 dev_warn(&client->dev, "SET TIME!\n");
241 } 311 }
242 } else 312 break;
243 ds1307->type = ds_1307; 313 default:
314 break;
315 }
244 316
245read_rtc: 317read_rtc:
246 /* read RTC registers */ 318 /* read RTC registers */
247 319
248 tmp = i2c_transfer(client->adapter, ds1307->msg, 2); 320 tmp = i2c_transfer(adapter, ds1307->msg, 2);
249 if (tmp != 2) { 321 if (tmp != 2) {
250 pr_debug("read error %d\n", tmp); 322 pr_debug("read error %d\n", tmp);
251 err = -EIO; 323 err = -EIO;
@@ -257,72 +329,80 @@ read_rtc:
257 * still a few values that are clearly out-of-range. 329 * still a few values that are clearly out-of-range.
258 */ 330 */
259 tmp = ds1307->regs[DS1307_REG_SECS]; 331 tmp = ds1307->regs[DS1307_REG_SECS];
260 if (tmp & DS1307_BIT_CH) { 332 switch (ds1307->type) {
261 if (ds1307->type && ds1307->type != ds_1307) { 333 case ds_1340:
262 pr_debug("not a ds1307?\n"); 334 /* FIXME read register with DS1340_BIT_OSF, use that to
263 goto exit_free; 335 * trigger the "set time" warning (*after* restarting the
264 } 336 * oscillator!) instead of this weaker ds1307/m41t00 test.
265 ds1307->type = ds_1307;
266
267 /* this partial initialization should work for ds1307,
268 * ds1338, ds1340, st m41t00, and more.
269 */ 337 */
270 dev_warn(&client->dev, "oscillator started; SET TIME!\n"); 338 case ds_1307:
271 i2c_smbus_write_byte_data(client, 0, 0); 339 case m41t00:
272 goto read_rtc; 340 /* clock halted? turn it on, so clock can tick. */
341 if (tmp & DS1307_BIT_CH) {
342 i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
343 dev_warn(&client->dev, "SET TIME!\n");
344 goto read_rtc;
345 }
346 break;
347 case ds_1338:
348 /* clock halted? turn it on, so clock can tick. */
349 if (tmp & DS1307_BIT_CH)
350 i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
351
352 /* oscillator fault? clear flag, and warn */
353 if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) {
354 i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL,
355 ds1307->regs[DS1337_REG_CONTROL]
356 & ~DS1338_BIT_OSF);
357 dev_warn(&client->dev, "SET TIME!\n");
358 goto read_rtc;
359 }
360 break;
361 case ds_1337:
362 case ds_1339:
363 break;
273 } 364 }
365
366 tmp = ds1307->regs[DS1307_REG_SECS];
274 tmp = BCD2BIN(tmp & 0x7f); 367 tmp = BCD2BIN(tmp & 0x7f);
275 if (tmp > 60) 368 if (tmp > 60)
276 goto exit_free; 369 goto exit_bad;
277 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f); 370 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
278 if (tmp > 60) 371 if (tmp > 60)
279 goto exit_free; 372 goto exit_bad;
280 373
281 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f); 374 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
282 if (tmp == 0 || tmp > 31) 375 if (tmp == 0 || tmp > 31)
283 goto exit_free; 376 goto exit_bad;
284 377
285 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f); 378 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
286 if (tmp == 0 || tmp > 12) 379 if (tmp == 0 || tmp > 12)
287 goto exit_free; 380 goto exit_bad;
288 381
289 /* force into in 24 hour mode (most chips) or
290 * disable century bit (ds1340)
291 */
292 tmp = ds1307->regs[DS1307_REG_HOUR]; 382 tmp = ds1307->regs[DS1307_REG_HOUR];
293 if (tmp & (1 << 6)) {
294 if (tmp & (1 << 5))
295 tmp = BCD2BIN(tmp & 0x1f) + 12;
296 else
297 tmp = BCD2BIN(tmp);
298 i2c_smbus_write_byte_data(client,
299 DS1307_REG_HOUR,
300 BIN2BCD(tmp));
301 }
302
303 /* FIXME chips like 1337 can generate alarm irqs too; those are
304 * worth exposing through the API (especially when the irq is
305 * wakeup-capable).
306 */
307
308 switch (ds1307->type) { 383 switch (ds1307->type) {
309 case unknown:
310 strlcpy(client->name, "unknown", I2C_NAME_SIZE);
311 break;
312 case ds_1307:
313 strlcpy(client->name, "ds1307", I2C_NAME_SIZE);
314 break;
315 case ds_1337:
316 strlcpy(client->name, "ds1337", I2C_NAME_SIZE);
317 break;
318 case ds_1340: 384 case ds_1340:
319 strlcpy(client->name, "ds1340", I2C_NAME_SIZE); 385 case m41t00:
386 /* NOTE: ignores century bits; fix before deploying
387 * systems that will run through year 2100.
388 */
320 break; 389 break;
321 } 390 default:
391 if (!(tmp & DS1307_BIT_12HR))
392 break;
322 393
323 /* Tell the I2C layer a new client has arrived */ 394 /* Be sure we're in 24 hour mode. Multi-master systems
324 if ((err = i2c_attach_client(client))) 395 * take note...
325 goto exit_free; 396 */
397 tmp = BCD2BIN(tmp & 0x1f);
398 if (tmp == 12)
399 tmp = 0;
400 if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
401 tmp += 12;
402 i2c_smbus_write_byte_data(client,
403 DS1307_REG_HOUR,
404 BIN2BCD(tmp));
405 }
326 406
327 ds1307->rtc = rtc_device_register(client->name, &client->dev, 407 ds1307->rtc = rtc_device_register(client->name, &client->dev,
328 &ds13xx_rtc_ops, THIS_MODULE); 408 &ds13xx_rtc_ops, THIS_MODULE);
@@ -330,46 +410,40 @@ read_rtc:
330 err = PTR_ERR(ds1307->rtc); 410 err = PTR_ERR(ds1307->rtc);
331 dev_err(&client->dev, 411 dev_err(&client->dev,
332 "unable to register the class device\n"); 412 "unable to register the class device\n");
333 goto exit_detach; 413 goto exit_free;
334 } 414 }
335 415
336 return 0; 416 return 0;
337 417
338exit_detach: 418exit_bad:
339 i2c_detach_client(client); 419 dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
420 "bogus register",
421 ds1307->regs[0], ds1307->regs[1],
422 ds1307->regs[2], ds1307->regs[3],
423 ds1307->regs[4], ds1307->regs[5],
424 ds1307->regs[6]);
425
340exit_free: 426exit_free:
341 kfree(ds1307); 427 kfree(ds1307);
342exit:
343 return err; 428 return err;
344} 429}
345 430
346static int __devinit 431static int __devexit ds1307_remove(struct i2c_client *client)
347ds1307_attach_adapter(struct i2c_adapter *adapter)
348{
349 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
350 return 0;
351 return i2c_probe(adapter, &addr_data, ds1307_detect);
352}
353
354static int __devexit ds1307_detach_client(struct i2c_client *client)
355{ 432{
356 int err;
357 struct ds1307 *ds1307 = i2c_get_clientdata(client); 433 struct ds1307 *ds1307 = i2c_get_clientdata(client);
358 434
359 rtc_device_unregister(ds1307->rtc); 435 rtc_device_unregister(ds1307->rtc);
360 if ((err = i2c_detach_client(client)))
361 return err;
362 kfree(ds1307); 436 kfree(ds1307);
363 return 0; 437 return 0;
364} 438}
365 439
366static struct i2c_driver ds1307_driver = { 440static struct i2c_driver ds1307_driver = {
367 .driver = { 441 .driver = {
368 .name = "ds1307", 442 .name = "rtc-ds1307",
369 .owner = THIS_MODULE, 443 .owner = THIS_MODULE,
370 }, 444 },
371 .attach_adapter = ds1307_attach_adapter, 445 .probe = ds1307_probe,
372 .detach_client = __devexit_p(ds1307_detach_client), 446 .remove = __devexit_p(ds1307_remove),
373}; 447};
374 448
375static int __init ds1307_init(void) 449static int __init ds1307_init(void)
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
new file mode 100644
index 000000000000..80c4a8463065
--- /dev/null
+++ b/drivers/rtc/rtc-m41t80.c
@@ -0,0 +1,917 @@
1/*
2 * I2C client/driver for the ST M41T80 family of i2c rtc chips.
3 *
4 * Author: Alexander Bigga <ab@mycable.de>
5 *
6 * Based on m41t00.c by Mark A. Greer <mgreer@mvista.com>
7 *
8 * 2006 (c) mycable GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/i2c.h>
21#include <linux/rtc.h>
22#include <linux/bcd.h>
23#ifdef CONFIG_RTC_DRV_M41T80_WDT
24#include <linux/miscdevice.h>
25#include <linux/watchdog.h>
26#include <linux/reboot.h>
27#include <linux/fs.h>
28#include <linux/ioctl.h>
29#endif
30
31#define M41T80_REG_SSEC 0
32#define M41T80_REG_SEC 1
33#define M41T80_REG_MIN 2
34#define M41T80_REG_HOUR 3
35#define M41T80_REG_WDAY 4
36#define M41T80_REG_DAY 5
37#define M41T80_REG_MON 6
38#define M41T80_REG_YEAR 7
39#define M41T80_REG_ALARM_MON 0xa
40#define M41T80_REG_ALARM_DAY 0xb
41#define M41T80_REG_ALARM_HOUR 0xc
42#define M41T80_REG_ALARM_MIN 0xd
43#define M41T80_REG_ALARM_SEC 0xe
44#define M41T80_REG_FLAGS 0xf
45#define M41T80_REG_SQW 0x13
46
47#define M41T80_DATETIME_REG_SIZE (M41T80_REG_YEAR + 1)
48#define M41T80_ALARM_REG_SIZE \
49 (M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON)
50
51#define M41T80_SEC_ST (1 << 7) /* ST: Stop Bit */
52#define M41T80_ALMON_AFE (1 << 7) /* AFE: AF Enable Bit */
53#define M41T80_ALMON_SQWE (1 << 6) /* SQWE: SQW Enable Bit */
54#define M41T80_ALHOUR_HT (1 << 6) /* HT: Halt Update Bit */
55#define M41T80_FLAGS_AF (1 << 6) /* AF: Alarm Flag Bit */
56#define M41T80_FLAGS_BATT_LOW (1 << 4) /* BL: Battery Low Bit */
57
58#define M41T80_FEATURE_HT (1 << 0)
59#define M41T80_FEATURE_BL (1 << 1)
60
61#define DRV_VERSION "0.05"
62
63struct m41t80_chip_info {
64 const char *name;
65 u8 features;
66};
67
68static const struct m41t80_chip_info m41t80_chip_info_tbl[] = {
69 {
70 .name = "m41t80",
71 .features = 0,
72 },
73 {
74 .name = "m41t81",
75 .features = M41T80_FEATURE_HT,
76 },
77 {
78 .name = "m41t81s",
79 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
80 },
81 {
82 .name = "m41t82",
83 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
84 },
85 {
86 .name = "m41t83",
87 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
88 },
89 {
90 .name = "m41st84",
91 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
92 },
93 {
94 .name = "m41st85",
95 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
96 },
97 {
98 .name = "m41st87",
99 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
100 },
101};
102
103struct m41t80_data {
104 const struct m41t80_chip_info *chip;
105 struct rtc_device *rtc;
106};
107
108static int m41t80_get_datetime(struct i2c_client *client,
109 struct rtc_time *tm)
110{
111 u8 buf[M41T80_DATETIME_REG_SIZE], dt_addr[1] = { M41T80_REG_SEC };
112 struct i2c_msg msgs[] = {
113 {
114 .addr = client->addr,
115 .flags = 0,
116 .len = 1,
117 .buf = dt_addr,
118 },
119 {
120 .addr = client->addr,
121 .flags = I2C_M_RD,
122 .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
123 .buf = buf + M41T80_REG_SEC,
124 },
125 };
126
127 if (i2c_transfer(client->adapter, msgs, 2) < 0) {
128 dev_err(&client->dev, "read error\n");
129 return -EIO;
130 }
131
132 tm->tm_sec = BCD2BIN(buf[M41T80_REG_SEC] & 0x7f);
133 tm->tm_min = BCD2BIN(buf[M41T80_REG_MIN] & 0x7f);
134 tm->tm_hour = BCD2BIN(buf[M41T80_REG_HOUR] & 0x3f);
135 tm->tm_mday = BCD2BIN(buf[M41T80_REG_DAY] & 0x3f);
136 tm->tm_wday = buf[M41T80_REG_WDAY] & 0x07;
137 tm->tm_mon = BCD2BIN(buf[M41T80_REG_MON] & 0x1f) - 1;
138
139 /* assume 20YY not 19YY, and ignore the Century Bit */
140 tm->tm_year = BCD2BIN(buf[M41T80_REG_YEAR]) + 100;
141 return 0;
142}
143
144/* Sets the given date and time to the real time clock. */
145static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
146{
147 u8 wbuf[1 + M41T80_DATETIME_REG_SIZE];
148 u8 *buf = &wbuf[1];
149 u8 dt_addr[1] = { M41T80_REG_SEC };
150 struct i2c_msg msgs_in[] = {
151 {
152 .addr = client->addr,
153 .flags = 0,
154 .len = 1,
155 .buf = dt_addr,
156 },
157 {
158 .addr = client->addr,
159 .flags = I2C_M_RD,
160 .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
161 .buf = buf + M41T80_REG_SEC,
162 },
163 };
164 struct i2c_msg msgs[] = {
165 {
166 .addr = client->addr,
167 .flags = 0,
168 .len = 1 + M41T80_DATETIME_REG_SIZE,
169 .buf = wbuf,
170 },
171 };
172
173 /* Read current reg values into buf[1..7] */
174 if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
175 dev_err(&client->dev, "read error\n");
176 return -EIO;
177 }
178
179 wbuf[0] = 0; /* offset into rtc's regs */
180 /* Merge time-data and register flags into buf[0..7] */
181 buf[M41T80_REG_SSEC] = 0;
182 buf[M41T80_REG_SEC] =
183 BIN2BCD(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f);
184 buf[M41T80_REG_MIN] =
185 BIN2BCD(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f);
186 buf[M41T80_REG_HOUR] =
187 BIN2BCD(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f) ;
188 buf[M41T80_REG_WDAY] =
189 (tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07);
190 buf[M41T80_REG_DAY] =
191 BIN2BCD(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
192 buf[M41T80_REG_MON] =
193 BIN2BCD(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
194 /* assume 20YY not 19YY */
195 buf[M41T80_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
196
197 if (i2c_transfer(client->adapter, msgs, 1) != 1) {
198 dev_err(&client->dev, "write error\n");
199 return -EIO;
200 }
201 return 0;
202}
203
204#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
205static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
206{
207 struct i2c_client *client = to_i2c_client(dev);
208 struct m41t80_data *clientdata = i2c_get_clientdata(client);
209 u8 reg;
210
211 if (clientdata->chip->features & M41T80_FEATURE_BL) {
212 reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
213 seq_printf(seq, "battery\t\t: %s\n",
214 (reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok");
215 }
216 return 0;
217}
218#else
219#define m41t80_rtc_proc NULL
220#endif
221
222static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
223{
224 return m41t80_get_datetime(to_i2c_client(dev), tm);
225}
226
227static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
228{
229 return m41t80_set_datetime(to_i2c_client(dev), tm);
230}
231
232#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
233static int
234m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
235{
236 struct i2c_client *client = to_i2c_client(dev);
237 int rc;
238
239 switch (cmd) {
240 case RTC_AIE_OFF:
241 case RTC_AIE_ON:
242 break;
243 default:
244 return -ENOIOCTLCMD;
245 }
246
247 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
248 if (rc < 0)
249 goto err;
250 switch (cmd) {
251 case RTC_AIE_OFF:
252 rc &= ~M41T80_ALMON_AFE;
253 break;
254 case RTC_AIE_ON:
255 rc |= M41T80_ALMON_AFE;
256 break;
257 }
258 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
259 goto err;
260 return 0;
261err:
262 return -EIO;
263}
264#else
265#define m41t80_rtc_ioctl NULL
266#endif
267
268static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
269{
270 struct i2c_client *client = to_i2c_client(dev);
271 u8 wbuf[1 + M41T80_ALARM_REG_SIZE];
272 u8 *buf = &wbuf[1];
273 u8 *reg = buf - M41T80_REG_ALARM_MON;
274 u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
275 struct i2c_msg msgs_in[] = {
276 {
277 .addr = client->addr,
278 .flags = 0,
279 .len = 1,
280 .buf = dt_addr,
281 },
282 {
283 .addr = client->addr,
284 .flags = I2C_M_RD,
285 .len = M41T80_ALARM_REG_SIZE,
286 .buf = buf,
287 },
288 };
289 struct i2c_msg msgs[] = {
290 {
291 .addr = client->addr,
292 .flags = 0,
293 .len = 1 + M41T80_ALARM_REG_SIZE,
294 .buf = wbuf,
295 },
296 };
297
298 if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
299 dev_err(&client->dev, "read error\n");
300 return -EIO;
301 }
302 reg[M41T80_REG_ALARM_MON] &= ~(0x1f | M41T80_ALMON_AFE);
303 reg[M41T80_REG_ALARM_DAY] = 0;
304 reg[M41T80_REG_ALARM_HOUR] &= ~(0x3f | 0x80);
305 reg[M41T80_REG_ALARM_MIN] = 0;
306 reg[M41T80_REG_ALARM_SEC] = 0;
307
308 wbuf[0] = M41T80_REG_ALARM_MON; /* offset into rtc's regs */
309 reg[M41T80_REG_ALARM_SEC] |= t->time.tm_sec >= 0 ?
310 BIN2BCD(t->time.tm_sec) : 0x80;
311 reg[M41T80_REG_ALARM_MIN] |= t->time.tm_min >= 0 ?
312 BIN2BCD(t->time.tm_min) : 0x80;
313 reg[M41T80_REG_ALARM_HOUR] |= t->time.tm_hour >= 0 ?
314 BIN2BCD(t->time.tm_hour) : 0x80;
315 reg[M41T80_REG_ALARM_DAY] |= t->time.tm_mday >= 0 ?
316 BIN2BCD(t->time.tm_mday) : 0x80;
317 if (t->time.tm_mon >= 0)
318 reg[M41T80_REG_ALARM_MON] |= BIN2BCD(t->time.tm_mon + 1);
319 else
320 reg[M41T80_REG_ALARM_DAY] |= 0x40;
321
322 if (i2c_transfer(client->adapter, msgs, 1) != 1) {
323 dev_err(&client->dev, "write error\n");
324 return -EIO;
325 }
326
327 if (t->enabled) {
328 reg[M41T80_REG_ALARM_MON] |= M41T80_ALMON_AFE;
329 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
330 reg[M41T80_REG_ALARM_MON]) < 0) {
331 dev_err(&client->dev, "write error\n");
332 return -EIO;
333 }
334 }
335 return 0;
336}
337
338static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
339{
340 struct i2c_client *client = to_i2c_client(dev);
341 u8 buf[M41T80_ALARM_REG_SIZE + 1]; /* all alarm regs and flags */
342 u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
343 u8 *reg = buf - M41T80_REG_ALARM_MON;
344 struct i2c_msg msgs[] = {
345 {
346 .addr = client->addr,
347 .flags = 0,
348 .len = 1,
349 .buf = dt_addr,
350 },
351 {
352 .addr = client->addr,
353 .flags = I2C_M_RD,
354 .len = M41T80_ALARM_REG_SIZE + 1,
355 .buf = buf,
356 },
357 };
358
359 if (i2c_transfer(client->adapter, msgs, 2) < 0) {
360 dev_err(&client->dev, "read error\n");
361 return -EIO;
362 }
363 t->time.tm_sec = -1;
364 t->time.tm_min = -1;
365 t->time.tm_hour = -1;
366 t->time.tm_mday = -1;
367 t->time.tm_mon = -1;
368 if (!(reg[M41T80_REG_ALARM_SEC] & 0x80))
369 t->time.tm_sec = BCD2BIN(reg[M41T80_REG_ALARM_SEC] & 0x7f);
370 if (!(reg[M41T80_REG_ALARM_MIN] & 0x80))
371 t->time.tm_min = BCD2BIN(reg[M41T80_REG_ALARM_MIN] & 0x7f);
372 if (!(reg[M41T80_REG_ALARM_HOUR] & 0x80))
373 t->time.tm_hour = BCD2BIN(reg[M41T80_REG_ALARM_HOUR] & 0x3f);
374 if (!(reg[M41T80_REG_ALARM_DAY] & 0x80))
375 t->time.tm_mday = BCD2BIN(reg[M41T80_REG_ALARM_DAY] & 0x3f);
376 if (!(reg[M41T80_REG_ALARM_DAY] & 0x40))
377 t->time.tm_mon = BCD2BIN(reg[M41T80_REG_ALARM_MON] & 0x1f) - 1;
378 t->time.tm_year = -1;
379 t->time.tm_wday = -1;
380 t->time.tm_yday = -1;
381 t->time.tm_isdst = -1;
382 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
383 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
384 return 0;
385}
386
387static struct rtc_class_ops m41t80_rtc_ops = {
388 .read_time = m41t80_rtc_read_time,
389 .set_time = m41t80_rtc_set_time,
390 .read_alarm = m41t80_rtc_read_alarm,
391 .set_alarm = m41t80_rtc_set_alarm,
392 .proc = m41t80_rtc_proc,
393 .ioctl = m41t80_rtc_ioctl,
394};
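
These operations are normally reached through the generic RTC character-device interface rather than called directly. A hedged user-space sketch, assuming the chip shows up as /dev/rtc0, that exercises the read_time and alarm-interrupt paths above:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);	/* device node name is an assumption */

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_RD_TIME, &tm) == 0)	/* ends up in m41t80_rtc_read_time() */
		printf("%04d-%02d-%02d %02d:%02d:%02d\n",
		       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		       tm.tm_hour, tm.tm_min, tm.tm_sec);
	ioctl(fd, RTC_AIE_ON, 0);		/* routed to m41t80_rtc_ioctl() */
	ioctl(fd, RTC_AIE_OFF, 0);
	close(fd);
	return 0;
}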
395
396#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
397static ssize_t m41t80_sysfs_show_flags(struct device *dev,
398 struct device_attribute *attr, char *buf)
399{
400 struct i2c_client *client = to_i2c_client(dev);
401 int val;
402
403 val = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
404 if (val < 0)
405 return -EIO;
406 return sprintf(buf, "%#x\n", val);
407}
408static DEVICE_ATTR(flags, S_IRUGO, m41t80_sysfs_show_flags, NULL);
409
410static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev,
411 struct device_attribute *attr, char *buf)
412{
413 struct i2c_client *client = to_i2c_client(dev);
414 int val;
415
416 val = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
417 if (val < 0)
418 return -EIO;
419 val = (val >> 4) & 0xf;
420 switch (val) {
421 case 0:
422 break;
423 case 1:
424 val = 32768;
425 break;
426 default:
427 val = 32768 >> val;
428 }
429 return sprintf(buf, "%d\n", val);
430}
431static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
432 struct device_attribute *attr,
433 const char *buf, size_t count)
434{
435 struct i2c_client *client = to_i2c_client(dev);
436 int almon, sqw;
437 int val = simple_strtoul(buf, NULL, 0);
438
439 if (val) {
440 if (!is_power_of_2(val))
441 return -EINVAL;
442 val = ilog2(val);
443 if (val == 15)
444 val = 1;
445 else if (val < 14)
446 val = 15 - val;
447 else
448 return -EINVAL;
449 }
450 /* disable SQW, set SQW frequency & re-enable */
451 almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
452 if (almon < 0)
453 return -EIO;
454 sqw = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
455 if (sqw < 0)
456 return -EIO;
457 sqw = (sqw & 0x0f) | (val << 4);
458 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
459 almon & ~M41T80_ALMON_SQWE) < 0 ||
460 i2c_smbus_write_byte_data(client, M41T80_REG_SQW, sqw) < 0)
461 return -EIO;
462 if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
463 almon | M41T80_ALMON_SQWE) < 0)
464 return -EIO;
465 return count;
466}
467static DEVICE_ATTR(sqwfreq, S_IRUGO | S_IWUSR,
468 m41t80_sysfs_show_sqwfreq, m41t80_sysfs_set_sqwfreq);
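
The store routine above accepts 0 or a power-of-two frequency and encodes it into the high nibble of the SQW register: 32768 Hz maps to 1, lower rates map to 15 - log2(freq), and 16384 Hz cannot be encoded. A stand-alone sketch of just that mapping (the function name is illustrative, not part of the driver):

/* Illustrative mirror of the sysfs store mapping above: returns the 4-bit
 * SQW rate field, or -1 for frequencies the chip cannot generate.
 */
static int sqw_freq_to_field(unsigned int hz)
{
	int log2hz = 0;

	if (hz == 0)
		return 0;		/* 0 disables the square wave output */
	if (hz & (hz - 1))
		return -1;		/* must be a power of two */
	while (hz >>= 1)
		log2hz++;
	if (log2hz == 15)
		return 1;		/* 32768 Hz */
	if (log2hz < 14)
		return 15 - log2hz;	/* e.g. 8192 Hz -> 2, 1 Hz -> 15 */
	return -1;			/* 16384 Hz has no encoding */
}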
469
470static struct attribute *attrs[] = {
471 &dev_attr_flags.attr,
472 &dev_attr_sqwfreq.attr,
473 NULL,
474};
475static struct attribute_group attr_group = {
476 .attrs = attrs,
477};
478
479static int m41t80_sysfs_register(struct device *dev)
480{
481 return sysfs_create_group(&dev->kobj, &attr_group);
482}
483#else
484static int m41t80_sysfs_register(struct device *dev)
485{
486 return 0;
487}
488#endif
489
490#ifdef CONFIG_RTC_DRV_M41T80_WDT
491/*
492 *****************************************************************************
493 *
494 * Watchdog Driver
495 *
496 *****************************************************************************
497 */
498static struct i2c_client *save_client;
499
500/* Default margin */
501#define WD_TIMO 60 /* seconds: 1..31 at 1s resolution, longer margins use 4s steps */
502
503static int wdt_margin = WD_TIMO;
504module_param(wdt_margin, int, 0);
505MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 60s)");
506
507static unsigned long wdt_is_open;
508static int boot_flag;
509
510/**
511 * wdt_ping:
512 *
513 * Reload the watchdog register with the configured timeout so the
514 * hardware counter restarts from the full margin.
515 */
516static void wdt_ping(void)
517{
518 unsigned char i2c_data[2];
519 struct i2c_msg msgs1[1] = {
520 {
521 .addr = save_client->addr,
522 .flags = 0,
523 .len = 2,
524 .buf = i2c_data,
525 },
526 };
527 i2c_data[0] = 0x09; /* watchdog register */
528
529 if (wdt_margin > 31)
530 i2c_data[1] = (wdt_margin & 0xFC) | 0x83; /* resolution = 4s */
531 else
532 /*
533 * WDS = 1 (0x80), multiplier = wdt_margin, resolution = 1s (0x02)
534 */
535 i2c_data[1] = wdt_margin<<2 | 0x82;
536
537 i2c_transfer(save_client->adapter, msgs1, 1);
538}
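
For reference, the byte written to the watchdog register (0x09) packs WDS in bit 7, a multiplier and a 2-bit resolution code. A sketch that mirrors the encoding used by wdt_ping() above (the helper name is illustrative):

/* Illustrative only: the byte wdt_ping() writes to the watchdog register. */
static unsigned char m41t80_wdt_byte(int margin_secs)
{
	if (margin_secs > 31)
		/* WDS (0x80) | multiplier bits | 4s resolution (0x03) */
		return (margin_secs & 0xFC) | 0x83;
	/* WDS (0x80) | multiplier << 2 | 1s resolution (0x02) */
	return (margin_secs << 2) | 0x82;
}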
539
540/**
541 * wdt_disable:
542 *
543 * disables watchdog.
544 */
545static void wdt_disable(void)
546{
547 unsigned char i2c_data[2], i2c_buf[0x10];
548 struct i2c_msg msgs0[2] = {
549 {
550 .addr = save_client->addr,
551 .flags = 0,
552 .len = 1,
553 .buf = i2c_data,
554 },
555 {
556 .addr = save_client->addr,
557 .flags = I2C_M_RD,
558 .len = 1,
559 .buf = i2c_buf,
560 },
561 };
562 struct i2c_msg msgs1[1] = {
563 {
564 .addr = save_client->addr,
565 .flags = 0,
566 .len = 2,
567 .buf = i2c_data,
568 },
569 };
570
571 i2c_data[0] = 0x09;
572 i2c_transfer(save_client->adapter, msgs0, 2);
573
574 i2c_data[0] = 0x09;
575 i2c_data[1] = 0x00;
576 i2c_transfer(save_client->adapter, msgs1, 1);
577}
578
579/**
580 * wdt_write:
581 * @file: file handle to the watchdog
582 * @buf: buffer to write (unused, as the data content does not matter)
583 * @count: count of bytes
584 * @ppos: pointer to the position to write. No seeks allowed
585 *
586 * A write to a watchdog device is defined as a keepalive signal. Any
587 * write of data will do, as we don't assign any meaning to the content.
588 */
589static ssize_t wdt_write(struct file *file, const char __user *buf,
590 size_t count, loff_t *ppos)
591{
592 /* Can't seek (pwrite) on this device
593 if (ppos != &file->f_pos)
594 return -ESPIPE;
595 */
596 if (count) {
597 wdt_ping();
598 return 1;
599 }
600 return 0;
601}
602
603static ssize_t wdt_read(struct file *file, char __user *buf,
604 size_t count, loff_t *ppos)
605{
606 return 0;
607}
608
609/**
610 * wdt_ioctl:
611 * @inode: inode of the device
612 * @file: file handle to the device
613 * @cmd: watchdog command
614 * @arg: argument pointer
615 *
616 * The watchdog API defines a common set of functions for all watchdogs
617 * according to their available features. Here we support querying
618 * capabilities and status, keepalive pings and timeout configuration.
619 */
620static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
621 unsigned long arg)
622{
623 int new_margin, rv;
624 static struct watchdog_info ident = {
625 .options = WDIOF_POWERUNDER | WDIOF_KEEPALIVEPING |
626 WDIOF_SETTIMEOUT,
627 .firmware_version = 1,
628 .identity = "M41T80 WDT"
629 };
630
631 switch (cmd) {
632 case WDIOC_GETSUPPORT:
633 return copy_to_user((struct watchdog_info __user *)arg, &ident,
634 sizeof(ident)) ? -EFAULT : 0;
635
636 case WDIOC_GETSTATUS:
637 case WDIOC_GETBOOTSTATUS:
638 return put_user(boot_flag, (int __user *)arg);
639 case WDIOC_KEEPALIVE:
640 wdt_ping();
641 return 0;
642 case WDIOC_SETTIMEOUT:
643 if (get_user(new_margin, (int __user *)arg))
644 return -EFAULT;
645 /* Arbitrary, can't find the card's limits */
646 if (new_margin < 1 || new_margin > 124)
647 return -EINVAL;
648 wdt_margin = new_margin;
649 wdt_ping();
650 /* Fall through */
651 case WDIOC_GETTIMEOUT:
652 return put_user(wdt_margin, (int __user *)arg);
653
654 case WDIOC_SETOPTIONS:
655 if (copy_from_user(&rv, (int __user *)arg, sizeof(int)))
656 return -EFAULT;
657
658 if (rv & WDIOS_DISABLECARD) {
659 printk(KERN_INFO
660 "rtc-m41t80: disable watchdog\n");
661 wdt_disable();
662 }
663
664 if (rv & WDIOS_ENABLECARD) {
665 printk(KERN_INFO
666 "rtc-m41t80: enable watchdog\n");
667 wdt_ping();
668 }
669
670 return -EINVAL;
671 }
672 return -ENOTTY;
673}
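
User space drives this through the misc watchdog node using the standard ioctls from <linux/watchdog.h>. A hedged example, assuming the usual /dev/watchdog device node, that sets a new margin and then keeps the timer alive:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 120;	/* seconds; this driver accepts 1..124 */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ends up in wdt_ping() */
		sleep((unsigned int)timeout / 2);
	}
	return 0;
}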
674
675/**
676 * wdt_open:
677 * @inode: inode of device
678 * @file: file handle to device
679 *
680 */
681static int wdt_open(struct inode *inode, struct file *file)
682{
683 if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
684 if (test_and_set_bit(0, &wdt_is_open))
685 return -EBUSY;
686 /*
687 * Activate
688 */
689 wdt_is_open = 1;
690 return 0;
691 }
692 return -ENODEV;
693}
694
695/**
696 * wdt_close:
697 * @inode: inode to board
698 * @file: file handle to board
699 *
700 */
701static int wdt_release(struct inode *inode, struct file *file)
702{
703 if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
704 clear_bit(0, &wdt_is_open);
705 return 0;
706}
707
708/**
709 * notify_sys:
710 * @this: our notifier block
711 * @code: the event being reported
712 * @unused: unused
713 *
714 * Our notifier is called on system shutdowns. We want to turn the card
715 * off at reboot otherwise the machine will reboot again during memory
716 * test or worse yet during the following fsck. This would suck, in fact
717 * trust me - if it happens it does suck.
718 */
719static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
720 void *unused)
721{
722 if (code == SYS_DOWN || code == SYS_HALT)
723 /* Disable Watchdog */
724 wdt_disable();
725 return NOTIFY_DONE;
726}
727
728static const struct file_operations wdt_fops = {
729 .owner = THIS_MODULE,
730 .read = wdt_read,
731 .ioctl = wdt_ioctl,
732 .write = wdt_write,
733 .open = wdt_open,
734 .release = wdt_release,
735};
736
737static struct miscdevice wdt_dev = {
738 .minor = WATCHDOG_MINOR,
739 .name = "watchdog",
740 .fops = &wdt_fops,
741};
742
743/*
744 * The WDT card needs to learn about soft shutdowns in order to
745 * turn the timebomb registers off.
746 */
747static struct notifier_block wdt_notifier = {
748 .notifier_call = wdt_notify_sys,
749};
750#endif /* CONFIG_RTC_DRV_M41T80_WDT */
751
752/*
753 *****************************************************************************
754 *
755 * Driver Interface
756 *
757 *****************************************************************************
758 */
759static int m41t80_probe(struct i2c_client *client)
760{
761 int i, rc = 0;
762 struct rtc_device *rtc = NULL;
763 struct rtc_time tm;
764 const struct m41t80_chip_info *chip;
765 struct m41t80_data *clientdata = NULL;
766
767 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C
768 | I2C_FUNC_SMBUS_BYTE_DATA)) {
769 rc = -ENODEV;
770 goto exit;
771 }
772
773 dev_info(&client->dev,
774 "chip found, driver version " DRV_VERSION "\n");
775
776 chip = NULL;
777 for (i = 0; i < ARRAY_SIZE(m41t80_chip_info_tbl); i++) {
778 if (!strcmp(m41t80_chip_info_tbl[i].name, client->name)) {
779 chip = &m41t80_chip_info_tbl[i];
780 break;
781 }
782 }
783 if (!chip) {
784 dev_err(&client->dev, "%s is not supported\n", client->name);
785 rc = -ENODEV;
786 goto exit;
787 }
788
789 clientdata = kzalloc(sizeof(*clientdata), GFP_KERNEL);
790 if (!clientdata) {
791 rc = -ENOMEM;
792 goto exit;
793 }
794
795 rtc = rtc_device_register(client->name, &client->dev,
796 &m41t80_rtc_ops, THIS_MODULE);
797 if (IS_ERR(rtc)) {
798 rc = PTR_ERR(rtc);
799 rtc = NULL;
800 goto exit;
801 }
802
803 clientdata->rtc = rtc;
804 clientdata->chip = chip;
805 i2c_set_clientdata(client, clientdata);
806
807 /* Make sure HT (Halt Update) bit is cleared */
808 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
809 if (rc < 0)
810 goto ht_err;
811
812 if (rc & M41T80_ALHOUR_HT) {
813 if (chip->features & M41T80_FEATURE_HT) {
814 m41t80_get_datetime(client, &tm);
815 dev_info(&client->dev, "HT bit was set!\n");
816 dev_info(&client->dev,
817 "Power Down at "
818 "%04i-%02i-%02i %02i:%02i:%02i\n",
819 tm.tm_year + 1900,
820 tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
821 tm.tm_min, tm.tm_sec);
822 }
823 if (i2c_smbus_write_byte_data(client,
824 M41T80_REG_ALARM_HOUR,
825 rc & ~M41T80_ALHOUR_HT) < 0)
826 goto ht_err;
827 }
828
829 /* Make sure ST (stop) bit is cleared */
830 rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC);
831 if (rc < 0)
832 goto st_err;
833
834 if (rc & M41T80_SEC_ST) {
835 if (i2c_smbus_write_byte_data(client, M41T80_REG_SEC,
836 rc & ~M41T80_SEC_ST) < 0)
837 goto st_err;
838 }
839
840 rc = m41t80_sysfs_register(&client->dev);
841 if (rc)
842 goto exit;
843
844#ifdef CONFIG_RTC_DRV_M41T80_WDT
845 if (chip->features & M41T80_FEATURE_HT) {
846 rc = misc_register(&wdt_dev);
847 if (rc)
848 goto exit;
849 rc = register_reboot_notifier(&wdt_notifier);
850 if (rc) {
851 misc_deregister(&wdt_dev);
852 goto exit;
853 }
854 save_client = client;
855 }
856#endif
857 return 0;
858
859st_err:
860 rc = -EIO;
861 dev_err(&client->dev, "Can't clear ST bit\n");
862 goto exit;
863ht_err:
864 rc = -EIO;
865 dev_err(&client->dev, "Can't clear HT bit\n");
866 goto exit;
867
868exit:
869 if (rtc)
870 rtc_device_unregister(rtc);
871 kfree(clientdata);
872 return rc;
873}
874
875static int m41t80_remove(struct i2c_client *client)
876{
877 struct m41t80_data *clientdata = i2c_get_clientdata(client);
878 struct rtc_device *rtc = clientdata->rtc;
879
880#ifdef CONFIG_RTC_DRV_M41T80_WDT
881 if (clientdata->chip->features & M41T80_FEATURE_HT) {
882 misc_deregister(&wdt_dev);
883 unregister_reboot_notifier(&wdt_notifier);
884 }
885#endif
886 if (rtc)
887 rtc_device_unregister(rtc);
888 kfree(clientdata);
889
890 return 0;
891}
892
893static struct i2c_driver m41t80_driver = {
894 .driver = {
895 .name = "m41t80",
896 },
897 .probe = m41t80_probe,
898 .remove = m41t80_remove,
899};
900
901static int __init m41t80_rtc_init(void)
902{
903 return i2c_add_driver(&m41t80_driver);
904}
905
906static void __exit m41t80_rtc_exit(void)
907{
908 i2c_del_driver(&m41t80_driver);
909}
910
911MODULE_AUTHOR("Alexander Bigga <ab@mycable.de>");
912MODULE_DESCRIPTION("ST Microelectronics M41T80 series RTC I2C Client Driver");
913MODULE_LICENSE("GPL");
914MODULE_VERSION(DRV_VERSION);
915
916module_init(m41t80_rtc_init);
917module_exit(m41t80_rtc_exit);
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
new file mode 100644
index 000000000000..33b752350ab5
--- /dev/null
+++ b/drivers/rtc/rtc-m48t59.c
@@ -0,0 +1,491 @@
1/*
2 * ST M48T59 RTC driver
3 *
4 * Copyright (c) 2007 Wind River Systems, Inc.
5 *
6 * Author: Mark Zhan <rongkai.zhan@windriver.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/device.h>
18#include <linux/platform_device.h>
19#include <linux/rtc.h>
20#include <linux/rtc/m48t59.h>
21#include <linux/bcd.h>
22
23#ifndef NO_IRQ
24#define NO_IRQ (-1)
25#endif
26
27#define M48T59_READ(reg) pdata->read_byte(dev, reg)
28#define M48T59_WRITE(val, reg) pdata->write_byte(dev, reg, val)
29
30#define M48T59_SET_BITS(mask, reg) \
31 M48T59_WRITE((M48T59_READ(reg) | (mask)), (reg))
32#define M48T59_CLEAR_BITS(mask, reg) \
33 M48T59_WRITE((M48T59_READ(reg) & ~(mask)), (reg))
34
35struct m48t59_private {
36 void __iomem *ioaddr;
37 unsigned int size; /* iomem size */
38 unsigned int irq;
39 struct rtc_device *rtc;
40 spinlock_t lock; /* serialize the NVRAM and RTC access */
41};
42
43/*
44 * This is the generic access method when the chip is memory-mapped
45 */
46static void
47m48t59_mem_writeb(struct device *dev, u32 ofs, u8 val)
48{
49 struct platform_device *pdev = to_platform_device(dev);
50 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
51
52 writeb(val, m48t59->ioaddr+ofs);
53}
54
55static u8
56m48t59_mem_readb(struct device *dev, u32 ofs)
57{
58 struct platform_device *pdev = to_platform_device(dev);
59 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
60
61 return readb(m48t59->ioaddr+ofs);
62}
63
64/*
65 * NOTE: M48T59 only uses BCD mode
66 */
67static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
68{
69 struct platform_device *pdev = to_platform_device(dev);
70 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
71 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
72 unsigned long flags;
73 u8 val;
74
75 spin_lock_irqsave(&m48t59->lock, flags);
76 /* Issue the READ command */
77 M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
78
79 tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
80 /* tm_mon is 0-11 */
81 tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
82 tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_MDAY));
83
84 val = M48T59_READ(M48T59_WDAY);
85 if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB)) {
86 dev_dbg(dev, "Century bit is enabled\n");
87 tm->tm_year += 100; /* one century */
88 }
89
90 tm->tm_wday = BCD2BIN(val & 0x07);
91 tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_HOUR) & 0x3F);
92 tm->tm_min = BCD2BIN(M48T59_READ(M48T59_MIN) & 0x7F);
93 tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_SEC) & 0x7F);
94
95 /* Clear the READ bit */
96 M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
97 spin_unlock_irqrestore(&m48t59->lock, flags);
98
99 dev_dbg(dev, "RTC read time %04d-%02d-%02d %02d/%02d/%02d\n",
100 tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
101 tm->tm_hour, tm->tm_min, tm->tm_sec);
102 return 0;
103}
104
105static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
106{
107 struct platform_device *pdev = to_platform_device(dev);
108 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
109 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
110 unsigned long flags;
111 u8 val = 0;
112
113 dev_dbg(dev, "RTC set time %04d-%02d-%02d %02d/%02d/%02d\n",
114 tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
115 tm->tm_hour, tm->tm_min, tm->tm_sec);
116
117 spin_lock_irqsave(&m48t59->lock, flags);
118 /* Issue the WRITE command */
119 M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
120
121 M48T59_WRITE((BIN2BCD(tm->tm_sec) & 0x7F), M48T59_SEC);
122 M48T59_WRITE((BIN2BCD(tm->tm_min) & 0x7F), M48T59_MIN);
123 M48T59_WRITE((BIN2BCD(tm->tm_hour) & 0x3F), M48T59_HOUR);
124 M48T59_WRITE((BIN2BCD(tm->tm_mday) & 0x3F), M48T59_MDAY);
125 /* tm_mon is 0-11 */
126 M48T59_WRITE((BIN2BCD(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
127 M48T59_WRITE(BIN2BCD(tm->tm_year % 100), M48T59_YEAR);
128
129 if (tm->tm_year/100)
130 val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
131 val |= (BIN2BCD(tm->tm_wday) & 0x07);
132 M48T59_WRITE(val, M48T59_WDAY);
133
134 /* Clear the WRITE bit */
135 M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
136 spin_unlock_irqrestore(&m48t59->lock, flags);
137 return 0;
138}
139
140/*
141 * Read alarm time and date in RTC
142 */
143static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
144{
145 struct platform_device *pdev = to_platform_device(dev);
146 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
147 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
148 struct rtc_time *tm = &alrm->time;
149 unsigned long flags;
150 u8 val;
151
152 /* If no irq, we don't support ALARM */
153 if (m48t59->irq == NO_IRQ)
154 return -EIO;
155
156 spin_lock_irqsave(&m48t59->lock, flags);
157 /* Issue the READ command */
158 M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
159
160 tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
161 /* tm_mon is 0-11 */
162 tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
163
164 val = M48T59_READ(M48T59_WDAY);
165 if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB))
166 tm->tm_year += 100; /* one century */
167
168 tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_ALARM_DATE));
169 tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_ALARM_HOUR));
170 tm->tm_min = BCD2BIN(M48T59_READ(M48T59_ALARM_MIN));
171 tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_ALARM_SEC));
172
173 /* Clear the READ bit */
174 M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
175 spin_unlock_irqrestore(&m48t59->lock, flags);
176
177 dev_dbg(dev, "RTC read alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
178 tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
179 tm->tm_hour, tm->tm_min, tm->tm_sec);
180 return 0;
181}
182
183/*
184 * Set alarm time and date in RTC
185 */
186static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
187{
188 struct platform_device *pdev = to_platform_device(dev);
189 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
190 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
191 struct rtc_time *tm = &alrm->time;
192 u8 mday, hour, min, sec;
193 unsigned long flags;
194
195 /* If no irq, we don't support ALARM */
196 if (m48t59->irq == NO_IRQ)
197 return -EIO;
198
199 /*
200 * 0xff means "always match"
201 */
202 mday = tm->tm_mday;
203 mday = (mday >= 1 && mday <= 31) ? BIN2BCD(mday) : 0xff;
204 if (mday == 0xff)
205 mday = M48T59_READ(M48T59_MDAY);
206
207 hour = tm->tm_hour;
208 hour = (hour < 24) ? BIN2BCD(hour) : 0x00;
209
210 min = tm->tm_min;
211 min = (min < 60) ? BIN2BCD(min) : 0x00;
212
213 sec = tm->tm_sec;
214 sec = (sec < 60) ? BIN2BCD(sec) : 0x00;
215
216 spin_lock_irqsave(&m48t59->lock, flags);
217 /* Issue the WRITE command */
218 M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
219
220 M48T59_WRITE(mday, M48T59_ALARM_DATE);
221 M48T59_WRITE(hour, M48T59_ALARM_HOUR);
222 M48T59_WRITE(min, M48T59_ALARM_MIN);
223 M48T59_WRITE(sec, M48T59_ALARM_SEC);
224
225 /* Clear the WRITE bit */
226 M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
227 spin_unlock_irqrestore(&m48t59->lock, flags);
228
229 dev_dbg(dev, "RTC set alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
230 tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
231 tm->tm_hour, tm->tm_min, tm->tm_sec);
232 return 0;
233}
234
235/*
236 * Handle commands from user-space
237 */
238static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
239 unsigned long arg)
240{
241 struct platform_device *pdev = to_platform_device(dev);
242 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
243 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
244 unsigned long flags;
245 int ret = 0;
246
247 spin_lock_irqsave(&m48t59->lock, flags);
248 switch (cmd) {
249 case RTC_AIE_OFF: /* alarm interrupt off */
250 M48T59_WRITE(0x00, M48T59_INTR);
251 break;
252 case RTC_AIE_ON: /* alarm interrupt on */
253 M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
254 break;
255 default:
256 ret = -ENOIOCTLCMD;
257 break;
258 }
259 spin_unlock_irqrestore(&m48t59->lock, flags);
260
261 return ret;
262}
263
264static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
265{
266 struct platform_device *pdev = to_platform_device(dev);
267 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
268 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
269 unsigned long flags;
270 u8 val;
271
272 spin_lock_irqsave(&m48t59->lock, flags);
273 val = M48T59_READ(M48T59_FLAGS);
274 spin_unlock_irqrestore(&m48t59->lock, flags);
275
276 seq_printf(seq, "battery\t\t: %s\n",
277 (val & M48T59_FLAGS_BF) ? "low" : "normal");
278 return 0;
279}
280
281/*
282 * IRQ handler for the RTC
283 */
284static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
285{
286 struct device *dev = (struct device *)dev_id;
287 struct platform_device *pdev = to_platform_device(dev);
288 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
289 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
290 u8 event;
291
292 spin_lock(&m48t59->lock);
293 event = M48T59_READ(M48T59_FLAGS);
294 spin_unlock(&m48t59->lock);
295
296 if (event & M48T59_FLAGS_AF) {
297 rtc_update_irq(m48t59->rtc, 1, (RTC_AF | RTC_IRQF));
298 return IRQ_HANDLED;
299 }
300
301 return IRQ_NONE;
302}
303
304static const struct rtc_class_ops m48t59_rtc_ops = {
305 .ioctl = m48t59_rtc_ioctl,
306 .read_time = m48t59_rtc_read_time,
307 .set_time = m48t59_rtc_set_time,
308 .read_alarm = m48t59_rtc_readalarm,
309 .set_alarm = m48t59_rtc_setalarm,
310 .proc = m48t59_rtc_proc,
311};
312
313static ssize_t m48t59_nvram_read(struct kobject *kobj,
314 struct bin_attribute *bin_attr,
315 char *buf, loff_t pos, size_t size)
316{
317 struct device *dev = container_of(kobj, struct device, kobj);
318 struct platform_device *pdev = to_platform_device(dev);
319 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
320 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
321 ssize_t cnt = 0;
322 unsigned long flags;
323
324 for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--) {
325 spin_lock_irqsave(&m48t59->lock, flags);
326 *buf++ = M48T59_READ(cnt);
327 spin_unlock_irqrestore(&m48t59->lock, flags);
328 }
329
330 return cnt;
331}
332
333static ssize_t m48t59_nvram_write(struct kobject *kobj,
334 struct bin_attribute *bin_attr,
335 char *buf, loff_t pos, size_t size)
336{
337 struct device *dev = container_of(kobj, struct device, kobj);
338 struct platform_device *pdev = to_platform_device(dev);
339 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
340 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
341 ssize_t cnt = 0;
342 unsigned long flags;
343
344 for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--) {
345 spin_lock_irqsave(&m48t59->lock, flags);
346 M48T59_WRITE(*buf++, cnt);
347 spin_unlock_irqrestore(&m48t59->lock, flags);
348 }
349
350 return cnt;
351}
352
353static struct bin_attribute m48t59_nvram_attr = {
354 .attr = {
355 .name = "nvram",
356 .mode = S_IRUGO | S_IWUGO,
357 .owner = THIS_MODULE,
358 },
359 .read = m48t59_nvram_read,
360 .write = m48t59_nvram_write,
361};
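
The bin_attribute above exposes the battery-backed NVRAM as a binary sysfs file called "nvram" under the platform device. A hedged user-space sketch that dumps its first bytes; the sysfs path shown is an assumption and depends on how the board registers the device:

#include <stdio.h>

int main(void)
{
	/* Hypothetical sysfs path; the real one depends on the platform device. */
	FILE *f = fopen("/sys/devices/platform/rtc-m48t59/nvram", "rb");
	unsigned char byte;
	int i;

	if (!f)
		return 1;
	for (i = 0; i < 16 && fread(&byte, 1, 1, f) == 1; i++)
		printf("%02x ", byte);
	printf("\n");
	fclose(f);
	return 0;
}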
362
363static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
364{
365 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
366 struct m48t59_private *m48t59 = NULL;
367 struct resource *res;
368 int ret = -ENOMEM;
369
370 /* This chip could be memory-mapped or I/O-mapped */
371 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
372 if (!res) {
373 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
374 if (!res)
375 return -EINVAL;
376 }
377
378 if (res->flags & IORESOURCE_IO) {
379 /* If we are I/O-mapped, the platform should provide
380 * the operations accessing chip registers.
381 */
382 if (!pdata || !pdata->write_byte || !pdata->read_byte)
383 return -EINVAL;
384 } else if (res->flags & IORESOURCE_MEM) {
385 /* we are memory-mapped */
386 if (!pdata) {
387 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
388 if (!pdata)
389 return -ENOMEM;
390 /* Ensure we only allocate platform data once */
391 pdev->dev.platform_data = pdata;
392 }
393
394 /* Try to use the generic memory read/write ops */
395 if (!pdata->write_byte)
396 pdata->write_byte = m48t59_mem_writeb;
397 if (!pdata->read_byte)
398 pdata->read_byte = m48t59_mem_readb;
399 }
400
401 m48t59 = kzalloc(sizeof(*m48t59), GFP_KERNEL);
402 if (!m48t59)
403 return -ENOMEM;
404
405 m48t59->size = res->end - res->start + 1;
406 m48t59->ioaddr = ioremap(res->start, m48t59->size);
407 if (!m48t59->ioaddr)
408 goto out;
409
410 /* Try to get the IRQ number; the driver can also
411 * work without an IRQ.
412 */
413 m48t59->irq = platform_get_irq(pdev, 0);
414 if (m48t59->irq < 0)
415 m48t59->irq = NO_IRQ;
416
417 if (m48t59->irq != NO_IRQ) {
418 ret = request_irq(m48t59->irq, m48t59_rtc_interrupt,
419 IRQF_SHARED, "rtc-m48t59", &pdev->dev);
420 if (ret)
421 goto out;
422 }
423
424 m48t59->rtc = rtc_device_register("m48t59", &pdev->dev,
425 &m48t59_rtc_ops, THIS_MODULE);
426 if (IS_ERR(m48t59->rtc)) {
427 ret = PTR_ERR(m48t59->rtc);
428 goto out;
429 }
430
431 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
432 if (ret)
433 goto out;
434
435 spin_lock_init(&m48t59->lock);
436 platform_set_drvdata(pdev, m48t59);
437 return 0;
438
439out:
440 if (m48t59->rtc && !IS_ERR(m48t59->rtc))
441 rtc_device_unregister(m48t59->rtc);
442 if (m48t59->irq != NO_IRQ)
443 free_irq(m48t59->irq, &pdev->dev);
444 if (m48t59->ioaddr)
445 iounmap(m48t59->ioaddr);
446 if (m48t59)
447 kfree(m48t59);
448 return ret;
449}
450
451static int __devexit m48t59_rtc_remove(struct platform_device *pdev)
452{
453 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
454
455 sysfs_remove_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
456 if (!IS_ERR(m48t59->rtc))
457 rtc_device_unregister(m48t59->rtc);
458 if (m48t59->ioaddr)
459 iounmap(m48t59->ioaddr);
460 if (m48t59->irq != NO_IRQ)
461 free_irq(m48t59->irq, &pdev->dev);
462 platform_set_drvdata(pdev, NULL);
463 kfree(m48t59);
464 return 0;
465}
466
467static struct platform_driver m48t59_rtc_platdrv = {
468 .driver = {
469 .name = "rtc-m48t59",
470 .owner = THIS_MODULE,
471 },
472 .probe = m48t59_rtc_probe,
473 .remove = __devexit_p(m48t59_rtc_remove),
474};
475
476static int __init m48t59_rtc_init(void)
477{
478 return platform_driver_register(&m48t59_rtc_platdrv);
479}
480
481static void __exit m48t59_rtc_exit(void)
482{
483 platform_driver_unregister(&m48t59_rtc_platdrv);
484}
485
486module_init(m48t59_rtc_init);
487module_exit(m48t59_rtc_exit);
488
489MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
490MODULE_DESCRIPTION("M48T59 RTC driver");
491MODULE_LICENSE("GPL");
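
The driver binds by name, so board code is expected to register a platform device called "rtc-m48t59" with a memory or I/O resource (and, in the I/O case, read_byte/write_byte hooks in m48t59_plat_data). A minimal, hypothetical memory-mapped registration sketch follows; the base address and window size are placeholders, not real hardware values:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Placeholder address and size: substitute the board's real NVRAM window. */
static struct resource board_m48t59_resource = {
	.start	= 0xf0000000,
	.end	= 0xf0000000 + 0x2000 - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device board_m48t59_device = {
	.name		= "rtc-m48t59",	/* must match m48t59_rtc_platdrv */
	.id		= -1,
	.num_resources	= 1,
	.resource	= &board_m48t59_resource,
};

static int __init board_m48t59_register(void)
{
	/* Memory-mapped case: the driver falls back to its own byte accessors. */
	return platform_device_register(&board_m48t59_device);
}
device_initcall(board_m48t59_register);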
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 09bbe575647b..6b67b5097927 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -13,13 +13,7 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 
-#define DRV_VERSION "0.4"
-
-/* Addresses to scan */
-static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD;
+#define DRV_VERSION "0.5"
 
 
 /*
@@ -88,9 +82,6 @@ struct rs5c372 {
 	unsigned		has_irq:1;
 	char			buf[17];
 	char			*regs;
-
-	/* on conversion to a "new style" i2c driver, this vanishes */
-	struct i2c_client	dev;
 };
 
 static int rs5c_get_regs(struct rs5c372 *rs5c)
@@ -483,25 +474,35 @@ static int rs5c_sysfs_register(struct device *dev)
 	return err;
 }
 
+static void rs5c_sysfs_unregister(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_trim);
+	device_remove_file(dev, &dev_attr_osc);
+}
+
 #else
 static int rs5c_sysfs_register(struct device *dev)
 {
	return 0;
 }
+
+static void rs5c_sysfs_unregister(struct device *dev)
+{
+	/* nothing */
+}
 #endif	/* SYSFS */
 
 static struct i2c_driver rs5c372_driver;
 
-static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
+static int rs5c372_probe(struct i2c_client *client)
 {
 	int err = 0;
-	struct i2c_client *client;
 	struct rs5c372 *rs5c372;
 	struct rtc_time tm;
 
-	dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
+	dev_dbg(&client->dev, "%s\n", __FUNCTION__);
 
-	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		err = -ENODEV;
 		goto exit;
 	}
@@ -514,35 +515,22 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 	/* we read registers 0x0f then 0x00-0x0f; skip the first one */
 	rs5c372->regs=&rs5c372->buf[1];
 
-	/* On conversion to a "new style" i2c driver, we'll be handed
-	 * the i2c_client (we won't create it)
-	 */
-	client = &rs5c372->dev;
 	rs5c372->client = client;
-
-	/* I2C client */
-	client->addr = address;
-	client->driver = &rs5c372_driver;
-	client->adapter = adapter;
-
-	strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);
-
 	i2c_set_clientdata(client, rs5c372);
 
-	/* Inform the i2c layer */
-	if ((err = i2c_attach_client(client)))
-		goto exit_kfree;
-
 	err = rs5c_get_regs(rs5c372);
 	if (err < 0)
-		goto exit_detach;
+		goto exit_kfree;
 
-	/* For "new style" drivers, irq is in i2c_client and chip type
-	 * info comes from i2c_client.dev.platform_data. Meanwhile:
-	 *
-	 * STICK BOARD-SPECIFIC SETUP CODE RIGHT HERE
-	 */
-	if (rs5c372->type == rtc_undef) {
+	if (strcmp(client->name, "rs5c372a") == 0)
+		rs5c372->type = rtc_rs5c372a;
+	else if (strcmp(client->name, "rs5c372b") == 0)
+		rs5c372->type = rtc_rs5c372b;
+	else if (strcmp(client->name, "rv5c386") == 0)
+		rs5c372->type = rtc_rv5c386;
+	else if (strcmp(client->name, "rv5c387a") == 0)
+		rs5c372->type = rtc_rv5c387a;
+	else {
 		rs5c372->type = rtc_rs5c372b;
 		dev_warn(&client->dev, "assuming rs5c372b\n");
 	}
@@ -567,7 +555,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 		break;
 	default:
 		dev_err(&client->dev, "unknown RTC type\n");
-		goto exit_detach;
+		goto exit_kfree;
 	}
 
 	/* if the oscillator lost power and no other software (like
@@ -601,7 +589,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 
 	if ((i2c_master_send(client, buf, 3)) != 3) {
 		dev_err(&client->dev, "setup error\n");
-		goto exit_detach;
+		goto exit_kfree;
 	}
 	rs5c372->regs[RS5C_REG_CTRL1] = buf[1];
 	rs5c372->regs[RS5C_REG_CTRL2] = buf[2];
@@ -621,14 +609,14 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 			rs5c372->time24 ? "24hr" : "am/pm"
 			);
 
-	/* FIXME when client->irq exists, use it to register alarm irq */
+	/* REVISIT use client->irq to register alarm irq ... */
 
 	rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
 				&client->dev, &rs5c372_rtc_ops, THIS_MODULE);
 
 	if (IS_ERR(rs5c372->rtc)) {
 		err = PTR_ERR(rs5c372->rtc);
-		goto exit_detach;
+		goto exit_kfree;
 	}
 
 	err = rs5c_sysfs_register(&client->dev);
@@ -640,9 +628,6 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 exit_devreg:
 	rtc_device_unregister(rs5c372->rtc);
 
-exit_detach:
-	i2c_detach_client(client);
-
 exit_kfree:
 	kfree(rs5c372);
 
@@ -650,24 +635,12 @@ exit:
 	return err;
 }
 
-static int rs5c372_attach(struct i2c_adapter *adapter)
+static int rs5c372_remove(struct i2c_client *client)
 {
-	return i2c_probe(adapter, &addr_data, rs5c372_probe);
-}
-
-static int rs5c372_detach(struct i2c_client *client)
-{
-	int err;
 	struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
 
-	if (rs5c372->rtc)
-		rtc_device_unregister(rs5c372->rtc);
-
-	/* REVISIT properly destroy the sysfs files ... */
-
-	if ((err = i2c_detach_client(client)))
-		return err;
-
+	rtc_device_unregister(rs5c372->rtc);
+	rs5c_sysfs_unregister(&client->dev);
 	kfree(rs5c372);
 	return 0;
 }
@@ -676,8 +649,8 @@ static struct i2c_driver rs5c372_driver = {
 	.driver		= {
 		.name	= "rtc-rs5c372",
 	},
-	.attach_adapter	= &rs5c372_attach,
-	.detach_client	= &rs5c372_detach,
+	.probe		= rs5c372_probe,
+	.remove		= rs5c372_remove,
 };
 
 static __init int rs5c372_init(void)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1340451ea408..35765f6a86e0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -747,14 +747,9 @@ dcssblk_check_params(void)
 static void __exit
 dcssblk_exit(void)
 {
-	int rc;
-
 	PRINT_DEBUG("DCSSBLOCK EXIT...\n");
 	s390_root_dev_unregister(dcssblk_root_dev);
-	rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
-	if (rc) {
-		PRINT_ERR("unregister_blkdev() failed!\n");
-	}
+	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
 	PRINT_DEBUG("...finished!\n");
 }
 
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 512857a23169..5157a2abc58d 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -619,8 +619,7 @@ static void __exit jsflash_cleanup_module(void)
 	jsf0.busy = 0;
 
 	misc_deregister(&jsf_dev);
-	if (unregister_blkdev(JSFD_MAJOR, "jsfd") != 0)
-		printk("jsfd: cleanup_module failed\n");
+	unregister_blkdev(JSFD_MAJOR, "jsfd");
 	blk_cleanup_queue(jsf_queue);
 }
 
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index d70ddfda93fc..9c5342e7a69c 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -40,6 +40,7 @@
 
 #include <linux/err.h>
 #include <linux/blkdev.h>
+#include <linux/freezer.h>
 #include <linux/scatterlist.h>
 
 /* ---------- SCSI Host glue ---------- */
@@ -868,8 +869,6 @@ static int sas_queue_thread(void *_sas_ha)
 {
 	struct sas_ha_struct *sas_ha = _sas_ha;
 
-	current->flags |= PF_NOFREEZE;
-
 	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9adb64ac054c..8a525abda30f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -19,6 +19,7 @@
 #include <linux/timer.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
@@ -1516,8 +1517,6 @@ int scsi_error_handler(void *data)
 {
 	struct Scsi_Host *shost = data;
 
-	current->flags |= PF_NOFREEZE;
-
 	/*
 	 * We use TASK_INTERRUPTIBLE so that the thread is not
 	 * counted against the load average as a running process.
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index cab42cbd920d..7fa413ddccf5 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -338,6 +338,34 @@ config SERIAL_AMBA_PL011_CONSOLE
 	  your boot loader (lilo or loadlin) about how to pass options to the
 	  kernel at boot time.)
 
+config SERIAL_SB1250_DUART
+	tristate "BCM1xxx on-chip DUART serial support"
+	depends on SIBYTE_SB1xxx_SOC=y
+	select SERIAL_CORE
+	default y
+	---help---
+	  Support for the asynchronous serial interface (DUART) included in
+	  the BCM1250 and derived System-On-a-Chip (SOC) devices.  Note that
+	  the letter D in DUART stands for "dual", which is how the device
+	  is implemented.  Depending on the SOC configuration there may be
+	  one or more DUARTs available, all of which are handled.
+
+	  If unsure, say Y.  To compile this driver as a module, choose M here:
+	  the module will be called sb1250-duart.
+
+config SERIAL_SB1250_DUART_CONSOLE
+	bool "Support for console on a BCM1xxx DUART serial port"
+	depends on SERIAL_SB1250_DUART=y
+	select SERIAL_CORE_CONSOLE
+	default y
+	---help---
+	  If you say Y here, it will be possible to use a serial port as the
+	  system console (the system console is the device which receives all
+	  kernel messages and warnings and which allows logins in single user
+	  mode).
+
+	  If unsure, say Y.
+
 config SERIAL_ATMEL
 	bool "AT91 / AT32 on-chip serial port support"
 	depends on (ARM && ARCH_AT91) || AVR32
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 08ad0d978183..c48cdd61b736 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
 obj-$(CONFIG_SERIAL_ICOM) += icom.o
 obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
 obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
+obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
 obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
 obj-$(CONFIG_SERIAL_JSM) += jsm/
 obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
new file mode 100644
index 000000000000..1d9d7285172a
--- /dev/null
+++ b/drivers/serial/sb1250-duart.c
@@ -0,0 +1,972 @@
1/*
2 * drivers/serial/sb1250-duart.c
3 *
4 * Support for the asynchronous serial interface (DUART) included
5 * in the BCM1250 and derived System-On-a-Chip (SOC) devices.
6 *
7 * Copyright (c) 2007 Maciej W. Rozycki
8 *
9 * Derived from drivers/char/sb1250_duart.c for which the following
10 * copyright applies:
11 *
12 * Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 * References:
20 *
21 * "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
22 */
23
24#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
25#define SUPPORT_SYSRQ
26#endif
27
28#include <linux/console.h>
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/ioport.h>
34#include <linux/kernel.h>
35#include <linux/major.h>
36#include <linux/serial.h>
37#include <linux/serial_core.h>
38#include <linux/spinlock.h>
39#include <linux/sysrq.h>
40#include <linux/tty.h>
41#include <linux/types.h>
42
43#include <asm/atomic.h>
44#include <asm/io.h>
45#include <asm/war.h>
46
47#include <asm/sibyte/sb1250.h>
48#include <asm/sibyte/sb1250_uart.h>
49#include <asm/sibyte/swarm.h>
50
51
52#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
53#include <asm/sibyte/bcm1480_regs.h>
54#include <asm/sibyte/bcm1480_int.h>
55
56#define SBD_CHANREGS(line) A_BCM1480_DUART_CHANREG((line), 0)
57#define SBD_CTRLREGS(line) A_BCM1480_DUART_CTRLREG((line), 0)
58#define SBD_INT(line) (K_BCM1480_INT_UART_0 + (line))
59
60#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
61#include <asm/sibyte/sb1250_regs.h>
62#include <asm/sibyte/sb1250_int.h>
63
64#define SBD_CHANREGS(line) A_DUART_CHANREG((line), 0)
65#define SBD_CTRLREGS(line) A_DUART_CTRLREG(0)
66#define SBD_INT(line) (K_INT_UART_0 + (line))
67
68#else
69#error invalid SB1250 UART configuration
70
71#endif
72
73
74MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
75MODULE_DESCRIPTION("BCM1xxx on-chip DUART serial driver");
76MODULE_LICENSE("GPL");
77
78
79#define DUART_MAX_CHIP 2
80#define DUART_MAX_SIDE 2
81
82/*
83 * Per-port state.
84 */
85struct sbd_port {
86 struct sbd_duart *duart;
87 struct uart_port port;
88 unsigned char __iomem *memctrl;
89 int tx_stopped;
90 int initialised;
91};
92
93/*
94 * Per-DUART state for the shared register space.
95 */
96struct sbd_duart {
97 struct sbd_port sport[2];
98 unsigned long mapctrl;
99 atomic_t map_guard;
100};
101
102#define to_sport(uport) container_of(uport, struct sbd_port, port)
103
104static struct sbd_duart sbd_duarts[DUART_MAX_CHIP];
105
106#define __unused __attribute__((__unused__))
107
108
109/*
110 * Reading and writing SB1250 DUART registers.
111 *
112 * There are three register spaces: two per-channel ones and
113 * a shared one. We have to define accessors appropriately.
114 * All registers are 64-bit and all but the Baud Rate Clock
115 * registers only define the 8 least significant bits. There is
116 * also a workaround to take into account. Raw accessors use
117 * the full register width, but cooked ones truncate it
118 * intentionally so that the rest of the driver does not care.
119 */
120static u64 __read_sbdchn(struct sbd_port *sport, int reg)
121{
122 void __iomem *csr = sport->port.membase + reg;
123
124 return __raw_readq(csr);
125}
126
127static u64 __read_sbdshr(struct sbd_port *sport, int reg)
128{
129 void __iomem *csr = sport->memctrl + reg;
130
131 return __raw_readq(csr);
132}
133
134static void __write_sbdchn(struct sbd_port *sport, int reg, u64 value)
135{
136 void __iomem *csr = sport->port.membase + reg;
137
138 __raw_writeq(value, csr);
139}
140
141static void __write_sbdshr(struct sbd_port *sport, int reg, u64 value)
142{
143 void __iomem *csr = sport->memctrl + reg;
144
145 __raw_writeq(value, csr);
146}
147
148/*
149 * In bug 1956, we get glitches that can mess up uart registers. This
150 * "read-mode-reg after any register access" is an accepted workaround.
151 */
152static void __war_sbd1956(struct sbd_port *sport)
153{
154 __read_sbdchn(sport, R_DUART_MODE_REG_1);
155 __read_sbdchn(sport, R_DUART_MODE_REG_2);
156}
157
158static unsigned char read_sbdchn(struct sbd_port *sport, int reg)
159{
160 unsigned char retval;
161
162 retval = __read_sbdchn(sport, reg);
163 if (SIBYTE_1956_WAR)
164 __war_sbd1956(sport);
165 return retval;
166}
167
168static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
169{
170 unsigned char retval;
171
172 retval = __read_sbdshr(sport, reg);
173 if (SIBYTE_1956_WAR)
174 __war_sbd1956(sport);
175 return retval;
176}
177
178static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value)
179{
180 __write_sbdchn(sport, reg, value);
181 if (SIBYTE_1956_WAR)
182 __war_sbd1956(sport);
183}
184
185static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value)
186{
187 __write_sbdshr(sport, reg, value);
188 if (SIBYTE_1956_WAR)
189 __war_sbd1956(sport);
190}
191
192
193static int sbd_receive_ready(struct sbd_port *sport)
194{
195 return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_RX_RDY;
196}
197
198static int sbd_receive_drain(struct sbd_port *sport)
199{
200 int loops = 10000;
201
202 while (sbd_receive_ready(sport) && loops--)
203 read_sbdchn(sport, R_DUART_RX_HOLD);
204 return loops;
205}
206
207static int __unused sbd_transmit_ready(struct sbd_port *sport)
208{
209 return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_RDY;
210}
211
212static int __unused sbd_transmit_drain(struct sbd_port *sport)
213{
214 int loops = 10000;
215
216 while (!sbd_transmit_ready(sport) && loops--)
217 udelay(2);
218 return loops;
219}
220
221static int sbd_transmit_empty(struct sbd_port *sport)
222{
223 return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_EMT;
224}
225
226static int sbd_line_drain(struct sbd_port *sport)
227{
228 int loops = 10000;
229
230 while (!sbd_transmit_empty(sport) && loops--)
231 udelay(2);
232 return loops;
233}
234
235
236static unsigned int sbd_tx_empty(struct uart_port *uport)
237{
238 struct sbd_port *sport = to_sport(uport);
239
240 return sbd_transmit_empty(sport) ? TIOCSER_TEMT : 0;
241}
242
243static unsigned int sbd_get_mctrl(struct uart_port *uport)
244{
245 struct sbd_port *sport = to_sport(uport);
246 unsigned int mctrl, status;
247
248 status = read_sbdshr(sport, R_DUART_IN_PORT);
249 status >>= (uport->line) % 2;
250 mctrl = (!(status & M_DUART_IN_PIN0_VAL) ? TIOCM_CTS : 0) |
251 (!(status & M_DUART_IN_PIN4_VAL) ? TIOCM_CAR : 0) |
252 (!(status & M_DUART_RIN0_PIN) ? TIOCM_RNG : 0) |
253 (!(status & M_DUART_IN_PIN2_VAL) ? TIOCM_DSR : 0);
254 return mctrl;
255}
256
257static void sbd_set_mctrl(struct uart_port *uport, unsigned int mctrl)
258{
259 struct sbd_port *sport = to_sport(uport);
260 unsigned int clr = 0, set = 0, mode2;
261
262 if (mctrl & TIOCM_DTR)
263 set |= M_DUART_SET_OPR2;
264 else
265 clr |= M_DUART_CLR_OPR2;
266 if (mctrl & TIOCM_RTS)
267 set |= M_DUART_SET_OPR0;
268 else
269 clr |= M_DUART_CLR_OPR0;
270 clr <<= (uport->line) % 2;
271 set <<= (uport->line) % 2;
272
273 mode2 = read_sbdchn(sport, R_DUART_MODE_REG_2);
274 mode2 &= ~M_DUART_CHAN_MODE;
275 if (mctrl & TIOCM_LOOP)
276 mode2 |= V_DUART_CHAN_MODE_LCL_LOOP;
277 else
278 mode2 |= V_DUART_CHAN_MODE_NORMAL;
279
280 write_sbdshr(sport, R_DUART_CLEAR_OPR, clr);
281 write_sbdshr(sport, R_DUART_SET_OPR, set);
282 write_sbdchn(sport, R_DUART_MODE_REG_2, mode2);
283}
284
285static void sbd_stop_tx(struct uart_port *uport)
286{
287 struct sbd_port *sport = to_sport(uport);
288
289 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
290 sport->tx_stopped = 1;
291};
292
293static void sbd_start_tx(struct uart_port *uport)
294{
295 struct sbd_port *sport = to_sport(uport);
296 unsigned int mask;
297
298 /* Enable tx interrupts. */
299 mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
300 mask |= M_DUART_IMR_TX;
301 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
302
303 /* Go!, go!, go!... */
304 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
305 sport->tx_stopped = 0;
306};
307
308static void sbd_stop_rx(struct uart_port *uport)
309{
310 struct sbd_port *sport = to_sport(uport);
311
312 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
313};
314
315static void sbd_enable_ms(struct uart_port *uport)
316{
317 struct sbd_port *sport = to_sport(uport);
318
319 write_sbdchn(sport, R_DUART_AUXCTL_X,
320 M_DUART_CIN_CHNG_ENA | M_DUART_CTS_CHNG_ENA);
321}
322
323static void sbd_break_ctl(struct uart_port *uport, int break_state)
324{
325 struct sbd_port *sport = to_sport(uport);
326
327 if (break_state == -1)
328 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_START_BREAK);
329 else
330 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_STOP_BREAK);
331}
332
333
334static void sbd_receive_chars(struct sbd_port *sport)
335{
336 struct uart_port *uport = &sport->port;
337 struct uart_icount *icount;
338 unsigned int status, ch, flag;
339 int count;
340
341 for (count = 16; count; count--) {
342 status = read_sbdchn(sport, R_DUART_STATUS);
343 if (!(status & M_DUART_RX_RDY))
344 break;
345
346 ch = read_sbdchn(sport, R_DUART_RX_HOLD);
347
348 flag = TTY_NORMAL;
349
350 icount = &uport->icount;
351 icount->rx++;
352
353 if (unlikely(status &
354 (M_DUART_RCVD_BRK | M_DUART_FRM_ERR |
355 M_DUART_PARITY_ERR | M_DUART_OVRUN_ERR))) {
356 if (status & M_DUART_RCVD_BRK) {
357 icount->brk++;
358 if (uart_handle_break(uport))
359 continue;
360 } else if (status & M_DUART_FRM_ERR)
361 icount->frame++;
362 else if (status & M_DUART_PARITY_ERR)
363 icount->parity++;
364 if (status & M_DUART_OVRUN_ERR)
365 icount->overrun++;
366
367 status &= uport->read_status_mask;
368 if (status & M_DUART_RCVD_BRK)
369 flag = TTY_BREAK;
370 else if (status & M_DUART_FRM_ERR)
371 flag = TTY_FRAME;
372 else if (status & M_DUART_PARITY_ERR)
373 flag = TTY_PARITY;
374 }
375
376 if (uart_handle_sysrq_char(uport, ch))
377 continue;
378
379 uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
380 }
381
382 tty_flip_buffer_push(uport->info->tty);
383}
384
385static void sbd_transmit_chars(struct sbd_port *sport)
386{
387 struct uart_port *uport = &sport->port;
388 struct circ_buf *xmit = &sport->port.info->xmit;
389 unsigned int mask;
390 int stop_tx;
391
392 /* XON/XOFF chars. */
393 if (sport->port.x_char) {
394 write_sbdchn(sport, R_DUART_TX_HOLD, sport->port.x_char);
395 sport->port.icount.tx++;
396 sport->port.x_char = 0;
397 return;
398 }
399
400 /* If nothing to do or stopped or hardware stopped. */
401 stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port));
402
403 /* Send char. */
404 if (!stop_tx) {
405 write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]);
406 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
407 sport->port.icount.tx++;
408
409 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
410 uart_write_wakeup(&sport->port);
411 }
412
413 /* Are we are done? */
414 if (stop_tx || uart_circ_empty(xmit)) {
415 /* Disable tx interrupts. */
416 mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
417 mask &= ~M_DUART_IMR_TX;
418 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
419 }
420}
421
422static void sbd_status_handle(struct sbd_port *sport)
423{
424 struct uart_port *uport = &sport->port;
425 unsigned int delta;
426
427 delta = read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
428 delta >>= (uport->line) % 2;
429
430 if (delta & (M_DUART_IN_PIN0_VAL << S_DUART_IN_PIN_CHNG))
431 uart_handle_cts_change(uport, !(delta & M_DUART_IN_PIN0_VAL));
432
433 if (delta & (M_DUART_IN_PIN2_VAL << S_DUART_IN_PIN_CHNG))
434 uport->icount.dsr++;
435
436 if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) <<
437 S_DUART_IN_PIN_CHNG))
438 wake_up_interruptible(&uport->info->delta_msr_wait);
439}
440
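/*
 * Shared interrupt handler: loop (bounded to 16 passes) over this
 * channel's masked interrupt status, dispatching RX, modem-status and TX
 * work until no enabled source remains pending.
 */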
441static irqreturn_t sbd_interrupt(int irq, void *dev_id)
442{
443 struct sbd_port *sport = dev_id;
444 struct uart_port *uport = &sport->port;
445 irqreturn_t status = IRQ_NONE;
446 unsigned int intstat;
447 int count;
448
449 for (count = 16; count; count--) {
450 intstat = read_sbdshr(sport,
451 R_DUART_ISRREG((uport->line) % 2));
452 intstat &= read_sbdshr(sport,
453 R_DUART_IMRREG((uport->line) % 2));
454 intstat &= M_DUART_ISR_ALL;
455 if (!intstat)
456 break;
457
458 if (intstat & M_DUART_ISR_RX)
459 sbd_receive_chars(sport);
460 if (intstat & M_DUART_ISR_IN)
461 sbd_status_handle(sport);
462 if (intstat & M_DUART_ISR_TX)
463 sbd_transmit_chars(sport);
464
465 status = IRQ_HANDLED;
466 }
467
468 return status;
469}
470
471
472static int sbd_startup(struct uart_port *uport)
473{
474 struct sbd_port *sport = to_sport(uport);
475 unsigned int mode1;
476 int ret;
477
478 ret = request_irq(sport->port.irq, sbd_interrupt,
479 IRQF_SHARED, "sb1250-duart", sport);
480 if (ret)
481 return ret;
482
483 /* Clear the receive FIFO. */
484 sbd_receive_drain(sport);
485
486 /* Clear the interrupt registers. */
487 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT);
488 read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
489
490 /* Set rx/tx interrupt to FIFO available. */
491 mode1 = read_sbdchn(sport, R_DUART_MODE_REG_1);
492 mode1 &= ~(M_DUART_RX_IRQ_SEL_RXFULL | M_DUART_TX_IRQ_SEL_TXEMPT);
493 write_sbdchn(sport, R_DUART_MODE_REG_1, mode1);
494
495 /* Disable tx, enable rx. */
496 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_EN);
497 sport->tx_stopped = 1;
498
499 /* Enable interrupts. */
500 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
501 M_DUART_IMR_IN | M_DUART_IMR_RX);
502
503 return 0;
504}
505
506static void sbd_shutdown(struct uart_port *uport)
507{
508 struct sbd_port *sport = to_sport(uport);
509
510 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
511 sport->tx_stopped = 1;
512 free_irq(sport->port.irq, sport);
513}
514
515
516static void sbd_init_port(struct sbd_port *sport)
517{
518 struct uart_port *uport = &sport->port;
519
520 if (sport->initialised)
521 return;
522
523 /* There is no DUART reset feature, so just set some sane defaults. */
524 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_TX);
525 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_RX);
526 write_sbdchn(sport, R_DUART_MODE_REG_1, V_DUART_BITS_PER_CHAR_8);
527 write_sbdchn(sport, R_DUART_MODE_REG_2, 0);
528 write_sbdchn(sport, R_DUART_FULL_CTL,
529 V_DUART_INT_TIME(0) | V_DUART_SIG_FULL(15));
530 write_sbdchn(sport, R_DUART_OPCR_X, 0);
531 write_sbdchn(sport, R_DUART_AUXCTL_X, 0);
532 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
533
534 sport->initialised = 1;
535}
536
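/*
 * Apply new line settings: translate the termios flags into MODE_REG_1/2
 * and AUXCTL bits, clamp the baud-rate divisor to the hardware range, and
 * quiesce the transmitter and receiver around the register update.
 */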
537static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
538 struct ktermios *old_termios)
539{
540 struct sbd_port *sport = to_sport(uport);
541 unsigned int mode1 = 0, mode2 = 0, aux = 0;
542 unsigned int mode1mask = 0, mode2mask = 0, auxmask = 0;
543 unsigned int oldmode1, oldmode2, oldaux;
544 unsigned int baud, brg;
545 unsigned int command;
546
547 mode1mask |= ~(M_DUART_PARITY_MODE | M_DUART_PARITY_TYPE_ODD |
548 M_DUART_BITS_PER_CHAR);
549 mode2mask |= ~M_DUART_STOP_BIT_LEN_2;
550 auxmask |= ~M_DUART_CTS_CHNG_ENA;
551
552 /* Byte size. */
553 switch (termios->c_cflag & CSIZE) {
554 case CS5:
555 case CS6:
556 /* Unsupported, leave unchanged. */
557 mode1mask |= M_DUART_PARITY_MODE;
558 break;
559 case CS7:
560 mode1 |= V_DUART_BITS_PER_CHAR_7;
561 break;
562 case CS8:
563 default:
564 mode1 |= V_DUART_BITS_PER_CHAR_8;
565 break;
566 }
567
568 /* Parity and stop bits. */
569 if (termios->c_cflag & CSTOPB)
570 mode2 |= M_DUART_STOP_BIT_LEN_2;
571 else
572 mode2 |= M_DUART_STOP_BIT_LEN_1;
573 if (termios->c_cflag & PARENB)
574 mode1 |= V_DUART_PARITY_MODE_ADD;
575 else
576 mode1 |= V_DUART_PARITY_MODE_NONE;
577 if (termios->c_cflag & PARODD)
578 mode1 |= M_DUART_PARITY_TYPE_ODD;
579 else
580 mode1 |= M_DUART_PARITY_TYPE_EVEN;
581
582 baud = uart_get_baud_rate(uport, termios, old_termios, 1200, 5000000);
583 brg = V_DUART_BAUD_RATE(baud);
584 /* The actual lower bound is 1221bps, so compensate. */
585 if (brg > M_DUART_CLK_COUNTER)
586 brg = M_DUART_CLK_COUNTER;
587
588 uart_update_timeout(uport, termios->c_cflag, baud);
589
590 uport->read_status_mask = M_DUART_OVRUN_ERR;
591 if (termios->c_iflag & INPCK)
592 uport->read_status_mask |= M_DUART_FRM_ERR |
593 M_DUART_PARITY_ERR;
594 if (termios->c_iflag & (BRKINT | PARMRK))
595 uport->read_status_mask |= M_DUART_RCVD_BRK;
596
597 uport->ignore_status_mask = 0;
598 if (termios->c_iflag & IGNPAR)
599 uport->ignore_status_mask |= M_DUART_FRM_ERR |
600 M_DUART_PARITY_ERR;
601 if (termios->c_iflag & IGNBRK) {
602 uport->ignore_status_mask |= M_DUART_RCVD_BRK;
603 if (termios->c_iflag & IGNPAR)
604 uport->ignore_status_mask |= M_DUART_OVRUN_ERR;
605 }
606
607 if (termios->c_cflag & CREAD)
608 command = M_DUART_RX_EN;
609 else
610 command = M_DUART_RX_DIS;
611
612 if (termios->c_cflag & CRTSCTS)
613 aux |= M_DUART_CTS_CHNG_ENA;
614 else
615 aux &= ~M_DUART_CTS_CHNG_ENA;
616
617 spin_lock(&uport->lock);
618
619 if (sport->tx_stopped)
620 command |= M_DUART_TX_DIS;
621 else
622 command |= M_DUART_TX_EN;
623
624 oldmode1 = read_sbdchn(sport, R_DUART_MODE_REG_1) & mode1mask;
625 oldmode2 = read_sbdchn(sport, R_DUART_MODE_REG_2) & mode2mask;
626 oldaux = read_sbdchn(sport, R_DUART_AUXCTL_X) & auxmask;
627
628 if (!sport->tx_stopped)
629 sbd_line_drain(sport);
630 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
631
632 write_sbdchn(sport, R_DUART_MODE_REG_1, mode1 | oldmode1);
633 write_sbdchn(sport, R_DUART_MODE_REG_2, mode2 | oldmode2);
634 write_sbdchn(sport, R_DUART_CLK_SEL, brg);
635 write_sbdchn(sport, R_DUART_AUXCTL_X, aux | oldaux);
636
637 write_sbdchn(sport, R_DUART_CMD, command);
638
639 spin_unlock(&uport->lock);
640}
641
642
643static const char *sbd_type(struct uart_port *uport)
644{
645 return "SB1250 DUART";
646}
647
648static void sbd_release_port(struct uart_port *uport)
649{
650 struct sbd_port *sport = to_sport(uport);
651 struct sbd_duart *duart = sport->duart;
652 int map_guard;
653
654 iounmap(sport->memctrl);
655 sport->memctrl = NULL;
656 iounmap(uport->membase);
657 uport->membase = NULL;
658
659 map_guard = atomic_add_return(-1, &duart->map_guard);
660 if (!map_guard)
661 release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
662 release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
663}
664
665static int sbd_map_port(struct uart_port *uport)
666{
667 static const char *err = KERN_ERR "sbd: Cannot map MMIO\n";
668 struct sbd_port *sport = to_sport(uport);
669 struct sbd_duart *duart = sport->duart;
670
671 if (!uport->membase)
672 uport->membase = ioremap_nocache(uport->mapbase,
673 DUART_CHANREG_SPACING);
674 if (!uport->membase) {
675 printk(err);
676 return -ENOMEM;
677 }
678
679 if (!sport->memctrl)
680 sport->memctrl = ioremap_nocache(duart->mapctrl,
681 DUART_CHANREG_SPACING);
682 if (!sport->memctrl) {
683 printk(err);
684 iounmap(uport->membase);
685 uport->membase = NULL;
686 return -ENOMEM;
687 }
688
689 return 0;
690}
691
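/*
 * Reserve the per-channel register window and, reference-counted via
 * map_guard so both channels of a DUART can share it, the common control
 * register window; only the first requester actually claims the latter.
 */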
692static int sbd_request_port(struct uart_port *uport)
693{
694 static const char *err = KERN_ERR
695 "sbd: Unable to reserve MMIO resource\n";
696 struct sbd_duart *duart = to_sport(uport)->duart;
697 int map_guard;
698 int ret = 0;
699
700 if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
701 "sb1250-duart")) {
702 printk(err);
703 return -EBUSY;
704 }
705 map_guard = atomic_add_return(1, &duart->map_guard);
706 if (map_guard == 1) {
707 if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
708 "sb1250-duart")) {
709 atomic_add(-1, &duart->map_guard);
710 printk(err);
711 ret = -EBUSY;
712 }
713 }
714 if (!ret) {
715 ret = sbd_map_port(uport);
716 if (ret) {
717 map_guard = atomic_add_return(-1, &duart->map_guard);
718 if (!map_guard)
719 release_mem_region(duart->mapctrl,
720 DUART_CHANREG_SPACING);
721 }
722 }
723 if (ret) {
724 release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
725 return ret;
726 }
727 return 0;
728}
729
730static void sbd_config_port(struct uart_port *uport, int flags)
731{
732 struct sbd_port *sport = to_sport(uport);
733
734 if (flags & UART_CONFIG_TYPE) {
735 if (sbd_request_port(uport))
736 return;
737
738 uport->type = PORT_SB1250_DUART;
739
740 sbd_init_port(sport);
741 }
742}
743
744static int sbd_verify_port(struct uart_port *uport, struct serial_struct *ser)
745{
746 int ret = 0;
747
748 if (ser->type != PORT_UNKNOWN && ser->type != PORT_SB1250_DUART)
749 ret = -EINVAL;
750 if (ser->irq != uport->irq)
751 ret = -EINVAL;
752 if (ser->baud_base != uport->uartclk / 16)
753 ret = -EINVAL;
754 return ret;
755}
756
757
758static struct uart_ops sbd_ops = {
759 .tx_empty = sbd_tx_empty,
760 .set_mctrl = sbd_set_mctrl,
761 .get_mctrl = sbd_get_mctrl,
762 .stop_tx = sbd_stop_tx,
763 .start_tx = sbd_start_tx,
764 .stop_rx = sbd_stop_rx,
765 .enable_ms = sbd_enable_ms,
766 .break_ctl = sbd_break_ctl,
767 .startup = sbd_startup,
768 .shutdown = sbd_shutdown,
769 .set_termios = sbd_set_termios,
770 .type = sbd_type,
771 .release_port = sbd_release_port,
772 .request_port = sbd_request_port,
773 .config_port = sbd_config_port,
774 .verify_port = sbd_verify_port,
775};
776
777/* Initialize SB1250 DUART port structures. */
778static void __init sbd_probe_duarts(void)
779{
780 static int probed;
781 int chip, side;
782 int max_lines, line;
783
784 if (probed)
785 return;
786
787 /* Set the number of available units based on the SOC type. */
788 switch (soc_type) {
789 case K_SYS_SOC_TYPE_BCM1x55:
790 case K_SYS_SOC_TYPE_BCM1x80:
791 max_lines = 4;
792 break;
793 default:
794 /* Assume at least two serial ports at the normal address. */
795 max_lines = 2;
796 break;
797 }
798
799 probed = 1;
800
801 for (chip = 0, line = 0; chip < DUART_MAX_CHIP && line < max_lines;
802 chip++) {
803 sbd_duarts[chip].mapctrl = SBD_CTRLREGS(line);
804
805 for (side = 0; side < DUART_MAX_SIDE && line < max_lines;
806 side++, line++) {
807 struct sbd_port *sport = &sbd_duarts[chip].sport[side];
808 struct uart_port *uport = &sport->port;
809
810 sport->duart = &sbd_duarts[chip];
811
812 uport->irq = SBD_INT(line);
813 uport->uartclk = 100000000 / 20 * 16;
814 uport->fifosize = 16;
815 uport->iotype = UPIO_MEM;
816 uport->flags = UPF_BOOT_AUTOCONF;
817 uport->ops = &sbd_ops;
818 uport->line = line;
819 uport->mapbase = SBD_CHANREGS(line);
820 }
821 }
822}
823
824
825#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
826/*
827 * Serial console support: a very basic polling driver for serial
828 * console output. The console_sem is held by the caller, so we
829 * shouldn't be interrupted for more console activity.
830 */
831static void sbd_console_putchar(struct uart_port *uport, int ch)
832{
833 struct sbd_port *sport = to_sport(uport);
834
835 sbd_transmit_drain(sport);
836 write_sbdchn(sport, R_DUART_TX_HOLD, ch);
837}
838
839static void sbd_console_write(struct console *co, const char *s,
840 unsigned int count)
841{
842 int chip = co->index / DUART_MAX_SIDE;
843 int side = co->index % DUART_MAX_SIDE;
844 struct sbd_port *sport = &sbd_duarts[chip].sport[side];
845 struct uart_port *uport = &sport->port;
846 unsigned long flags;
847 unsigned int mask;
848
849 /* Disable transmit interrupts and enable the transmitter. */
850 spin_lock_irqsave(&uport->lock, flags);
851 mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
852 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
853 mask & ~M_DUART_IMR_TX);
854 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
855 spin_unlock_irqrestore(&uport->lock, flags);
856
857 uart_console_write(&sport->port, s, count, sbd_console_putchar);
858
859 /* Restore transmit interrupts and the transmitter enable. */
860 spin_lock_irqsave(&uport->lock, flags);
861 sbd_line_drain(sport);
862 if (sport->tx_stopped)
863 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
864 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
865 spin_unlock_irqrestore(&uport->lock, flags);
866}
867
868static int __init sbd_console_setup(struct console *co, char *options)
869{
870 int chip = co->index / DUART_MAX_SIDE;
871 int side = co->index % DUART_MAX_SIDE;
872 struct sbd_port *sport = &sbd_duarts[chip].sport[side];
873 struct uart_port *uport = &sport->port;
874 int baud = 115200;
875 int bits = 8;
876 int parity = 'n';
877 int flow = 'n';
878 int ret;
879
880 if (!sport->duart)
881 return -ENXIO;
882
883 ret = sbd_map_port(uport);
884 if (ret)
885 return ret;
886
887 sbd_init_port(sport);
888
889 if (options)
890 uart_parse_options(options, &baud, &parity, &bits, &flow);
891 return uart_set_options(uport, co, baud, parity, bits, flow);
892}
893
894static struct uart_driver sbd_reg;
895static struct console sbd_console = {
896 .name = "duart",
897 .write = sbd_console_write,
898 .device = uart_console_device,
899 .setup = sbd_console_setup,
900 .flags = CON_PRINTBUFFER,
901 .index = -1,
902 .data = &sbd_reg
903};
904
905static int __init sbd_serial_console_init(void)
906{
907 sbd_probe_duarts();
908 register_console(&sbd_console);
909
910 return 0;
911}
912
913console_initcall(sbd_serial_console_init);
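/*
 * With this console registered, it can be selected from the kernel command
 * line, e.g. "console=duart0,115200n8" for the first channel; the options
 * string is parsed by sbd_console_setup() via uart_parse_options().
 */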
914
915#define SERIAL_SB1250_DUART_CONSOLE &sbd_console
916#else
917#define SERIAL_SB1250_DUART_CONSOLE NULL
918#endif /* CONFIG_SERIAL_SB1250_DUART_CONSOLE */
919
920
921static struct uart_driver sbd_reg = {
922 .owner = THIS_MODULE,
923 .driver_name = "serial",
924 .dev_name = "duart",
925 .major = TTY_MAJOR,
926 .minor = SB1250_DUART_MINOR_BASE,
927 .nr = DUART_MAX_CHIP * DUART_MAX_SIDE,
928 .cons = SERIAL_SB1250_DUART_CONSOLE,
929};
930
931/* Set up the driver and register it. */
932static int __init sbd_init(void)
933{
934 int i, ret;
935
936 sbd_probe_duarts();
937
938 ret = uart_register_driver(&sbd_reg);
939 if (ret)
940 return ret;
941
942 for (i = 0; i < DUART_MAX_CHIP * DUART_MAX_SIDE; i++) {
943 struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
944 struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
945 struct uart_port *uport = &sport->port;
946
947 if (sport->duart)
948 uart_add_one_port(&sbd_reg, uport);
949 }
950
951 return 0;
952}
953
954/* Unload the driver: remove every registered port, then unregister the driver itself. */
955static void __exit sbd_exit(void)
956{
957 int i;
958
959 for (i = DUART_MAX_CHIP * DUART_MAX_SIDE - 1; i >= 0; i--) {
960 struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
961 struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
962 struct uart_port *uport = &sport->port;
963
964 if (sport->duart)
965 uart_remove_one_port(&sbd_reg, uport);
966 }
967
968 uart_unregister_driver(&sbd_reg);
969}
970
971module_init(sbd_init);
972module_exit(sbd_exit);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 5e3f748f2693..b91571122daa 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -107,6 +107,15 @@ config SPI_IMX
107 This enables using the Freescale iMX SPI controller in master 107 This enables using the Freescale iMX SPI controller in master
108 mode. 108 mode.
109 109
110config SPI_LM70_LLP
111 tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
112 depends on SPI_MASTER && PARPORT && EXPERIMENTAL
113 select SPI_BITBANG
114 help
115 This driver supports the NS LM70 LLP Evaluation Board,
116 which interfaces to an LM70 temperature sensor using
117 a parallel port.
118
110config SPI_MPC52xx_PSC 119config SPI_MPC52xx_PSC
111 tristate "Freescale MPC52xx PSC SPI controller" 120 tristate "Freescale MPC52xx PSC SPI controller"
112 depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL 121 depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL
@@ -133,6 +142,12 @@ config SPI_OMAP_UWIRE
133 help 142 help
134 This hooks up to the MicroWire controller on OMAP1 chips. 143 This hooks up to the MicroWire controller on OMAP1 chips.
135 144
145config SPI_OMAP24XX
146 tristate "McSPI driver for OMAP24xx"
147 depends on SPI_MASTER && ARCH_OMAP24XX
148 help
149 SPI master controller for OMAP24xx Multichannel SPI
150 (McSPI) modules.
136 151
137config SPI_PXA2XX 152config SPI_PXA2XX
138 tristate "PXA2xx SSP SPI master" 153 tristate "PXA2xx SSP SPI master"
@@ -145,17 +160,36 @@ config SPI_PXA2XX
145config SPI_S3C24XX 160config SPI_S3C24XX
146 tristate "Samsung S3C24XX series SPI" 161 tristate "Samsung S3C24XX series SPI"
147 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL 162 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
163 select SPI_BITBANG
148 help 164 help
149 SPI driver for Samsung S3C24XX series ARM SoCs 165 SPI driver for Samsung S3C24XX series ARM SoCs
150 166
151config SPI_S3C24XX_GPIO 167config SPI_S3C24XX_GPIO
152 tristate "Samsung S3C24XX series SPI by GPIO" 168 tristate "Samsung S3C24XX series SPI by GPIO"
153 depends on SPI_MASTER && ARCH_S3C2410 && SPI_BITBANG && EXPERIMENTAL 169 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
170 select SPI_BITBANG
154 help 171 help
155 SPI driver for Samsung S3C24XX series ARM SoCs using 172 SPI driver for Samsung S3C24XX series ARM SoCs using
156 GPIO lines to provide the SPI bus. This can be used where 173 GPIO lines to provide the SPI bus. This can be used where
157 the inbuilt hardware cannot provide the transfer mode, or 174 the inbuilt hardware cannot provide the transfer mode, or
158 where the board is using non hardware connected pins. 175 where the board is using non hardware connected pins.
176
177config SPI_TXX9
178 tristate "Toshiba TXx9 SPI controller"
179 depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX
180 help
181 SPI driver for Toshiba TXx9 MIPS SoCs
182
183config SPI_XILINX
184 tristate "Xilinx SPI controller"
185 depends on SPI_MASTER && XILINX_VIRTEX && EXPERIMENTAL
186 select SPI_BITBANG
187 help
188 This exposes the SPI controller IP from the Xilinx EDK.
189
190 See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
191 Product Specification document (DS464) for hardware details.
192
159# 193#
160# Add new SPI master controllers in alphabetical order above this line 194# Add new SPI master controllers in alphabetical order above this line
161# 195#
@@ -187,6 +221,15 @@ config SPI_SPIDEV
187 Note that this application programming interface is EXPERIMENTAL 221 Note that this application programming interface is EXPERIMENTAL
188 and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes. 222 and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
189 223
224config SPI_TLE62X0
225 tristate "Infineon TLE62X0 (for power switching)"
226 depends on SPI_MASTER && SYSFS
227 help
228 SPI driver for Infineon TLE62X0 series line driver chips,
229 such as the TLE6220, TLE6230 and TLE6240. This provides a
230 sysfs interface, with each line presented as a kind of GPIO
231 exposing both switch control and diagnostic feedback.
232
190# 233#
191# Add new SPI protocol masters in alphabetical order above this line 234# Add new SPI protocol masters in alphabetical order above this line
192# 235#
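For context on how SPI protocol drivers like those listed in this Kconfig section get bound to a controller: a board file registers an spi_board_info table at init time, before the matching master driver probes. The sketch below is illustrative only and not part of this patch; the bus number, chip select and clock rate are hypothetical board wiring, and "spidev" is just one modalias a board could name.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Hypothetical wiring: one spidev node on bus 1, chip select 2. */
static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "spidev",		/* driver to bind */
		.max_speed_hz	= 2 * 1000 * 1000,	/* 2 MHz */
		.bus_num	= 1,
		.chip_select	= 2,
		.mode		= SPI_MODE_0,
	},
};

static int __init example_spi_board_init(void)
{
	/* must run before the matching master controller registers */
	return spi_register_board_info(example_spi_devices,
				       ARRAY_SIZE(example_spi_devices));
}
arch_initcall(example_spi_board_init);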
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5788d867de84..41fbac45c323 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,17 +17,22 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
19obj-$(CONFIG_SPI_IMX) += spi_imx.o 19obj-$(CONFIG_SPI_IMX) += spi_imx.o
20obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
20obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 21obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
21obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 22obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
23obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
22obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 24obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
23obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o 25obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o
24obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 26obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
25obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o 27obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
28obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
29obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
26# ... add above this line ... 30# ... add above this line ...
27 31
28# SPI protocol drivers (device/link on bus) 32# SPI protocol drivers (device/link on bus)
29obj-$(CONFIG_SPI_AT25) += at25.o 33obj-$(CONFIG_SPI_AT25) += at25.o
30obj-$(CONFIG_SPI_SPIDEV) += spidev.o 34obj-$(CONFIG_SPI_SPIDEV) += spidev.o
35obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
31# ... add above this line ... 36# ... add above this line ...
32 37
33# SPI slave controller drivers (upstream link) 38# SPI slave controller drivers (upstream link)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 8b2601de3630..ad144054da30 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -46,6 +46,7 @@ struct atmel_spi {
46 struct clk *clk; 46 struct clk *clk;
47 struct platform_device *pdev; 47 struct platform_device *pdev;
48 unsigned new_1:1; 48 unsigned new_1:1;
49 struct spi_device *stay;
49 50
50 u8 stopping; 51 u8 stopping;
51 struct list_head queue; 52 struct list_head queue;
@@ -62,29 +63,62 @@ struct atmel_spi {
62/* 63/*
63 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby 64 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
64 * they assume that spi slave device state will not change on deselect, so 65 * they assume that spi slave device state will not change on deselect, so
65 * that automagic deselection is OK. Not so! Workaround uses nCSx pins 66 * that automagic deselection is OK. ("NPCSx rises if no data is to be
66 * as GPIOs; or newer controllers have CSAAT and friends. 67 * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
68 * controllers have CSAAT and friends.
67 * 69 *
68 * Since the CSAAT functionality is a bit weird on newer controllers 70 * Since the CSAAT functionality is a bit weird on newer controllers as
69 * as well, we use GPIO to control nCSx pins on all controllers. 71 * well, we use GPIO to control nCSx pins on all controllers, updating
72 * MR.PCS to avoid confusing the controller. Using GPIOs also lets us
73 * support active-high chipselects despite the controller's belief that
74 * only active-low devices/systems exist.
75 *
76 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
77 * right when driven with GPIO. ("Mode Fault does not allow more than one
78 * Master on Chip Select 0.") No workaround exists for that ... so for
79 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
80 * and (c) will trigger that first erratum in some cases.
70 */ 81 */
71 82
72static inline void cs_activate(struct spi_device *spi) 83static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
73{ 84{
74 unsigned gpio = (unsigned) spi->controller_data; 85 unsigned gpio = (unsigned) spi->controller_data;
75 unsigned active = spi->mode & SPI_CS_HIGH; 86 unsigned active = spi->mode & SPI_CS_HIGH;
87 u32 mr;
88
89 mr = spi_readl(as, MR);
90 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
76 91
77 dev_dbg(&spi->dev, "activate %u%s\n", gpio, active ? " (high)" : ""); 92 dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
78 gpio_set_value(gpio, active); 93 gpio, active ? " (high)" : "",
94 mr);
95
96 if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
97 gpio_set_value(gpio, active);
98 spi_writel(as, MR, mr);
79} 99}
80 100
81static inline void cs_deactivate(struct spi_device *spi) 101static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
82{ 102{
83 unsigned gpio = (unsigned) spi->controller_data; 103 unsigned gpio = (unsigned) spi->controller_data;
84 unsigned active = spi->mode & SPI_CS_HIGH; 104 unsigned active = spi->mode & SPI_CS_HIGH;
105 u32 mr;
85 106
86 dev_dbg(&spi->dev, "DEactivate %u%s\n", gpio, active ? " (low)" : ""); 107 /* only deactivate *this* device; sometimes transfers to
87 gpio_set_value(gpio, !active); 108 * another device may be active when this routine is called.
109 */
110 mr = spi_readl(as, MR);
111 if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
112 mr = SPI_BFINS(PCS, 0xf, mr);
113 spi_writel(as, MR, mr);
114 }
115
116 dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
117 gpio, active ? " (low)" : "",
118 mr);
119
120 if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
121 gpio_set_value(gpio, !active);
88} 122}
89 123
90/* 124/*
@@ -140,6 +174,7 @@ static void atmel_spi_next_xfer(struct spi_master *master,
140 174
141 /* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer" 175 /* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
142 * mechanism might help avoid the IRQ latency between transfers 176 * mechanism might help avoid the IRQ latency between transfers
177 * (and improve the nCS0 errata handling on at91rm9200 chips)
143 * 178 *
144 * We're also waiting for ENDRX before we start the next 179 * We're also waiting for ENDRX before we start the next
145 * transfer because we need to handle some difficult timing 180 * transfer because we need to handle some difficult timing
@@ -169,33 +204,62 @@ static void atmel_spi_next_message(struct spi_master *master)
169{ 204{
170 struct atmel_spi *as = spi_master_get_devdata(master); 205 struct atmel_spi *as = spi_master_get_devdata(master);
171 struct spi_message *msg; 206 struct spi_message *msg;
172 u32 mr; 207 struct spi_device *spi;
173 208
174 BUG_ON(as->current_transfer); 209 BUG_ON(as->current_transfer);
175 210
176 msg = list_entry(as->queue.next, struct spi_message, queue); 211 msg = list_entry(as->queue.next, struct spi_message, queue);
212 spi = msg->spi;
177 213
178 /* Select the chip */ 214 dev_dbg(master->cdev.dev, "start message %p for %s\n",
179 mr = spi_readl(as, MR); 215 msg, spi->dev.bus_id);
180 mr = SPI_BFINS(PCS, ~(1 << msg->spi->chip_select), mr); 216
181 spi_writel(as, MR, mr); 217 /* select chip if it's not still active */
182 cs_activate(msg->spi); 218 if (as->stay) {
219 if (as->stay != spi) {
220 cs_deactivate(as, as->stay);
221 cs_activate(as, spi);
222 }
223 as->stay = NULL;
224 } else
225 cs_activate(as, spi);
183 226
184 atmel_spi_next_xfer(master, msg); 227 atmel_spi_next_xfer(master, msg);
185} 228}
186 229
187static void 230/*
231 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
232 * - The buffer is either valid for CPU access, or NULL
233 * - If the buffer is valid, so is its DMA address
234 *
235 * This driver manages the DMA address unless message->is_dma_mapped.
236 */
237static int
188atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) 238atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
189{ 239{
240 struct device *dev = &as->pdev->dev;
241
190 xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; 242 xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
191 if (xfer->tx_buf) 243 if (xfer->tx_buf) {
192 xfer->tx_dma = dma_map_single(&as->pdev->dev, 244 xfer->tx_dma = dma_map_single(dev,
193 (void *) xfer->tx_buf, xfer->len, 245 (void *) xfer->tx_buf, xfer->len,
194 DMA_TO_DEVICE); 246 DMA_TO_DEVICE);
195 if (xfer->rx_buf) 247 if (dma_mapping_error(xfer->tx_dma))
196 xfer->rx_dma = dma_map_single(&as->pdev->dev, 248 return -ENOMEM;
249 }
250 if (xfer->rx_buf) {
251 xfer->rx_dma = dma_map_single(dev,
197 xfer->rx_buf, xfer->len, 252 xfer->rx_buf, xfer->len,
198 DMA_FROM_DEVICE); 253 DMA_FROM_DEVICE);
254 if (dma_mapping_error(xfer->rx_dma)) {
255 if (xfer->tx_buf)
256 dma_unmap_single(dev,
257 xfer->tx_dma, xfer->len,
258 DMA_TO_DEVICE);
259 return -ENOMEM;
260 }
261 }
262 return 0;
199} 263}
200 264
201static void atmel_spi_dma_unmap_xfer(struct spi_master *master, 265static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
@@ -211,9 +275,13 @@ static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
211 275
212static void 276static void
213atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, 277atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
214 struct spi_message *msg, int status) 278 struct spi_message *msg, int status, int stay)
215{ 279{
216 cs_deactivate(msg->spi); 280 if (!stay || status < 0)
281 cs_deactivate(as, msg->spi);
282 else
283 as->stay = msg->spi;
284
217 list_del(&msg->queue); 285 list_del(&msg->queue);
218 msg->status = status; 286 msg->status = status;
219 287
@@ -303,7 +371,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
303 /* Clear any overrun happening while cleaning up */ 371 /* Clear any overrun happening while cleaning up */
304 spi_readl(as, SR); 372 spi_readl(as, SR);
305 373
306 atmel_spi_msg_done(master, as, msg, -EIO); 374 atmel_spi_msg_done(master, as, msg, -EIO, 0);
307 } else if (pending & SPI_BIT(ENDRX)) { 375 } else if (pending & SPI_BIT(ENDRX)) {
308 ret = IRQ_HANDLED; 376 ret = IRQ_HANDLED;
309 377
@@ -321,12 +389,13 @@ atmel_spi_interrupt(int irq, void *dev_id)
321 389
322 if (msg->transfers.prev == &xfer->transfer_list) { 390 if (msg->transfers.prev == &xfer->transfer_list) {
323 /* report completed message */ 391 /* report completed message */
324 atmel_spi_msg_done(master, as, msg, 0); 392 atmel_spi_msg_done(master, as, msg, 0,
393 xfer->cs_change);
325 } else { 394 } else {
326 if (xfer->cs_change) { 395 if (xfer->cs_change) {
327 cs_deactivate(msg->spi); 396 cs_deactivate(as, msg->spi);
328 udelay(1); 397 udelay(1);
329 cs_activate(msg->spi); 398 cs_activate(as, msg->spi);
330 } 399 }
331 400
332 /* 401 /*
@@ -350,6 +419,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
350 return ret; 419 return ret;
351} 420}
352 421
422/* the spi->mode bits understood by this driver: */
353#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) 423#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
354 424
355static int atmel_spi_setup(struct spi_device *spi) 425static int atmel_spi_setup(struct spi_device *spi)
@@ -388,6 +458,14 @@ static int atmel_spi_setup(struct spi_device *spi)
388 return -EINVAL; 458 return -EINVAL;
389 } 459 }
390 460
461 /* see notes above re chipselect */
462 if (cpu_is_at91rm9200()
463 && spi->chip_select == 0
464 && (spi->mode & SPI_CS_HIGH)) {
465 dev_dbg(&spi->dev, "setup: can't be active-high\n");
466 return -EINVAL;
467 }
468
391 /* speed zero convention is used by some upper layers */ 469 /* speed zero convention is used by some upper layers */
392 bus_hz = clk_get_rate(as->clk); 470 bus_hz = clk_get_rate(as->clk);
393 if (spi->max_speed_hz) { 471 if (spi->max_speed_hz) {
@@ -397,8 +475,9 @@ static int atmel_spi_setup(struct spi_device *spi)
397 scbr = ((bus_hz + spi->max_speed_hz - 1) 475 scbr = ((bus_hz + spi->max_speed_hz - 1)
398 / spi->max_speed_hz); 476 / spi->max_speed_hz);
399 if (scbr >= (1 << SPI_SCBR_SIZE)) { 477 if (scbr >= (1 << SPI_SCBR_SIZE)) {
400 dev_dbg(&spi->dev, "setup: %d Hz too slow, scbr %u\n", 478 dev_dbg(&spi->dev,
401 spi->max_speed_hz, scbr); 479 "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
480 spi->max_speed_hz, scbr, bus_hz/255);
402 return -EINVAL; 481 return -EINVAL;
403 } 482 }
404 } else 483 } else
@@ -423,6 +502,14 @@ static int atmel_spi_setup(struct spi_device *spi)
423 return ret; 502 return ret;
424 spi->controller_state = (void *)npcs_pin; 503 spi->controller_state = (void *)npcs_pin;
425 gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH)); 504 gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
505 } else {
506 unsigned long flags;
507
508 spin_lock_irqsave(&as->lock, flags);
509 if (as->stay == spi)
510 as->stay = NULL;
511 cs_deactivate(as, spi);
512 spin_unlock_irqrestore(&as->lock, flags);
426 } 513 }
427 514
428 dev_dbg(&spi->dev, 515 dev_dbg(&spi->dev,
@@ -464,14 +551,22 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
464 dev_dbg(&spi->dev, "no protocol options yet\n"); 551 dev_dbg(&spi->dev, "no protocol options yet\n");
465 return -ENOPROTOOPT; 552 return -ENOPROTOOPT;
466 } 553 }
467 }
468 554
469 /* scrub dcache "early" */ 555 /*
470 if (!msg->is_dma_mapped) { 556 * DMA map early, for performance (empties dcache ASAP) and
471 list_for_each_entry(xfer, &msg->transfers, transfer_list) 557 * better fault reporting. This is a DMA-only driver.
472 atmel_spi_dma_map_xfer(as, xfer); 558 *
559 * NOTE that if dma_unmap_single() ever starts to do work on
560 * platforms supported by this driver, we would need to clean
561 * up mappings for previously-mapped transfers.
562 */
563 if (!msg->is_dma_mapped) {
564 if (atmel_spi_dma_map_xfer(as, xfer) < 0)
565 return -ENOMEM;
566 }
473 } 567 }
474 568
569#ifdef VERBOSE
475 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 570 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
476 dev_dbg(controller, 571 dev_dbg(controller,
477 " xfer %p: len %u tx %p/%08x rx %p/%08x\n", 572 " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
@@ -479,6 +574,7 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
479 xfer->tx_buf, xfer->tx_dma, 574 xfer->tx_buf, xfer->tx_dma,
480 xfer->rx_buf, xfer->rx_dma); 575 xfer->rx_buf, xfer->rx_dma);
481 } 576 }
577#endif
482 578
483 msg->status = -EINPROGRESS; 579 msg->status = -EINPROGRESS;
484 msg->actual_length = 0; 580 msg->actual_length = 0;
@@ -494,8 +590,21 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
494 590
495static void atmel_spi_cleanup(struct spi_device *spi) 591static void atmel_spi_cleanup(struct spi_device *spi)
496{ 592{
497 if (spi->controller_state) 593 struct atmel_spi *as = spi_master_get_devdata(spi->master);
498 gpio_free((unsigned int)spi->controller_data); 594 unsigned gpio = (unsigned) spi->controller_data;
595 unsigned long flags;
596
597 if (!spi->controller_state)
598 return;
599
600 spin_lock_irqsave(&as->lock, flags);
601 if (as->stay == spi) {
602 as->stay = NULL;
603 cs_deactivate(as, spi);
604 }
605 spin_unlock_irqrestore(&as->lock, flags);
606
607 gpio_free(gpio);
499} 608}
500 609
501/*-------------------------------------------------------------------------*/ 610/*-------------------------------------------------------------------------*/
@@ -536,6 +645,10 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
536 645
537 as = spi_master_get_devdata(master); 646 as = spi_master_get_devdata(master);
538 647
648 /*
649 * Scratch buffer is used for throwaway rx and tx data.
650 * It's coherent to minimize dcache pollution.
651 */
539 as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, 652 as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
540 &as->buffer_dma, GFP_KERNEL); 653 &as->buffer_dma, GFP_KERNEL);
541 if (!as->buffer) 654 if (!as->buffer)
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index ae2b1af0dba4..c47a650183a1 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -280,6 +280,9 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
280 return 0; 280 return 0;
281} 281}
282 282
283/* the spi->mode bits understood by this driver: */
284#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
285
283static int au1550_spi_setup(struct spi_device *spi) 286static int au1550_spi_setup(struct spi_device *spi)
284{ 287{
285 struct au1550_spi *hw = spi_master_get_devdata(spi->master); 288 struct au1550_spi *hw = spi_master_get_devdata(spi->master);
@@ -292,6 +295,12 @@ static int au1550_spi_setup(struct spi_device *spi)
292 return -EINVAL; 295 return -EINVAL;
293 } 296 }
294 297
298 if (spi->mode & ~MODEBITS) {
299 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
300 spi->mode & ~MODEBITS);
301 return -EINVAL;
302 }
303
295 if (spi->max_speed_hz == 0) 304 if (spi->max_speed_hz == 0)
296 spi->max_speed_hz = hw->freq_max; 305 spi->max_speed_hz = hw->freq_max;
297 if (spi->max_speed_hz > hw->freq_max 306 if (spi->max_speed_hz > hw->freq_max
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 11f36bef3057..d2a4b2bdb07b 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -270,6 +270,9 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
270 spin_unlock_irq(&mps->lock); 270 spin_unlock_irq(&mps->lock);
271} 271}
272 272
273/* the spi->mode bits understood by this driver: */
274#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
275
273static int mpc52xx_psc_spi_setup(struct spi_device *spi) 276static int mpc52xx_psc_spi_setup(struct spi_device *spi)
274{ 277{
275 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); 278 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
@@ -279,6 +282,12 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
279 if (spi->bits_per_word%8) 282 if (spi->bits_per_word%8)
280 return -EINVAL; 283 return -EINVAL;
281 284
285 if (spi->mode & ~MODEBITS) {
286 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
287 spi->mode & ~MODEBITS);
288 return -EINVAL;
289 }
290
282 if (!cs) { 291 if (!cs) {
283 cs = kzalloc(sizeof *cs, GFP_KERNEL); 292 cs = kzalloc(sizeof *cs, GFP_KERNEL);
284 if (!cs) 293 if (!cs)
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
new file mode 100644
index 000000000000..6b357cdb9ea3
--- /dev/null
+++ b/drivers/spi/omap2_mcspi.c
@@ -0,0 +1,1081 @@
1/*
2 * OMAP2 McSPI controller driver
3 *
4 * Copyright (C) 2005, 2006 Nokia Corporation
5 * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
6 * Juha Yrjölä <juha.yrjola@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/device.h>
29#include <linux/delay.h>
30#include <linux/dma-mapping.h>
31#include <linux/platform_device.h>
32#include <linux/err.h>
33#include <linux/clk.h>
34#include <linux/io.h>
35
36#include <linux/spi/spi.h>
37
38#include <asm/arch/dma.h>
39#include <asm/arch/clock.h>
40
41
42#define OMAP2_MCSPI_MAX_FREQ 48000000
43
44#define OMAP2_MCSPI_REVISION 0x00
45#define OMAP2_MCSPI_SYSCONFIG 0x10
46#define OMAP2_MCSPI_SYSSTATUS 0x14
47#define OMAP2_MCSPI_IRQSTATUS 0x18
48#define OMAP2_MCSPI_IRQENABLE 0x1c
49#define OMAP2_MCSPI_WAKEUPENABLE 0x20
50#define OMAP2_MCSPI_SYST 0x24
51#define OMAP2_MCSPI_MODULCTRL 0x28
52
53/* per-channel banks, 0x14 bytes each, first is: */
54#define OMAP2_MCSPI_CHCONF0 0x2c
55#define OMAP2_MCSPI_CHSTAT0 0x30
56#define OMAP2_MCSPI_CHCTRL0 0x34
57#define OMAP2_MCSPI_TX0 0x38
58#define OMAP2_MCSPI_RX0 0x3c
59
60/* per-register bitmasks: */
61
62#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0)
63#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1)
64
65#define OMAP2_MCSPI_SYSSTATUS_RESETDONE (1 << 0)
66
67#define OMAP2_MCSPI_MODULCTRL_SINGLE (1 << 0)
68#define OMAP2_MCSPI_MODULCTRL_MS (1 << 2)
69#define OMAP2_MCSPI_MODULCTRL_STEST (1 << 3)
70
71#define OMAP2_MCSPI_CHCONF_PHA (1 << 0)
72#define OMAP2_MCSPI_CHCONF_POL (1 << 1)
73#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
74#define OMAP2_MCSPI_CHCONF_EPOL (1 << 6)
75#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
76#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY (0x01 << 12)
77#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY (0x02 << 12)
78#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
79#define OMAP2_MCSPI_CHCONF_DMAW (1 << 14)
80#define OMAP2_MCSPI_CHCONF_DMAR (1 << 15)
81#define OMAP2_MCSPI_CHCONF_DPE0 (1 << 16)
82#define OMAP2_MCSPI_CHCONF_DPE1 (1 << 17)
83#define OMAP2_MCSPI_CHCONF_IS (1 << 18)
84#define OMAP2_MCSPI_CHCONF_TURBO (1 << 19)
85#define OMAP2_MCSPI_CHCONF_FORCE (1 << 20)
86
87#define OMAP2_MCSPI_CHSTAT_RXS (1 << 0)
88#define OMAP2_MCSPI_CHSTAT_TXS (1 << 1)
89#define OMAP2_MCSPI_CHSTAT_EOT (1 << 2)
90
91#define OMAP2_MCSPI_CHCTRL_EN (1 << 0)
92
93
94/* We have 2 DMA channels per CS, one for RX and one for TX */
95struct omap2_mcspi_dma {
96 int dma_tx_channel;
97 int dma_rx_channel;
98
99 int dma_tx_sync_dev;
100 int dma_rx_sync_dev;
101
102 struct completion dma_tx_completion;
103 struct completion dma_rx_completion;
104};
105
106/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
107 * cache operations; a better heuristic would also consider word size and bit rate.
108 */
109#define DMA_MIN_BYTES 8
110
111
112struct omap2_mcspi {
113 struct work_struct work;
114 /* lock protects queue and registers */
115 spinlock_t lock;
116 struct list_head msg_queue;
117 struct spi_master *master;
118 struct clk *ick;
119 struct clk *fck;
120 /* Virtual base address of the controller */
121 void __iomem *base;
122 /* SPI1 has 4 channels, while SPI2 has 2 */
123 struct omap2_mcspi_dma *dma_channels;
124};
125
126struct omap2_mcspi_cs {
127 void __iomem *base;
128 int word_len;
129};
130
131static struct workqueue_struct *omap2_mcspi_wq;
132
133#define MOD_REG_BIT(val, mask, set) do { \
134 if (set) \
135 val |= mask; \
136 else \
137 val &= ~mask; \
138} while (0)
139
140static inline void mcspi_write_reg(struct spi_master *master,
141 int idx, u32 val)
142{
143 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
144
145 __raw_writel(val, mcspi->base + idx);
146}
147
148static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
149{
150 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
151
152 return __raw_readl(mcspi->base + idx);
153}
154
155static inline void mcspi_write_cs_reg(const struct spi_device *spi,
156 int idx, u32 val)
157{
158 struct omap2_mcspi_cs *cs = spi->controller_state;
159
160 __raw_writel(val, cs->base + idx);
161}
162
163static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
164{
165 struct omap2_mcspi_cs *cs = spi->controller_state;
166
167 return __raw_readl(cs->base + idx);
168}
169
170static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
171 int is_read, int enable)
172{
173 u32 l, rw;
174
175 l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
176
177 if (is_read) /* 1 is read, 0 write */
178 rw = OMAP2_MCSPI_CHCONF_DMAR;
179 else
180 rw = OMAP2_MCSPI_CHCONF_DMAW;
181
182 MOD_REG_BIT(l, rw, enable);
183 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
184}
185
186static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
187{
188 u32 l;
189
190 l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
191 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
192}
193
194static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
195{
196 u32 l;
197
198 l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
199 MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
200 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
201}
202
203static void omap2_mcspi_set_master_mode(struct spi_master *master)
204{
205 u32 l;
206
207 /* setup when switching from (reset default) slave mode
208 * to single-channel master mode
209 */
210 l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
211 MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
212 MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
213 MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
214 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
215}
216
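/*
 * Full-duplex DMA transfer: program the TX and/or RX OMAP DMA channels
 * element-synchronized to the McSPI request lines, enable the DMA requests
 * in CHCONF, then sleep on the completions before unmapping the buffers.
 */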
217static unsigned
218omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
219{
220 struct omap2_mcspi *mcspi;
221 struct omap2_mcspi_cs *cs = spi->controller_state;
222 struct omap2_mcspi_dma *mcspi_dma;
223 unsigned int count, c;
224 unsigned long base, tx_reg, rx_reg;
225 int word_len, data_type, element_count;
226 u8 * rx;
227 const u8 * tx;
228
229 mcspi = spi_master_get_devdata(spi->master);
230 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
231
232 count = xfer->len;
233 c = count;
234 word_len = cs->word_len;
235
236 base = (unsigned long) io_v2p(cs->base);
237 tx_reg = base + OMAP2_MCSPI_TX0;
238 rx_reg = base + OMAP2_MCSPI_RX0;
239 rx = xfer->rx_buf;
240 tx = xfer->tx_buf;
241
242 if (word_len <= 8) {
243 data_type = OMAP_DMA_DATA_TYPE_S8;
244 element_count = count;
245 } else if (word_len <= 16) {
246 data_type = OMAP_DMA_DATA_TYPE_S16;
247 element_count = count >> 1;
248 } else /* word_len <= 32 */ {
249 data_type = OMAP_DMA_DATA_TYPE_S32;
250 element_count = count >> 2;
251 }
252
253 if (tx != NULL) {
254 omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
255 data_type, element_count, 1,
256 OMAP_DMA_SYNC_ELEMENT,
257 mcspi_dma->dma_tx_sync_dev, 0);
258
259 omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
260 OMAP_DMA_AMODE_CONSTANT,
261 tx_reg, 0, 0);
262
263 omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
264 OMAP_DMA_AMODE_POST_INC,
265 xfer->tx_dma, 0, 0);
266 }
267
268 if (rx != NULL) {
269 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
270 data_type, element_count, 1,
271 OMAP_DMA_SYNC_ELEMENT,
272 mcspi_dma->dma_rx_sync_dev, 1);
273
274 omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
275 OMAP_DMA_AMODE_CONSTANT,
276 rx_reg, 0, 0);
277
278 omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
279 OMAP_DMA_AMODE_POST_INC,
280 xfer->rx_dma, 0, 0);
281 }
282
283 if (tx != NULL) {
284 omap_start_dma(mcspi_dma->dma_tx_channel);
285 omap2_mcspi_set_dma_req(spi, 0, 1);
286 }
287
288 if (rx != NULL) {
289 omap_start_dma(mcspi_dma->dma_rx_channel);
290 omap2_mcspi_set_dma_req(spi, 1, 1);
291 }
292
293 if (tx != NULL) {
294 wait_for_completion(&mcspi_dma->dma_tx_completion);
295 dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
296 }
297
298 if (rx != NULL) {
299 wait_for_completion(&mcspi_dma->dma_rx_completion);
300 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
301 }
302 return count;
303}
304
305static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
306{
307 unsigned long timeout;
308
309 timeout = jiffies + msecs_to_jiffies(1000);
310 while (!(__raw_readl(reg) & bit)) {
311 if (time_after(jiffies, timeout))
312 return -1;
313 cpu_relax();
314 }
315 return 0;
316}
317
318static unsigned
319omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
320{
321 struct omap2_mcspi *mcspi;
322 struct omap2_mcspi_cs *cs = spi->controller_state;
323 unsigned int count, c;
324 u32 l;
325 void __iomem *base = cs->base;
326 void __iomem *tx_reg;
327 void __iomem *rx_reg;
328 void __iomem *chstat_reg;
329 int word_len;
330
331 mcspi = spi_master_get_devdata(spi->master);
332 count = xfer->len;
333 c = count;
334 word_len = cs->word_len;
335
336 l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
337 l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
338
339 /* We store the pre-calculated register addresses on stack to speed
340 * up the transfer loop. */
341 tx_reg = base + OMAP2_MCSPI_TX0;
342 rx_reg = base + OMAP2_MCSPI_RX0;
343 chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
344
345 if (word_len <= 8) {
346 u8 *rx;
347 const u8 *tx;
348
349 rx = xfer->rx_buf;
350 tx = xfer->tx_buf;
351
352 do {
353 if (tx != NULL) {
354 if (mcspi_wait_for_reg_bit(chstat_reg,
355 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
356 dev_err(&spi->dev, "TXS timed out\n");
357 goto out;
358 }
359#ifdef VERBOSE
360 dev_dbg(&spi->dev, "write-%d %02x\n",
361 word_len, *tx);
362#endif
363 __raw_writel(*tx++, tx_reg);
364 }
365 if (rx != NULL) {
366 if (mcspi_wait_for_reg_bit(chstat_reg,
367 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
368 dev_err(&spi->dev, "RXS timed out\n");
369 goto out;
370 }
371 /* prevent last RX_ONLY read from triggering
372 * more word i/o: switch to rx+tx
373 */
374 if (c == 0 && tx == NULL)
375 mcspi_write_cs_reg(spi,
376 OMAP2_MCSPI_CHCONF0, l);
377 *rx++ = __raw_readl(rx_reg);
378#ifdef VERBOSE
379 dev_dbg(&spi->dev, "read-%d %02x\n",
380 word_len, *(rx - 1));
381#endif
382 }
383 c -= 1;
384 } while (c);
385 } else if (word_len <= 16) {
386 u16 *rx;
387 const u16 *tx;
388
389 rx = xfer->rx_buf;
390 tx = xfer->tx_buf;
391 do {
392 if (tx != NULL) {
393 if (mcspi_wait_for_reg_bit(chstat_reg,
394 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
395 dev_err(&spi->dev, "TXS timed out\n");
396 goto out;
397 }
398#ifdef VERBOSE
399 dev_dbg(&spi->dev, "write-%d %04x\n",
400 word_len, *tx);
401#endif
402 __raw_writel(*tx++, tx_reg);
403 }
404 if (rx != NULL) {
405 if (mcspi_wait_for_reg_bit(chstat_reg,
406 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
407 dev_err(&spi->dev, "RXS timed out\n");
408 goto out;
409 }
410 /* prevent last RX_ONLY read from triggering
411 * more word i/o: switch to rx+tx
412 */
413 if (c == 0 && tx == NULL)
414 mcspi_write_cs_reg(spi,
415 OMAP2_MCSPI_CHCONF0, l);
416 *rx++ = __raw_readl(rx_reg);
417#ifdef VERBOSE
418 dev_dbg(&spi->dev, "read-%d %04x\n",
419 word_len, *(rx - 1));
420#endif
421 }
422 c -= 2;
423 } while (c);
424 } else if (word_len <= 32) {
425 u32 *rx;
426 const u32 *tx;
427
428 rx = xfer->rx_buf;
429 tx = xfer->tx_buf;
430 do {
431 if (tx != NULL) {
432 if (mcspi_wait_for_reg_bit(chstat_reg,
433 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
434 dev_err(&spi->dev, "TXS timed out\n");
435 goto out;
436 }
437#ifdef VERBOSE
438 dev_dbg(&spi->dev, "write-%d %04x\n",
439 word_len, *tx);
440#endif
441 __raw_writel(*tx++, tx_reg);
442 }
443 if (rx != NULL) {
444 if (mcspi_wait_for_reg_bit(chstat_reg,
445 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
446 dev_err(&spi->dev, "RXS timed out\n");
447 goto out;
448 }
449 /* prevent last RX_ONLY read from triggering
450 * more word i/o: switch to rx+tx
451 */
452 if (c == 0 && tx == NULL)
453 mcspi_write_cs_reg(spi,
454 OMAP2_MCSPI_CHCONF0, l);
455 *rx++ = __raw_readl(rx_reg);
456#ifdef VERBOSE
457 dev_dbg(&spi->dev, "read-%d %04x\n",
458 word_len, *(rx - 1));
459#endif
460 }
461 c -= 4;
462 } while (c);
463 }
464
465 /* for TX_ONLY mode, be sure all words have shifted out */
466 if (xfer->rx_buf == NULL) {
467 if (mcspi_wait_for_reg_bit(chstat_reg,
468 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
469 dev_err(&spi->dev, "TXS timed out\n");
470 } else if (mcspi_wait_for_reg_bit(chstat_reg,
471 OMAP2_MCSPI_CHSTAT_EOT) < 0)
472 dev_err(&spi->dev, "EOT timed out\n");
473 }
474out:
475 return count - c;
476}
477
478/* called only when no transfer is active to this device */
479static int omap2_mcspi_setup_transfer(struct spi_device *spi,
480 struct spi_transfer *t)
481{
482 struct omap2_mcspi_cs *cs = spi->controller_state;
483 struct omap2_mcspi *mcspi;
484 u32 l = 0, div = 0;
485 u8 word_len = spi->bits_per_word;
486
487 mcspi = spi_master_get_devdata(spi->master);
488
489 if (t != NULL && t->bits_per_word)
490 word_len = t->bits_per_word;
491
492 cs->word_len = word_len;
493
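 /*
  * Pick the smallest power-of-two divider that brings the 48 MHz
  * reference clock down to at most max_speed_hz; e.g. a 10 MHz
  * device ends up with div = 3 (48 MHz / 8 = 6 MHz).
  */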
494 if (spi->max_speed_hz) {
495 while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
496 > spi->max_speed_hz)
497 div++;
498 } else
499 div = 15;
500
501 l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
502
503 /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
504 * REVISIT: this controller could support SPI_3WIRE mode.
505 */
506 l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
507 l |= OMAP2_MCSPI_CHCONF_DPE0;
508
509 /* wordlength */
510 l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
511 l |= (word_len - 1) << 7;
512
513 /* set chipselect polarity; manage with FORCE */
514 if (!(spi->mode & SPI_CS_HIGH))
515 l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
516 else
517 l &= ~OMAP2_MCSPI_CHCONF_EPOL;
518
519 /* set clock divisor */
520 l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
521 l |= div << 2;
522
523 /* set SPI mode 0..3 */
524 if (spi->mode & SPI_CPOL)
525 l |= OMAP2_MCSPI_CHCONF_POL;
526 else
527 l &= ~OMAP2_MCSPI_CHCONF_POL;
528 if (spi->mode & SPI_CPHA)
529 l |= OMAP2_MCSPI_CHCONF_PHA;
530 else
531 l &= ~OMAP2_MCSPI_CHCONF_PHA;
532
533 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
534
535 dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
536 OMAP2_MCSPI_MAX_FREQ / (1 << div),
537 (spi->mode & SPI_CPHA) ? "trailing" : "leading",
538 (spi->mode & SPI_CPOL) ? "inverted" : "normal");
539
540 return 0;
541}
542
543static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
544{
545 struct spi_device *spi = data;
546 struct omap2_mcspi *mcspi;
547 struct omap2_mcspi_dma *mcspi_dma;
548
549 mcspi = spi_master_get_devdata(spi->master);
550 mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
551
552 complete(&mcspi_dma->dma_rx_completion);
553
554 /* We must disable the DMA RX request */
555 omap2_mcspi_set_dma_req(spi, 1, 0);
556}
557
558static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
559{
560 struct spi_device *spi = data;
561 struct omap2_mcspi *mcspi;
562 struct omap2_mcspi_dma *mcspi_dma;
563
564 mcspi = spi_master_get_devdata(spi->master);
565 mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
566
567 complete(&mcspi_dma->dma_tx_completion);
568
569 /* We must disable the DMA TX request */
570 omap2_mcspi_set_dma_req(spi, 0, 0);
571}
572
573static int omap2_mcspi_request_dma(struct spi_device *spi)
574{
575 struct spi_master *master = spi->master;
576 struct omap2_mcspi *mcspi;
577 struct omap2_mcspi_dma *mcspi_dma;
578
579 mcspi = spi_master_get_devdata(master);
580 mcspi_dma = mcspi->dma_channels + spi->chip_select;
581
582 if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
583 omap2_mcspi_dma_rx_callback, spi,
584 &mcspi_dma->dma_rx_channel)) {
585 dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
586 return -EAGAIN;
587 }
588
589 if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
590 omap2_mcspi_dma_tx_callback, spi,
591 &mcspi_dma->dma_tx_channel)) {
592 omap_free_dma(mcspi_dma->dma_rx_channel);
593 mcspi_dma->dma_rx_channel = -1;
594 dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
595 return -EAGAIN;
596 }
597
598 init_completion(&mcspi_dma->dma_rx_completion);
599 init_completion(&mcspi_dma->dma_tx_completion);
600
601 return 0;
602}
603
604/* the spi->mode bits understood by this driver: */
605#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
606
607static int omap2_mcspi_setup(struct spi_device *spi)
608{
609 int ret;
610 struct omap2_mcspi *mcspi;
611 struct omap2_mcspi_dma *mcspi_dma;
612 struct omap2_mcspi_cs *cs = spi->controller_state;
613
614 if (spi->mode & ~MODEBITS) {
615 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
616 spi->mode & ~MODEBITS);
617 return -EINVAL;
618 }
619
620 if (spi->bits_per_word == 0)
621 spi->bits_per_word = 8;
622 else if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
623 dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
624 spi->bits_per_word);
625 return -EINVAL;
626 }
627
628 mcspi = spi_master_get_devdata(spi->master);
629 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
630
631 if (!cs) {
632 cs = kzalloc(sizeof *cs, GFP_KERNEL);
633 if (!cs)
634 return -ENOMEM;
635 cs->base = mcspi->base + spi->chip_select * 0x14;
636 spi->controller_state = cs;
637 }
638
639 if (mcspi_dma->dma_rx_channel == -1
640 || mcspi_dma->dma_tx_channel == -1) {
641 ret = omap2_mcspi_request_dma(spi);
642 if (ret < 0)
643 return ret;
644 }
645
646 clk_enable(mcspi->ick);
647 clk_enable(mcspi->fck);
648 ret = omap2_mcspi_setup_transfer(spi, NULL);
649 clk_disable(mcspi->fck);
650 clk_disable(mcspi->ick);
651
652 return ret;
653}
654
655static void omap2_mcspi_cleanup(struct spi_device *spi)
656{
657 struct omap2_mcspi *mcspi;
658 struct omap2_mcspi_dma *mcspi_dma;
659
660 mcspi = spi_master_get_devdata(spi->master);
661 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
662
663 kfree(spi->controller_state);
664
665 if (mcspi_dma->dma_rx_channel != -1) {
666 omap_free_dma(mcspi_dma->dma_rx_channel);
667 mcspi_dma->dma_rx_channel = -1;
668 }
669 if (mcspi_dma->dma_tx_channel != -1) {
670 omap_free_dma(mcspi_dma->dma_tx_channel);
671 mcspi_dma->dma_tx_channel = -1;
672 }
673}
674
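/*
 * Workqueue handler: with the interface clocks enabled, pop messages off
 * msg_queue one at a time, run each transfer by DMA or PIO as appropriate,
 * manage the chip select with the FORCE bit, and complete the message.
 */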
675static void omap2_mcspi_work(struct work_struct *work)
676{
677 struct omap2_mcspi *mcspi;
678
679 mcspi = container_of(work, struct omap2_mcspi, work);
680 spin_lock_irq(&mcspi->lock);
681
682 clk_enable(mcspi->ick);
683 clk_enable(mcspi->fck);
684
685 /* We only enable one channel at a time -- the one whose message is
686 * at the head of the queue -- although this controller would gladly
687 * arbitrate among multiple channels. This corresponds to "single
688 * channel" master mode. As a side effect, we need to manage the
689 * chipselect with the FORCE bit ... CS != channel enable.
690 */
691 while (!list_empty(&mcspi->msg_queue)) {
692 struct spi_message *m;
693 struct spi_device *spi;
694 struct spi_transfer *t = NULL;
695 int cs_active = 0;
696 struct omap2_mcspi_device_config *conf;
697 struct omap2_mcspi_cs *cs;
698 int par_override = 0;
699 int status = 0;
700 u32 chconf;
701
702 m = container_of(mcspi->msg_queue.next, struct spi_message,
703 queue);
704
705 list_del_init(&m->queue);
706 spin_unlock_irq(&mcspi->lock);
707
708 spi = m->spi;
709 conf = spi->controller_data;
710 cs = spi->controller_state;
711
712 omap2_mcspi_set_enable(spi, 1);
713 list_for_each_entry(t, &m->transfers, transfer_list) {
714 if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
715 status = -EINVAL;
716 break;
717 }
718 if (par_override || t->speed_hz || t->bits_per_word) {
719 par_override = 1;
720 status = omap2_mcspi_setup_transfer(spi, t);
721 if (status < 0)
722 break;
723 if (!t->speed_hz && !t->bits_per_word)
724 par_override = 0;
725 }
726
727 if (!cs_active) {
728 omap2_mcspi_force_cs(spi, 1);
729 cs_active = 1;
730 }
731
732 chconf = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
733 chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
734 if (t->tx_buf == NULL)
735 chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
736 else if (t->rx_buf == NULL)
737 chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
738 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, chconf);
739
740 if (t->len) {
741 unsigned count;
742
743 /* RX_ONLY mode needs dummy data in TX reg */
744 if (t->tx_buf == NULL)
745 __raw_writel(0, cs->base
746 + OMAP2_MCSPI_TX0);
747
748 if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
749 count = omap2_mcspi_txrx_dma(spi, t);
750 else
751 count = omap2_mcspi_txrx_pio(spi, t);
752 m->actual_length += count;
753
754 if (count != t->len) {
755 status = -EIO;
756 break;
757 }
758 }
759
760 if (t->delay_usecs)
761 udelay(t->delay_usecs);
762
763 /* ignore the "leave it on after last xfer" hint */
764 if (t->cs_change) {
765 omap2_mcspi_force_cs(spi, 0);
766 cs_active = 0;
767 }
768 }
769
770 /* Restore defaults if they were overridden */
771 if (par_override) {
772 par_override = 0;
773 status = omap2_mcspi_setup_transfer(spi, NULL);
774 }
775
776 if (cs_active)
777 omap2_mcspi_force_cs(spi, 0);
778
779 omap2_mcspi_set_enable(spi, 0);
780
781 m->status = status;
782 m->complete(m->context);
783
784 spin_lock_irq(&mcspi->lock);
785 }
786
787 clk_disable(mcspi->fck);
788 clk_disable(mcspi->ick);
789
790 spin_unlock_irq(&mcspi->lock);
791}
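
For reference, this is how a protocol driver ends up exercising the FORCE-based chipselect handling above: a queued message whose first transfer sets cs_change gets a chipselect pulse between command and response. A sketch only; the command/response layout is an assumption, and buffers should be DMA-safe (e.g. kmalloc'd) once they reach DMA_MIN_BYTES.

static int example_cmd_then_read(struct spi_device *spi, u8 cmd,
		u8 *resp, size_t len)
{
	struct spi_transfer t[2] = {
		{ .tx_buf = &cmd,  .len = 1,   .cs_change = 1 },
		{ .rx_buf = resp,  .len = len },
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	/* lands in omap2_mcspi_transfer() and is run by omap2_mcspi_work() */
	return spi_sync(spi, &m);
}
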
792
793static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
794{
795 struct omap2_mcspi *mcspi;
796 unsigned long flags;
797 struct spi_transfer *t;
798
799 m->actual_length = 0;
800 m->status = 0;
801
802 /* reject invalid messages and transfers */
803 if (list_empty(&m->transfers) || !m->complete)
804 return -EINVAL;
805 list_for_each_entry(t, &m->transfers, transfer_list) {
806 const void *tx_buf = t->tx_buf;
807 void *rx_buf = t->rx_buf;
808 unsigned len = t->len;
809
810 if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
811 || (len && !(rx_buf || tx_buf))
812 || (t->bits_per_word &&
813 ( t->bits_per_word < 4
814 || t->bits_per_word > 32))) {
815 dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
816 t->speed_hz,
817 len,
818 tx_buf ? "tx" : "",
819 rx_buf ? "rx" : "",
820 t->bits_per_word);
821 return -EINVAL;
822 }
823 if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) {
824 dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
825 t->speed_hz,
826 OMAP2_MCSPI_MAX_FREQ/(1<<16));
827 return -EINVAL;
828 }
829
830 if (m->is_dma_mapped || len < DMA_MIN_BYTES)
831 continue;
832
833 /* Do DMA mapping "early" for better error reporting and
834 * dcache use. Note that if dma_unmap_single() ever starts
835 * to do real work on ARM, we'd need to clean up mappings
836 * for previous transfers on *ALL* exits of this loop...
837 */
838 if (tx_buf != NULL) {
839 t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
840 len, DMA_TO_DEVICE);
841 if (dma_mapping_error(t->tx_dma)) {
842 dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
843 'T', len);
844 return -EINVAL;
845 }
846 }
847 if (rx_buf != NULL) {
848 t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
849 DMA_FROM_DEVICE);
850 if (dma_mapping_error(t->rx_dma)) {
851 dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
852 'R', len);
853 if (tx_buf != NULL)
854 dma_unmap_single(NULL, t->tx_dma,
855 len, DMA_TO_DEVICE);
856 return -EINVAL;
857 }
858 }
859 }
860
861 mcspi = spi_master_get_devdata(spi->master);
862
863 spin_lock_irqsave(&mcspi->lock, flags);
864 list_add_tail(&m->queue, &mcspi->msg_queue);
865 queue_work(omap2_mcspi_wq, &mcspi->work);
866 spin_unlock_irqrestore(&mcspi->lock, flags);
867
868 return 0;
869}
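
A note on the low-speed check above: transfers asking for less than OMAP2_MCSPI_MAX_FREQ/2^16 are rejected. Assuming the usual 48 MHz functional clock that constant represents (it is defined earlier in this file), the floor works out to 48000000 / 65536, roughly 732 Hz.
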
870
871static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
872{
873 struct spi_master *master = mcspi->master;
874 u32 tmp;
875
876 clk_enable(mcspi->ick);
877 clk_enable(mcspi->fck);
878
879 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
880 OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
881 do {
882 tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
883 } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
884
885 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
886 /* (3 << 8) | (2 << 3) | */
887 OMAP2_MCSPI_SYSCONFIG_AUTOIDLE);
888
889 omap2_mcspi_set_master_mode(master);
890
891 clk_disable(mcspi->fck);
892 clk_disable(mcspi->ick);
893 return 0;
894}
895
896static u8 __initdata spi1_rxdma_id [] = {
897 OMAP24XX_DMA_SPI1_RX0,
898 OMAP24XX_DMA_SPI1_RX1,
899 OMAP24XX_DMA_SPI1_RX2,
900 OMAP24XX_DMA_SPI1_RX3,
901};
902
903static u8 __initdata spi1_txdma_id [] = {
904 OMAP24XX_DMA_SPI1_TX0,
905 OMAP24XX_DMA_SPI1_TX1,
906 OMAP24XX_DMA_SPI1_TX2,
907 OMAP24XX_DMA_SPI1_TX3,
908};
909
910static u8 __initdata spi2_rxdma_id[] = {
911 OMAP24XX_DMA_SPI2_RX0,
912 OMAP24XX_DMA_SPI2_RX1,
913};
914
915static u8 __initdata spi2_txdma_id[] = {
916 OMAP24XX_DMA_SPI2_TX0,
917 OMAP24XX_DMA_SPI2_TX1,
918};
919
920static int __init omap2_mcspi_probe(struct platform_device *pdev)
921{
922 struct spi_master *master;
923 struct omap2_mcspi *mcspi;
924 struct resource *r;
925 int status = 0, i;
926 const u8 *rxdma_id, *txdma_id;
927 unsigned num_chipselect;
928
929 switch (pdev->id) {
930 case 1:
931 rxdma_id = spi1_rxdma_id;
932 txdma_id = spi1_txdma_id;
933 num_chipselect = 4;
934 break;
935 case 2:
936 rxdma_id = spi2_rxdma_id;
937 txdma_id = spi2_txdma_id;
938 num_chipselect = 2;
939 break;
940 /* REVISIT omap2430 has a third McSPI ... */
941 default:
942 return -EINVAL;
943 }
944
945 master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
946 if (master == NULL) {
947 dev_dbg(&pdev->dev, "master allocation failed\n");
948 return -ENOMEM;
949 }
950
951 if (pdev->id != -1)
952 master->bus_num = pdev->id;
953
954 master->setup = omap2_mcspi_setup;
955 master->transfer = omap2_mcspi_transfer;
956 master->cleanup = omap2_mcspi_cleanup;
957 master->num_chipselect = num_chipselect;
958
959 dev_set_drvdata(&pdev->dev, master);
960
961 mcspi = spi_master_get_devdata(master);
962 mcspi->master = master;
963
964 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
965 if (r == NULL) {
966 status = -ENODEV;
967 goto err1;
968 }
969 if (!request_mem_region(r->start, (r->end - r->start) + 1,
970 pdev->dev.bus_id)) {
971 status = -EBUSY;
972 goto err1;
973 }
974
975 mcspi->base = (void __iomem *) io_p2v(r->start);
976
977 INIT_WORK(&mcspi->work, omap2_mcspi_work);
978
979 spin_lock_init(&mcspi->lock);
980 INIT_LIST_HEAD(&mcspi->msg_queue);
981
982 mcspi->ick = clk_get(&pdev->dev, "mcspi_ick");
983 if (IS_ERR(mcspi->ick)) {
984 dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
985 status = PTR_ERR(mcspi->ick);
986 goto err1a;
987 }
988 mcspi->fck = clk_get(&pdev->dev, "mcspi_fck");
989 if (IS_ERR(mcspi->fck)) {
990 dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
991 status = PTR_ERR(mcspi->fck);
992 goto err2;
993 }
994
995 mcspi->dma_channels = kcalloc(master->num_chipselect,
996 sizeof(struct omap2_mcspi_dma),
997 GFP_KERNEL);
998
999 if (mcspi->dma_channels == NULL)
1000 goto err3;
1001
1002 for (i = 0; i < num_chipselect; i++) {
1003 mcspi->dma_channels[i].dma_rx_channel = -1;
1004 mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
1005 mcspi->dma_channels[i].dma_tx_channel = -1;
1006 mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
1007 }
1008
1009 if (omap2_mcspi_reset(mcspi) < 0)
1010 goto err4;
1011
1012 status = spi_register_master(master);
1013 if (status < 0)
1014 goto err4;
1015
1016 return status;
1017
1018err4:
1019 kfree(mcspi->dma_channels);
1020err3:
1021 clk_put(mcspi->fck);
1022err2:
1023 clk_put(mcspi->ick);
1024err1a:
1025 release_mem_region(r->start, (r->end - r->start) + 1);
1026err1:
1027 spi_master_put(master);
1028 return status;
1029}
1030
1031static int __exit omap2_mcspi_remove(struct platform_device *pdev)
1032{
1033 struct spi_master *master;
1034 struct omap2_mcspi *mcspi;
1035 struct omap2_mcspi_dma *dma_channels;
1036 struct resource *r;
1037
1038 master = dev_get_drvdata(&pdev->dev);
1039 mcspi = spi_master_get_devdata(master);
1040 dma_channels = mcspi->dma_channels;
1041
1042 clk_put(mcspi->fck);
1043 clk_put(mcspi->ick);
1044
1045 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1046 release_mem_region(r->start, (r->end - r->start) + 1);
1047
1048 spi_unregister_master(master);
1049 kfree(dma_channels);
1050
1051 return 0;
1052}
1053
1054static struct platform_driver omap2_mcspi_driver = {
1055 .driver = {
1056 .name = "omap2_mcspi",
1057 .owner = THIS_MODULE,
1058 },
1059 .remove = __exit_p(omap2_mcspi_remove),
1060};
1061
1062
1063static int __init omap2_mcspi_init(void)
1064{
1065 omap2_mcspi_wq = create_singlethread_workqueue(
1066 omap2_mcspi_driver.driver.name);
1067 if (omap2_mcspi_wq == NULL)
1068 return -1;
1069 return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
1070}
1071subsys_initcall(omap2_mcspi_init);
1072
1073static void __exit omap2_mcspi_exit(void)
1074{
1075 platform_driver_unregister(&omap2_mcspi_driver);
1076
1077 destroy_workqueue(omap2_mcspi_wq);
1078}
1079module_exit(omap2_mcspi_exit);
1080
1081MODULE_LICENSE("GPL");
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index 95183e1df525..d275c615a73e 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -445,10 +445,19 @@ done:
 	return status;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 static int uwire_setup(struct spi_device *spi)
 {
 	struct uwire_state *ust = spi->controller_state;
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	if (ust == NULL) {
 		ust = kzalloc(sizeof(*ust), GFP_KERNEL);
 		if (ust == NULL)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9f2c887ffa04..e51311b2da0b 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1067,6 +1067,9 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
 	return 0;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
 static int setup(struct spi_device *spi)
 {
 	struct pxa2xx_spi_chip *chip_info = NULL;
@@ -1093,6 +1096,12 @@ static int setup(struct spi_device *spi)
 		return -EINVAL;
 	}
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	/* Only alloc on first setup */
 	chip = spi_get_ctldata(spi);
 	if (!chip) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4831edbae2d5..018884d7a5fa 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/mutex.h>
 #include <linux/spi/spi.h>
 
 
@@ -185,7 +186,7 @@ struct boardinfo {
 };
 
 static LIST_HEAD(board_list);
-static DECLARE_MUTEX(board_lock);
+static DEFINE_MUTEX(board_lock);
 
 
 /**
@@ -292,9 +293,9 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
 	bi->n_board_info = n;
 	memcpy(bi->board_info, info, n * sizeof *info);
 
-	down(&board_lock);
+	mutex_lock(&board_lock);
 	list_add_tail(&bi->list, &board_list);
-	up(&board_lock);
+	mutex_unlock(&board_lock);
 	return 0;
 }
 
@@ -308,7 +309,7 @@ scan_boardinfo(struct spi_master *master)
 	struct boardinfo *bi;
 	struct device *dev = master->cdev.dev;
 
-	down(&board_lock);
+	mutex_lock(&board_lock);
 	list_for_each_entry(bi, &board_list, list) {
 		struct spi_board_info *chip = bi->board_info;
 		unsigned n;
@@ -330,7 +331,7 @@ scan_boardinfo(struct spi_master *master)
 			(void) spi_new_device(master, chip);
 		}
 	}
-	up(&board_lock);
+	mutex_unlock(&board_lock);
 }
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 88425e1af4d3..0c85c984ccb4 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -187,12 +187,10 @@ int spi_bitbang_setup(struct spi_device *spi)
 
 	bitbang = spi_master_get_devdata(spi->master);
 
-	/* REVISIT: some systems will want to support devices using lsb-first
-	 * bit encodings on the wire. In pure software that would be trivial,
-	 * just bitbang_txrx_le_cphaX() routines shifting the other way, and
-	 * some hardware controllers also have this support.
+	/* Bitbangers can support SPI_CS_HIGH, SPI_3WIRE, and so on;
+	 * add those to master->flags, and provide the other support.
 	 */
-	if ((spi->mode & SPI_LSB_FIRST) != 0)
+	if ((spi->mode & ~(SPI_CPOL|SPI_CPHA|bitbang->flags)) != 0)
 		return -EINVAL;
 
 	if (!cs) {
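
With this change a bitbang-based master advertises any extra mode bits its hardware can handle through bitbang->flags instead of the check being hard-coded. A sketch of the driver side, assuming hardware that tolerates active-high chipselects and 3-wire wiring; the spi_lm70llp driver added later in this patch does the same thing with SPI_3WIRE.

static int example_register_bitbang_master(struct spi_bitbang *bitbang)
{
	/* spi_bitbang_setup() will now accept these bits in spi->mode */
	bitbang->flags = SPI_CS_HIGH | SPI_3WIRE;

	return spi_bitbang_start(bitbang);
}
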
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 656be4a5094a..aee9ad6f633c 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1163,6 +1163,9 @@ msg_rejected:
 	return -EINVAL;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 /* On first setup bad values must free chip_data memory since will cause
    spi_new_device to fail. Bad value setup from protocol driver are simply not
    applied and notified to the calling driver. */
@@ -1174,6 +1177,12 @@ static int setup(struct spi_device *spi)
 	u32 tmp;
 	int status = 0;
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	/* Get controller data */
 	chip_info = spi->controller_data;
 
@@ -1245,21 +1254,6 @@ static int setup(struct spi_device *spi)
 
 	/* SPI mode */
 	tmp = spi->mode;
-	if (tmp & SPI_LSB_FIRST) {
-		status = -EINVAL;
-		if (first_setup) {
-			dev_err(&spi->dev,
-				"setup - "
-				"HW doesn't support LSB first transfer\n");
-			goto err_first_setup;
-		} else {
-			dev_err(&spi->dev,
-				"setup - "
-				"HW doesn't support LSB first transfer, "
-				"default to MSB first\n");
-			spi->mode &= ~SPI_LSB_FIRST;
-		}
-	}
 	if (tmp & SPI_CS_HIGH) {
 		u32_EDIT(chip->control,
 			SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
new file mode 100644
index 000000000000..4ea68ac16115
--- /dev/null
+++ b/drivers/spi/spi_lm70llp.c
@@ -0,0 +1,361 @@
1/*
2 * spi_lm70llp.c - driver for lm70llp eval board for the LM70 sensor
3 *
4 * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/device.h>
26#include <linux/parport.h>
27#include <linux/sysfs.h>
28#include <linux/workqueue.h>
29
30
31#include <linux/spi/spi.h>
32#include <linux/spi/spi_bitbang.h>
33
34
35/*
36 * The LM70 communicates with a host processor using a 3-wire variant of
37 * the SPI/Microwire bus interface. This driver specifically supports an
38 * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel
39 * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI
40 * master controller driver. The hwmon/lm70 driver is a "SPI protocol
41 * driver", layered on top of this one and usable without the lm70llp.
42 *
43 * The LM70 is a temperature sensor chip from National Semiconductor; its
44 * datasheet is available at http://www.national.com/pf/LM/LM70.html
45 *
46 * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is
47 * (heavily) based on spi-butterfly by David Brownell.
48 *
49 * The LM70 LLP connects to the PC parallel port in the following manner:
50 *
51 *    Parallel              LM70 LLP
52 *    Port     Direction   JP2 Header
53 *  ----------- ---------  ------------
54 *    D0     2     -          -
55 *    D1     3    -->       V+     5
56 *    D2     4    -->       V+     5
57 *    D3     5    -->       V+     5
58 *    D4     6    -->       V+     5
59 *    D5     7    -->       nCS    8
60 *    D6     8    -->       SCLK   3
61 *    D7     9    -->       SI/O   5
62 *    GND   25     -        GND    7
63 *    Select 13   <--       SI/O   1
64 *
65 * Note that parport pin 13 actually gets inverted by the transistor
66 * arrangement which lets either the parport or the LM70 drive the
67 * SI/SO signal.
68 */
69
70#define DRVNAME "spi-lm70llp"
71
72#define lm70_INIT 0xBE
73#define SIO 0x10
74#define nCS 0x20
75#define SCLK 0x40
76
77/*-------------------------------------------------------------------------*/
78
79struct spi_lm70llp {
80 struct spi_bitbang bitbang;
81 struct parport *port;
82 struct pardevice *pd;
83 struct spi_device *spidev_lm70;
84 struct spi_board_info info;
85 struct class_device *cdev;
86};
87
88/* REVISIT : ugly global ; provides "exclusive open" facility */
89static struct spi_lm70llp *lm70llp;
90
91
92/*-------------------------------------------------------------------*/
93
94static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi)
95{
96 return spi->controller_data;
97}
98
99/*---------------------- LM70 LLP eval board-specific inlines follow */
100
101/* NOTE: we don't actually need to reread the output values, since they'll
102 * still be what we wrote before. Plus, going through parport builds in
103 * a ~1ms/operation delay; these SPI transfers could easily be faster.
104 */
105
106static inline void deassertCS(struct spi_lm70llp *pp)
107{
108 u8 data = parport_read_data(pp->port);
109 parport_write_data(pp->port, data | nCS);
110}
111
112static inline void assertCS(struct spi_lm70llp *pp)
113{
114 u8 data = parport_read_data(pp->port);
115 parport_write_data(pp->port, data & ~nCS);
116}
117
118static inline void clkHigh(struct spi_lm70llp *pp)
119{
120 u8 data = parport_read_data(pp->port);
121 parport_write_data(pp->port, data | SCLK);
122}
123
124static inline void clkLow(struct spi_lm70llp *pp)
125{
126 u8 data = parport_read_data(pp->port);
127 parport_write_data(pp->port, data & ~SCLK);
128}
129
130/*------------------------- SPI-LM70-specific inlines ----------------------*/
131
132static inline void spidelay(unsigned d)
133{
134 udelay(d);
135}
136
137static inline void setsck(struct spi_device *s, int is_on)
138{
139 struct spi_lm70llp *pp = spidev_to_pp(s);
140
141 if (is_on)
142 clkHigh(pp);
143 else
144 clkLow(pp);
145}
146
147static inline void setmosi(struct spi_device *s, int is_on)
148{
149 /* FIXME update D7 ... this way we can put the chip
150 * into shutdown mode and read the manufacturer ID,
151 * but we can't put it back into operational mode.
152 */
153}
154
155/*
156 * getmiso:
157 * Why do we return 0 when the SIO line is high and vice-versa?
158 * The lm70 eval board from NS (which this driver drives) is wired in just
159 * such a way: when the lm70's SIO goes high, a transistor drives the
160 * parport line (pin 13) low, and vice versa.
161 */
162static inline int getmiso(struct spi_device *s)
163{
164 struct spi_lm70llp *pp = spidev_to_pp(s);
165 return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1 );
166}
167/*--------------------------------------------------------------------*/
168
169#define EXPAND_BITBANG_TXRX 1
170#include <linux/spi/spi_bitbang.h>
171
172static void lm70_chipselect(struct spi_device *spi, int value)
173{
174 struct spi_lm70llp *pp = spidev_to_pp(spi);
175
176 if (value)
177 assertCS(pp);
178 else
179 deassertCS(pp);
180}
181
182/*
183 * Our actual bitbanger routine.
184 */
185static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
186{
187 static u32 sio=0;
188 static int first_time=1;
189
190 /* First time: perform SPI bitbang and return the LSB of
191 * the result of the SPI call.
192 */
193 if (first_time) {
194 sio = bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
195 first_time=0;
196 return (sio & 0x00ff);
197 }
198 /* Return the MSB of the result of the SPI call */
199 else {
200 first_time=1;
201 return (sio >> 8);
202 }
203}
204
205static void spi_lm70llp_attach(struct parport *p)
206{
207 struct pardevice *pd;
208 struct spi_lm70llp *pp;
209 struct spi_master *master;
210 int status;
211
212 if (lm70llp) {
213 printk(KERN_WARNING
214 "%s: spi_lm70llp instance already loaded. Aborting.\n",
215 DRVNAME);
216 return;
217 }
218
219 /* TODO: this just _assumes_ a lm70 is there ... no probe;
220 * the lm70 driver could verify it, reading the manf ID.
221 */
222
223 master = spi_alloc_master(p->physport->dev, sizeof *pp);
224 if (!master) {
225 status = -ENOMEM;
226 goto out_fail;
227 }
228 pp = spi_master_get_devdata(master);
229
230 master->bus_num = -1; /* dynamic alloc of a bus number */
231 master->num_chipselect = 1;
232
233 /*
234 * SPI and bitbang hookup.
235 */
236 pp->bitbang.master = spi_master_get(master);
237 pp->bitbang.chipselect = lm70_chipselect;
238 pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
239 pp->bitbang.flags = SPI_3WIRE;
240
241 /*
242 * Parport hookup
243 */
244 pp->port = p;
245 pd = parport_register_device(p, DRVNAME,
246 NULL, NULL, NULL,
247 PARPORT_FLAG_EXCL, pp);
248 if (!pd) {
249 status = -ENOMEM;
250 goto out_free_master;
251 }
252 pp->pd = pd;
253
254 status = parport_claim(pd);
255 if (status < 0)
256 goto out_parport_unreg;
257
258 /*
259 * Start SPI ...
260 */
261 status = spi_bitbang_start(&pp->bitbang);
262 if (status < 0) {
263 printk(KERN_WARNING
264 "%s: spi_bitbang_start failed with status %d\n",
265 DRVNAME, status);
266 goto out_off_and_release;
267 }
268
269 /*
270 * The modalias name MUST match the device_driver name
271 * for the bus glue code to match and subsequently bind them.
272 * We are binding to the generic drivers/hwmon/lm70.c device
273 * driver.
274 */
275 strcpy(pp->info.modalias, "lm70");
276 pp->info.max_speed_hz = 6 * 1000 * 1000;
277 pp->info.chip_select = 0;
278 pp->info.mode = SPI_3WIRE | SPI_MODE_0;
279
280 /* power up the chip, and let the LM70 control SI/SO */
281 parport_write_data(pp->port, lm70_INIT);
282
283 /* Enable access to our primary data structure via
284 * the board info's (void *)controller_data.
285 */
286 pp->info.controller_data = pp;
287 pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
288 if (pp->spidev_lm70)
289 dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
290 pp->spidev_lm70->dev.bus_id);
291 else {
292 printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME);
293 status = -ENODEV;
294 goto out_bitbang_stop;
295 }
296 pp->spidev_lm70->bits_per_word = 16;
297
298 lm70llp = pp;
299
300 return;
301
302out_bitbang_stop:
303 spi_bitbang_stop(&pp->bitbang);
304out_off_and_release:
305 /* power down */
306 parport_write_data(pp->port, 0);
307 mdelay(10);
308 parport_release(pp->pd);
309out_parport_unreg:
310 parport_unregister_device(pd);
311out_free_master:
312 (void) spi_master_put(master);
313out_fail:
314 pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status);
315}
316
317static void spi_lm70llp_detach(struct parport *p)
318{
319 struct spi_lm70llp *pp;
320
321 if (!lm70llp || lm70llp->port != p)
322 return;
323
324 pp = lm70llp;
325 spi_bitbang_stop(&pp->bitbang);
326
327 /* power down */
328 parport_write_data(pp->port, 0);
329 msleep(10);
330
331 parport_release(pp->pd);
332 parport_unregister_device(pp->pd);
333
334 (void) spi_master_put(pp->bitbang.master);
335
336 lm70llp = NULL;
337}
338
339
340static struct parport_driver spi_lm70llp_drv = {
341 .name = DRVNAME,
342 .attach = spi_lm70llp_attach,
343 .detach = spi_lm70llp_detach,
344};
345
346static int __init init_spi_lm70llp(void)
347{
348 return parport_register_driver(&spi_lm70llp_drv);
349}
350module_init(init_spi_lm70llp);
351
352static void __exit cleanup_spi_lm70llp(void)
353{
354 parport_unregister_driver(&spi_lm70llp_drv);
355}
356module_exit(cleanup_spi_lm70llp);
357
358MODULE_AUTHOR("Kaiwan N Billimoria <kaiwan@designergraphix.com>");
359MODULE_DESCRIPTION(
360 "Parport adapter for the National Semiconductor LM70 LLP eval board");
361MODULE_LICENSE("GPL");
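
On top of this bridge sits an ordinary SPI protocol driver (the hwmon lm70 driver mentioned in the header comment). A sketch of what such a read path can look like, assuming a two-byte, MSB-first temperature register; the helper name is illustrative and this is not a copy of the real lm70 driver.

static int example_lm70_read_raw(struct spi_device *spi, s16 *raw)
{
	u8 rxbuf[2];
	int status;

	/* read 16 bits; lm70_txrx() above delivers them in two halves */
	status = spi_write_then_read(spi, NULL, 0, rxbuf, sizeof(rxbuf));
	if (status < 0)
		return status;

	*raw = (rxbuf[0] << 8) | rxbuf[1];
	return 0;
}
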
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index e9798bf7b8c6..3295cfcc9f20 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -47,6 +47,7 @@ struct mpc83xx_spi_reg {
 #define SPMODE_ENABLE (1 << 24)
 #define SPMODE_LEN(x) ((x) << 20)
 #define SPMODE_PM(x) ((x) << 16)
+#define SPMODE_OP (1 << 14)
 
 /*
  * Default for SPI Mode:
@@ -85,6 +86,11 @@ struct mpc83xx_spi {
 	unsigned nsecs; /* (clock cycle time)/2 */
 
 	u32 sysclk;
+	u32 rx_shift;	/* RX data reg shift when in qe mode */
+	u32 tx_shift;	/* TX data reg shift when in qe mode */
+
+	bool qe_mode;
+
 	void (*activate_cs) (u8 cs, u8 polarity);
 	void (*deactivate_cs) (u8 cs, u8 polarity);
 };
@@ -103,7 +109,7 @@ static inline u32 mpc83xx_spi_read_reg(__be32 __iomem * reg)
 void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \
 { \
 	type * rx = mpc83xx_spi->rx; \
-	*rx++ = (type)data; \
+	*rx++ = (type)(data >> mpc83xx_spi->rx_shift); \
 	mpc83xx_spi->rx = rx; \
 }
 
@@ -114,7 +120,7 @@ u32 mpc83xx_spi_tx_buf_##type(struct mpc83xx_spi *mpc83xx_spi) \
 	const type * tx = mpc83xx_spi->tx; \
 	if (!tx) \
 		return 0; \
-	data = *tx++; \
+	data = *tx++ << mpc83xx_spi->tx_shift; \
 	mpc83xx_spi->tx = tx; \
 	return data; \
 }
@@ -158,6 +164,12 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
 
 	if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) {
 		u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64);
+		if (pm > 0x0f) {
+			printk(KERN_WARNING "MPC83xx SPI: SPICLK can't be less than SYSCLK/1024!\n"
+				"Requested SPICLK is %d Hz. Will use %d Hz instead.\n",
+				spi->max_speed_hz, mpc83xx_spi->sysclk / 1024);
+			pm = 0x0f;
+		}
 		regval |= SPMODE_PM(pm) | SPMODE_DIV16;
 	} else {
 		u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4);
@@ -197,12 +209,22 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 	    || ((bits_per_word > 16) && (bits_per_word != 32)))
 		return -EINVAL;
 
+	mpc83xx_spi->rx_shift = 0;
+	mpc83xx_spi->tx_shift = 0;
 	if (bits_per_word <= 8) {
 		mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
 		mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+		if (mpc83xx_spi->qe_mode) {
+			mpc83xx_spi->rx_shift = 16;
+			mpc83xx_spi->tx_shift = 24;
+		}
 	} else if (bits_per_word <= 16) {
 		mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16;
 		mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16;
+		if (mpc83xx_spi->qe_mode) {
+			mpc83xx_spi->rx_shift = 16;
+			mpc83xx_spi->tx_shift = 16;
+		}
 	} else if (bits_per_word <= 32) {
 		mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32;
 		mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32;
@@ -232,12 +254,21 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 	return 0;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 static int mpc83xx_spi_setup(struct spi_device *spi)
 {
 	struct spi_bitbang *bitbang;
 	struct mpc83xx_spi *mpc83xx_spi;
 	int retval;
 
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
+		return -EINVAL;
+	}
+
 	if (!spi->max_speed_hz)
 		return -EINVAL;
 
@@ -371,7 +402,6 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
 		ret = -ENODEV;
 		goto free_master;
 	}
-
 	mpc83xx_spi = spi_master_get_devdata(master);
 	mpc83xx_spi->bitbang.master = spi_master_get(master);
 	mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect;
@@ -380,9 +410,17 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
 	mpc83xx_spi->sysclk = pdata->sysclk;
 	mpc83xx_spi->activate_cs = pdata->activate_cs;
 	mpc83xx_spi->deactivate_cs = pdata->deactivate_cs;
+	mpc83xx_spi->qe_mode = pdata->qe_mode;
 	mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
 	mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
 
+	mpc83xx_spi->rx_shift = 0;
+	mpc83xx_spi->tx_shift = 0;
+	if (mpc83xx_spi->qe_mode) {
+		mpc83xx_spi->rx_shift = 16;
+		mpc83xx_spi->tx_shift = 24;
+	}
+
 	mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup;
 	init_completion(&mpc83xx_spi->done);
 
@@ -417,6 +455,9 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
 
 	/* Enable SPI interface */
 	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+	if (pdata->qe_mode)
+		regval |= SPMODE_OP;
+
 	mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
 
 	ret = spi_bitbang_start(&mpc83xx_spi->bitbang);
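
To make the qe_mode shifts above concrete: with 8-bit words the transmit byte is moved into the top byte of the 32-bit shift register (tx_shift of 24, so 0xA5 becomes 0xA5000000), while received data is picked out of bits 23..16 (rx_shift of 16); 16-bit words use a shift of 16 on both sides, and 32-bit words need no shift at all.
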
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index d5a710f6e445..7071ff8da63e 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -146,6 +146,9 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
 	return 0;
 }
 
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
 static int s3c24xx_spi_setup(struct spi_device *spi)
 {
 	int ret;
@@ -153,8 +156,11 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
 	if (!spi->bits_per_word)
 		spi->bits_per_word = 8;
 
-	if ((spi->mode & SPI_LSB_FIRST) != 0)
+	if (spi->mode & ~MODEBITS) {
+		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+			spi->mode & ~MODEBITS);
 		return -EINVAL;
+	}
 
 	ret = s3c24xx_spi_setupxfer(spi, NULL);
 	if (ret < 0) {
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
new file mode 100644
index 000000000000..08e981c40646
--- /dev/null
+++ b/drivers/spi/spi_txx9.c
@@ -0,0 +1,474 @@
1/*
2 * spi_txx9.c - TXx9 SPI controller driver.
3 *
4 * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
5 * Copyright (C) 2000-2001 Toshiba Corporation
6 *
7 * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
8 * terms of the GNU General Public License version 2. This program is
9 * licensed "as is" without any warranty of any kind, whether express
10 * or implied.
11 *
12 * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
13 *
14 * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp)
15 */
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/errno.h>
19#include <linux/interrupt.h>
20#include <linux/platform_device.h>
21#include <linux/sched.h>
22#include <linux/spinlock.h>
23#include <linux/workqueue.h>
24#include <linux/spi/spi.h>
25#include <linux/err.h>
26#include <linux/clk.h>
27#include <asm/gpio.h>
28
29
30#define SPI_FIFO_SIZE 4
31
32#define TXx9_SPMCR 0x00
33#define TXx9_SPCR0 0x04
34#define TXx9_SPCR1 0x08
35#define TXx9_SPFS 0x0c
36#define TXx9_SPSR 0x14
37#define TXx9_SPDR 0x18
38
39/* SPMCR : SPI Master Control */
40#define TXx9_SPMCR_OPMODE 0xc0
41#define TXx9_SPMCR_CONFIG 0x40
42#define TXx9_SPMCR_ACTIVE 0x80
43#define TXx9_SPMCR_SPSTP 0x02
44#define TXx9_SPMCR_BCLR 0x01
45
46/* SPCR0 : SPI Control 0 */
47#define TXx9_SPCR0_TXIFL_MASK 0xc000
48#define TXx9_SPCR0_RXIFL_MASK 0x3000
49#define TXx9_SPCR0_SIDIE 0x0800
50#define TXx9_SPCR0_SOEIE 0x0400
51#define TXx9_SPCR0_RBSIE 0x0200
52#define TXx9_SPCR0_TBSIE 0x0100
53#define TXx9_SPCR0_IFSPSE 0x0010
54#define TXx9_SPCR0_SBOS 0x0004
55#define TXx9_SPCR0_SPHA 0x0002
56#define TXx9_SPCR0_SPOL 0x0001
57
58/* SPSR : SPI Status */
59#define TXx9_SPSR_TBSI 0x8000
60#define TXx9_SPSR_RBSI 0x4000
61#define TXx9_SPSR_TBS_MASK 0x3800
62#define TXx9_SPSR_RBS_MASK 0x0700
63#define TXx9_SPSR_SPOE 0x0080
64#define TXx9_SPSR_IFSD 0x0008
65#define TXx9_SPSR_SIDLE 0x0004
66#define TXx9_SPSR_STRDY 0x0002
67#define TXx9_SPSR_SRRDY 0x0001
68
69
70struct txx9spi {
71 struct workqueue_struct *workqueue;
72 struct work_struct work;
73 spinlock_t lock; /* protect 'queue' */
74 struct list_head queue;
75 wait_queue_head_t waitq;
76 void __iomem *membase;
77 int irq;
78 int baseclk;
79 struct clk *clk;
80 u32 max_speed_hz, min_speed_hz;
81 int last_chipselect;
82 int last_chipselect_val;
83};
84
85static u32 txx9spi_rd(struct txx9spi *c, int reg)
86{
87 return __raw_readl(c->membase + reg);
88}
89static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
90{
91 __raw_writel(val, c->membase + reg);
92}
93
94static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
95 int on, unsigned int cs_delay)
96{
97 int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
98 if (on) {
99 /* deselect the chip with cs_change hint in last transfer */
100 if (c->last_chipselect >= 0)
101 gpio_set_value(c->last_chipselect,
102 !c->last_chipselect_val);
103 c->last_chipselect = spi->chip_select;
104 c->last_chipselect_val = val;
105 } else {
106 c->last_chipselect = -1;
107 ndelay(cs_delay); /* CS Hold Time */
108 }
109 gpio_set_value(spi->chip_select, val);
110 ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
111}
112
113/* the spi->mode bits understood by this driver: */
114#define MODEBITS (SPI_CS_HIGH|SPI_CPOL|SPI_CPHA)
115
116static int txx9spi_setup(struct spi_device *spi)
117{
118 struct txx9spi *c = spi_master_get_devdata(spi->master);
119 u8 bits_per_word;
120
121 if (spi->mode & ~MODEBITS)
122 return -EINVAL;
123
124 if (!spi->max_speed_hz
125 || spi->max_speed_hz > c->max_speed_hz
126 || spi->max_speed_hz < c->min_speed_hz)
127 return -EINVAL;
128
129 bits_per_word = spi->bits_per_word ? : 8;
130 if (bits_per_word != 8 && bits_per_word != 16)
131 return -EINVAL;
132
133 if (gpio_direction_output(spi->chip_select,
134 !(spi->mode & SPI_CS_HIGH))) {
135 dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
136 return -EINVAL;
137 }
138
139 /* deselect chip */
140 spin_lock(&c->lock);
141 txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
142 spin_unlock(&c->lock);
143
144 return 0;
145}
146
147static irqreturn_t txx9spi_interrupt(int irq, void *dev_id)
148{
149 struct txx9spi *c = dev_id;
150
151 /* disable rx intr */
152 txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE,
153 TXx9_SPCR0);
154 wake_up(&c->waitq);
155 return IRQ_HANDLED;
156}
157
158static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
159{
160 struct spi_device *spi = m->spi;
161 struct spi_transfer *t;
162 unsigned int cs_delay;
163 unsigned int cs_change = 1;
164 int status = 0;
165 u32 mcr;
166 u32 prev_speed_hz = 0;
167 u8 prev_bits_per_word = 0;
168
169 /* CS setup/hold/recovery time in nsec */
170 cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz;
171
172 mcr = txx9spi_rd(c, TXx9_SPMCR);
173 if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) {
174 dev_err(&spi->dev, "Bad mode.\n");
175 status = -EIO;
176 goto exit;
177 }
178 mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
179
180 /* enter config mode */
181 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
182 txx9spi_wr(c, TXx9_SPCR0_SBOS
183 | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0)
184 | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0)
185 | 0x08,
186 TXx9_SPCR0);
187
188 list_for_each_entry (t, &m->transfers, transfer_list) {
189 const void *txbuf = t->tx_buf;
190 void *rxbuf = t->rx_buf;
191 u32 data;
192 unsigned int len = t->len;
193 unsigned int wsize;
194 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
195 u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
196
197 bits_per_word = bits_per_word ? : 8;
198 wsize = bits_per_word >> 3; /* in bytes */
199
200 if (prev_speed_hz != speed_hz
201 || prev_bits_per_word != bits_per_word) {
202 u32 n = (c->baseclk + speed_hz - 1) / speed_hz;
203 if (n < 1)
204 n = 1;
205 else if (n > 0xff)
206 n = 0xff;
207 /* enter config mode */
208 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
209 TXx9_SPMCR);
210 txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1);
211 /* enter active mode */
212 txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR);
213
214 prev_speed_hz = speed_hz;
215 prev_bits_per_word = bits_per_word;
216 }
217
218 if (cs_change)
219 txx9spi_cs_func(spi, c, 1, cs_delay);
220 cs_change = t->cs_change;
221 while (len) {
222 unsigned int count = SPI_FIFO_SIZE;
223 int i;
224 u32 cr0;
225
226 if (len < count * wsize)
227 count = len / wsize;
228 /* now tx must be idle... */
229 while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE))
230 cpu_relax();
231 cr0 = txx9spi_rd(c, TXx9_SPCR0);
232 cr0 &= ~TXx9_SPCR0_RXIFL_MASK;
233 cr0 |= (count - 1) << 12;
234 /* enable rx intr */
235 cr0 |= TXx9_SPCR0_RBSIE;
236 txx9spi_wr(c, cr0, TXx9_SPCR0);
237 /* send */
238 for (i = 0; i < count; i++) {
239 if (txbuf) {
240 data = (wsize == 1)
241 ? *(const u8 *)txbuf
242 : *(const u16 *)txbuf;
243 txx9spi_wr(c, data, TXx9_SPDR);
244 txbuf += wsize;
245 } else
246 txx9spi_wr(c, 0, TXx9_SPDR);
247 }
248 /* wait all rx data */
249 wait_event(c->waitq,
250 txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI);
251 /* receive */
252 for (i = 0; i < count; i++) {
253 data = txx9spi_rd(c, TXx9_SPDR);
254 if (rxbuf) {
255 if (wsize == 1)
256 *(u8 *)rxbuf = data;
257 else
258 *(u16 *)rxbuf = data;
259 rxbuf += wsize;
260 }
261 }
262 len -= count * wsize;
263 }
264 m->actual_length += t->len;
265 if (t->delay_usecs)
266 udelay(t->delay_usecs);
267
268 if (!cs_change)
269 continue;
270 if (t->transfer_list.next == &m->transfers)
271 break;
272 /* sometimes a short mid-message deselect of the chip
273 * may be needed to terminate a mode or command
274 */
275 txx9spi_cs_func(spi, c, 0, cs_delay);
276 }
277
278exit:
279 m->status = status;
280 m->complete(m->context);
281
282 /* normally deactivate chipselect ... unless no error and
283 * cs_change has hinted that the next message will probably
284 * be for this chip too.
285 */
286 if (!(status == 0 && cs_change))
287 txx9spi_cs_func(spi, c, 0, cs_delay);
288
289 /* enter config mode */
290 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
291}
292
293static void txx9spi_work(struct work_struct *work)
294{
295 struct txx9spi *c = container_of(work, struct txx9spi, work);
296 unsigned long flags;
297
298 spin_lock_irqsave(&c->lock, flags);
299 while (!list_empty(&c->queue)) {
300 struct spi_message *m;
301
302 m = container_of(c->queue.next, struct spi_message, queue);
303 list_del_init(&m->queue);
304 spin_unlock_irqrestore(&c->lock, flags);
305
306 txx9spi_work_one(c, m);
307
308 spin_lock_irqsave(&c->lock, flags);
309 }
310 spin_unlock_irqrestore(&c->lock, flags);
311}
312
313static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
314{
315 struct spi_master *master = spi->master;
316 struct txx9spi *c = spi_master_get_devdata(master);
317 struct spi_transfer *t;
318 unsigned long flags;
319
320 m->actual_length = 0;
321
322 /* check each transfer's parameters */
323 list_for_each_entry (t, &m->transfers, transfer_list) {
324 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
325 u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
326
327 bits_per_word = bits_per_word ? : 8;
328 if (!t->tx_buf && !t->rx_buf && t->len)
329 return -EINVAL;
330 if (bits_per_word != 8 && bits_per_word != 16)
331 return -EINVAL;
332 if (t->len & ((bits_per_word >> 3) - 1))
333 return -EINVAL;
334 if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
335 return -EINVAL;
336 }
337
338 spin_lock_irqsave(&c->lock, flags);
339 list_add_tail(&m->queue, &c->queue);
340 queue_work(c->workqueue, &c->work);
341 spin_unlock_irqrestore(&c->lock, flags);
342
343 return 0;
344}
345
346static int __init txx9spi_probe(struct platform_device *dev)
347{
348 struct spi_master *master;
349 struct txx9spi *c;
350 struct resource *res;
351 int ret = -ENODEV;
352 u32 mcr;
353
354 master = spi_alloc_master(&dev->dev, sizeof(*c));
355 if (!master)
356 return ret;
357 c = spi_master_get_devdata(master);
358 c->irq = -1;
359 platform_set_drvdata(dev, master);
360
361 INIT_WORK(&c->work, txx9spi_work);
362 spin_lock_init(&c->lock);
363 INIT_LIST_HEAD(&c->queue);
364 init_waitqueue_head(&c->waitq);
365
366 c->clk = clk_get(&dev->dev, "spi-baseclk");
367 if (IS_ERR(c->clk)) {
368 ret = PTR_ERR(c->clk);
369 c->clk = NULL;
370 goto exit;
371 }
372 ret = clk_enable(c->clk);
373 if (ret) {
374 clk_put(c->clk);
375 c->clk = NULL;
376 goto exit;
377 }
378 c->baseclk = clk_get_rate(c->clk);
379 c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff;
380 c->max_speed_hz = c->baseclk;
381
382 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
383 if (!res)
384 goto exit;
385 c->membase = ioremap(res->start, res->end - res->start + 1);
386 if (!c->membase)
387 goto exit;
388
389 /* enter config mode */
390 mcr = txx9spi_rd(c, TXx9_SPMCR);
391 mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
392 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
393
394 c->irq = platform_get_irq(dev, 0);
395 if (c->irq < 0)
396 goto exit;
397 ret = request_irq(c->irq, txx9spi_interrupt, 0, dev->name, c);
398 if (ret) {
399 c->irq = -1;
400 goto exit;
401 }
402
403 c->workqueue = create_singlethread_workqueue(master->cdev.dev->bus_id);
404 if (!c->workqueue)
405 goto exit;
406 c->last_chipselect = -1;
407
408 dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
409 (unsigned long long)res->start, c->irq,
410 (c->baseclk + 500000) / 1000000);
411
412 master->bus_num = dev->id;
413 master->setup = txx9spi_setup;
414 master->transfer = txx9spi_transfer;
415 master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
416
417 ret = spi_register_master(master);
418 if (ret)
419 goto exit;
420 return 0;
421exit:
422 if (c->workqueue)
423 destroy_workqueue(c->workqueue);
424 if (c->irq >= 0)
425 free_irq(c->irq, c);
426 if (c->membase)
427 iounmap(c->membase);
428 if (c->clk) {
429 clk_disable(c->clk);
430 clk_put(c->clk);
431 }
432 platform_set_drvdata(dev, NULL);
433 spi_master_put(master);
434 return ret;
435}
436
437static int __exit txx9spi_remove(struct platform_device *dev)
438{
439 struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
440 struct txx9spi *c = spi_master_get_devdata(master);
441
442 spi_unregister_master(master);
443 platform_set_drvdata(dev, NULL);
444 destroy_workqueue(c->workqueue);
445 free_irq(c->irq, c);
446 iounmap(c->membase);
447 clk_disable(c->clk);
448 clk_put(c->clk);
449 spi_master_put(master);
450 return 0;
451}
452
453static struct platform_driver txx9spi_driver = {
454 .remove = __exit_p(txx9spi_remove),
455 .driver = {
456 .name = "txx9spi",
457 .owner = THIS_MODULE,
458 },
459};
460
461static int __init txx9spi_init(void)
462{
463 return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
464}
465subsys_initcall(txx9spi_init);
466
467static void __exit txx9spi_exit(void)
468{
469 platform_driver_unregister(&txx9spi_driver);
470}
471module_exit(txx9spi_exit);
472
473MODULE_DESCRIPTION("TXx9 SPI Driver");
474MODULE_LICENSE("GPL");
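
Because txx9spi_setup() drives the chipselect through the GPIO API and num_chipselect is left wide open, board code passes a GPIO number in the chip_select field. A sketch with placeholder values; the GPIO number, bus and speed are assumptions.

static struct spi_board_info example_txx9_devices[] __initdata = {
	{
		.modalias	= "spidev",	/* any protocol driver will do */
		.bus_num	= 0,		/* matches the platform device id */
		.chip_select	= 4,		/* GPIO number used as chipselect */
		.max_speed_hz	= 1000000,
		.mode		= SPI_MODE_0,
	},
};
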
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d04242aee40d..38b60ad0eda0 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -181,7 +181,8 @@ static int spidev_message(struct spidev_data *spidev,
 		}
 		if (u_tmp->tx_buf) {
 			k_tmp->tx_buf = buf;
-			if (copy_from_user(buf, (const u8 __user *)u_tmp->tx_buf,
+			if (copy_from_user(buf, (const u8 __user *)
+						(ptrdiff_t) u_tmp->tx_buf,
 					u_tmp->len))
 				goto done;
 		}
@@ -213,7 +214,8 @@ static int spidev_message(struct spidev_data *spidev,
 	buf = spidev->buffer;
 	for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
 		if (u_tmp->rx_buf) {
-			if (__copy_to_user((u8 __user *)u_tmp->rx_buf, buf,
+			if (__copy_to_user((u8 __user *)
+					(ptrdiff_t) u_tmp->rx_buf, buf,
 					u_tmp->len)) {
 				status = -EFAULT;
 				goto done;
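
The (ptrdiff_t) casts exist because struct spi_ioc_transfer carries its buffer pointers as 64-bit integers, so user space stores a pointer into an integer field and the kernel converts it back before dereferencing. A user-space sketch, assuming the file descriptor and buffers are set up elsewhere:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int example_full_duplex(int fd, const uint8_t *tx, uint8_t *rx, uint32_t len)
{
	struct spi_ioc_transfer xfer;

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;	/* pointer carried as an integer */
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = len;

	return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
}
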
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
new file mode 100644
index 000000000000..6da58ca48b33
--- /dev/null
+++ b/drivers/spi/tle62x0.c
@@ -0,0 +1,328 @@
1/*
2 * tle62x0.c -- support Infineon TLE62x0 driver chips
3 *
4 * Copyright (c) 2007 Simtec Electronics
5 * Ben Dooks, <ben@simtec.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/device.h>
13#include <linux/kernel.h>
14
15#include <linux/spi/spi.h>
16#include <linux/spi/tle62x0.h>
17
18
19#define CMD_READ 0x00
20#define CMD_SET 0xff
21
22#define DIAG_NORMAL 0x03
23#define DIAG_OVERLOAD 0x02
24#define DIAG_OPEN 0x01
25#define DIAG_SHORTGND 0x00
26
27struct tle62x0_state {
28 struct spi_device *us;
29 struct mutex lock;
30 unsigned int nr_gpio;
31 unsigned int gpio_state;
32
33 unsigned char tx_buff[4];
34 unsigned char rx_buff[4];
35};
36
37static int to_gpio_num(struct device_attribute *attr);
38
39static inline int tle62x0_write(struct tle62x0_state *st)
40{
41 unsigned char *buff = st->tx_buff;
42 unsigned int gpio_state = st->gpio_state;
43
44 buff[0] = CMD_SET;
45
46 if (st->nr_gpio == 16) {
47 buff[1] = gpio_state >> 8;
48 buff[2] = gpio_state;
49 } else {
50 buff[1] = gpio_state;
51 }
52
53 dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n",
54 buff[0], buff[1], buff[2]);
55
56 return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
57}
58
59static inline int tle62x0_read(struct tle62x0_state *st)
60{
61 unsigned char *txbuff = st->tx_buff;
62 struct spi_transfer xfer = {
63 .tx_buf = txbuff,
64 .rx_buf = st->rx_buff,
65 .len = (st->nr_gpio * 2) / 8,
66 };
67 struct spi_message msg;
68
69 txbuff[0] = CMD_READ;
70 txbuff[1] = 0x00;
71 txbuff[2] = 0x00;
72 txbuff[3] = 0x00;
73
74 spi_message_init(&msg);
75 spi_message_add_tail(&xfer, &msg);
76
77 return spi_sync(st->us, &msg);
78}
79
80static unsigned char *decode_fault(unsigned int fault_code)
81{
82 fault_code &= 3;
83
84 switch (fault_code) {
85 case DIAG_NORMAL:
86 return "N";
87 case DIAG_OVERLOAD:
88 return "V";
89 case DIAG_OPEN:
90 return "O";
91 case DIAG_SHORTGND:
92 return "G";
93 }
94
95 return "?";
96}
97
98static ssize_t tle62x0_status_show(struct device *dev,
99 struct device_attribute *attr, char *buf)
100{
101 struct tle62x0_state *st = dev_get_drvdata(dev);
102 char *bp = buf;
103 unsigned char *buff = st->rx_buff;
104 unsigned long fault = 0;
105 int ptr;
106 int ret;
107
108 mutex_lock(&st->lock);
109 ret = tle62x0_read(st);
110
111 dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
112
113 for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
114 fault <<= 8;
115 fault |= ((unsigned long)buff[ptr]);
116
117 dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
118 }
119
120 for (ptr = 0; ptr < st->nr_gpio; ptr++) {
121 bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
122 }
123
124 *bp++ = '\n';
125
126 mutex_unlock(&st->lock);
127 return bp - buf;
128}
129
130static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
131
132static ssize_t tle62x0_gpio_show(struct device *dev,
133 struct device_attribute *attr, char *buf)
134{
135 struct tle62x0_state *st = dev_get_drvdata(dev);
136 int gpio_num = to_gpio_num(attr);
137 int value;
138
139 mutex_lock(&st->lock);
140 value = (st->gpio_state >> gpio_num) & 1;
141 mutex_unlock(&st->lock);
142
143 return snprintf(buf, PAGE_SIZE, "%d", value);
144}
145
146static ssize_t tle62x0_gpio_store(struct device *dev,
147 struct device_attribute *attr,
148 const char *buf, size_t len)
149{
150 struct tle62x0_state *st = dev_get_drvdata(dev);
151 int gpio_num = to_gpio_num(attr);
152 unsigned long val;
153 char *endp;
154
155 val = simple_strtoul(buf, &endp, 0);
156 if (buf == endp)
157 return -EINVAL;
158
159 dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);
160
161 mutex_lock(&st->lock);
162
163 if (val)
164 st->gpio_state |= 1 << gpio_num;
165 else
166 st->gpio_state &= ~(1 << gpio_num);
167
168 tle62x0_write(st);
169 mutex_unlock(&st->lock);
170
171 return len;
172}
173
174static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
175 tle62x0_gpio_show, tle62x0_gpio_store);
176static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
177 tle62x0_gpio_show, tle62x0_gpio_store);
178static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
179 tle62x0_gpio_show, tle62x0_gpio_store);
180static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
181 tle62x0_gpio_show, tle62x0_gpio_store);
182static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
183 tle62x0_gpio_show, tle62x0_gpio_store);
184static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
185 tle62x0_gpio_show, tle62x0_gpio_store);
186static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
187 tle62x0_gpio_show, tle62x0_gpio_store);
188static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
189 tle62x0_gpio_show, tle62x0_gpio_store);
190static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
191 tle62x0_gpio_show, tle62x0_gpio_store);
192static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
193 tle62x0_gpio_show, tle62x0_gpio_store);
194static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
195 tle62x0_gpio_show, tle62x0_gpio_store);
196static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
197 tle62x0_gpio_show, tle62x0_gpio_store);
198static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
199 tle62x0_gpio_show, tle62x0_gpio_store);
200static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
201 tle62x0_gpio_show, tle62x0_gpio_store);
202static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
203 tle62x0_gpio_show, tle62x0_gpio_store);
204static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
205 tle62x0_gpio_show, tle62x0_gpio_store);
206
207static struct device_attribute *gpio_attrs[] = {
208 [0] = &dev_attr_gpio1,
209 [1] = &dev_attr_gpio2,
210 [2] = &dev_attr_gpio3,
211 [3] = &dev_attr_gpio4,
212 [4] = &dev_attr_gpio5,
213 [5] = &dev_attr_gpio6,
214 [6] = &dev_attr_gpio7,
215 [7] = &dev_attr_gpio8,
216 [8] = &dev_attr_gpio9,
217 [9] = &dev_attr_gpio10,
218 [10] = &dev_attr_gpio11,
219 [11] = &dev_attr_gpio12,
220 [12] = &dev_attr_gpio13,
221 [13] = &dev_attr_gpio14,
222 [14] = &dev_attr_gpio15,
223 [15] = &dev_attr_gpio16
224};
225
226static int to_gpio_num(struct device_attribute *attr)
227{
228 int ptr;
229
230 for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) {
231 if (gpio_attrs[ptr] == attr)
232 return ptr;
233 }
234
235 return -1;
236}
237
238static int __devinit tle62x0_probe(struct spi_device *spi)
239{
240 struct tle62x0_state *st;
241 struct tle62x0_pdata *pdata;
242 int ptr;
243 int ret;
244
245 pdata = spi->dev.platform_data;
246 if (pdata == NULL) {
247 dev_err(&spi->dev, "no device data specified\n");
248 return -EINVAL;
249 }
250
251 st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
252 if (st == NULL) {
253 dev_err(&spi->dev, "no memory for device state\n");
254 return -ENOMEM;
255 }
256
257 st->us = spi;
258 st->nr_gpio = pdata->gpio_count;
259 st->gpio_state = pdata->init_state;
260
261 mutex_init(&st->lock);
262
263 ret = device_create_file(&spi->dev, &dev_attr_status_show);
264 if (ret) {
265 dev_err(&spi->dev, "cannot create status attribute\n");
266 goto err_status;
267 }
268
269 for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
270 ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
271 if (ret) {
272 dev_err(&spi->dev, "cannot create gpio attribute\n");
273 goto err_gpios;
274 }
275 }
276
277 /* tle62x0_write(st); */
278 spi_set_drvdata(spi, st);
279 return 0;
280
281 err_gpios:
282 for (; ptr > 0; ptr--)
283		device_remove_file(&spi->dev, gpio_attrs[ptr - 1]);
284
285 device_remove_file(&spi->dev, &dev_attr_status_show);
286
287 err_status:
288 kfree(st);
289 return ret;
290}
291
292static int __devexit tle62x0_remove(struct spi_device *spi)
293{
294 struct tle62x0_state *st = spi_get_drvdata(spi);
295 int ptr;
296
297 for (ptr = 0; ptr < st->nr_gpio; ptr++)
298 device_remove_file(&spi->dev, gpio_attrs[ptr]);
299
300 kfree(st);
301 return 0;
302}
303
304static struct spi_driver tle62x0_driver = {
305 .driver = {
306 .name = "tle62x0",
307 .owner = THIS_MODULE,
308 },
309 .probe = tle62x0_probe,
310 .remove = __devexit_p(tle62x0_remove),
311};
312
313static __init int tle62x0_init(void)
314{
315 return spi_register_driver(&tle62x0_driver);
316}
317
318static __exit void tle62x0_exit(void)
319{
320 spi_unregister_driver(&tle62x0_driver);
321}
322
323module_init(tle62x0_init);
324module_exit(tle62x0_exit);
325
326MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
327MODULE_DESCRIPTION("TLE62x0 SPI driver");
328MODULE_LICENSE("GPL v2");
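
A minimal board-code sketch of how a machine definition would feed the probe()
above its platform data. The tle62x0_pdata fields (init_state, gpio_count) are
taken from the probe code; the header path, SPI bus/chip-select numbers and
clock rate are illustrative assumptions only.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/tle62x0.h>		/* assumed location of tle62x0_pdata */

static struct tle62x0_pdata board_tle62x0 = {
	.init_state	= 0x0000,	/* all switch outputs off at probe */
	.gpio_count	= 16,		/* expose gpio1..gpio16 in sysfs */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "tle62x0",	/* matches the driver name */
		.platform_data	= &board_tle62x0,
		.max_speed_hz	= 100 * 1000,	/* placeholder rate */
		.bus_num	= 0,		/* placeholder bus */
		.chip_select	= 0,		/* placeholder CS */
	},
};

/* from the board init code: */
/* spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices)); */
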
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
new file mode 100644
index 000000000000..f0bf9a68e96b
--- /dev/null
+++ b/drivers/spi/xilinx_spi.c
@@ -0,0 +1,434 @@
1/*
2 * xilinx_spi.c
3 *
4 * Xilinx SPI controller driver (master mode only)
5 *
6 * Author: MontaVista Software, Inc.
7 * source@mvista.com
8 *
9 * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
10 * terms of the GNU General Public License version 2. This program is licensed
11 * "as is" without any warranty of any kind, whether express or implied.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/spi/spi.h>
19#include <linux/spi/spi_bitbang.h>
20#include <linux/io.h>
21
22#include <syslib/virtex_devices.h>
23
24#define XILINX_SPI_NAME "xspi"
25
26/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
27 * Product Specification", DS464
28 */
29#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
30
31#define XSPI_CR_ENABLE 0x02
32#define XSPI_CR_MASTER_MODE 0x04
33#define XSPI_CR_CPOL 0x08
34#define XSPI_CR_CPHA 0x10
35#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL)
36#define XSPI_CR_TXFIFO_RESET 0x20
37#define XSPI_CR_RXFIFO_RESET 0x40
38#define XSPI_CR_MANUAL_SSELECT 0x80
39#define XSPI_CR_TRANS_INHIBIT 0x100
40
41#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
42
43#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
44#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
45#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
46#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
47#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
48
49#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
50#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
51
52#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
53
54/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
55 * IPIF registers are 32 bit
56 */
57#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
58#define XIPIF_V123B_GINTR_ENABLE 0x80000000
59
60#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
61#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
62
63#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
64#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
65 * disabled */
66#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
67#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
68#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
69#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
70
71#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
72#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
73
74struct xilinx_spi {
75 /* bitbang has to be first */
76 struct spi_bitbang bitbang;
77 struct completion done;
78
79 void __iomem *regs; /* virt. address of the control registers */
80
81	int irq;		/* may hold a negative errno from platform_get_irq() */
82
83 u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
84
85	u8 *rx_ptr;		/* pointer in the Rx buffer */
86	const u8 *tx_ptr;	/* pointer in the Tx buffer */
87 int remaining_bytes; /* the number of bytes left to transfer */
88};
89
90static void xspi_init_hw(void __iomem *regs_base)
91{
92 /* Reset the SPI device */
93 out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
94 XIPIF_V123B_RESET_MASK);
95 /* Disable all the interrupts just in case */
96 out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
97 /* Enable the global IPIF interrupt */
98 out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
99 XIPIF_V123B_GINTR_ENABLE);
100 /* Deselect the slave on the SPI bus */
101 out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
102 /* Disable the transmitter, enable Manual Slave Select Assertion,
103 * put SPI controller into master mode, and enable it */
104 out_be16(regs_base + XSPI_CR_OFFSET,
105 XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
106 | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
107}
108
109static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
110{
111 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
112
113 if (is_on == BITBANG_CS_INACTIVE) {
114 /* Deselect the slave on the SPI bus */
115 out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
116 } else if (is_on == BITBANG_CS_ACTIVE) {
117 /* Set the SPI clock phase and polarity */
118 u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
119 & ~XSPI_CR_MODE_MASK;
120 if (spi->mode & SPI_CPHA)
121 cr |= XSPI_CR_CPHA;
122 if (spi->mode & SPI_CPOL)
123 cr |= XSPI_CR_CPOL;
124 out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
125
126 /* We do not check spi->max_speed_hz here as the SPI clock
127 * frequency is not software programmable (the IP block design
128 * parameter)
129 */
130
131 /* Activate the chip select */
132 out_be32(xspi->regs + XSPI_SSR_OFFSET,
133 ~(0x0001 << spi->chip_select));
134 }
135}
136
137/* spi_bitbang requires custom setup_transfer() to be defined if there is a
138 * custom txrx_bufs(). We have nothing to set up here as the SPI IP block
139 * supports just 8 bits per word, and SPI clock can't be changed in software.
140 * Check for 8 bits per word. Chip select delay calculations could be
141 * added here as soon as bitbang_work() can be made aware of the delay value.
142 */
143static int xilinx_spi_setup_transfer(struct spi_device *spi,
144 struct spi_transfer *t)
145{
146 u8 bits_per_word;
147 u32 hz;
148 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
149
150 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
151 hz = (t) ? t->speed_hz : spi->max_speed_hz;
152 if (bits_per_word != 8) {
153 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
154 __FUNCTION__, bits_per_word);
155 return -EINVAL;
156 }
157
158 if (hz && xspi->speed_hz > hz) {
159 dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
160 __FUNCTION__, hz);
161 return -EINVAL;
162 }
163
164 return 0;
165}
166
167/* the spi->mode bits understood by this driver: */
168#define MODEBITS (SPI_CPOL | SPI_CPHA)
169
170static int xilinx_spi_setup(struct spi_device *spi)
171{
172 struct spi_bitbang *bitbang;
173 struct xilinx_spi *xspi;
174 int retval;
175
176 xspi = spi_master_get_devdata(spi->master);
177 bitbang = &xspi->bitbang;
178
179 if (!spi->bits_per_word)
180 spi->bits_per_word = 8;
181
182 if (spi->mode & ~MODEBITS) {
183 dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
184 __FUNCTION__, spi->mode & ~MODEBITS);
185 return -EINVAL;
186 }
187
188 retval = xilinx_spi_setup_transfer(spi, NULL);
189 if (retval < 0)
190 return retval;
191
192 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
193 __FUNCTION__, spi->mode & MODEBITS, spi->bits_per_word, 0);
194
195 return 0;
196}
197
198static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
199{
200 u8 sr;
201
202 /* Fill the Tx FIFO with as many bytes as possible */
203 sr = in_8(xspi->regs + XSPI_SR_OFFSET);
204 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
205 if (xspi->tx_ptr) {
206 out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
207 } else {
208 out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
209 }
210 xspi->remaining_bytes--;
211 sr = in_8(xspi->regs + XSPI_SR_OFFSET);
212 }
213}
214
215static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
216{
217 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
218 u32 ipif_ier;
219 u16 cr;
220
221 /* We get here with transmitter inhibited */
222
223 xspi->tx_ptr = t->tx_buf;
224 xspi->rx_ptr = t->rx_buf;
225 xspi->remaining_bytes = t->len;
226 INIT_COMPLETION(xspi->done);
227
228 xilinx_spi_fill_tx_fifo(xspi);
229
230 /* Enable the transmit empty interrupt, which we use to determine
231 * progress on the transmission.
232 */
233 ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
234 out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
235 ipif_ier | XSPI_INTR_TX_EMPTY);
236
237 /* Start the transfer by not inhibiting the transmitter any longer */
238 cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
239 out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
240
241 wait_for_completion(&xspi->done);
242
243 /* Disable the transmit empty interrupt */
244 out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);
245
246 return t->len - xspi->remaining_bytes;
247}
248
249
250/* This driver supports single master mode only. Hence Tx FIFO Empty
251 * is the only interrupt we care about.
252 * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
253 * Fault should not occur.
254 */
255static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
256{
257 struct xilinx_spi *xspi = dev_id;
258 u32 ipif_isr;
259
260 /* Get the IPIF interrupts, and clear them immediately */
261 ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
262 out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);
263
264 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
265 u16 cr;
266 u8 sr;
267
268 /* A transmit has just completed. Process received data and
269 * check for more data to transmit. Always inhibit the
270 * transmitter while the Isr refills the transmit register/FIFO,
271 * or make sure it is stopped if we're done.
272 */
273 cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
274 out_be16(xspi->regs + XSPI_CR_OFFSET,
275 cr | XSPI_CR_TRANS_INHIBIT);
276
277 /* Read out all the data from the Rx FIFO */
278 sr = in_8(xspi->regs + XSPI_SR_OFFSET);
279 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
280 u8 data;
281
282 data = in_8(xspi->regs + XSPI_RXD_OFFSET);
283 if (xspi->rx_ptr) {
284 *xspi->rx_ptr++ = data;
285 }
286 sr = in_8(xspi->regs + XSPI_SR_OFFSET);
287 }
288
289 /* See if there is more data to send */
290 if (xspi->remaining_bytes > 0) {
291 xilinx_spi_fill_tx_fifo(xspi);
292 /* Start the transfer by not inhibiting the
293 * transmitter any longer
294 */
295 out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
296 } else {
297 /* No more data to send.
298 * Indicate the transfer is completed.
299 */
300 complete(&xspi->done);
301 }
302 }
303
304 return IRQ_HANDLED;
305}
306
307static int __init xilinx_spi_probe(struct platform_device *dev)
308{
309 int ret = 0;
310 struct spi_master *master;
311 struct xilinx_spi *xspi;
312 struct xspi_platform_data *pdata;
313 struct resource *r;
314
315 /* Get resources(memory, IRQ) associated with the device */
316 master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
317
318 if (master == NULL) {
319 return -ENOMEM;
320 }
321
322 platform_set_drvdata(dev, master);
323 pdata = dev->dev.platform_data;
324
325 if (pdata == NULL) {
326 ret = -ENODEV;
327 goto put_master;
328 }
329
330 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
331 if (r == NULL) {
332 ret = -ENODEV;
333 goto put_master;
334 }
335
336 xspi = spi_master_get_devdata(master);
337 xspi->bitbang.master = spi_master_get(master);
338 xspi->bitbang.chipselect = xilinx_spi_chipselect;
339 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
340 xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
341 xspi->bitbang.master->setup = xilinx_spi_setup;
342 init_completion(&xspi->done);
343
344 if (!request_mem_region(r->start,
345 r->end - r->start + 1, XILINX_SPI_NAME)) {
346 ret = -ENXIO;
347 goto put_master;
348 }
349
350 xspi->regs = ioremap(r->start, r->end - r->start + 1);
351 if (xspi->regs == NULL) {
352 ret = -ENOMEM;
353 goto put_master;
354 }
355
356 xspi->irq = platform_get_irq(dev, 0);
357 if (xspi->irq < 0) {
358 ret = -ENXIO;
359 goto unmap_io;
360 }
361
362 master->bus_num = pdata->bus_num;
363 master->num_chipselect = pdata->num_chipselect;
364 xspi->speed_hz = pdata->speed_hz;
365
366 /* SPI controller initializations */
367 xspi_init_hw(xspi->regs);
368
369 /* Register for SPI Interrupt */
370 ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
371 if (ret != 0)
372 goto unmap_io;
373
374 ret = spi_bitbang_start(&xspi->bitbang);
375 if (ret != 0) {
376 dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
377 goto free_irq;
378 }
379
380 dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
381 r->start, (u32)xspi->regs, xspi->irq);
382
383 return ret;
384
385free_irq:
386 free_irq(xspi->irq, xspi);
387unmap_io:
388 iounmap(xspi->regs);
389put_master:
390 spi_master_put(master);
391 return ret;
392}
393
394static int __devexit xilinx_spi_remove(struct platform_device *dev)
395{
396 struct xilinx_spi *xspi;
397 struct spi_master *master;
398
399 master = platform_get_drvdata(dev);
400 xspi = spi_master_get_devdata(master);
401
402 spi_bitbang_stop(&xspi->bitbang);
403 free_irq(xspi->irq, xspi);
404 iounmap(xspi->regs);
405	platform_set_drvdata(dev, NULL);
406 spi_master_put(xspi->bitbang.master);
407
408 return 0;
409}
410
411static struct platform_driver xilinx_spi_driver = {
412 .probe = xilinx_spi_probe,
413 .remove = __devexit_p(xilinx_spi_remove),
414 .driver = {
415 .name = XILINX_SPI_NAME,
416 .owner = THIS_MODULE,
417 },
418};
419
420static int __init xilinx_spi_init(void)
421{
422 return platform_driver_register(&xilinx_spi_driver);
423}
424module_init(xilinx_spi_init);
425
426static void __exit xilinx_spi_exit(void)
427{
428 platform_driver_unregister(&xilinx_spi_driver);
429}
430module_exit(xilinx_spi_exit);
431
432MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
433MODULE_DESCRIPTION("Xilinx SPI driver");
434MODULE_LICENSE("GPL");
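
A minimal sketch of the platform device that xilinx_spi_probe() above expects:
one memory region, one interrupt, and an xspi_platform_data carrying bus_num,
num_chipselect and speed_hz (field names taken from the probe code). The base
address, IRQ number and clock rate below are placeholders, not real Virtex
values; xspi_platform_data is assumed to come from <syslib/virtex_devices.h>
as in the driver itself.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <syslib/virtex_devices.h>

static struct xspi_platform_data xspi_0_pdata = {
	.bus_num	= 0,
	.num_chipselect	= 1,
	.speed_hz	= 1000000,	/* fixed SCK rate of the IP block */
};

static struct resource xspi_0_resources[] = {
	{
		.start	= 0xa0000000,	/* placeholder register base */
		.end	= 0xa00000ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 30,		/* placeholder interrupt line */
		.end	= 30,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device xspi_0_device = {
	.name		= "xspi",	/* must match XILINX_SPI_NAME */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(xspi_0_resources),
	.resource	= xspi_0_resources,
	.dev		= {
		.platform_data = &xspi_0_pdata,
	},
};

/* registered from board code with platform_device_register(&xspi_0_device); */
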
diff --git a/drivers/telephony/Kconfig b/drivers/telephony/Kconfig
index 8f530e68263b..5f98f673f1b6 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/telephony/Kconfig
@@ -19,6 +19,7 @@ if PHONE
19 19
20config PHONE_IXJ 20config PHONE_IXJ
21 tristate "QuickNet Internet LineJack/PhoneJack support" 21 tristate "QuickNet Internet LineJack/PhoneJack support"
22 depends ISA || PCI
22 ---help--- 23 ---help---
23 Say M if you have a telephony card manufactured by Quicknet 24 Say M if you have a telephony card manufactured by Quicknet
24 Technologies, Inc. These include the Internet PhoneJACK and 25 Technologies, Inc. These include the Internet PhoneJACK and
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index c7b0a357b04a..49cd9793404f 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -3453,7 +3453,6 @@ static void ixj_write_frame(IXJ *j)
3453{ 3453{
3454 int cnt, frame_count, dly; 3454 int cnt, frame_count, dly;
3455 IXJ_WORD dat; 3455 IXJ_WORD dat;
3456 BYTES blankword;
3457 3456
3458 frame_count = 0; 3457 frame_count = 0;
3459 if(j->flags.cidplay) { 3458 if(j->flags.cidplay) {
@@ -3501,6 +3500,8 @@ static void ixj_write_frame(IXJ *j)
3501 } 3500 }
3502 if (frame_count >= 1) { 3501 if (frame_count >= 1) {
3503 if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) { 3502 if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) {
3503 BYTES blankword;
3504
3504 switch (j->play_mode) { 3505 switch (j->play_mode) {
3505 case PLAYBACK_MODE_ULAW: 3506 case PLAYBACK_MODE_ULAW:
3506 case PLAYBACK_MODE_ALAW: 3507 case PLAYBACK_MODE_ALAW:
@@ -3508,6 +3509,7 @@ static void ixj_write_frame(IXJ *j)
3508 break; 3509 break;
3509 case PLAYBACK_MODE_8LINEAR: 3510 case PLAYBACK_MODE_8LINEAR:
3510 case PLAYBACK_MODE_16LINEAR: 3511 case PLAYBACK_MODE_16LINEAR:
3512 default:
3511 blankword.low = blankword.high = 0x00; 3513 blankword.low = blankword.high = 0x00;
3512 break; 3514 break;
3513 case PLAYBACK_MODE_8LINEAR_WSS: 3515 case PLAYBACK_MODE_8LINEAR_WSS:
@@ -3531,6 +3533,8 @@ static void ixj_write_frame(IXJ *j)
3531 j->flags.play_first_frame = 0; 3533 j->flags.play_first_frame = 0;
3532 } else if (j->play_codec == G723_63 && j->flags.play_first_frame) { 3534 } else if (j->play_codec == G723_63 && j->flags.play_first_frame) {
3533 for (cnt = 0; cnt < 24; cnt++) { 3535 for (cnt = 0; cnt < 24; cnt++) {
3536 BYTES blankword;
3537
3534 if(cnt == 12) { 3538 if(cnt == 12) {
3535 blankword.low = 0x02; 3539 blankword.low = 0x02;
3536 blankword.high = 0x00; 3540 blankword.high = 0x00;
@@ -4868,6 +4872,7 @@ static char daa_CR_read(IXJ *j, int cr)
4868 bytes.high = 0xB0 + cr; 4872 bytes.high = 0xB0 + cr;
4869 break; 4873 break;
4870 case SOP_PU_PULSEDIALING: 4874 case SOP_PU_PULSEDIALING:
4875 default:
4871 bytes.high = 0xF0 + cr; 4876 bytes.high = 0xF0 + cr;
4872 break; 4877 break;
4873 } 4878 }
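
The ixj.c hunks above narrow blankword's scope and add default: arms so that
every path through each switch assigns the variable before it is read. A
stand-alone sketch of the pattern (illustrative names, not from the driver):

static int pick_value(int mode)
{
	int v;

	switch (mode) {
	case 0:
		v = 10;
		break;
	default:	/* folds the fall-back case in, so v is always set */
		v = 20;
		break;
	}

	return v;	/* no "may be used uninitialized" path remains */
}
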
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 071b9675a781..7dd73546bf43 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -16,7 +16,7 @@ config USB_ARCH_HAS_HCD
16 boolean 16 boolean
17 default y if USB_ARCH_HAS_OHCI 17 default y if USB_ARCH_HAS_OHCI
18 default y if USB_ARCH_HAS_EHCI 18 default y if USB_ARCH_HAS_EHCI
19 default y if PCMCIA # sl811_cs 19 default y if PCMCIA && !M32R # sl811_cs
20 default y if ARM # SL-811 20 default y if ARM # SL-811
21 default PCI 21 default PCI
22 22
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 4973e147bc79..8f046659b4e9 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -1168,6 +1168,7 @@ static int uea_kthread(void *data)
1168 struct uea_softc *sc = data; 1168 struct uea_softc *sc = data;
1169 int ret = -EAGAIN; 1169 int ret = -EAGAIN;
1170 1170
1171 set_freezable();
1171 uea_enters(INS_TO_USBDEV(sc)); 1172 uea_enters(INS_TO_USBDEV(sc));
1172 while (!kthread_should_stop()) { 1173 while (!kthread_should_stop()) {
1173 if (ret < 0 || sc->reset) 1174 if (ret < 0 || sc->reset)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 50e79010401c..fd74c50b1804 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2728,6 +2728,7 @@ loop:
2728 2728
2729static int hub_thread(void *__unused) 2729static int hub_thread(void *__unused)
2730{ 2730{
2731 set_freezable();
2731 do { 2732 do {
2732 hub_events(); 2733 hub_events();
2733 wait_event_interruptible(khubd_wait, 2734 wait_event_interruptible(khubd_wait,
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 8712ef987179..be7a1bd2823b 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3434,6 +3434,9 @@ static int fsg_main_thread(void *fsg_)
3434 allow_signal(SIGKILL); 3434 allow_signal(SIGKILL);
3435 allow_signal(SIGUSR1); 3435 allow_signal(SIGUSR1);
3436 3436
3437 /* Allow the thread to be frozen */
3438 set_freezable();
3439
3437 /* Arrange for userspace references to be interpreted as kernel 3440 /* Arrange for userspace references to be interpreted as kernel
3438 * pointers. That way we can pass a kernel pointer to a routine 3441 * pointers. That way we can pass a kernel pointer to a routine
3439 * that expects a __user pointer and it will work okay. */ 3442 * that expects a __user pointer and it will work okay. */
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index 1fd5fc220cd7..42d4e6454a77 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -630,7 +630,7 @@ static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int time
630 } else 630 } else
631 status = urb->status; 631 status = urb->status;
632 632
633 if (actual_length) 633 if (status >= 0)
634 *actual_length = urb->actual_length; 634 *actual_length = urb->actual_length;
635 635
636 return status; 636 return status;
@@ -664,7 +664,7 @@ static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsi
664 int ret; 664 int ret;
665 struct usb_ctrlrequest *dr; 665 struct usb_ctrlrequest *dr;
666 struct urb *urb; 666 struct urb *urb;
667 int length; 667 int uninitialized_var(length);
668 668
669 dbg ("auerchain_control_msg"); 669 dbg ("auerchain_control_msg");
670 dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL); 670 dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
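
The auerswald.c hunk wraps length in uninitialized_var() to quiet a
false-positive "may be used uninitialized" warning; in the compiler headers of
that era the macro was roughly a self-assignment, along these lines:

/* approximate definition from include/linux/compiler-gcc*.h of the period */
#define uninitialized_var(x) x = x

/* so the declaration in the hunk expands to: int length = length; */
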
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bef8bcd9bd98..28842d208bb0 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -311,8 +311,6 @@ static int usb_stor_control_thread(void * __us)
311 struct Scsi_Host *host = us_to_host(us); 311 struct Scsi_Host *host = us_to_host(us);
312 int autopm_rc; 312 int autopm_rc;
313 313
314 current->flags |= PF_NOFREEZE;
315
316 for(;;) { 314 for(;;) {
317 US_DEBUGP("*** thread sleeping.\n"); 315 US_DEBUGP("*** thread sleeping.\n");
318 if(down_interruptible(&us->sema)) 316 if(down_interruptible(&us->sema))
@@ -920,6 +918,7 @@ static int usb_stor_scan_thread(void * __us)
920 printk(KERN_DEBUG 918 printk(KERN_DEBUG
921 "usb-storage: device found at %d\n", us->pusb_dev->devnum); 919 "usb-storage: device found at %d\n", us->pusb_dev->devnum);
922 920
921 set_freezable();
923 /* Wait for the timeout to expire or for a disconnect */ 922 /* Wait for the timeout to expire or for a disconnect */
924 if (delay_use > 0) { 923 if (delay_use > 0) {
925 printk(KERN_DEBUG "usb-storage: waiting for device " 924 printk(KERN_DEBUG "usb-storage: waiting for device "
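
Several hunks above (ueagle-atm, hub.c, file_storage, usb-storage) add
set_freezable() because kernel threads are now non-freezable by default. A
minimal sketch of the opt-in pattern, with a placeholder work loop:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

static int example_thread(void *data)
{
	set_freezable();		/* opt back in to the freezer */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here across suspend/resume */
		/* ... one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}

	return 0;
}

/* started elsewhere with kthread_run(example_thread, NULL, "example"); */
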
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index 0dda73da8628..7f907fb23b8a 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -60,7 +60,7 @@ static u_long videomemory;
60static u_long videomemorysize; 60static u_long videomemorysize;
61 61
62static struct fb_info fb_info; 62static struct fb_info fb_info;
63static u32 mc68x328fb_pseudo_palette[17]; 63static u32 mc68x328fb_pseudo_palette[16];
64 64
65static struct fb_var_screeninfo mc68x328fb_default __initdata = { 65static struct fb_var_screeninfo mc68x328fb_default __initdata = {
66 .red = { 0, 8, 0 }, 66 .red = { 0, 8, 0 },
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 9b7a76be36a0..0c5644bb59af 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -812,7 +812,7 @@ config FB_PVR2
812 812
813config FB_EPSON1355 813config FB_EPSON1355
814 bool "Epson 1355 framebuffer support" 814 bool "Epson 1355 framebuffer support"
815 depends on (FB = y) && (SUPERH || ARCH_CEIVA) 815 depends on (FB = y) && ARCH_CEIVA
816 select FB_CFB_FILLRECT 816 select FB_CFB_FILLRECT
817 select FB_CFB_COPYAREA 817 select FB_CFB_COPYAREA
818 select FB_CFB_IMAGEBLIT 818 select FB_CFB_IMAGEBLIT
@@ -1820,6 +1820,10 @@ config FB_XILINX
1820 framebuffer. ML300 carries a 640*480 LCD display on the board, 1820 framebuffer. ML300 carries a 640*480 LCD display on the board,
1821 ML403 uses a standard DB15 VGA connector. 1821 ML403 uses a standard DB15 VGA connector.
1822 1822
1823if ARCH_OMAP
1824 source "drivers/video/omap/Kconfig"
1825endif
1826
1823config FB_VIRTUAL 1827config FB_VIRTUAL
1824 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" 1828 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
1825 depends on FB 1829 depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index bd8b05229500..a562f9d69d2c 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
113obj-$(CONFIG_FB_PS3) += ps3fb.o 113obj-$(CONFIG_FB_PS3) += ps3fb.o
114obj-$(CONFIG_FB_SM501) += sm501fb.o 114obj-$(CONFIG_FB_SM501) += sm501fb.o
115obj-$(CONFIG_FB_XILINX) += xilinxfb.o 115obj-$(CONFIG_FB_XILINX) += xilinxfb.o
116obj-$(CONFIG_FB_OMAP) += omap/
116 117
117# Platform or fallback drivers go here 118# Platform or fallback drivers go here
118obj-$(CONFIG_FB_VESA) += vesafb.o 119obj-$(CONFIG_FB_VESA) += vesafb.o
diff --git a/drivers/video/aty/ati_ids.h b/drivers/video/aty/ati_ids.h
index 90e7df22f508..685a754991c6 100644
--- a/drivers/video/aty/ati_ids.h
+++ b/drivers/video/aty/ati_ids.h
@@ -204,6 +204,7 @@
204#define PCI_CHIP_RV280_5961 0x5961 204#define PCI_CHIP_RV280_5961 0x5961
205#define PCI_CHIP_RV280_5962 0x5962 205#define PCI_CHIP_RV280_5962 0x5962
206#define PCI_CHIP_RV280_5964 0x5964 206#define PCI_CHIP_RV280_5964 0x5964
207#define PCI_CHIP_RS485_5975 0x5975
207#define PCI_CHIP_RV280_5C61 0x5C61 208#define PCI_CHIP_RV280_5C61 0x5C61
208#define PCI_CHIP_RV280_5C63 0x5C63 209#define PCI_CHIP_RV280_5C63 0x5C63
209#define PCI_CHIP_R423_5D57 0x5D57 210#define PCI_CHIP_R423_5D57 0x5D57
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 2fbff6317433..ef330e34d031 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -541,7 +541,7 @@ static char ram_off[] __devinitdata = "OFF";
541#endif /* CONFIG_FB_ATY_CT */ 541#endif /* CONFIG_FB_ATY_CT */
542 542
543 543
544static u32 pseudo_palette[17]; 544static u32 pseudo_palette[16];
545 545
546#ifdef CONFIG_FB_ATY_GX 546#ifdef CONFIG_FB_ATY_GX
547static char *aty_gx_ram[8] __devinitdata = { 547static char *aty_gx_ram[8] __devinitdata = {
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 2349e71b0083..47ca62fe7c3e 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -153,6 +153,8 @@ static struct pci_device_id radeonfb_pci_table[] = {
153 /* Mobility 9200 (M9+) */ 153 /* Mobility 9200 (M9+) */
154 CHIP_DEF(PCI_CHIP_RV280_5C61, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY), 154 CHIP_DEF(PCI_CHIP_RV280_5C61, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
155 CHIP_DEF(PCI_CHIP_RV280_5C63, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY), 155 CHIP_DEF(PCI_CHIP_RV280_5C63, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
156 /*Mobility Xpress 200 */
157 CHIP_DEF(PCI_CHIP_RS485_5975, R300, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
156 /* 9200 */ 158 /* 9200 */
157 CHIP_DEF(PCI_CHIP_RV280_5960, RV280, CHIP_HAS_CRTC2), 159 CHIP_DEF(PCI_CHIP_RV280_5960, RV280, CHIP_HAS_CRTC2),
158 CHIP_DEF(PCI_CHIP_RV280_5961, RV280, CHIP_HAS_CRTC2), 160 CHIP_DEF(PCI_CHIP_RV280_5961, RV280, CHIP_HAS_CRTC2),
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 7ebffcdfd1e3..7c922c7b460b 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -301,7 +301,7 @@ struct radeonfb_info {
301 void __iomem *bios_seg; 301 void __iomem *bios_seg;
302 int fp_bios_start; 302 int fp_bios_start;
303 303
304 u32 pseudo_palette[17]; 304 u32 pseudo_palette[16];
305 struct { u8 red, green, blue, pad; } 305 struct { u8 red, green, blue, pad; }
306 palette[256]; 306 palette[256];
307 307
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index d3b8a6be2916..49643969f9f8 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -118,6 +118,22 @@ config FRAMEBUFFER_CONSOLE
118 help 118 help
119 Low-level framebuffer-based console driver. 119 Low-level framebuffer-based console driver.
120 120
121config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
122 bool "Map the console to the primary display device"
123 depends on FRAMEBUFFER_CONSOLE
124 default n
125 ---help---
126 If this option is selected, the framebuffer console will
127 automatically select the primary display device (if the architecture
128 supports this feature). Otherwise, the framebuffer console will
129 always select the first framebuffer driver that is loaded. The latter
130 is the default behavior.
131
132 You can always override the automatic selection of the primary device
133 by using the fbcon=map: boot option.
134
135 If unsure, select n.
136
121config FRAMEBUFFER_CONSOLE_ROTATION 137config FRAMEBUFFER_CONSOLE_ROTATION
122 bool "Framebuffer Console Rotation" 138 bool "Framebuffer Console Rotation"
123 depends on FRAMEBUFFER_CONSOLE 139 depends on FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 73813c60d03a..decfdc8eb9cc 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -75,6 +75,7 @@
75#include <linux/init.h> 75#include <linux/init.h>
76#include <linux/interrupt.h> 76#include <linux/interrupt.h>
77#include <linux/crc32.h> /* For counting font checksums */ 77#include <linux/crc32.h> /* For counting font checksums */
78#include <asm/fb.h>
78#include <asm/irq.h> 79#include <asm/irq.h>
79#include <asm/system.h> 80#include <asm/system.h>
80#include <asm/uaccess.h> 81#include <asm/uaccess.h>
@@ -125,6 +126,8 @@ static int first_fb_vc;
125static int last_fb_vc = MAX_NR_CONSOLES - 1; 126static int last_fb_vc = MAX_NR_CONSOLES - 1;
126static int fbcon_is_default = 1; 127static int fbcon_is_default = 1;
127static int fbcon_has_exited; 128static int fbcon_has_exited;
129static int primary_device = -1;
130static int map_override;
128 131
129/* font data */ 132/* font data */
130static char fontname[40]; 133static char fontname[40];
@@ -152,6 +155,7 @@ static int fbcon_set_origin(struct vc_data *);
152#define DEFAULT_CURSOR_BLINK_RATE (20) 155#define DEFAULT_CURSOR_BLINK_RATE (20)
153 156
154static int vbl_cursor_cnt; 157static int vbl_cursor_cnt;
158static int fbcon_cursor_noblink;
155 159
156#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1) 160#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
157 161
@@ -188,16 +192,14 @@ static __inline__ void ypan_down(struct vc_data *vc, int count);
188static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx, 192static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
189 int dy, int dx, int height, int width, u_int y_break); 193 int dy, int dx, int height, int width, u_int y_break);
190static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, 194static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
191 struct vc_data *vc); 195 int unit);
192static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
193 int unit);
194static void fbcon_redraw_move(struct vc_data *vc, struct display *p, 196static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
195 int line, int count, int dy); 197 int line, int count, int dy);
196static void fbcon_modechanged(struct fb_info *info); 198static void fbcon_modechanged(struct fb_info *info);
197static void fbcon_set_all_vcs(struct fb_info *info); 199static void fbcon_set_all_vcs(struct fb_info *info);
198static void fbcon_start(void); 200static void fbcon_start(void);
199static void fbcon_exit(void); 201static void fbcon_exit(void);
200static struct class_device *fbcon_class_device; 202static struct device *fbcon_device;
201 203
202#ifdef CONFIG_MAC 204#ifdef CONFIG_MAC
203/* 205/*
@@ -441,7 +443,8 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
441 struct fbcon_ops *ops = info->fbcon_par; 443 struct fbcon_ops *ops = info->fbcon_par;
442 444
443 if ((!info->queue.func || info->queue.func == fb_flashcursor) && 445 if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
444 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { 446 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) &&
447 !fbcon_cursor_noblink) {
445 if (!info->queue.func) 448 if (!info->queue.func)
446 INIT_WORK(&info->queue, fb_flashcursor); 449 INIT_WORK(&info->queue, fb_flashcursor);
447 450
@@ -495,13 +498,17 @@ static int __init fb_console_setup(char *this_opt)
495 498
496 if (!strncmp(options, "map:", 4)) { 499 if (!strncmp(options, "map:", 4)) {
497 options += 4; 500 options += 4;
498 if (*options) 501 if (*options) {
499 for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) { 502 for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) {
500 if (!options[j]) 503 if (!options[j])
501 j = 0; 504 j = 0;
502 con2fb_map_boot[i] = 505 con2fb_map_boot[i] =
503 (options[j++]-'0') % FB_MAX; 506 (options[j++]-'0') % FB_MAX;
504 } 507 }
508
509 map_override = 1;
510 }
511
505 return 1; 512 return 1;
506 } 513 }
507 514
@@ -736,7 +743,9 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
736 743
737 if (!err) { 744 if (!err) {
738 info->fbcon_par = ops; 745 info->fbcon_par = ops;
739 set_blitting_type(vc, info); 746
747 if (vc)
748 set_blitting_type(vc, info);
740 } 749 }
741 750
742 if (err) { 751 if (err) {
@@ -798,11 +807,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
798 807
799 ops->flags |= FBCON_FLAGS_INIT; 808 ops->flags |= FBCON_FLAGS_INIT;
800 ops->graphics = 0; 809 ops->graphics = 0;
801 810 fbcon_set_disp(info, &info->var, unit);
802 if (vc)
803 fbcon_set_disp(info, &info->var, vc);
804 else
805 fbcon_preset_disp(info, &info->var, unit);
806 811
807 if (show_logo) { 812 if (show_logo) {
808 struct vc_data *fg_vc = vc_cons[fg_console].d; 813 struct vc_data *fg_vc = vc_cons[fg_console].d;
@@ -1107,6 +1112,9 @@ static void fbcon_init(struct vc_data *vc, int init)
1107 if (var_to_display(p, &info->var, info)) 1112 if (var_to_display(p, &info->var, info))
1108 return; 1113 return;
1109 1114
1115 if (!info->fbcon_par)
1116 con2fb_acquire_newinfo(vc, info, vc->vc_num, -1);
1117
1110 /* If we are not the first console on this 1118 /* If we are not the first console on this
1111 fb, copy the font from that console */ 1119 fb, copy the font from that console */
1112 t = &fb_display[fg_console]; 1120 t = &fb_display[fg_console];
@@ -1349,6 +1357,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
1349 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) 1357 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
1350 return; 1358 return;
1351 1359
1360 if (vc->vc_cursor_type & 0x10)
1361 fbcon_del_cursor_timer(info);
1362 else
1363 fbcon_add_cursor_timer(info);
1364
1352 ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1; 1365 ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
1353 if (mode & CM_SOFTBACK) { 1366 if (mode & CM_SOFTBACK) {
1354 mode &= ~CM_SOFTBACK; 1367 mode &= ~CM_SOFTBACK;
@@ -1368,36 +1381,29 @@ static int scrollback_phys_max = 0;
1368static int scrollback_max = 0; 1381static int scrollback_max = 0;
1369static int scrollback_current = 0; 1382static int scrollback_current = 0;
1370 1383
1371/*
1372 * If no vc is existent yet, just set struct display
1373 */
1374static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
1375 int unit)
1376{
1377 struct display *p = &fb_display[unit];
1378 struct display *t = &fb_display[fg_console];
1379
1380 if (var_to_display(p, var, info))
1381 return;
1382
1383 p->fontdata = t->fontdata;
1384 p->userfont = t->userfont;
1385 if (p->userfont)
1386 REFCOUNT(p->fontdata)++;
1387}
1388
1389static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, 1384static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
1390 struct vc_data *vc) 1385 int unit)
1391{ 1386{
1392 struct display *p = &fb_display[vc->vc_num], *t; 1387 struct display *p, *t;
1393 struct vc_data **default_mode = vc->vc_display_fg; 1388 struct vc_data **default_mode, *vc;
1394 struct vc_data *svc = *default_mode; 1389 struct vc_data *svc;
1395 struct fbcon_ops *ops = info->fbcon_par; 1390 struct fbcon_ops *ops = info->fbcon_par;
1396 int rows, cols, charcnt = 256; 1391 int rows, cols, charcnt = 256;
1397 1392
1393 p = &fb_display[unit];
1394
1398 if (var_to_display(p, var, info)) 1395 if (var_to_display(p, var, info))
1399 return; 1396 return;
1397
1398 vc = vc_cons[unit].d;
1399
1400 if (!vc)
1401 return;
1402
1403 default_mode = vc->vc_display_fg;
1404 svc = *default_mode;
1400 t = &fb_display[svc->vc_num]; 1405 t = &fb_display[svc->vc_num];
1406
1401 if (!vc->vc_font.data) { 1407 if (!vc->vc_font.data) {
1402 vc->vc_font.data = (void *)(p->fontdata = t->fontdata); 1408 vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
1403 vc->vc_font.width = (*default_mode)->vc_font.width; 1409 vc->vc_font.width = (*default_mode)->vc_font.width;
@@ -1704,6 +1710,56 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
1704 } 1710 }
1705} 1711}
1706 1712
1713static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
1714 struct display *p, int line, int count, int ycount)
1715{
1716 int offset = ycount * vc->vc_cols;
1717 unsigned short *d = (unsigned short *)
1718 (vc->vc_origin + vc->vc_size_row * line);
1719 unsigned short *s = d + offset;
1720 struct fbcon_ops *ops = info->fbcon_par;
1721
1722 while (count--) {
1723 unsigned short *start = s;
1724 unsigned short *le = advance_row(s, 1);
1725 unsigned short c;
1726 int x = 0;
1727
1728 do {
1729 c = scr_readw(s);
1730
1731 if (c == scr_readw(d)) {
1732 if (s > start) {
1733 ops->bmove(vc, info, line + ycount, x,
1734 line, x, 1, s-start);
1735 x += s - start + 1;
1736 start = s + 1;
1737 } else {
1738 x++;
1739 start++;
1740 }
1741 }
1742
1743 scr_writew(c, d);
1744 console_conditional_schedule();
1745 s++;
1746 d++;
1747 } while (s < le);
1748 if (s > start)
1749 ops->bmove(vc, info, line + ycount, x, line, x, 1,
1750 s-start);
1751 console_conditional_schedule();
1752 if (ycount > 0)
1753 line++;
1754 else {
1755 line--;
1756 /* NOTE: We subtract two lines from these pointers */
1757 s -= vc->vc_size_row;
1758 d -= vc->vc_size_row;
1759 }
1760 }
1761}
1762
1707static void fbcon_redraw(struct vc_data *vc, struct display *p, 1763static void fbcon_redraw(struct vc_data *vc, struct display *p,
1708 int line, int count, int offset) 1764 int line, int count, int offset)
1709{ 1765{
@@ -1789,7 +1845,6 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1789{ 1845{
1790 struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; 1846 struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
1791 struct display *p = &fb_display[vc->vc_num]; 1847 struct display *p = &fb_display[vc->vc_num];
1792 struct fbcon_ops *ops = info->fbcon_par;
1793 int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; 1848 int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
1794 1849
1795 if (fbcon_is_inactive(vc, info)) 1850 if (fbcon_is_inactive(vc, info))
@@ -1813,10 +1868,15 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1813 goto redraw_up; 1868 goto redraw_up;
1814 switch (p->scrollmode) { 1869 switch (p->scrollmode) {
1815 case SCROLL_MOVE: 1870 case SCROLL_MOVE:
1816 ops->bmove(vc, info, t + count, 0, t, 0, 1871 fbcon_redraw_blit(vc, info, p, t, b - t - count,
1817 b - t - count, vc->vc_cols); 1872 count);
1818 ops->clear(vc, info, b - count, 0, count, 1873 fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
1819 vc->vc_cols); 1874 scr_memsetw((unsigned short *) (vc->vc_origin +
1875 vc->vc_size_row *
1876 (b - count)),
1877 vc->vc_video_erase_char,
1878 vc->vc_size_row * count);
1879 return 1;
1820 break; 1880 break;
1821 1881
1822 case SCROLL_WRAP_MOVE: 1882 case SCROLL_WRAP_MOVE:
@@ -1899,9 +1959,15 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1899 goto redraw_down; 1959 goto redraw_down;
1900 switch (p->scrollmode) { 1960 switch (p->scrollmode) {
1901 case SCROLL_MOVE: 1961 case SCROLL_MOVE:
1902 ops->bmove(vc, info, t, 0, t + count, 0, 1962 fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
1903 b - t - count, vc->vc_cols); 1963 -count);
1904 ops->clear(vc, info, t, 0, count, vc->vc_cols); 1964 fbcon_clear(vc, t, 0, count, vc->vc_cols);
1965 scr_memsetw((unsigned short *) (vc->vc_origin +
1966 vc->vc_size_row *
1967 t),
1968 vc->vc_video_erase_char,
1969 vc->vc_size_row * count);
1970 return 1;
1905 break; 1971 break;
1906 1972
1907 case SCROLL_WRAP_MOVE: 1973 case SCROLL_WRAP_MOVE:
@@ -2937,9 +3003,48 @@ static int fbcon_mode_deleted(struct fb_info *info,
2937 return found; 3003 return found;
2938} 3004}
2939 3005
2940static int fbcon_fb_unregistered(int idx) 3006#ifdef CONFIG_VT_HW_CONSOLE_BINDING
3007static int fbcon_unbind(void)
2941{ 3008{
2942 int i; 3009 int ret;
3010
3011 ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
3012 fbcon_is_default);
3013 return ret;
3014}
3015#else
3016static inline int fbcon_unbind(void)
3017{
3018 return -EINVAL;
3019}
3020#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
3021
3022static int fbcon_fb_unbind(int idx)
3023{
3024 int i, new_idx = -1, ret = 0;
3025
3026 for (i = first_fb_vc; i <= last_fb_vc; i++) {
3027 if (con2fb_map[i] != idx &&
3028 con2fb_map[i] != -1) {
3029 new_idx = i;
3030 break;
3031 }
3032 }
3033
3034 if (new_idx != -1) {
3035 for (i = first_fb_vc; i <= last_fb_vc; i++) {
3036 if (con2fb_map[i] == idx)
3037 set_con2fb_map(i, new_idx, 0);
3038 }
3039 } else
3040 ret = fbcon_unbind();
3041
3042 return ret;
3043}
3044
3045static int fbcon_fb_unregistered(struct fb_info *info)
3046{
3047 int i, idx = info->node;
2943 3048
2944 for (i = first_fb_vc; i <= last_fb_vc; i++) { 3049 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2945 if (con2fb_map[i] == idx) 3050 if (con2fb_map[i] == idx)
@@ -2967,12 +3072,48 @@ static int fbcon_fb_unregistered(int idx)
2967 if (!num_registered_fb) 3072 if (!num_registered_fb)
2968 unregister_con_driver(&fb_con); 3073 unregister_con_driver(&fb_con);
2969 3074
3075
3076 if (primary_device == idx)
3077 primary_device = -1;
3078
2970 return 0; 3079 return 0;
2971} 3080}
2972 3081
2973static int fbcon_fb_registered(int idx) 3082#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
3083static void fbcon_select_primary(struct fb_info *info)
2974{ 3084{
2975 int ret = 0, i; 3085 if (!map_override && primary_device == -1 &&
3086 fb_is_primary_device(info)) {
3087 int i;
3088
3089 printk(KERN_INFO "fbcon: %s (fb%i) is primary device\n",
3090 info->fix.id, info->node);
3091 primary_device = info->node;
3092
3093 for (i = first_fb_vc; i <= last_fb_vc; i++)
3094 con2fb_map_boot[i] = primary_device;
3095
3096 if (con_is_bound(&fb_con)) {
3097 printk(KERN_INFO "fbcon: Remapping primary device, "
3098 "fb%i, to tty %i-%i\n", info->node,
3099 first_fb_vc + 1, last_fb_vc + 1);
3100 info_idx = primary_device;
3101 }
3102 }
3103
3104}
3105#else
3106static inline void fbcon_select_primary(struct fb_info *info)
3107{
3108 return;
3109}
3110#endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */
3111
3112static int fbcon_fb_registered(struct fb_info *info)
3113{
3114 int ret = 0, i, idx = info->node;
3115
3116 fbcon_select_primary(info);
2976 3117
2977 if (info_idx == -1) { 3118 if (info_idx == -1) {
2978 for (i = first_fb_vc; i <= last_fb_vc; i++) { 3119 for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2986,8 +3127,7 @@ static int fbcon_fb_registered(int idx)
2986 ret = fbcon_takeover(1); 3127 ret = fbcon_takeover(1);
2987 } else { 3128 } else {
2988 for (i = first_fb_vc; i <= last_fb_vc; i++) { 3129 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2989 if (con2fb_map_boot[i] == idx && 3130 if (con2fb_map_boot[i] == idx)
2990 con2fb_map[i] == -1)
2991 set_con2fb_map(i, idx, 0); 3131 set_con2fb_map(i, idx, 0);
2992 } 3132 }
2993 } 3133 }
@@ -3034,12 +3174,7 @@ static void fbcon_new_modelist(struct fb_info *info)
3034 mode = fb_find_nearest_mode(fb_display[i].mode, 3174 mode = fb_find_nearest_mode(fb_display[i].mode,
3035 &info->modelist); 3175 &info->modelist);
3036 fb_videomode_to_var(&var, mode); 3176 fb_videomode_to_var(&var, mode);
3037 3177 fbcon_set_disp(info, &var, vc->vc_num);
3038 if (vc)
3039 fbcon_set_disp(info, &var, vc);
3040 else
3041 fbcon_preset_disp(info, &var, i);
3042
3043 } 3178 }
3044} 3179}
3045 3180
@@ -3114,11 +3249,14 @@ static int fbcon_event_notify(struct notifier_block *self,
3114 mode = event->data; 3249 mode = event->data;
3115 ret = fbcon_mode_deleted(info, mode); 3250 ret = fbcon_mode_deleted(info, mode);
3116 break; 3251 break;
3252 case FB_EVENT_FB_UNBIND:
3253 ret = fbcon_fb_unbind(info->node);
3254 break;
3117 case FB_EVENT_FB_REGISTERED: 3255 case FB_EVENT_FB_REGISTERED:
3118 ret = fbcon_fb_registered(info->node); 3256 ret = fbcon_fb_registered(info);
3119 break; 3257 break;
3120 case FB_EVENT_FB_UNREGISTERED: 3258 case FB_EVENT_FB_UNREGISTERED:
3121 ret = fbcon_fb_unregistered(info->node); 3259 ret = fbcon_fb_unregistered(info);
3122 break; 3260 break;
3123 case FB_EVENT_SET_CONSOLE_MAP: 3261 case FB_EVENT_SET_CONSOLE_MAP:
3124 con2fb = event->data; 3262 con2fb = event->data;
@@ -3179,8 +3317,9 @@ static struct notifier_block fbcon_event_notifier = {
3179 .notifier_call = fbcon_event_notify, 3317 .notifier_call = fbcon_event_notify,
3180}; 3318};
3181 3319
3182static ssize_t store_rotate(struct class_device *class_device, 3320static ssize_t store_rotate(struct device *device,
3183 const char *buf, size_t count) 3321 struct device_attribute *attr, const char *buf,
3322 size_t count)
3184{ 3323{
3185 struct fb_info *info; 3324 struct fb_info *info;
3186 int rotate, idx; 3325 int rotate, idx;
@@ -3203,8 +3342,9 @@ err:
3203 return count; 3342 return count;
3204} 3343}
3205 3344
3206static ssize_t store_rotate_all(struct class_device *class_device, 3345static ssize_t store_rotate_all(struct device *device,
3207 const char *buf, size_t count) 3346 struct device_attribute *attr,const char *buf,
3347 size_t count)
3208{ 3348{
3209 struct fb_info *info; 3349 struct fb_info *info;
3210 int rotate, idx; 3350 int rotate, idx;
@@ -3227,7 +3367,8 @@ err:
3227 return count; 3367 return count;
3228} 3368}
3229 3369
3230static ssize_t show_rotate(struct class_device *class_device, char *buf) 3370static ssize_t show_rotate(struct device *device,
3371 struct device_attribute *attr,char *buf)
3231{ 3372{
3232 struct fb_info *info; 3373 struct fb_info *info;
3233 int rotate = 0, idx; 3374 int rotate = 0, idx;
@@ -3248,20 +3389,86 @@ err:
3248 return snprintf(buf, PAGE_SIZE, "%d\n", rotate); 3389 return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
3249} 3390}
3250 3391
3251static struct class_device_attribute class_device_attrs[] = { 3392static ssize_t show_cursor_blink(struct device *device,
3393 struct device_attribute *attr, char *buf)
3394{
3395 struct fb_info *info;
3396 struct fbcon_ops *ops;
3397 int idx, blink = -1;
3398
3399 if (fbcon_has_exited)
3400 return 0;
3401
3402 acquire_console_sem();
3403 idx = con2fb_map[fg_console];
3404
3405 if (idx == -1 || registered_fb[idx] == NULL)
3406 goto err;
3407
3408 info = registered_fb[idx];
3409 ops = info->fbcon_par;
3410
3411 if (!ops)
3412 goto err;
3413
3414 blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
3415err:
3416 release_console_sem();
3417 return snprintf(buf, PAGE_SIZE, "%d\n", blink);
3418}
3419
3420static ssize_t store_cursor_blink(struct device *device,
3421 struct device_attribute *attr,
3422 const char *buf, size_t count)
3423{
3424 struct fb_info *info;
3425 int blink, idx;
3426 char **last = NULL;
3427
3428 if (fbcon_has_exited)
3429 return count;
3430
3431 acquire_console_sem();
3432 idx = con2fb_map[fg_console];
3433
3434 if (idx == -1 || registered_fb[idx] == NULL)
3435 goto err;
3436
3437 info = registered_fb[idx];
3438
3439 if (!info->fbcon_par)
3440 goto err;
3441
3442 blink = simple_strtoul(buf, last, 0);
3443
3444 if (blink) {
3445 fbcon_cursor_noblink = 0;
3446 fbcon_add_cursor_timer(info);
3447 } else {
3448 fbcon_cursor_noblink = 1;
3449 fbcon_del_cursor_timer(info);
3450 }
3451
3452err:
3453 release_console_sem();
3454 return count;
3455}
3456
3457static struct device_attribute device_attrs[] = {
3252 __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate), 3458 __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
3253 __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all), 3459 __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all),
3460 __ATTR(cursor_blink, S_IRUGO|S_IWUSR, show_cursor_blink,
3461 store_cursor_blink),
3254}; 3462};
3255 3463
3256static int fbcon_init_class_device(void) 3464static int fbcon_init_device(void)
3257{ 3465{
3258 int i, error = 0; 3466 int i, error = 0;
3259 3467
3260 fbcon_has_sysfs = 1; 3468 fbcon_has_sysfs = 1;
3261 3469
3262 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { 3470 for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
3263 error = class_device_create_file(fbcon_class_device, 3471 error = device_create_file(fbcon_device, &device_attrs[i]);
3264 &class_device_attrs[i]);
3265 3472
3266 if (error) 3473 if (error)
3267 break; 3474 break;
@@ -3269,8 +3476,7 @@ static int fbcon_init_class_device(void)
3269 3476
3270 if (error) { 3477 if (error) {
3271 while (--i >= 0) 3478 while (--i >= 0)
3272 class_device_remove_file(fbcon_class_device, 3479 device_remove_file(fbcon_device, &device_attrs[i]);
3273 &class_device_attrs[i]);
3274 3480
3275 fbcon_has_sysfs = 0; 3481 fbcon_has_sysfs = 0;
3276 } 3482 }
@@ -3356,16 +3562,15 @@ static int __init fb_console_init(void)
3356 3562
3357 acquire_console_sem(); 3563 acquire_console_sem();
3358 fb_register_client(&fbcon_event_notifier); 3564 fb_register_client(&fbcon_event_notifier);
3359 fbcon_class_device = 3565 fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), "fbcon");
3360 class_device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon");
3361 3566
3362 if (IS_ERR(fbcon_class_device)) { 3567 if (IS_ERR(fbcon_device)) {
3363 printk(KERN_WARNING "Unable to create class_device " 3568 printk(KERN_WARNING "Unable to create device "
3364 "for fbcon; errno = %ld\n", 3569 "for fbcon; errno = %ld\n",
3365 PTR_ERR(fbcon_class_device)); 3570 PTR_ERR(fbcon_device));
3366 fbcon_class_device = NULL; 3571 fbcon_device = NULL;
3367 } else 3572 } else
3368 fbcon_init_class_device(); 3573 fbcon_init_device();
3369 3574
3370 for (i = 0; i < MAX_NR_CONSOLES; i++) 3575 for (i = 0; i < MAX_NR_CONSOLES; i++)
3371 con2fb_map[i] = -1; 3576 con2fb_map[i] = -1;
@@ -3379,14 +3584,13 @@ module_init(fb_console_init);
3379 3584
3380#ifdef MODULE 3585#ifdef MODULE
3381 3586
3382static void __exit fbcon_deinit_class_device(void) 3587static void __exit fbcon_deinit_device(void)
3383{ 3588{
3384 int i; 3589 int i;
3385 3590
3386 if (fbcon_has_sysfs) { 3591 if (fbcon_has_sysfs) {
3387 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) 3592 for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
3388 class_device_remove_file(fbcon_class_device, 3593 device_remove_file(fbcon_device, &device_attrs[i]);
3389 &class_device_attrs[i]);
3390 3594
3391 fbcon_has_sysfs = 0; 3595 fbcon_has_sysfs = 0;
3392 } 3596 }
@@ -3396,8 +3600,8 @@ static void __exit fb_console_exit(void)
3396{ 3600{
3397 acquire_console_sem(); 3601 acquire_console_sem();
3398 fb_unregister_client(&fbcon_event_notifier); 3602 fb_unregister_client(&fbcon_event_notifier);
3399 fbcon_deinit_class_device(); 3603 fbcon_deinit_device();
3400 class_device_destroy(fb_class, MKDEV(0, 0)); 3604 device_destroy(fb_class, MKDEV(0, 0));
3401 fbcon_exit(); 3605 fbcon_exit();
3402 release_console_sem(); 3606 release_console_sem();
3403 unregister_con_driver(&fb_con); 3607 unregister_con_driver(&fb_con);
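
The new cursor_blink attribute registered above hangs off the fbcon device in
the graphics class, so it should appear as
/sys/class/graphics/fbcon/cursor_blink (path assumed from fb_class). A short
userspace sketch that stops the console cursor from blinking:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/graphics/fbcon/cursor_blink", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "0", 1);	/* "0" stops blinking, "1" restarts it */
	close(fd);
	return 0;
}
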
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 8b762739b1e0..b0be7eac32d8 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -94,7 +94,7 @@ static inline int VAR_MATCH(struct fb_var_screeninfo *x, struct fb_var_screeninf
94struct fb_info_control { 94struct fb_info_control {
95 struct fb_info info; 95 struct fb_info info;
96 struct fb_par_control par; 96 struct fb_par_control par;
97 u32 pseudo_palette[17]; 97 u32 pseudo_palette[16];
98 98
99 struct cmap_regs __iomem *cmap_regs; 99 struct cmap_regs __iomem *cmap_regs;
100 unsigned long cmap_regs_phys; 100 unsigned long cmap_regs_phys;
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index 94a66c2d2cf5..e23324d10be2 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -1068,15 +1068,18 @@ static int cyblafb_setcolreg(unsigned regno, unsigned red, unsigned green,
1068 out8(0x3C9, green >> 10); 1068 out8(0x3C9, green >> 10);
1069 out8(0x3C9, blue >> 10); 1069 out8(0x3C9, blue >> 10);
1070 1070
1071 } else if (bpp == 16) // RGB 565 1071 } else if (regno < 16) {
1072 ((u32 *) info->pseudo_palette)[regno] = 1072 if (bpp == 16) // RGB 565
1073 (red & 0xF800) | 1073 ((u32 *) info->pseudo_palette)[regno] =
1074 ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11); 1074 (red & 0xF800) |
1075 else if (bpp == 32) // ARGB 8888 1075 ((green & 0xFC00) >> 5) |
1076 ((u32 *) info->pseudo_palette)[regno] = 1076 ((blue & 0xF800) >> 11);
1077 ((transp & 0xFF00) << 16) | 1077 else if (bpp == 32) // ARGB 8888
1078 ((red & 0xFF00) << 8) | 1078 ((u32 *) info->pseudo_palette)[regno] =
1079 ((green & 0xFF00)) | ((blue & 0xFF00) >> 8); 1079 ((transp & 0xFF00) << 16) |
1080 ((red & 0xFF00) << 8) |
1081 ((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
1082 }
1080 1083
1081 return 0; 1084 return 0;
1082} 1085}
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index ca2c54ce508e..33be46ccb54f 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -63,23 +63,12 @@
63 63
64struct epson1355_par { 64struct epson1355_par {
65 unsigned long reg_addr; 65 unsigned long reg_addr;
66 u32 pseudo_palette[16];
66}; 67};
67 68
68/* ------------------------------------------------------------------------- */ 69/* ------------------------------------------------------------------------- */
69 70
70#ifdef CONFIG_SUPERH 71#if defined(CONFIG_ARM)
71
72static inline u8 epson1355_read_reg(int index)
73{
74 return ctrl_inb(par.reg_addr + index);
75}
76
77static inline void epson1355_write_reg(u8 data, int index)
78{
79 ctrl_outb(data, par.reg_addr + index);
80}
81
82#elif defined(CONFIG_ARM)
83 72
84# ifdef CONFIG_ARCH_CEIVA 73# ifdef CONFIG_ARCH_CEIVA
85# include <asm/arch/hardware.h> 74# include <asm/arch/hardware.h>
@@ -289,7 +278,7 @@ static int epson1355fb_blank(int blank_mode, struct fb_info *info)
289 struct epson1355_par *par = info->par; 278 struct epson1355_par *par = info->par;
290 279
291 switch (blank_mode) { 280 switch (blank_mode) {
292 case FB_BLANK_UNBLANKING: 281 case FB_BLANK_UNBLANK:
293 case FB_BLANK_NORMAL: 282 case FB_BLANK_NORMAL:
294 lcd_enable(par, 1); 283 lcd_enable(par, 1);
295 backlight_enable(1); 284 backlight_enable(1);
@@ -635,7 +624,7 @@ int __init epson1355fb_probe(struct platform_device *dev)
635 goto bail; 624 goto bail;
636 } 625 }
637 626
638 info = framebuffer_alloc(sizeof(struct epson1355_par) + sizeof(u32) * 256, &dev->dev); 627 info = framebuffer_alloc(sizeof(struct epson1355_par), &dev->dev);
639 if (!info) { 628 if (!info) {
640 rc = -ENOMEM; 629 rc = -ENOMEM;
641 goto bail; 630 goto bail;
@@ -648,7 +637,7 @@ int __init epson1355fb_probe(struct platform_device *dev)
648 rc = -ENOMEM; 637 rc = -ENOMEM;
649 goto bail; 638 goto bail;
650 } 639 }
651 info->pseudo_palette = (void *)(default_par + 1); 640 info->pseudo_palette = default_par->pseudo_palette;
652 641
653 info->screen_base = ioremap(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN); 642 info->screen_base = ioremap(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN);
654 if (!info->screen_base) { 643 if (!info->screen_base) {
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 38c2e2558f5e..215ac579f901 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -33,17 +33,10 @@
33#include <linux/err.h> 33#include <linux/err.h>
34#include <linux/device.h> 34#include <linux/device.h>
35#include <linux/efi.h> 35#include <linux/efi.h>
36#include <linux/fb.h>
36 37
37#if defined(__mc68000__) || defined(CONFIG_APUS) 38#include <asm/fb.h>
38#include <asm/setup.h>
39#endif
40 39
41#include <asm/io.h>
42#include <asm/uaccess.h>
43#include <asm/page.h>
44#include <asm/pgtable.h>
45
46#include <linux/fb.h>
47 40
48 /* 41 /*
49 * Frame buffer device initialization and setup routines 42 * Frame buffer device initialization and setup routines
@@ -411,10 +404,146 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
411 } 404 }
412} 405}
413 406
407static int fb_show_logo_line(struct fb_info *info, int rotate,
408 const struct linux_logo *logo, int y,
409 unsigned int n)
410{
411 u32 *palette = NULL, *saved_pseudo_palette = NULL;
412 unsigned char *logo_new = NULL, *logo_rotate = NULL;
413 struct fb_image image;
414
415 /* Return if the frame buffer is not mapped or suspended */
416 if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
417 info->flags & FBINFO_MODULE)
418 return 0;
419
420 image.depth = 8;
421 image.data = logo->data;
422
423 if (fb_logo.needs_cmapreset)
424 fb_set_logocmap(info, logo);
425
426 if (fb_logo.needs_truepalette ||
427 fb_logo.needs_directpalette) {
428 palette = kmalloc(256 * 4, GFP_KERNEL);
429 if (palette == NULL)
430 return 0;
431
432 if (fb_logo.needs_truepalette)
433 fb_set_logo_truepalette(info, logo, palette);
434 else
435 fb_set_logo_directpalette(info, logo, palette);
436
437 saved_pseudo_palette = info->pseudo_palette;
438 info->pseudo_palette = palette;
439 }
440
441 if (fb_logo.depth <= 4) {
442 logo_new = kmalloc(logo->width * logo->height, GFP_KERNEL);
443 if (logo_new == NULL) {
444 kfree(palette);
445 if (saved_pseudo_palette)
446 info->pseudo_palette = saved_pseudo_palette;
447 return 0;
448 }
449 image.data = logo_new;
450 fb_set_logo(info, logo, logo_new, fb_logo.depth);
451 }
452
453 image.dx = 0;
454 image.dy = y;
455 image.width = logo->width;
456 image.height = logo->height;
457
458 if (rotate) {
459 logo_rotate = kmalloc(logo->width *
460 logo->height, GFP_KERNEL);
461 if (logo_rotate)
462 fb_rotate_logo(info, logo_rotate, &image, rotate);
463 }
464
465 fb_do_show_logo(info, &image, rotate, n);
466
467 kfree(palette);
468 if (saved_pseudo_palette != NULL)
469 info->pseudo_palette = saved_pseudo_palette;
470 kfree(logo_new);
471 kfree(logo_rotate);
472 return logo->height;
473}
474
475
476#ifdef CONFIG_FB_LOGO_EXTRA
477
478#define FB_LOGO_EX_NUM_MAX 10
479static struct logo_data_extra {
480 const struct linux_logo *logo;
481 unsigned int n;
482} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
483static unsigned int fb_logo_ex_num;
484
485void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
486{
487 if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
488 return;
489
490 fb_logo_ex[fb_logo_ex_num].logo = logo;
491 fb_logo_ex[fb_logo_ex_num].n = n;
492 fb_logo_ex_num++;
493}
494
495static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
496 unsigned int yres)
497{
498 unsigned int i;
499
500 /* FIXME: logo_ex supports only truecolor fb. */
501 if (info->fix.visual != FB_VISUAL_TRUECOLOR)
502 fb_logo_ex_num = 0;
503
504 for (i = 0; i < fb_logo_ex_num; i++) {
505 height += fb_logo_ex[i].logo->height;
506 if (height > yres) {
507 height -= fb_logo_ex[i].logo->height;
508 fb_logo_ex_num = i;
509 break;
510 }
511 }
512 return height;
513}
514
515static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
516{
517 unsigned int i;
518
519 for (i = 0; i < fb_logo_ex_num; i++)
520 y += fb_show_logo_line(info, rotate,
521 fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
522
523 return y;
524}
525
526#else /* !CONFIG_FB_LOGO_EXTRA */
527
528static inline int fb_prepare_extra_logos(struct fb_info *info,
529 unsigned int height,
530 unsigned int yres)
531{
532 return height;
533}
534
535static inline int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
536{
537 return y;
538}
539
540#endif /* CONFIG_FB_LOGO_EXTRA */
541
542
414int fb_prepare_logo(struct fb_info *info, int rotate) 543int fb_prepare_logo(struct fb_info *info, int rotate)
415{ 544{
416 int depth = fb_get_color_depth(&info->var, &info->fix); 545 int depth = fb_get_color_depth(&info->var, &info->fix);
417 int yres; 546 unsigned int yres;
418 547
419 memset(&fb_logo, 0, sizeof(struct logo_data)); 548 memset(&fb_logo, 0, sizeof(struct logo_data));
420 549
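The hunk above factors the body of fb_show_logo() out into fb_show_logo_line() so it can run once per logo, and the CONFIG_FB_LOGO_EXTRA block lets other kernel code queue additional logos before the console starts. A hedged sketch of a caller of the new fb_append_extra_logo() hook; only the hook and the logo_spe_clut224 image come from this commit, the registration function itself is illustrative:

    #include <linux/fb.h>
    #include <linux/linux_logo.h>

    /* built from the PPM file added later in this commit */
    extern const struct linux_logo logo_spe_clut224;

    static void example_append_spe_logos(unsigned int nr_spes)
    {
            /*
             * Queue one extra logo, repeated nr_spes times, to be drawn on
             * the line below the usual per-CPU penguins.  This must run
             * before fb_prepare_logo()/fb_show_logo() during console init.
             */
            fb_append_extra_logo(&logo_spe_clut224, nr_spes);
    }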
@@ -456,7 +585,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
456 if (!fb_logo.logo) { 585 if (!fb_logo.logo) {
457 return 0; 586 return 0;
458 } 587 }
459 588
460 if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD) 589 if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
461 yres = info->var.yres; 590 yres = info->var.yres;
462 else 591 else
@@ -473,75 +602,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
473 else if (fb_logo.logo->type == LINUX_LOGO_VGA16) 602 else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
474 fb_logo.depth = 4; 603 fb_logo.depth = 4;
475 else 604 else
476 fb_logo.depth = 1; 605 fb_logo.depth = 1;
477 return fb_logo.logo->height; 606
607 return fb_prepare_extra_logos(info, fb_logo.logo->height, yres);
478} 608}
479 609
480int fb_show_logo(struct fb_info *info, int rotate) 610int fb_show_logo(struct fb_info *info, int rotate)
481{ 611{
482 u32 *palette = NULL, *saved_pseudo_palette = NULL; 612 int y;
483 unsigned char *logo_new = NULL, *logo_rotate = NULL;
484 struct fb_image image;
485
486 /* Return if the frame buffer is not mapped or suspended */
487 if (fb_logo.logo == NULL || info->state != FBINFO_STATE_RUNNING ||
488 info->flags & FBINFO_MODULE)
489 return 0;
490
491 image.depth = 8;
492 image.data = fb_logo.logo->data;
493
494 if (fb_logo.needs_cmapreset)
495 fb_set_logocmap(info, fb_logo.logo);
496
497 if (fb_logo.needs_truepalette ||
498 fb_logo.needs_directpalette) {
499 palette = kmalloc(256 * 4, GFP_KERNEL);
500 if (palette == NULL)
501 return 0;
502
503 if (fb_logo.needs_truepalette)
504 fb_set_logo_truepalette(info, fb_logo.logo, palette);
505 else
506 fb_set_logo_directpalette(info, fb_logo.logo, palette);
507
508 saved_pseudo_palette = info->pseudo_palette;
509 info->pseudo_palette = palette;
510 }
511
512 if (fb_logo.depth <= 4) {
513 logo_new = kmalloc(fb_logo.logo->width * fb_logo.logo->height,
514 GFP_KERNEL);
515 if (logo_new == NULL) {
516 kfree(palette);
517 if (saved_pseudo_palette)
518 info->pseudo_palette = saved_pseudo_palette;
519 return 0;
520 }
521 image.data = logo_new;
522 fb_set_logo(info, fb_logo.logo, logo_new, fb_logo.depth);
523 }
524 613
525 image.dx = 0; 614 y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
526 image.dy = 0; 615 num_online_cpus());
527 image.width = fb_logo.logo->width; 616 y = fb_show_extra_logos(info, y, rotate);
528 image.height = fb_logo.logo->height;
529 617
530 if (rotate) { 618 return y;
531 logo_rotate = kmalloc(fb_logo.logo->width *
532 fb_logo.logo->height, GFP_KERNEL);
533 if (logo_rotate)
534 fb_rotate_logo(info, logo_rotate, &image, rotate);
535 }
536
537 fb_do_show_logo(info, &image, rotate, num_online_cpus());
538
539 kfree(palette);
540 if (saved_pseudo_palette != NULL)
541 info->pseudo_palette = saved_pseudo_palette;
542 kfree(logo_new);
543 kfree(logo_rotate);
544 return fb_logo.logo->height;
545} 619}
546#else 620#else
547int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; } 621int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
@@ -1155,17 +1229,15 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1155} 1229}
1156#endif 1230#endif
1157 1231
1158static int 1232static int
1159fb_mmap(struct file *file, struct vm_area_struct * vma) 1233fb_mmap(struct file *file, struct vm_area_struct * vma)
1160{ 1234{
1161 int fbidx = iminor(file->f_path.dentry->d_inode); 1235 int fbidx = iminor(file->f_path.dentry->d_inode);
1162 struct fb_info *info = registered_fb[fbidx]; 1236 struct fb_info *info = registered_fb[fbidx];
1163 struct fb_ops *fb = info->fbops; 1237 struct fb_ops *fb = info->fbops;
1164 unsigned long off; 1238 unsigned long off;
1165#if !defined(__sparc__) || defined(__sparc_v9__)
1166 unsigned long start; 1239 unsigned long start;
1167 u32 len; 1240 u32 len;
1168#endif
1169 1241
1170 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) 1242 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
1171 return -EINVAL; 1243 return -EINVAL;
@@ -1180,12 +1252,6 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1180 return res; 1252 return res;
1181 } 1253 }
1182 1254
1183#if defined(__sparc__) && !defined(__sparc_v9__)
1184 /* Should never get here, all fb drivers should have their own
1185 mmap routines */
1186 return -EINVAL;
1187#else
1188 /* !sparc32... */
1189 lock_kernel(); 1255 lock_kernel();
1190 1256
1191 /* frame buffer memory */ 1257 /* frame buffer memory */
@@ -1209,50 +1275,11 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1209 vma->vm_pgoff = off >> PAGE_SHIFT; 1275 vma->vm_pgoff = off >> PAGE_SHIFT;
1210 /* This is an IO map - tell maydump to skip this VMA */ 1276 /* This is an IO map - tell maydump to skip this VMA */
1211 vma->vm_flags |= VM_IO | VM_RESERVED; 1277 vma->vm_flags |= VM_IO | VM_RESERVED;
1212#if defined(__mc68000__) 1278 fb_pgprotect(file, vma, off);
1213#if defined(CONFIG_SUN3)
1214 pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
1215#elif defined(CONFIG_MMU)
1216 if (CPU_IS_020_OR_030)
1217 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
1218 if (CPU_IS_040_OR_060) {
1219 pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
1220 /* Use no-cache mode, serialized */
1221 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
1222 }
1223#endif
1224#elif defined(__powerpc__)
1225 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
1226 vma->vm_end - vma->vm_start,
1227 vma->vm_page_prot);
1228#elif defined(__alpha__)
1229 /* Caching is off in the I/O space quadrant by design. */
1230#elif defined(__i386__) || defined(__x86_64__)
1231 if (boot_cpu_data.x86 > 3)
1232 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
1233#elif defined(__mips__) || defined(__sparc_v9__)
1234 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1235#elif defined(__hppa__)
1236 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
1237#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
1238 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1239#elif defined(__avr32__)
1240 vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
1241 & ~_PAGE_CACHABLE)
1242 | (_PAGE_BUFFER | _PAGE_DIRTY));
1243#elif defined(__ia64__)
1244 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
1245 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1246 else
1247 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1248#else
1249#warning What do we have to do here??
1250#endif
1251 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, 1279 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
1252 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 1280 vma->vm_end - vma->vm_start, vma->vm_page_prot))
1253 return -EAGAIN; 1281 return -EAGAIN;
1254 return 0; 1282 return 0;
1255#endif /* !sparc32 */
1256} 1283}
1257 1284
1258static int 1285static int
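The fb_mmap() hunks replace the long per-architecture #ifdef chain with a single fb_pgprotect() call provided by each architecture's <asm/fb.h>. As a rough illustration only (not any particular architecture's real header), a port that simply wants framebuffer mappings uncached could supply:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static inline void fb_pgprotect(struct file *file,
                                    struct vm_area_struct *vma,
                                    unsigned long off)
    {
            /* mark the mapping uncached; other ports use write-combining */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    }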
@@ -1388,17 +1415,34 @@ register_framebuffer(struct fb_info *fb_info)
1388 * 1415 *
1389 * Returns negative errno on error, or zero for success. 1416 * Returns negative errno on error, or zero for success.
1390 * 1417 *
1418 * This function will also notify the framebuffer console
1419 * to release the driver.
1420 *
1421 * This is meant to be called within a driver's module_exit()
1422 * function. If this is called outside module_exit(), ensure
1423 * that the driver implements fb_open() and fb_release() to
1424 * check that no processes are using the device.
1391 */ 1425 */
1392 1426
1393int 1427int
1394unregister_framebuffer(struct fb_info *fb_info) 1428unregister_framebuffer(struct fb_info *fb_info)
1395{ 1429{
1396 struct fb_event event; 1430 struct fb_event event;
1397 int i; 1431 int i, ret = 0;
1398 1432
1399 i = fb_info->node; 1433 i = fb_info->node;
1400 if (!registered_fb[i]) 1434 if (!registered_fb[i]) {
1401 return -EINVAL; 1435 ret = -EINVAL;
1436 goto done;
1437 }
1438
1439 event.info = fb_info;
1440 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
1441
1442 if (ret) {
1443 ret = -EINVAL;
1444 goto done;
1445 }
1402 1446
1403 if (fb_info->pixmap.addr && 1447 if (fb_info->pixmap.addr &&
1404 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) 1448 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
@@ -1410,7 +1454,8 @@ unregister_framebuffer(struct fb_info *fb_info)
1410 device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1454 device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1411 event.info = fb_info; 1455 event.info = fb_info;
1412 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1456 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
1413 return 0; 1457done:
1458 return ret;
1414} 1459}
1415 1460
1416/** 1461/**
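unregister_framebuffer() now raises FB_EVENT_FB_UNBIND first and backs out with -EINVAL if any notifier client objects, before the existing FB_EVENT_FB_UNREGISTERED. A hedged sketch of a client on the fb notifier chain reacting to the two events; the callback, its helper and the registration note are illustrative, not code from this commit:

    #include <linux/fb.h>
    #include <linux/notifier.h>

    static bool example_fb_busy(struct fb_info *info)
    {
            return false;                   /* placeholder policy */
    }

    static int example_fb_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
    {
            struct fb_event *event = data;

            switch (action) {
            case FB_EVENT_FB_UNBIND:
                    /* any non-zero chain result makes unregister_framebuffer() fail */
                    if (example_fb_busy(event->info))
                            return NOTIFY_BAD;
                    /* otherwise drop our binding to event->info here */
                    break;
            case FB_EVENT_FB_UNREGISTERED:
                    /* the device is going away unconditionally; final cleanup */
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block example_fb_nb = {
            .notifier_call = example_fb_notify,
    };

    /* registered during init with fb_register_client(&example_fb_nb) */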
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 70ff55b14596..6c91c61cdb63 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -195,13 +195,15 @@ static int fm2fb_blank(int blank, struct fb_info *info)
195static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, 195static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
196 u_int transp, struct fb_info *info) 196 u_int transp, struct fb_info *info)
197{ 197{
198 if (regno > info->cmap.len) 198 if (regno < 16) {
199 return 1; 199 red >>= 8;
200 red >>= 8; 200 green >>= 8;
201 green >>= 8; 201 blue >>= 8;
202 blue >>= 8; 202
203 ((u32*)(info->pseudo_palette))[regno] = (red << 16) |
204 (green << 8) | blue;
205 }
203 206
204 ((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
205 return 0; 207 return 0;
206} 208}
207 209
@@ -237,7 +239,7 @@ static int __devinit fm2fb_probe(struct zorro_dev *z,
237 if (!zorro_request_device(z,"fm2fb")) 239 if (!zorro_request_device(z,"fm2fb"))
238 return -ENXIO; 240 return -ENXIO;
239 241
240 info = framebuffer_alloc(256 * sizeof(u32), &z->dev); 242 info = framebuffer_alloc(16 * sizeof(u32), &z->dev);
241 if (!info) { 243 if (!info) {
242 zorro_release_device(z); 244 zorro_release_device(z);
243 return -ENOMEM; 245 return -ENOMEM;
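This hunk, like several that follow, shrinks the pseudo palette to 16 entries and makes setcolreg ignore higher register numbers: for truecolor and directcolor visuals the console only ever caches colours 0-15 there. A generic sketch of the pattern (illustrative function; it scales by the field widths in info->var instead of fm2fb's fixed 8-bit shift):

    #include <linux/fb.h>

    static int example_setcolreg(u_int regno, u_int red, u_int green,
                                 u_int blue, u_int transp, struct fb_info *info)
    {
            u32 *pal = info->pseudo_palette;

            if (regno >= 16)
                    return 0;               /* nothing is cached beyond entry 15 */

            /* the fb core passes 16-bit components; scale to the visible field width */
            red >>= 16 - info->var.red.length;
            green >>= 16 - info->var.green.length;
            blue >>= 16 - info->var.blue.length;

            pal[regno] = (red << info->var.red.offset) |
                         (green << info->var.green.offset) |
                         (blue << info->var.blue.offset);
            return 0;
    }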
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index bf0e60b5a3b6..b9b572b293d4 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -86,7 +86,7 @@ static int gbe_revision;
86 86
87static int ypan, ywrap; 87static int ypan, ywrap;
88 88
89static uint32_t pseudo_palette[256]; 89static uint32_t pseudo_palette[16];
90 90
91static char *mode_option __initdata = NULL; 91static char *mode_option __initdata = NULL;
92 92
@@ -854,8 +854,7 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
854 green >>= 8; 854 green >>= 8;
855 blue >>= 8; 855 blue >>= 8;
856 856
857 switch (info->var.bits_per_pixel) { 857 if (info->var.bits_per_pixel <= 8) {
858 case 8:
859 /* wait for the color map FIFO to have a free entry */ 858 /* wait for the color map FIFO to have a free entry */
860 for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++) 859 for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++)
861 udelay(10); 860 udelay(10);
@@ -864,23 +863,25 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
864 return 1; 863 return 1;
865 } 864 }
866 gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8); 865 gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
867 break; 866 } else if (regno < 16) {
868 case 15: 867 switch (info->var.bits_per_pixel) {
869 case 16: 868 case 15:
870 red >>= 3; 869 case 16:
871 green >>= 3; 870 red >>= 3;
872 blue >>= 3; 871 green >>= 3;
873 pseudo_palette[regno] = 872 blue >>= 3;
874 (red << info->var.red.offset) | 873 pseudo_palette[regno] =
875 (green << info->var.green.offset) | 874 (red << info->var.red.offset) |
876 (blue << info->var.blue.offset); 875 (green << info->var.green.offset) |
877 break; 876 (blue << info->var.blue.offset);
878 case 32: 877 break;
879 pseudo_palette[regno] = 878 case 32:
880 (red << info->var.red.offset) | 879 pseudo_palette[regno] =
881 (green << info->var.green.offset) | 880 (red << info->var.red.offset) |
882 (blue << info->var.blue.offset); 881 (green << info->var.green.offset) |
883 break; 882 (blue << info->var.blue.offset);
883 break;
884 }
884 } 885 }
885 886
886 return 0; 887 return 0;
diff --git a/drivers/video/i810/i810.h b/drivers/video/i810/i810.h
index 889e4ea5edc1..328ae6c673ec 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/i810/i810.h
@@ -266,7 +266,7 @@ struct i810fb_par {
266 struct i810fb_i2c_chan chan[3]; 266 struct i810fb_i2c_chan chan[3];
267 struct mutex open_lock; 267 struct mutex open_lock;
268 unsigned int use_count; 268 unsigned int use_count;
269 u32 pseudo_palette[17]; 269 u32 pseudo_palette[16];
270 unsigned long mmio_start_phys; 270 unsigned long mmio_start_phys;
271 u8 __iomem *mmio_start_virtual; 271 u8 __iomem *mmio_start_virtual;
272 u8 *edid; 272 u8 *edid;
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 80b94c19a9fa..6148300fadd6 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -302,7 +302,7 @@ struct intelfb_info {
302 u32 ring_lockup; 302 u32 ring_lockup;
303 303
304 /* palette */ 304 /* palette */
305 u32 pseudo_palette[17]; 305 u32 pseudo_palette[16];
306 306
307 /* chip info */ 307 /* chip info */
308 int pci_chipset; 308 int pci_chipset;
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 9397bcef3018..da219c043c99 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,6 +10,11 @@ menuconfig LOGO
10 10
11if LOGO 11if LOGO
12 12
13config FB_LOGO_EXTRA
14 bool
15 depends on FB
16 default y if SPU_BASE
17
13config LOGO_LINUX_MONO 18config LOGO_LINUX_MONO
14 bool "Standard black and white Linux logo" 19 bool "Standard black and white Linux logo"
15 default y 20 default y
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index b985dfad6c63..a5fc4edf84e6 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_LOGO_SUPERH_VGA16) += logo_superh_vga16.o
14obj-$(CONFIG_LOGO_SUPERH_CLUT224) += logo_superh_clut224.o 14obj-$(CONFIG_LOGO_SUPERH_CLUT224) += logo_superh_clut224.o
15obj-$(CONFIG_LOGO_M32R_CLUT224) += logo_m32r_clut224.o 15obj-$(CONFIG_LOGO_M32R_CLUT224) += logo_m32r_clut224.o
16 16
17obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
18
17# How to generate logo's 19# How to generate logo's
18 20
19# Use logo-cfiles to retrieve list of .c files to be built 21# Use logo-cfiles to retrieve list of .c files to be built
diff --git a/drivers/video/logo/logo_spe_clut224.ppm b/drivers/video/logo/logo_spe_clut224.ppm
new file mode 100644
index 000000000000..d36ad624a79c
--- /dev/null
+++ b/drivers/video/logo/logo_spe_clut224.ppm
@@ -0,0 +1,283 @@
[283 lines of raw PPM pixel data (P3, maxval 255) for the new logo_spe_clut224 image omitted]
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index f7d647dda978..aa8c714d6245 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -170,7 +170,7 @@ static struct fb_fix_screeninfo macfb_fix = {
170}; 170};
171 171
172static struct fb_info fb_info; 172static struct fb_info fb_info;
173static u32 pseudo_palette[17]; 173static u32 pseudo_palette[16];
174static int inverse = 0; 174static int inverse = 0;
175static int vidtest = 0; 175static int vidtest = 0;
176 176
@@ -529,56 +529,63 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
529 if (regno >= fb_info->cmap.len) 529 if (regno >= fb_info->cmap.len)
530 return 1; 530 return 1;
531 531
532 switch (fb_info->var.bits_per_pixel) { 532 if (fb_info->var.bits_per_pixel <= 8) {
533 case 1: 533 switch (fb_info->var.bits_per_pixel) {
534 /* We shouldn't get here */ 534 case 1:
535 break; 535 /* We shouldn't get here */
536 case 2: 536 break;
537 case 4: 537 case 2:
538 case 8: 538 case 4:
539 if (macfb_setpalette) 539 case 8:
540 macfb_setpalette(regno, red, green, blue, fb_info); 540 if (macfb_setpalette)
541 else 541 macfb_setpalette(regno, red, green, blue,
542 return 1; 542 fb_info);
543 break; 543 else
544 case 16: 544 return 1;
545 if (fb_info->var.red.offset == 10) { 545 break;
546 /* 1:5:5:5 */ 546 }
547 ((u32*) (fb_info->pseudo_palette))[regno] = 547 } else if (regno < 16) {
548 switch (fb_info->var.bits_per_pixel) {
549 case 16:
550 if (fb_info->var.red.offset == 10) {
551 /* 1:5:5:5 */
552 ((u32*) (fb_info->pseudo_palette))[regno] =
548 ((red & 0xf800) >> 1) | 553 ((red & 0xf800) >> 1) |
549 ((green & 0xf800) >> 6) | 554 ((green & 0xf800) >> 6) |
550 ((blue & 0xf800) >> 11) | 555 ((blue & 0xf800) >> 11) |
551 ((transp != 0) << 15); 556 ((transp != 0) << 15);
552 } else { 557 } else {
553 /* 0:5:6:5 */ 558 /* 0:5:6:5 */
554 ((u32*) (fb_info->pseudo_palette))[regno] = 559 ((u32*) (fb_info->pseudo_palette))[regno] =
555 ((red & 0xf800) ) | 560 ((red & 0xf800) ) |
556 ((green & 0xfc00) >> 5) | 561 ((green & 0xfc00) >> 5) |
557 ((blue & 0xf800) >> 11); 562 ((blue & 0xf800) >> 11);
563 }
564 break;
565 /* I'm pretty sure that one or the other of these
566 doesn't exist on 68k Macs */
567 case 24:
568 red >>= 8;
569 green >>= 8;
570 blue >>= 8;
571 ((u32 *)(fb_info->pseudo_palette))[regno] =
572 (red << fb_info->var.red.offset) |
573 (green << fb_info->var.green.offset) |
574 (blue << fb_info->var.blue.offset);
575 break;
576 case 32:
577 red >>= 8;
578 green >>= 8;
579 blue >>= 8;
580 ((u32 *)(fb_info->pseudo_palette))[regno] =
581 (red << fb_info->var.red.offset) |
582 (green << fb_info->var.green.offset) |
583 (blue << fb_info->var.blue.offset);
584 break;
558 } 585 }
559 break; 586 }
560 /* I'm pretty sure that one or the other of these 587
561 doesn't exist on 68k Macs */ 588 return 0;
562 case 24:
563 red >>= 8;
564 green >>= 8;
565 blue >>= 8;
566 ((u32 *)(fb_info->pseudo_palette))[regno] =
567 (red << fb_info->var.red.offset) |
568 (green << fb_info->var.green.offset) |
569 (blue << fb_info->var.blue.offset);
570 break;
571 case 32:
572 red >>= 8;
573 green >>= 8;
574 blue >>= 8;
575 ((u32 *)(fb_info->pseudo_palette))[regno] =
576 (red << fb_info->var.red.offset) |
577 (green << fb_info->var.green.offset) |
578 (blue << fb_info->var.blue.offset);
579 break;
580 }
581 return 0;
582} 589}
583 590
584static struct fb_ops macfb_ops = { 591static struct fb_ops macfb_ops = {
diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
index ab2149531a04..083f60321ed8 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/macmodes.c
@@ -369,9 +369,8 @@ EXPORT_SYMBOL(mac_map_monitor_sense);
369 * 369 *
370 */ 370 */
371 371
372int __devinit mac_find_mode(struct fb_var_screeninfo *var, 372int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
373 struct fb_info *info, const char *mode_option, 373 const char *mode_option, unsigned int default_bpp)
374 unsigned int default_bpp)
375{ 374{
376 const struct fb_videomode *db = NULL; 375 const struct fb_videomode *db = NULL;
377 unsigned int dbsize = 0; 376 unsigned int dbsize = 0;
diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
index babeb81f467d..b86ba08aac9e 100644
--- a/drivers/video/macmodes.h
+++ b/drivers/video/macmodes.h
@@ -55,10 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode,
55extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode, 55extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
56 int *cmode); 56 int *cmode);
57extern int mac_map_monitor_sense(int sense); 57extern int mac_map_monitor_sense(int sense);
58extern int __devinit mac_find_mode(struct fb_var_screeninfo *var, 58extern int mac_find_mode(struct fb_var_screeninfo *var,
59 struct fb_info *info, 59 struct fb_info *info,
60 const char *mode_option, 60 const char *mode_option,
61 unsigned int default_bpp); 61 unsigned int default_bpp);
62 62
63 63
64 /* 64 /*
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index c57aaadf410c..3660d2673bdc 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -91,7 +91,6 @@ static inline void matrox_cfb4_pal(u_int32_t* pal) {
91 for (i = 0; i < 16; i++) { 91 for (i = 0; i < 16; i++) {
92 pal[i] = i * 0x11111111U; 92 pal[i] = i * 0x11111111U;
93 } 93 }
94 pal[i] = 0xFFFFFFFF;
95} 94}
96 95
97static inline void matrox_cfb8_pal(u_int32_t* pal) { 96static inline void matrox_cfb8_pal(u_int32_t* pal) {
@@ -100,7 +99,6 @@ static inline void matrox_cfb8_pal(u_int32_t* pal) {
100 for (i = 0; i < 16; i++) { 99 for (i = 0; i < 16; i++) {
101 pal[i] = i * 0x01010101U; 100 pal[i] = i * 0x01010101U;
102 } 101 }
103 pal[i] = 0x0F0F0F0F;
104} 102}
105 103
106static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area); 104static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area);
@@ -145,13 +143,10 @@ void matrox_cfbX_init(WPMINFO2) {
145 ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit; 143 ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
146 } 144 }
147 break; 145 break;
148 case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5) { 146 case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5)
149 maccess = 0xC0000001; 147 maccess = 0xC0000001;
150 ACCESS_FBINFO(cmap[16]) = 0x7FFF7FFF; 148 else
151 } else {
152 maccess = 0x40000001; 149 maccess = 0x40000001;
153 ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
154 }
155 mopmode = M_OPMODE_16BPP; 150 mopmode = M_OPMODE_16BPP;
156 if (accel) { 151 if (accel) {
157 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea; 152 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
@@ -161,7 +156,6 @@ void matrox_cfbX_init(WPMINFO2) {
161 break; 156 break;
162 case 24: maccess = 0x00000003; 157 case 24: maccess = 0x00000003;
163 mopmode = M_OPMODE_24BPP; 158 mopmode = M_OPMODE_24BPP;
164 ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
165 if (accel) { 159 if (accel) {
166 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea; 160 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
167 ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect; 161 ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
@@ -170,7 +164,6 @@ void matrox_cfbX_init(WPMINFO2) {
170 break; 164 break;
171 case 32: maccess = 0x00000002; 165 case 32: maccess = 0x00000002;
172 mopmode = M_OPMODE_32BPP; 166 mopmode = M_OPMODE_32BPP;
173 ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
174 if (accel) { 167 if (accel) {
175 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea; 168 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
176 ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect; 169 ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 886e475f22f2..86ca7b179000 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -679,6 +679,8 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
679 mga_outb(M_DAC_VAL, blue); 679 mga_outb(M_DAC_VAL, blue);
680 break; 680 break;
681 case 16: 681 case 16:
682 if (regno >= 16)
683 break;
682 { 684 {
683 u_int16_t col = 685 u_int16_t col =
684 (red << ACCESS_FBINFO(fbcon).var.red.offset) | 686 (red << ACCESS_FBINFO(fbcon).var.red.offset) |
@@ -690,6 +692,8 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
690 break; 692 break;
691 case 24: 693 case 24:
692 case 32: 694 case 32:
695 if (regno >= 16)
696 break;
693 ACCESS_FBINFO(cmap[regno]) = 697 ACCESS_FBINFO(cmap[regno]) =
694 (red << ACCESS_FBINFO(fbcon).var.red.offset) | 698 (red << ACCESS_FBINFO(fbcon).var.red.offset) |
695 (green << ACCESS_FBINFO(fbcon).var.green.offset) | 699 (green << ACCESS_FBINFO(fbcon).var.green.offset) |
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 9c25c2f7966b..d59577c8de86 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -518,7 +518,7 @@ struct matrox_fb_info {
518 dll:1; 518 dll:1;
519 } memory; 519 } memory;
520 } values; 520 } values;
521 u_int32_t cmap[17]; 521 u_int32_t cmap[16];
522}; 522};
523 523
524#define info2minfo(info) container_of(info, struct matrox_fb_info, fbcon) 524#define info2minfo(info) container_of(info, struct matrox_fb_info, fbcon)
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 03ae55b168ff..4b3344e03695 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -163,11 +163,6 @@ static void matroxfb_dh_disable(struct matroxfb_dh_fb_info* m2info) {
163 ACCESS_FBINFO(hw).crtc2.ctl = 0x00000004; 163 ACCESS_FBINFO(hw).crtc2.ctl = 0x00000004;
164} 164}
165 165
166static void matroxfb_dh_cfbX_init(struct matroxfb_dh_fb_info* m2info) {
167 /* no acceleration for secondary head... */
168 m2info->cmap[16] = 0xFFFFFFFF;
169}
170
171static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info, 166static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
172 struct fb_var_screeninfo* var) { 167 struct fb_var_screeninfo* var) {
173 unsigned int pos; 168 unsigned int pos;
@@ -385,7 +380,6 @@ static int matroxfb_dh_set_par(struct fb_info* info) {
385 } 380 }
386 } 381 }
387 up_read(&ACCESS_FBINFO(altout).lock); 382 up_read(&ACCESS_FBINFO(altout).lock);
388 matroxfb_dh_cfbX_init(m2info);
389 } 383 }
390 m2info->initialized = 1; 384 m2info->initialized = 1;
391 return 0; 385 return 0;
diff --git a/drivers/video/matrox/matroxfb_crtc2.h b/drivers/video/matrox/matroxfb_crtc2.h
index 177177609be7..1005582e843e 100644
--- a/drivers/video/matrox/matroxfb_crtc2.h
+++ b/drivers/video/matrox/matroxfb_crtc2.h
@@ -28,7 +28,7 @@ struct matroxfb_dh_fb_info {
28 28
29 unsigned int interlaced:1; 29 unsigned int interlaced:1;
30 30
31 u_int32_t cmap[17]; 31 u_int32_t cmap[16];
32}; 32};
33 33
34#endif /* __MATROXFB_CRTC2_H__ */ 34#endif /* __MATROXFB_CRTC2_H__ */
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 5d29a26b8cdf..de0d755f9019 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -273,8 +273,11 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
273 } 273 }
274 } 274 }
275 } 275 }
276
277 /* if h2/post/in/feed have not been assigned, return zero (error) */
276 if (besth2 < 2) 278 if (besth2 < 2)
277 return 0; 279 return 0;
280
278 dprintk(KERN_ERR "clk: %02X %02X %02X %d %d\n", *in, *feed, *post, fxtal, fwant); 281 dprintk(KERN_ERR "clk: %02X %02X %02X %d %d\n", *in, *feed, *post, fxtal, fwant);
279 return fxtal * (*feed) / (*in) * ctl->den; 282 return fxtal * (*feed) / (*in) * ctl->den;
280} 283}
@@ -284,7 +287,7 @@ static unsigned int matroxfb_mavenclock(const struct matrox_pll_ctl* ctl,
284 unsigned int* in, unsigned int* feed, unsigned int* post, 287 unsigned int* in, unsigned int* feed, unsigned int* post,
285 unsigned int* htotal2) { 288 unsigned int* htotal2) {
286 unsigned int fvco; 289 unsigned int fvco;
287 unsigned int p; 290 unsigned int uninitialized_var(p);
288 291
289 fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2); 292 fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
290 if (!fvco) 293 if (!fvco)
@@ -715,7 +718,9 @@ static int maven_find_exact_clocks(unsigned int ht, unsigned int vt,
715 m->regs[0x82] = 0x81; 718 m->regs[0x82] = 0x81;
716 719
717 for (x = 0; x < 8; x++) { 720 for (x = 0; x < 8; x++) {
718 unsigned int a, b, c, h2; 721 unsigned int c;
722 unsigned int uninitialized_var(a), uninitialized_var(b),
723 uninitialized_var(h2);
719 unsigned int h = ht + 2 + x; 724 unsigned int h = ht + 2 + x;
720 725
721 if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) { 726 if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) {
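The maven clock hunks also switch a few locals to uninitialized_var(), the kernel annotation for variables that gcc reports as possibly uninitialized even though every reachable path assigns them before use. For background only (the macro lives in the compiler headers, not in this commit), it is conventionally a no-op self-assignment:

    /* conventional definition, reproduced here only for illustration */
    #define uninitialized_var(x)    x = x

    static int example_last_index(int n)
    {
            int uninitialized_var(idx);     /* written whenever the loop runs */
            int i;

            for (i = 0; i < n; i++)
                    idx = i;

            return n > 0 ? idx : -1;        /* idx only read when it was written */
    }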
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index aff11bbf59a7..d1a10549f543 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -150,8 +150,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
150 M = pll & 0xFF; 150 M = pll & 0xFF;
151 N = (pll >> 8) & 0xFF; 151 N = (pll >> 8) & 0xFF;
152 if (((par->Chipset & 0xfff0) == 0x0290) || 152 if (((par->Chipset & 0xfff0) == 0x0290) ||
153 ((par->Chipset & 0xfff0) == 0x0390) || 153 ((par->Chipset & 0xfff0) == 0x0390)) {
154 ((par->Chipset & 0xfff0) == 0x02E0)) {
155 MB = 1; 154 MB = 1;
156 NB = 1; 155 NB = 1;
157 } else { 156 } else {
@@ -161,7 +160,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
161 *MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P; 160 *MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
162 161
163 pll = NV_RD32(par->PMC, 0x4000); 162 pll = NV_RD32(par->PMC, 0x4000);
164 P = (pll >> 16) & 0x03; 163 P = (pll >> 16) & 0x07;
165 pll = NV_RD32(par->PMC, 0x4004); 164 pll = NV_RD32(par->PMC, 0x4004);
166 M = pll & 0xFF; 165 M = pll & 0xFF;
167 N = (pll >> 8) & 0xFF; 166 N = (pll >> 8) & 0xFF;
@@ -892,11 +891,17 @@ void NVCalcStateExt(struct nvidia_par *par,
892 state->general = bpp == 16 ? 0x00101100 : 0x00100100; 891 state->general = bpp == 16 ? 0x00101100 : 0x00100100;
893 state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00; 892 state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
894 break; 893 break;
894 case NV_ARCH_40:
895 if (!par->FlatPanel)
896 state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
897 0xeffffeff;
898 /* fallthrough */
895 case NV_ARCH_10: 899 case NV_ARCH_10:
896 case NV_ARCH_20: 900 case NV_ARCH_20:
897 case NV_ARCH_30: 901 case NV_ARCH_30:
898 default: 902 default:
899 if ((par->Chipset & 0xfff0) == 0x0240) { 903 if ((par->Chipset & 0xfff0) == 0x0240 ||
904 (par->Chipset & 0xfff0) == 0x03d0) {
900 state->arbitration0 = 256; 905 state->arbitration0 = 256;
901 state->arbitration1 = 0x0480; 906 state->arbitration1 = 0x0480;
902 } else if (((par->Chipset & 0xffff) == 0x01A0) || 907 } else if (((par->Chipset & 0xffff) == 0x01A0) ||
@@ -939,7 +944,7 @@ void NVCalcStateExt(struct nvidia_par *par,
939 944
940void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state) 945void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
941{ 946{
942 int i; 947 int i, j;
943 948
944 NV_WR32(par->PMC, 0x0140, 0x00000000); 949 NV_WR32(par->PMC, 0x0140, 0x00000000);
945 NV_WR32(par->PMC, 0x0200, 0xFFFF00FF); 950 NV_WR32(par->PMC, 0x0200, 0xFFFF00FF);
@@ -951,7 +956,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
951 NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF); 956 NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF);
952 957
953 if (par->Architecture == NV_ARCH_04) { 958 if (par->Architecture == NV_ARCH_04) {
954 NV_WR32(par->PFB, 0x0200, state->config); 959 if (state)
960 NV_WR32(par->PFB, 0x0200, state->config);
955 } else if ((par->Architecture < NV_ARCH_40) || 961 } else if ((par->Architecture < NV_ARCH_40) ||
956 (par->Chipset & 0xfff0) == 0x0040) { 962 (par->Chipset & 0xfff0) == 0x0040) {
957 for (i = 0; i < 8; i++) { 963 for (i = 0; i < 8; i++) {
@@ -964,8 +970,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
964 970
965 if (((par->Chipset & 0xfff0) == 0x0090) || 971 if (((par->Chipset & 0xfff0) == 0x0090) ||
966 ((par->Chipset & 0xfff0) == 0x01D0) || 972 ((par->Chipset & 0xfff0) == 0x01D0) ||
967 ((par->Chipset & 0xfff0) == 0x02E0) || 973 ((par->Chipset & 0xfff0) == 0x0290) ||
968 ((par->Chipset & 0xfff0) == 0x0290)) 974 ((par->Chipset & 0xfff0) == 0x0390) ||
975 ((par->Chipset & 0xfff0) == 0x03D0))
969 regions = 15; 976 regions = 15;
970 for(i = 0; i < regions; i++) { 977 for(i = 0; i < regions; i++) {
971 NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0); 978 NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
@@ -1206,16 +1213,20 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1206 NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF); 1213 NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
1207 } else { 1214 } else {
1208 if (par->Architecture >= NV_ARCH_40) { 1215 if (par->Architecture >= NV_ARCH_40) {
1209 u32 tmp;
1210
1211 NV_WR32(par->PGRAPH, 0x0084, 0x401287c0); 1216 NV_WR32(par->PGRAPH, 0x0084, 0x401287c0);
1212 NV_WR32(par->PGRAPH, 0x008C, 0x60de8051); 1217 NV_WR32(par->PGRAPH, 0x008C, 0x60de8051);
1213 NV_WR32(par->PGRAPH, 0x0090, 0x00008000); 1218 NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
1214 NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f); 1219 NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f);
1220 NV_WR32(par->PGRAPH, 0x0bc4,
1221 NV_RD32(par->PGRAPH, 0x0bc4) |
1222 0x00008000);
1215 1223
1216 tmp = NV_RD32(par->REGS, 0x1540) & 0xff; 1224 j = NV_RD32(par->REGS, 0x1540) & 0xff;
1217 for(i = 0; tmp && !(tmp & 1); tmp >>= 1, i++); 1225
1218 NV_WR32(par->PGRAPH, 0x5000, i); 1226 if (j) {
1227 for (i = 0; !(j & 1); j >>= 1, i++);
1228 NV_WR32(par->PGRAPH, 0x5000, i);
1229 }
1219 1230
1220 if ((par->Chipset & 0xfff0) == 0x0040) { 1231 if ((par->Chipset & 0xfff0) == 0x0040) {
1221 NV_WR32(par->PGRAPH, 0x09b0, 1232 NV_WR32(par->PGRAPH, 0x09b0,
@@ -1250,6 +1261,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1250 case 0x0160: 1261 case 0x0160:
1251 case 0x01D0: 1262 case 0x01D0:
1252 case 0x0240: 1263 case 0x0240:
1264 case 0x03D0:
1253 NV_WR32(par->PMC, 0x1700, 1265 NV_WR32(par->PMC, 0x1700,
1254 NV_RD32(par->PFB, 0x020C)); 1266 NV_RD32(par->PFB, 0x020C));
1255 NV_WR32(par->PMC, 0x1704, 0); 1267 NV_WR32(par->PMC, 0x1704, 0);
@@ -1269,7 +1281,6 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1269 0x00000108); 1281 0x00000108);
1270 break; 1282 break;
1271 case 0x0220: 1283 case 0x0220:
1272 case 0x0230:
1273 NV_WR32(par->PGRAPH, 0x0860, 0); 1284 NV_WR32(par->PGRAPH, 0x0860, 0);
1274 NV_WR32(par->PGRAPH, 0x0864, 0); 1285 NV_WR32(par->PGRAPH, 0x0864, 0);
1275 NV_WR32(par->PRAMDAC, 0x0608, 1286 NV_WR32(par->PRAMDAC, 0x0608,
@@ -1277,8 +1288,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1277 0x00100000); 1288 0x00100000);
1278 break; 1289 break;
1279 case 0x0090: 1290 case 0x0090:
1280 case 0x02E0:
1281 case 0x0290: 1291 case 0x0290:
1292 case 0x0390:
1282 NV_WR32(par->PRAMDAC, 0x0608, 1293 NV_WR32(par->PRAMDAC, 0x0608,
1283 NV_RD32(par->PRAMDAC, 0x0608) | 1294 NV_RD32(par->PRAMDAC, 0x0608) |
1284 0x00100000); 1295 0x00100000);
@@ -1355,8 +1366,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1355 } else { 1366 } else {
1356 if (((par->Chipset & 0xfff0) == 0x0090) || 1367 if (((par->Chipset & 0xfff0) == 0x0090) ||
1357 ((par->Chipset & 0xfff0) == 0x01D0) || 1368 ((par->Chipset & 0xfff0) == 0x01D0) ||
1358 ((par->Chipset & 0xfff0) == 0x02E0) || 1369 ((par->Chipset & 0xfff0) == 0x0290) ||
1359 ((par->Chipset & 0xfff0) == 0x0290)) { 1370 ((par->Chipset & 0xfff0) == 0x0390) ||
1371 ((par->Chipset & 0xfff0) == 0x03D0)) {
1360 for (i = 0; i < 60; i++) { 1372 for (i = 0; i < 60; i++) {
1361 NV_WR32(par->PGRAPH, 1373 NV_WR32(par->PGRAPH,
1362 0x0D00 + i*4, 1374 0x0D00 + i*4,
@@ -1407,8 +1419,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1407 } else { 1419 } else {
1408 if ((par->Chipset & 0xfff0) == 0x0090 || 1420 if ((par->Chipset & 0xfff0) == 0x0090 ||
1409 (par->Chipset & 0xfff0) == 0x01D0 || 1421 (par->Chipset & 0xfff0) == 0x01D0 ||
1410 (par->Chipset & 0xfff0) == 0x02E0 || 1422 (par->Chipset & 0xfff0) == 0x0290 ||
1411 (par->Chipset & 0xfff0) == 0x0290) { 1423 (par->Chipset & 0xfff0) == 0x0390) {
1412 NV_WR32(par->PGRAPH, 0x0DF0, 1424 NV_WR32(par->PGRAPH, 0x0DF0,
1413 NV_RD32(par->PFB, 0x0200)); 1425 NV_RD32(par->PFB, 0x0200));
1414 NV_WR32(par->PGRAPH, 0x0DF4, 1426 NV_WR32(par->PGRAPH, 0x0DF4,
@@ -1495,6 +1507,12 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1495 NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001); 1507 NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001);
1496 NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001); 1508 NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001);
1497 NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001); 1509 NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001);
1510
1511 if (!state) {
1512 par->CurrentState = NULL;
1513 return;
1514 }
1515
1498 if (par->Architecture >= NV_ARCH_10) { 1516 if (par->Architecture >= NV_ARCH_10) {
1499 if (par->twoHeads) { 1517 if (par->twoHeads) {
1500 NV_WR32(par->PCRTC0, 0x0860, state->head); 1518 NV_WR32(par->PCRTC0, 0x0860, state->head);
@@ -1566,6 +1584,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1566 VGA_WR08(par->PCIO, 0x03D5, state->interlace); 1584 VGA_WR08(par->PCIO, 0x03D5, state->interlace);
1567 1585
1568 if (!par->FlatPanel) { 1586 if (!par->FlatPanel) {
1587 if (par->Architecture >= NV_ARCH_40)
1588 NV_WR32(par->PRAMDAC0, 0x0580, state->control);
1589
1569 NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel); 1590 NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel);
1570 NV_WR32(par->PRAMDAC0, 0x0508, state->vpll); 1591 NV_WR32(par->PRAMDAC0, 0x0508, state->vpll);
1571 if (par->twoHeads) 1592 if (par->twoHeads)
@@ -1631,6 +1652,9 @@ void NVUnloadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state) {
1631 state->scale = NV_RD32(par->PRAMDAC, 0x0848); 1652 state->scale = NV_RD32(par->PRAMDAC, 0x0848);
1632 state->config = NV_RD32(par->PFB, 0x0200); 1653 state->config = NV_RD32(par->PFB, 0x0200);
1633 1654
1655 if (par->Architecture >= NV_ARCH_40 && !par->FlatPanel)
1656 state->control = NV_RD32(par->PRAMDAC0, 0x0580);
1657
1634 if (par->Architecture >= NV_ARCH_10) { 1658 if (par->Architecture >= NV_ARCH_10) {
1635 if (par->twoHeads) { 1659 if (par->twoHeads) {
1636 state->head = NV_RD32(par->PCRTC0, 0x0860); 1660 state->head = NV_RD32(par->PCRTC0, 0x0860);
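Besides the chipset-mask updates, NVLoadStateExt() above now accepts a NULL state: the engine and FIFO are still reprogrammed, but no saved CRTC/RAMDAC values are written back and par->CurrentState is cleared. Call-site sketch only; the wrapper name is made up and the forward declarations stand in for the driver's local headers:

    struct nvidia_par;
    typedef struct _riva_hw_state RIVA_HW_STATE;

    /* prototype normally pulled in from the driver's own headers */
    void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE *state);

    /* reset the graphics engine without restoring a previously saved mode */
    static void example_reset_engine(struct nvidia_par *par)
    {
            NVLoadStateExt(par, NULL);
    }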
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 707e2c8a13ed..82579d3a9970 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -166,11 +166,13 @@ u8 NVReadDacData(struct nvidia_par *par)
166static int NVIsConnected(struct nvidia_par *par, int output) 166static int NVIsConnected(struct nvidia_par *par, int output)
167{ 167{
168 volatile u32 __iomem *PRAMDAC = par->PRAMDAC0; 168 volatile u32 __iomem *PRAMDAC = par->PRAMDAC0;
169 u32 reg52C, reg608; 169 u32 reg52C, reg608, dac0_reg608 = 0;
170 int present; 170 int present;
171 171
172 if (output) 172 if (output) {
173 PRAMDAC += 0x800; 173 dac0_reg608 = NV_RD32(PRAMDAC, 0x0608);
174 PRAMDAC += 0x800;
175 }
174 176
175 reg52C = NV_RD32(PRAMDAC, 0x052C); 177 reg52C = NV_RD32(PRAMDAC, 0x052C);
176 reg608 = NV_RD32(PRAMDAC, 0x0608); 178 reg608 = NV_RD32(PRAMDAC, 0x0608);
@@ -194,8 +196,8 @@ static int NVIsConnected(struct nvidia_par *par, int output)
194 else 196 else
195 printk("nvidiafb: CRTC%i analog not found\n", output); 197 printk("nvidiafb: CRTC%i analog not found\n", output);
196 198
197 NV_WR32(par->PRAMDAC0, 0x0608, NV_RD32(par->PRAMDAC0, 0x0608) & 199 if (output)
198 0x0000EFFF); 200 NV_WR32(par->PRAMDAC0, 0x0608, dac0_reg608);
199 201
200 NV_WR32(PRAMDAC, 0x052C, reg52C); 202 NV_WR32(PRAMDAC, 0x052C, reg52C);
201 NV_WR32(PRAMDAC, 0x0608, reg608); 203 NV_WR32(PRAMDAC, 0x0608, reg608);
diff --git a/drivers/video/nvidia/nv_type.h b/drivers/video/nvidia/nv_type.h
index 38f7cc0a2331..2fdf77ec39fc 100644
--- a/drivers/video/nvidia/nv_type.h
+++ b/drivers/video/nvidia/nv_type.h
@@ -86,6 +86,7 @@ typedef struct _riva_hw_state {
86 u32 timingV; 86 u32 timingV;
87 u32 displayV; 87 u32 displayV;
88 u32 crtcSync; 88 u32 crtcSync;
89 u32 control;
89} RIVA_HW_STATE; 90} RIVA_HW_STATE;
90 91
91struct riva_regs { 92struct riva_regs {
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 41f63658572f..a7fe214f0f77 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -674,6 +674,7 @@ static int nvidiafb_set_par(struct fb_info *info)
674 info->fbops->fb_sync = nvidiafb_sync; 674 info->fbops->fb_sync = nvidiafb_sync;
675 info->pixmap.scan_align = 4; 675 info->pixmap.scan_align = 4;
676 info->flags &= ~FBINFO_HWACCEL_DISABLED; 676 info->flags &= ~FBINFO_HWACCEL_DISABLED;
677 info->flags |= FBINFO_READS_FAST;
677 NVResetGraphics(info); 678 NVResetGraphics(info);
678 } else { 679 } else {
679 info->fbops->fb_imageblit = cfb_imageblit; 680 info->fbops->fb_imageblit = cfb_imageblit;
@@ -682,6 +683,7 @@ static int nvidiafb_set_par(struct fb_info *info)
 		info->fbops->fb_sync = NULL;
 		info->pixmap.scan_align = 1;
 		info->flags |= FBINFO_HWACCEL_DISABLED;
+		info->flags &= ~FBINFO_READS_FAST;
 	}
 
 	par->cursor_reset = 1;
@@ -1193,7 +1195,8 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
 
 	printk(KERN_INFO PFX "Device ID: %x \n", id);
 
-	if ((id & 0xfff0) == 0x00f0) {
+	if ((id & 0xfff0) == 0x00f0 ||
+	    (id & 0xfff0) == 0x02e0) {
 		/* pci-e */
 		id = NV_RD32(par->REGS, 0x1800);
 
@@ -1238,18 +1241,16 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
 	case 0x0040:		/* GeForce 6800 */
 	case 0x00C0:		/* GeForce 6800 */
 	case 0x0120:		/* GeForce 6800 */
-	case 0x0130:
 	case 0x0140:		/* GeForce 6600 */
 	case 0x0160:		/* GeForce 6200 */
 	case 0x01D0:		/* GeForce 7200, 7300, 7400 */
-	case 0x02E0:		/* GeForce 7300 GT */
 	case 0x0090:		/* GeForce 7800 */
 	case 0x0210:		/* GeForce 6800 */
 	case 0x0220:		/* GeForce 6200 */
-	case 0x0230:
 	case 0x0240:		/* GeForce 6100 */
 	case 0x0290:		/* GeForce 7900 */
 	case 0x0390:		/* GeForce 7600 */
-	case 0x03D0:
 		arch = NV_ARCH_40;
 		break;
 	case 0x0020:		/* TNT, TNT2 */
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 885b42836cbb..452433d46973 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -271,7 +271,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
 		return;
 	}
 
-	size = sizeof(struct fb_info) + sizeof(u32) * 17;
+	size = sizeof(struct fb_info) + sizeof(u32) * 16;
 
 	info = kmalloc(size, GFP_ATOMIC);
 
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
new file mode 100644
index 000000000000..7f4d25b8a184
--- /dev/null
+++ b/drivers/video/omap/Kconfig
@@ -0,0 +1,58 @@
1config FB_OMAP
2 tristate "OMAP frame buffer support (EXPERIMENTAL)"
3 depends on FB
4 select FB_CFB_FILLRECT
5 select FB_CFB_COPYAREA
6 select FB_CFB_IMAGEBLIT
7 help
8 Frame buffer driver for OMAP based boards.
9
10config FB_OMAP_BOOTLOADER_INIT
11	bool "Check bootloader initialization"
12 depends on FB_OMAP
13 help
14 Say Y here if you want to enable checking if the bootloader has
15 already initialized the display controller. In this case the
16 driver will skip the initialization.
17
18config FB_OMAP_CONSISTENT_DMA_SIZE
19 int "Consistent DMA memory size (MB)"
20 depends on FB_OMAP
21 range 1 14
22 default 2
23 help
24 Increase the DMA consistent memory size according to your video
25 memory needs, for example if you want to use multiple planes.
26 The size must be 2MB aligned.
27 If unsure say 1.
28
29config FB_OMAP_DMA_TUNE
30 bool "Set DMA SDRAM access priority high"
31 depends on FB_OMAP && ARCH_OMAP1
32 help
33 On systems in which video memory is in system memory
34 (SDRAM) this will speed up graphics DMA operations.
35 If you have such a system and want to use rotation
36 answer yes. Answer no if you have a dedicated video
37 memory, or don't use any of the accelerated features.
38
39config FB_OMAP_LCDC_EXTERNAL
40 bool "External LCD controller support"
41 depends on FB_OMAP
42 help
43	  Say Y here if you want to have support for boards with an
44 external LCD controller connected to the SoSSI/RFBI interface.
45
46config FB_OMAP_LCDC_HWA742
47 bool "Epson HWA742 LCD controller support"
48 depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
49 help
50 Say Y here if you want to have support for the external
51 Epson HWA742 LCD controller.
52
53config FB_OMAP_LCDC_BLIZZARD
54 bool "Epson Blizzard LCD controller support"
55 depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
56 help
57 Say Y here if you want to have support for the external
58 Epson Blizzard LCD controller.
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
new file mode 100644
index 000000000000..99da8b6d2c36
--- /dev/null
+++ b/drivers/video/omap/Makefile
@@ -0,0 +1,29 @@
1#
2# Makefile for the new OMAP framebuffer device driver
3#
4
5obj-$(CONFIG_FB_OMAP) += omapfb.o
6
7objs-yy := omapfb_main.o
8
9objs-y$(CONFIG_ARCH_OMAP1) += lcdc.o
10objs-y$(CONFIG_ARCH_OMAP2) += dispc.o
11
12objs-$(CONFIG_ARCH_OMAP1)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o
13objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += rfbi.o
14
15objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
16objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o
17
18objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o
19objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o
20objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
21objs-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o
22objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
23objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
24objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
25objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
26objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
27
28omapfb-objs := $(objs-yy)
29
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
new file mode 100644
index 000000000000..e682940a97a4
--- /dev/null
+++ b/drivers/video/omap/blizzard.c
@@ -0,0 +1,1568 @@
1/*
2 * Epson Blizzard LCD controller driver
3 *
4 * Copyright (C) 2004-2005 Nokia Corporation
5 * Authors: Juha Yrjola <juha.yrjola@nokia.com>
6 * Imre Deak <imre.deak@nokia.com>
7 * YUV support: Jussi Laako <jussi.laako@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/fb.h>
26#include <linux/delay.h>
27#include <linux/clk.h>
28
29#include <asm/arch/dma.h>
30#include <asm/arch/omapfb.h>
31#include <asm/arch/blizzard.h>
32
33#include "dispc.h"
34
35#define MODULE_NAME "blizzard"
36
37#define BLIZZARD_REV_CODE 0x00
38#define BLIZZARD_CONFIG 0x02
39#define BLIZZARD_PLL_DIV 0x04
40#define BLIZZARD_PLL_LOCK_RANGE 0x06
41#define BLIZZARD_PLL_CLOCK_SYNTH_0 0x08
42#define BLIZZARD_PLL_CLOCK_SYNTH_1 0x0a
43#define BLIZZARD_PLL_MODE 0x0c
44#define BLIZZARD_CLK_SRC 0x0e
45#define BLIZZARD_MEM_BANK0_ACTIVATE 0x10
46#define BLIZZARD_MEM_BANK0_STATUS 0x14
47#define BLIZZARD_HDISP 0x2a
48#define BLIZZARD_HNDP 0x2c
49#define BLIZZARD_VDISP0 0x2e
50#define BLIZZARD_VDISP1 0x30
51#define BLIZZARD_VNDP 0x32
52#define BLIZZARD_HSW 0x34
53#define BLIZZARD_VSW 0x38
54#define BLIZZARD_DISPLAY_MODE 0x68
55#define BLIZZARD_INPUT_WIN_X_START_0 0x6c
56#define BLIZZARD_DATA_SOURCE_SELECT 0x8e
57#define BLIZZARD_DISP_MEM_DATA_PORT 0x90
58#define BLIZZARD_DISP_MEM_READ_ADDR0 0x92
59#define BLIZZARD_POWER_SAVE 0xE6
60#define BLIZZARD_NDISP_CTRL_STATUS 0xE8
61
62/* Data source select */
63/* For S1D13745 */
64#define BLIZZARD_SRC_WRITE_LCD_BACKGROUND 0x00
65#define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE 0x01
66#define BLIZZARD_SRC_WRITE_OVERLAY_ENABLE 0x04
67#define BLIZZARD_SRC_DISABLE_OVERLAY 0x05
68/* For S1D13744 */
69#define BLIZZARD_SRC_WRITE_LCD 0x00
70#define BLIZZARD_SRC_BLT_LCD 0x06
71
72#define BLIZZARD_COLOR_RGB565 0x01
73#define BLIZZARD_COLOR_YUV420 0x09
74
75#define BLIZZARD_VERSION_S1D13745 0x01 /* Hailstorm */
76#define BLIZZARD_VERSION_S1D13744 0x02 /* Blizzard */
77
78#define BLIZZARD_AUTO_UPDATE_TIME (HZ / 20)
79
80/* Reserve 4 request slots for requests in irq context */
81#define REQ_POOL_SIZE 24
82#define IRQ_REQ_POOL_SIZE 4
83
84#define REQ_FROM_IRQ_POOL 0x01
85
86#define REQ_COMPLETE 0
87#define REQ_PENDING 1
88
89struct blizzard_reg_list {
90 int start;
91 int end;
92};
93
94/* These need to be saved / restored separately from the rest. */
95static struct blizzard_reg_list blizzard_pll_regs[] = {
96 {
97 .start = 0x04, /* Don't save PLL ctrl (0x0C) */
98 .end = 0x0a,
99 },
100 {
101 .start = 0x0e, /* Clock configuration */
102 .end = 0x0e,
103 },
104};
105
106static struct blizzard_reg_list blizzard_gen_regs[] = {
107 {
108 .start = 0x18, /* SDRAM control */
109 .end = 0x20,
110 },
111 {
112 .start = 0x28, /* LCD Panel configuration */
113 .end = 0x5a, /* HSSI interface, TV configuration */
114 },
115};
116
117static u8 blizzard_reg_cache[0x5a / 2];
118
119struct update_param {
120 int plane;
121 int x, y, width, height;
122 int out_x, out_y;
123 int out_width, out_height;
124 int color_mode;
125 int bpp;
126 int flags;
127};
128
129struct blizzard_request {
130 struct list_head entry;
131 unsigned int flags;
132
133 int (*handler)(struct blizzard_request *req);
134 void (*complete)(void *data);
135 void *complete_data;
136
137 union {
138 struct update_param update;
139 struct completion *sync;
140 } par;
141};
142
143struct plane_info {
144 unsigned long offset;
145 int pos_x, pos_y;
146 int width, height;
147 int out_width, out_height;
148 int scr_width;
149 int color_mode;
150 int bpp;
151};
152
153struct blizzard_struct {
154 enum omapfb_update_mode update_mode;
155 enum omapfb_update_mode update_mode_before_suspend;
156
157 struct timer_list auto_update_timer;
158 int stop_auto_update;
159 struct omapfb_update_window auto_update_window;
160 int enabled_planes;
161 int vid_nonstd_color;
162 int vid_scaled;
163 int last_color_mode;
164 int zoom_on;
165 int screen_width;
166 int screen_height;
167 unsigned te_connected:1;
168 unsigned vsync_only:1;
169
170 struct plane_info plane[OMAPFB_PLANE_NUM];
171
172 struct blizzard_request req_pool[REQ_POOL_SIZE];
173 struct list_head pending_req_list;
174 struct list_head free_req_list;
175 struct semaphore req_sema;
176 spinlock_t req_lock;
177
178 unsigned long sys_ck_rate;
179 struct extif_timings reg_timings, lut_timings;
180
181 u32 max_transmit_size;
182 u32 extif_clk_period;
183 int extif_clk_div;
184 unsigned long pix_tx_time;
185 unsigned long line_upd_time;
186
187 struct omapfb_device *fbdev;
188 struct lcd_ctrl_extif *extif;
189 struct lcd_ctrl *int_ctrl;
190
191 void (*power_up)(struct device *dev);
192 void (*power_down)(struct device *dev);
193
194 int version;
195} blizzard;
196
197struct lcd_ctrl blizzard_ctrl;
198
199static u8 blizzard_read_reg(u8 reg)
200{
201 u8 data;
202
203 blizzard.extif->set_bits_per_cycle(8);
204 blizzard.extif->write_command(&reg, 1);
205 blizzard.extif->read_data(&data, 1);
206
207 return data;
208}
209
210static void blizzard_write_reg(u8 reg, u8 val)
211{
212 blizzard.extif->set_bits_per_cycle(8);
213 blizzard.extif->write_command(&reg, 1);
214 blizzard.extif->write_data(&val, 1);
215}
216
217static void blizzard_restart_sdram(void)
218{
219 unsigned long tmo;
220
221 blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
222 udelay(50);
223 blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 1);
224 tmo = jiffies + msecs_to_jiffies(200);
225 while (!(blizzard_read_reg(BLIZZARD_MEM_BANK0_STATUS) & 0x01)) {
226 if (time_after(jiffies, tmo)) {
227 dev_err(blizzard.fbdev->dev,
228 "s1d1374x: SDRAM not ready");
229 break;
230 }
231 msleep(1);
232 }
233}
234
235static void blizzard_stop_sdram(void)
236{
237 blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
238}
239
240/* Wait until the last window has been completely written into the controller's
241 * SDRAM and we can start transferring the next window.
242 */
243static void blizzard_wait_line_buffer(void)
244{
245 unsigned long tmo = jiffies + msecs_to_jiffies(30);
246
247 while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 7)) {
248 if (time_after(jiffies, tmo)) {
249 if (printk_ratelimit())
250 dev_err(blizzard.fbdev->dev,
251 "s1d1374x: line buffer not ready\n");
252 break;
253 }
254 }
255}
256
257/* Wait until the YYC color space converter is idle. */
258static void blizzard_wait_yyc(void)
259{
260 unsigned long tmo = jiffies + msecs_to_jiffies(30);
261
262 while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 4)) {
263 if (time_after(jiffies, tmo)) {
264 if (printk_ratelimit())
265 dev_err(blizzard.fbdev->dev,
266 "s1d1374x: YYC not ready\n");
267 break;
268 }
269 }
270}
271
272static void disable_overlay(void)
273{
274 blizzard_write_reg(BLIZZARD_DATA_SOURCE_SELECT,
275 BLIZZARD_SRC_DISABLE_OVERLAY);
276}
277
278static void set_window_regs(int x_start, int y_start, int x_end, int y_end,
279 int x_out_start, int y_out_start,
280 int x_out_end, int y_out_end, int color_mode,
281 int zoom_off, int flags)
282{
283 u8 tmp[18];
284 u8 cmd;
285
286 x_end--;
287 y_end--;
288 tmp[0] = x_start;
289 tmp[1] = x_start >> 8;
290 tmp[2] = y_start;
291 tmp[3] = y_start >> 8;
292 tmp[4] = x_end;
293 tmp[5] = x_end >> 8;
294 tmp[6] = y_end;
295 tmp[7] = y_end >> 8;
296
297 x_out_end--;
298 y_out_end--;
299 tmp[8] = x_out_start;
300 tmp[9] = x_out_start >> 8;
301 tmp[10] = y_out_start;
302 tmp[11] = y_out_start >> 8;
303 tmp[12] = x_out_end;
304 tmp[13] = x_out_end >> 8;
305 tmp[14] = y_out_end;
306 tmp[15] = y_out_end >> 8;
307
308 tmp[16] = color_mode;
309 if (zoom_off && blizzard.version == BLIZZARD_VERSION_S1D13745)
310 tmp[17] = BLIZZARD_SRC_WRITE_LCD_BACKGROUND;
311 else if (flags & OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY)
312 tmp[17] = BLIZZARD_SRC_WRITE_OVERLAY_ENABLE;
313 else
314 tmp[17] = blizzard.version == BLIZZARD_VERSION_S1D13744 ?
315 BLIZZARD_SRC_WRITE_LCD :
316 BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;
317
318 blizzard.extif->set_bits_per_cycle(8);
319 cmd = BLIZZARD_INPUT_WIN_X_START_0;
320 blizzard.extif->write_command(&cmd, 1);
321 blizzard.extif->write_data(tmp, 18);
322}
323
324static void enable_tearsync(int y, int width, int height, int screen_height,
325 int out_height, int force_vsync)
326{
327 u8 b;
328
329 b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
330 b |= 1 << 3;
331 blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
332
333 if (likely(blizzard.vsync_only || force_vsync)) {
334 blizzard.extif->enable_tearsync(1, 0);
335 return;
336 }
337
338 if (width * blizzard.pix_tx_time < blizzard.line_upd_time) {
339 blizzard.extif->enable_tearsync(1, 0);
340 return;
341 }
342
343 if ((width * blizzard.pix_tx_time / 1000) * height <
344 (y + out_height) * (blizzard.line_upd_time / 1000)) {
345 blizzard.extif->enable_tearsync(1, 0);
346 return;
347 }
348
349 blizzard.extif->enable_tearsync(1, y + 1);
350}
351
352static void disable_tearsync(void)
353{
354 u8 b;
355
356 blizzard.extif->enable_tearsync(0, 0);
357 b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
358 b &= ~(1 << 3);
359 blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
360 b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
361}
362
363static inline void set_extif_timings(const struct extif_timings *t);
364
365static inline struct blizzard_request *alloc_req(void)
366{
367 unsigned long flags;
368 struct blizzard_request *req;
369 int req_flags = 0;
370
371 if (!in_interrupt())
372 down(&blizzard.req_sema);
373 else
374 req_flags = REQ_FROM_IRQ_POOL;
375
376 spin_lock_irqsave(&blizzard.req_lock, flags);
377 BUG_ON(list_empty(&blizzard.free_req_list));
378 req = list_entry(blizzard.free_req_list.next,
379 struct blizzard_request, entry);
380 list_del(&req->entry);
381 spin_unlock_irqrestore(&blizzard.req_lock, flags);
382
383 INIT_LIST_HEAD(&req->entry);
384 req->flags = req_flags;
385
386 return req;
387}
388
389static inline void free_req(struct blizzard_request *req)
390{
391 unsigned long flags;
392
393 spin_lock_irqsave(&blizzard.req_lock, flags);
394
395 list_del(&req->entry);
396 list_add(&req->entry, &blizzard.free_req_list);
397 if (!(req->flags & REQ_FROM_IRQ_POOL))
398 up(&blizzard.req_sema);
399
400 spin_unlock_irqrestore(&blizzard.req_lock, flags);
401}
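
[editor's note] The two helpers above implement a fixed-size request pool in which IRQ_REQ_POOL_SIZE slots are kept back for interrupt context: the semaphore is initialised to REQ_POOL_SIZE - IRQ_REQ_POOL_SIZE (see blizzard_init() further down), so process-context callers sleep in down() before the pool can run dry, while interrupt-context callers skip the semaphore and draw on the reserved entries. Below is a minimal user-space sketch of the same idea, with a POSIX counting semaphore standing in for the kernel semaphore; names and sizes are illustrative, not part of the driver.

/* Sketch only: fixed pool with N_RESERVED slots kept for "atomic" callers.
 * Compile with -pthread. */
#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>

#define POOL_SIZE	24
#define N_RESERVED	4

static int free_stack[POOL_SIZE];
static int free_top = POOL_SIZE;		/* all slots start free */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t sem;				/* counts POOL_SIZE - N_RESERVED */

static int alloc_slot(int atomic)
{
	int slot;

	if (!atomic)
		sem_wait(&sem);			/* may block, like down(&req_sema) */
	pthread_mutex_lock(&lock);
	slot = free_stack[--free_top];		/* the driver BUG()s if this underflows */
	pthread_mutex_unlock(&lock);
	return slot;
}

static void free_slot(int slot, int atomic)
{
	pthread_mutex_lock(&lock);
	free_stack[free_top++] = slot;
	pthread_mutex_unlock(&lock);
	if (!atomic)
		sem_post(&sem);			/* like up(&req_sema) */
}

int main(void)
{
	int i;

	sem_init(&sem, 0, POOL_SIZE - N_RESERVED);
	for (i = 0; i < POOL_SIZE; i++)
		free_stack[i] = i;
	i = alloc_slot(0);
	printf("got slot %d\n", i);
	free_slot(i, 0);
	return 0;
}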
402
403static void process_pending_requests(void)
404{
405 unsigned long flags;
406
407 spin_lock_irqsave(&blizzard.req_lock, flags);
408
409 while (!list_empty(&blizzard.pending_req_list)) {
410 struct blizzard_request *req;
411 void (*complete)(void *);
412 void *complete_data;
413
414 req = list_entry(blizzard.pending_req_list.next,
415 struct blizzard_request, entry);
416 spin_unlock_irqrestore(&blizzard.req_lock, flags);
417
418 if (req->handler(req) == REQ_PENDING)
419 return;
420
421 complete = req->complete;
422 complete_data = req->complete_data;
423 free_req(req);
424
425 if (complete)
426 complete(complete_data);
427
428 spin_lock_irqsave(&blizzard.req_lock, flags);
429 }
430
431 spin_unlock_irqrestore(&blizzard.req_lock, flags);
432}
433
434static void submit_req_list(struct list_head *head)
435{
436 unsigned long flags;
437 int process = 1;
438
439 spin_lock_irqsave(&blizzard.req_lock, flags);
440 if (likely(!list_empty(&blizzard.pending_req_list)))
441 process = 0;
442 list_splice_init(head, blizzard.pending_req_list.prev);
443 spin_unlock_irqrestore(&blizzard.req_lock, flags);
444
445 if (process)
446 process_pending_requests();
447}
448
449static void request_complete(void *data)
450{
451 struct blizzard_request *req = (struct blizzard_request *)data;
452 void (*complete)(void *);
453 void *complete_data;
454
455 complete = req->complete;
456 complete_data = req->complete_data;
457
458 free_req(req);
459
460 if (complete)
461 complete(complete_data);
462
463 process_pending_requests();
464}
465
466
467static int do_full_screen_update(struct blizzard_request *req)
468{
469 int i;
470 int flags;
471
472 for (i = 0; i < 3; i++) {
473 struct plane_info *p = &blizzard.plane[i];
474 if (!(blizzard.enabled_planes & (1 << i))) {
475 blizzard.int_ctrl->enable_plane(i, 0);
476 continue;
477 }
478 dev_dbg(blizzard.fbdev->dev, "pw %d ph %d\n",
479 p->width, p->height);
480 blizzard.int_ctrl->setup_plane(i,
481 OMAPFB_CHANNEL_OUT_LCD, p->offset,
482 p->scr_width, p->pos_x, p->pos_y,
483 p->width, p->height,
484 p->color_mode);
485 blizzard.int_ctrl->enable_plane(i, 1);
486 }
487
488 dev_dbg(blizzard.fbdev->dev, "sw %d sh %d\n",
489 blizzard.screen_width, blizzard.screen_height);
490 blizzard_wait_line_buffer();
491 flags = req->par.update.flags;
492 if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
493 enable_tearsync(0, blizzard.screen_width,
494 blizzard.screen_height,
495 blizzard.screen_height,
496 blizzard.screen_height,
497 flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
498 else
499 disable_tearsync();
500
501 set_window_regs(0, 0, blizzard.screen_width, blizzard.screen_height,
502 0, 0, blizzard.screen_width, blizzard.screen_height,
503 BLIZZARD_COLOR_RGB565, blizzard.zoom_on, flags);
504 blizzard.zoom_on = 0;
505
506 blizzard.extif->set_bits_per_cycle(16);
507 /* set_window_regs has left the register index at the right
508 * place, so no need to set it here.
509 */
510 blizzard.extif->transfer_area(blizzard.screen_width,
511 blizzard.screen_height,
512 request_complete, req);
513 return REQ_PENDING;
514}
515
516/* Setup all planes with an overlapping area with the update window. */
517static int do_partial_update(struct blizzard_request *req, int plane,
518 int x, int y, int w, int h,
519 int x_out, int y_out, int w_out, int h_out,
520 int wnd_color_mode, int bpp)
521{
522 int i;
523 int gx1, gy1, gx2, gy2;
524 int gx1_out, gy1_out, gx2_out, gy2_out;
525 int color_mode;
526 int flags;
527 int zoom_off;
528
529 /* Global coordinates, relative to pixel 0,0 of the LCD */
530 gx1 = x + blizzard.plane[plane].pos_x;
531 gy1 = y + blizzard.plane[plane].pos_y;
532 gx2 = gx1 + w;
533 gy2 = gy1 + h;
534
535 flags = req->par.update.flags;
536 if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
537 gx1_out = gx1;
538 gy1_out = gy1;
539 gx2_out = gx1 + w * 2;
540 gy2_out = gy1 + h * 2;
541 } else {
542 gx1_out = x_out + blizzard.plane[plane].pos_x;
543 gy1_out = y_out + blizzard.plane[plane].pos_y;
544 gx2_out = gx1_out + w_out;
545 gy2_out = gy1_out + h_out;
546 }
547 zoom_off = blizzard.zoom_on && gx1 == 0 && gy1 == 0 &&
548 w == blizzard.screen_width && h == blizzard.screen_height;
549 blizzard.zoom_on = (!zoom_off && blizzard.zoom_on) ||
550 (w < w_out || h < h_out);
551
552 for (i = 0; i < OMAPFB_PLANE_NUM; i++) {
553 struct plane_info *p = &blizzard.plane[i];
554 int px1, py1;
555 int px2, py2;
556 int pw, ph;
557 int pposx, pposy;
558 unsigned long offset;
559
560 if (!(blizzard.enabled_planes & (1 << i)) ||
561 (wnd_color_mode && i != plane)) {
562 blizzard.int_ctrl->enable_plane(i, 0);
563 continue;
564 }
565 /* Plane coordinates */
566 if (i == plane) {
567 /* Plane in which we are doing the update.
568 * Local coordinates are the one in the update
569 * request.
570 */
571 px1 = x;
572 py1 = y;
573 px2 = x + w;
574 py2 = y + h;
575 pposx = 0;
576 pposy = 0;
577 } else {
578 /* Check if this plane has an overlapping part */
579 px1 = gx1 - p->pos_x;
580 py1 = gy1 - p->pos_y;
581 px2 = gx2 - p->pos_x;
582 py2 = gy2 - p->pos_y;
583 if (px1 >= p->width || py1 >= p->height ||
584 px2 <= 0 || py2 <= 0) {
585 blizzard.int_ctrl->enable_plane(i, 0);
586 continue;
587 }
588 /* Calculate the coordinates for the overlapping
589 * part in the plane's local coordinates.
590 */
591 pposx = -px1;
592 pposy = -py1;
593 if (px1 < 0)
594 px1 = 0;
595 if (py1 < 0)
596 py1 = 0;
597 if (px2 > p->width)
598 px2 = p->width;
599 if (py2 > p->height)
600 py2 = p->height;
601 if (pposx < 0)
602 pposx = 0;
603 if (pposy < 0)
604 pposy = 0;
605 }
606 pw = px2 - px1;
607 ph = py2 - py1;
608 offset = p->offset + (p->scr_width * py1 + px1) * p->bpp / 8;
609 if (wnd_color_mode)
610 /* Window embedded in the plane with a differing
611 * color mode / bpp. Calculate the number of DMA
612 * transfer elements in terms of the plane's bpp.
613 */
614 pw = (pw + 1) * bpp / p->bpp;
615#ifdef VERBOSE
616 dev_dbg(blizzard.fbdev->dev,
617 "plane %d offset %#08lx pposx %d pposy %d "
618 "px1 %d py1 %d pw %d ph %d\n",
619 i, offset, pposx, pposy, px1, py1, pw, ph);
620#endif
621 blizzard.int_ctrl->setup_plane(i,
622 OMAPFB_CHANNEL_OUT_LCD, offset,
623 p->scr_width,
624 pposx, pposy, pw, ph,
625 p->color_mode);
626
627 blizzard.int_ctrl->enable_plane(i, 1);
628 }
629
630 switch (wnd_color_mode) {
631 case OMAPFB_COLOR_YUV420:
632 color_mode = BLIZZARD_COLOR_YUV420;
633 /* Currently only the 16 bits/pixel cycle format is
634 * supported on the external interface. Adjust the number
635 * of transfer elements per line for 12bpp format.
636 */
637 w = (w + 1) * 3 / 4;
638 break;
639 default:
640 color_mode = BLIZZARD_COLOR_RGB565;
641 break;
642 }
643
644 blizzard_wait_line_buffer();
645 if (blizzard.last_color_mode == BLIZZARD_COLOR_YUV420)
646 blizzard_wait_yyc();
647 blizzard.last_color_mode = color_mode;
648 if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
649 enable_tearsync(gy1, w, h,
650 blizzard.screen_height,
651 h_out,
652 flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
653 else
654 disable_tearsync();
655
656 set_window_regs(gx1, gy1, gx2, gy2, gx1_out, gy1_out, gx2_out, gy2_out,
657 color_mode, zoom_off, flags);
658
659 blizzard.extif->set_bits_per_cycle(16);
660 /* set_window_regs has left the register index at the right
661 * place, so no need to set it here.
662 */
663 blizzard.extif->transfer_area(w, h, request_complete, req);
664
665 return REQ_PENDING;
666}
667
668static int send_frame_handler(struct blizzard_request *req)
669{
670 struct update_param *par = &req->par.update;
671 int plane = par->plane;
672
673#ifdef VERBOSE
674 dev_dbg(blizzard.fbdev->dev,
675 "send_frame: x %d y %d w %d h %d "
676 "x_out %d y_out %d w_out %d h_out %d "
677 "color_mode %04x flags %04x planes %01x\n",
678 par->x, par->y, par->width, par->height,
679 par->out_x, par->out_y, par->out_width, par->out_height,
680 par->color_mode, par->flags, blizzard.enabled_planes);
681#endif
682 if (par->flags & OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY)
683 disable_overlay();
684
685 if ((blizzard.enabled_planes & blizzard.vid_nonstd_color) ||
686 (blizzard.enabled_planes & blizzard.vid_scaled))
687 return do_full_screen_update(req);
688
689 return do_partial_update(req, plane, par->x, par->y,
690 par->width, par->height,
691 par->out_x, par->out_y,
692 par->out_width, par->out_height,
693 par->color_mode, par->bpp);
694}
695
696static void send_frame_complete(void *data)
697{
698}
699
700#define ADD_PREQ(_x, _y, _w, _h, _x_out, _y_out, _w_out, _h_out) do { \
701 req = alloc_req(); \
702 req->handler = send_frame_handler; \
703 req->complete = send_frame_complete; \
704 req->par.update.plane = plane_idx; \
705 req->par.update.x = _x; \
706 req->par.update.y = _y; \
707 req->par.update.width = _w; \
708 req->par.update.height = _h; \
709 req->par.update.out_x = _x_out; \
710 req->par.update.out_y = _y_out; \
711 req->par.update.out_width = _w_out; \
712 req->par.update.out_height = _h_out; \
713 req->par.update.bpp = bpp; \
714 req->par.update.color_mode = color_mode;\
715 req->par.update.flags = flags; \
716 list_add_tail(&req->entry, req_head); \
717} while(0)
718
719static void create_req_list(int plane_idx,
720 struct omapfb_update_window *win,
721 struct list_head *req_head)
722{
723 struct blizzard_request *req;
724 int x = win->x;
725 int y = win->y;
726 int width = win->width;
727 int height = win->height;
728 int x_out = win->out_x;
729 int y_out = win->out_y;
730 int width_out = win->out_width;
731 int height_out = win->out_height;
732 int color_mode;
733 int bpp;
734 int flags;
735 unsigned int ystart = y;
736 unsigned int yspan = height;
737 unsigned int ystart_out = y_out;
738 unsigned int yspan_out = height_out;
739
740 flags = win->format & ~OMAPFB_FORMAT_MASK;
741 color_mode = win->format & OMAPFB_FORMAT_MASK;
742 switch (color_mode) {
743 case OMAPFB_COLOR_YUV420:
744 /* Embedded window with different color mode */
745 bpp = 12;
746 /* X, Y, height must be aligned at 2, width at 4 pixels */
747 x &= ~1;
748 y &= ~1;
749 height = yspan = height & ~1;
750 width = width & ~3;
751 break;
752 default:
753 /* Same as the plane color mode */
754 bpp = blizzard.plane[plane_idx].bpp;
755 break;
756 }
757 if (width * height * bpp / 8 > blizzard.max_transmit_size) {
758 yspan = blizzard.max_transmit_size / (width * bpp / 8);
759 yspan_out = yspan * height_out / height;
760 ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
761 width_out, yspan_out);
762 ystart += yspan;
763 ystart_out += yspan_out;
764 yspan = height - yspan;
765 yspan_out = height_out - yspan_out;
766 flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
767 }
768
769 ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
770 width_out, yspan_out);
771}
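
[editor's note] When a window would exceed max_transmit_size, create_req_list() above queues two requests: the first carries as many whole lines as fit into one transfer, the second carries the remainder, with the output span scaled in proportion and tear-sync dropped for the second part. A standalone sketch of just that arithmetic, with made-up window dimensions and transfer limit:

/* Illustrative only: the window size and limit below are invented. */
#include <stdio.h>

int main(void)
{
	unsigned long max_transmit_size = 64 * 1024;	/* assumed limit, bytes */
	int width = 800, height = 480, bpp = 16;
	int height_out = 480;
	unsigned int yspan = height, yspan_out = height_out;

	if ((unsigned long)width * height * bpp / 8 > max_transmit_size) {
		/* lines that fit into a single transfer */
		yspan = max_transmit_size / (width * bpp / 8);
		yspan_out = yspan * height_out / height;
		printf("first request:  %u lines in, %u lines out\n",
		       yspan, yspan_out);
		printf("second request: %u lines in, %u lines out\n",
		       height - yspan, height_out - yspan_out);
	} else {
		printf("single request: %u lines\n", yspan);
	}
	return 0;
}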
772
773static void auto_update_complete(void *data)
774{
775 if (!blizzard.stop_auto_update)
776 mod_timer(&blizzard.auto_update_timer,
777 jiffies + BLIZZARD_AUTO_UPDATE_TIME);
778}
779
780static void blizzard_update_window_auto(unsigned long arg)
781{
782 LIST_HEAD(req_list);
783 struct blizzard_request *last;
784 struct omapfb_plane_struct *plane;
785
786 plane = blizzard.fbdev->fb_info[0]->par;
787 create_req_list(plane->idx,
788 &blizzard.auto_update_window, &req_list);
789 last = list_entry(req_list.prev, struct blizzard_request, entry);
790
791 last->complete = auto_update_complete;
792 last->complete_data = NULL;
793
794 submit_req_list(&req_list);
795}
796
797int blizzard_update_window_async(struct fb_info *fbi,
798 struct omapfb_update_window *win,
799 void (*complete_callback)(void *arg),
800 void *complete_callback_data)
801{
802 LIST_HEAD(req_list);
803 struct blizzard_request *last;
804 struct omapfb_plane_struct *plane = fbi->par;
805
806 if (unlikely(blizzard.update_mode != OMAPFB_MANUAL_UPDATE))
807 return -EINVAL;
808 if (unlikely(!blizzard.te_connected &&
809 (win->format & OMAPFB_FORMAT_FLAG_TEARSYNC)))
810 return -EINVAL;
811
812 create_req_list(plane->idx, win, &req_list);
813 last = list_entry(req_list.prev, struct blizzard_request, entry);
814
815 last->complete = complete_callback;
816 last->complete_data = (void *)complete_callback_data;
817
818 submit_req_list(&req_list);
819
820 return 0;
821}
822EXPORT_SYMBOL(blizzard_update_window_async);
823
824static int update_full_screen(void)
825{
826 return blizzard_update_window_async(blizzard.fbdev->fb_info[0],
827 &blizzard.auto_update_window, NULL, NULL);
828
829}
830
831static int blizzard_setup_plane(int plane, int channel_out,
832 unsigned long offset, int screen_width,
833 int pos_x, int pos_y, int width, int height,
834 int color_mode)
835{
836 struct plane_info *p;
837
838#ifdef VERBOSE
839 dev_dbg(blizzard.fbdev->dev,
840 "plane %d ch_out %d offset %#08lx scr_width %d "
841 "pos_x %d pos_y %d width %d height %d color_mode %d\n",
842 plane, channel_out, offset, screen_width,
843 pos_x, pos_y, width, height, color_mode);
844#endif
845 if ((unsigned)plane > OMAPFB_PLANE_NUM)
846 return -EINVAL;
847 p = &blizzard.plane[plane];
848
849 switch (color_mode) {
850 case OMAPFB_COLOR_YUV422:
851 case OMAPFB_COLOR_YUY422:
852 p->bpp = 16;
853 blizzard.vid_nonstd_color &= ~(1 << plane);
854 break;
855 case OMAPFB_COLOR_YUV420:
856 p->bpp = 12;
857 blizzard.vid_nonstd_color |= 1 << plane;
858 break;
859 case OMAPFB_COLOR_RGB565:
860 p->bpp = 16;
861 blizzard.vid_nonstd_color &= ~(1 << plane);
862 break;
863 default:
864 return -EINVAL;
865 }
866
867 p->offset = offset;
868 p->pos_x = pos_x;
869 p->pos_y = pos_y;
870 p->width = width;
871 p->height = height;
872 p->scr_width = screen_width;
873 if (!p->out_width)
874 p->out_width = width;
875 if (!p->out_height)
876 p->out_height = height;
877
878 p->color_mode = color_mode;
879
880 return 0;
881}
882
883static int blizzard_set_scale(int plane, int orig_w, int orig_h,
884 int out_w, int out_h)
885{
886 struct plane_info *p = &blizzard.plane[plane];
887 int r;
888
889 dev_dbg(blizzard.fbdev->dev,
890 "plane %d orig_w %d orig_h %d out_w %d out_h %d\n",
891 plane, orig_w, orig_h, out_w, out_h);
892 if ((unsigned)plane > OMAPFB_PLANE_NUM)
893 return -ENODEV;
894
895 r = blizzard.int_ctrl->set_scale(plane, orig_w, orig_h, out_w, out_h);
896 if (r < 0)
897 return r;
898
899 p->width = orig_w;
900 p->height = orig_h;
901 p->out_width = out_w;
902 p->out_height = out_h;
903 if (orig_w == out_w && orig_h == out_h)
904 blizzard.vid_scaled &= ~(1 << plane);
905 else
906 blizzard.vid_scaled |= 1 << plane;
907
908 return 0;
909}
910
911static int blizzard_enable_plane(int plane, int enable)
912{
913 if (enable)
914 blizzard.enabled_planes |= 1 << plane;
915 else
916 blizzard.enabled_planes &= ~(1 << plane);
917
918 return 0;
919}
920
921static int sync_handler(struct blizzard_request *req)
922{
923 complete(req->par.sync);
924 return REQ_COMPLETE;
925}
926
927static void blizzard_sync(void)
928{
929 LIST_HEAD(req_list);
930 struct blizzard_request *req;
931 struct completion comp;
932
933 req = alloc_req();
934
935 req->handler = sync_handler;
936 req->complete = NULL;
937 init_completion(&comp);
938 req->par.sync = &comp;
939
940 list_add(&req->entry, &req_list);
941 submit_req_list(&req_list);
942
943 wait_for_completion(&comp);
944}
945
946
947static void blizzard_bind_client(struct omapfb_notifier_block *nb)
948{
949 if (blizzard.update_mode == OMAPFB_MANUAL_UPDATE) {
950 omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
951 }
952}
953
954static int blizzard_set_update_mode(enum omapfb_update_mode mode)
955{
956 if (unlikely(mode != OMAPFB_MANUAL_UPDATE &&
957 mode != OMAPFB_AUTO_UPDATE &&
958 mode != OMAPFB_UPDATE_DISABLED))
959 return -EINVAL;
960
961 if (mode == blizzard.update_mode)
962 return 0;
963
964 dev_info(blizzard.fbdev->dev, "s1d1374x: setting update mode to %s\n",
965 mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
966 (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
967
968 switch (blizzard.update_mode) {
969 case OMAPFB_MANUAL_UPDATE:
970 omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_DISABLED);
971 break;
972 case OMAPFB_AUTO_UPDATE:
973 blizzard.stop_auto_update = 1;
974 del_timer_sync(&blizzard.auto_update_timer);
975 break;
976 case OMAPFB_UPDATE_DISABLED:
977 break;
978 }
979
980 blizzard.update_mode = mode;
981 blizzard_sync();
982 blizzard.stop_auto_update = 0;
983
984 switch (mode) {
985 case OMAPFB_MANUAL_UPDATE:
986 omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
987 break;
988 case OMAPFB_AUTO_UPDATE:
989 blizzard_update_window_auto(0);
990 break;
991 case OMAPFB_UPDATE_DISABLED:
992 break;
993 }
994
995 return 0;
996}
997
998static enum omapfb_update_mode blizzard_get_update_mode(void)
999{
1000 return blizzard.update_mode;
1001}
1002
1003static inline void set_extif_timings(const struct extif_timings *t)
1004{
1005 blizzard.extif->set_timings(t);
1006}
1007
1008static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
1009{
1010 int bus_tick = blizzard.extif_clk_period * div;
1011 return (ps + bus_tick - 1) / bus_tick * bus_tick;
1012}
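
[editor's note] round_to_extif_ticks() rounds a duration given in picoseconds up to the next multiple of the external-interface bus tick (clock period times divider). A standalone version of the same helper, using an invented 8000 ps clock period:

/* Sketch: the 8000 ps period and div = 2 are made-up example values. */
#include <stdio.h>

static unsigned long round_to_ticks(unsigned long ps, unsigned long period, int div)
{
	unsigned long bus_tick = period * div;

	return (ps + bus_tick - 1) / bus_tick * bus_tick;
}

int main(void)
{
	/* 12200 ps rounded up to a 16000 ps tick -> prints 16000 */
	printf("%lu\n", round_to_ticks(12200, 8000, 2));
	return 0;
}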
1013
1014static int calc_reg_timing(unsigned long sysclk, int div)
1015{
1016 struct extif_timings *t;
1017 unsigned long systim;
1018
1019 /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
1020 * AccessTime 2 ns + 12.2 ns (regs),
1021 * WEOffTime = WEOnTime + 1 ns,
1022 * REOffTime = REOnTime + 12 ns (regs),
1023 * CSOffTime = REOffTime + 1 ns
1024 * ReadCycle = 2ns + 2*SYSCLK (regs),
1025 * WriteCycle = 2*SYSCLK + 2 ns,
1026 * CSPulseWidth = 10 ns */
1027
1028 systim = 1000000000 / (sysclk / 1000);
1029 dev_dbg(blizzard.fbdev->dev,
1030 "Blizzard systim %lu ps extif_clk_period %u div %d\n",
1031 systim, blizzard.extif_clk_period, div);
1032
1033 t = &blizzard.reg_timings;
1034 memset(t, 0, sizeof(*t));
1035
1036 t->clk_div = div;
1037
1038 t->cs_on_time = 0;
1039 t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
1040 t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
1041 t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
1042 t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
1043 t->re_off_time = round_to_extif_ticks(t->re_on_time + 13000, div);
1044 t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
1045 t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
1046 if (t->we_cycle_time < t->we_off_time)
1047 t->we_cycle_time = t->we_off_time;
1048 t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
1049 if (t->re_cycle_time < t->re_off_time)
1050 t->re_cycle_time = t->re_off_time;
1051 t->cs_pulse_width = 0;
1052
1053 dev_dbg(blizzard.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
1054 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
1055 dev_dbg(blizzard.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
1056 t->we_on_time, t->we_off_time, t->re_cycle_time,
1057 t->we_cycle_time);
1058 dev_dbg(blizzard.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
1059 t->access_time, t->cs_pulse_width);
1060
1061 return blizzard.extif->convert_timings(t);
1062}
1063
1064static int calc_lut_timing(unsigned long sysclk, int div)
1065{
1066 struct extif_timings *t;
1067 unsigned long systim;
1068
1069 /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
1070 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
1071 * WEOffTime = WEOnTime + 1 ns,
1072 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
1073 * CSOffTime = REOffTime + 1 ns
1074 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
1075 * WriteCycle = 2*SYSCLK + 2 ns,
1076 * CSPulseWidth = 10 ns */
1077
1078 systim = 1000000000 / (sysclk / 1000);
1079 dev_dbg(blizzard.fbdev->dev,
1080 "Blizzard systim %lu ps extif_clk_period %u div %d\n",
1081 systim, blizzard.extif_clk_period, div);
1082
1083 t = &blizzard.lut_timings;
1084 memset(t, 0, sizeof(*t));
1085
1086 t->clk_div = div;
1087
1088 t->cs_on_time = 0;
1089 t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
1090 t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
1091 t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
1092 26000, div);
1093 t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
1094 t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
1095 26000, div);
1096 t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
1097 t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
1098 if (t->we_cycle_time < t->we_off_time)
1099 t->we_cycle_time = t->we_off_time;
1100 t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
1101 if (t->re_cycle_time < t->re_off_time)
1102 t->re_cycle_time = t->re_off_time;
1103 t->cs_pulse_width = 0;
1104
1105 dev_dbg(blizzard.fbdev->dev,
1106 "[lut]cson %d csoff %d reon %d reoff %d\n",
1107 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
1108 dev_dbg(blizzard.fbdev->dev,
1109 "[lut]weon %d weoff %d recyc %d wecyc %d\n",
1110 t->we_on_time, t->we_off_time, t->re_cycle_time,
1111 t->we_cycle_time);
1112 dev_dbg(blizzard.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
1113 t->access_time, t->cs_pulse_width);
1114
1115 return blizzard.extif->convert_timings(t);
1116}
1117
1118static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
1119{
1120 int max_clk_div;
1121 int div;
1122
1123 blizzard.extif->get_clk_info(&blizzard.extif_clk_period, &max_clk_div);
1124 for (div = 1; div <= max_clk_div; div++) {
1125 if (calc_reg_timing(sysclk, div) == 0)
1126 break;
1127 }
1128 if (div > max_clk_div) {
1129 dev_dbg(blizzard.fbdev->dev, "reg timing failed\n");
1130 goto err;
1131 }
1132 *extif_mem_div = div;
1133
1134 for (div = 1; div <= max_clk_div; div++) {
1135 if (calc_lut_timing(sysclk, div) == 0)
1136 break;
1137 }
1138
1139 if (div > max_clk_div)
1140 goto err;
1141
1142 blizzard.extif_clk_div = div;
1143
1144 return 0;
1145err:
1146 dev_err(blizzard.fbdev->dev, "can't setup timings\n");
1147 return -1;
1148}
1149
1150static void calc_blizzard_clk_rates(unsigned long ext_clk,
1151 unsigned long *sys_clk, unsigned long *pix_clk)
1152{
1153 int pix_clk_src;
1154 int sys_div = 0, sys_mul = 0;
1155 int pix_div;
1156
1157 pix_clk_src = blizzard_read_reg(BLIZZARD_CLK_SRC);
1158 pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
1159 if ((pix_clk_src & (0x3 << 1)) == 0) {
1160 /* Source is the PLL */
1161 sys_div = (blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x3f) + 1;
1162 sys_mul = blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_0);
1163 sys_mul |= ((blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_1)
1164 & 0x0f) << 11);
1165 *sys_clk = ext_clk * sys_mul / sys_div;
1166 } else /* else source is ext clk, or oscillator */
1167 *sys_clk = ext_clk;
1168
1169 *pix_clk = *sys_clk / pix_div; /* HZ */
1170 dev_dbg(blizzard.fbdev->dev,
1171 "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
1172 ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
1173 dev_dbg(blizzard.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
1174 *sys_clk, *pix_clk);
1175}
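
[editor's note] When the pixel clock source is the PLL, the controller's system clock follows sys_clk = ext_clk * sys_mul / sys_div, and the pixel clock is sys_clk / pix_div. A worked example with invented register values (they do not correspond to any real board):

/* Illustrative only: multiplier, dividers and reference clock are invented. */
#include <stdio.h>

int main(void)
{
	unsigned long ext_clk = 12000000;	/* 12 MHz reference, assumed */
	int sys_mul = 14, sys_div = 2;		/* would come from the PLL registers */
	int pix_div = 2;			/* from BLIZZARD_CLK_SRC */
	unsigned long sys_clk = ext_clk * sys_mul / sys_div;
	unsigned long pix_clk = sys_clk / pix_div;

	printf("sys_clk %lu Hz, pix_clk %lu Hz\n", sys_clk, pix_clk);
	return 0;
}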
1176
1177static int setup_tearsync(unsigned long pix_clk, int extif_div)
1178{
1179 int hdisp, vdisp;
1180 int hndp, vndp;
1181 int hsw, vsw;
1182 int hs, vs;
1183 int hs_pol_inv, vs_pol_inv;
1184 int use_hsvs, use_ndp;
1185 u8 b;
1186
1187 hsw = blizzard_read_reg(BLIZZARD_HSW);
1188 vsw = blizzard_read_reg(BLIZZARD_VSW);
1189 hs_pol_inv = !(hsw & 0x80);
1190 vs_pol_inv = !(vsw & 0x80);
1191 hsw = hsw & 0x7f;
1192 vsw = vsw & 0x3f;
1193
1194 hdisp = blizzard_read_reg(BLIZZARD_HDISP) * 8;
1195 vdisp = blizzard_read_reg(BLIZZARD_VDISP0) +
1196 ((blizzard_read_reg(BLIZZARD_VDISP1) & 0x3) << 8);
1197
1198 hndp = blizzard_read_reg(BLIZZARD_HNDP) & 0x3f;
1199 vndp = blizzard_read_reg(BLIZZARD_VNDP);
1200
1201 /* time to transfer one pixel (16bpp) in ps */
1202 blizzard.pix_tx_time = blizzard.reg_timings.we_cycle_time;
1203 if (blizzard.extif->get_max_tx_rate != NULL) {
1204		/* The external interface might have a rate limitation;
1205		 * if so, it limits how fast we can push pixels out.
1206 */
1207 unsigned long min_tx_time;
1208 unsigned long max_tx_rate = blizzard.extif->get_max_tx_rate();
1209
1210 dev_dbg(blizzard.fbdev->dev, "max_tx_rate %ld HZ\n",
1211 max_tx_rate);
1212 min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */
1213 if (blizzard.pix_tx_time < min_tx_time)
1214 blizzard.pix_tx_time = min_tx_time;
1215 }
1216
1217 /* time to update one line in ps */
1218 blizzard.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
1219 blizzard.line_upd_time *= 1000;
1220 if (hdisp * blizzard.pix_tx_time > blizzard.line_upd_time)
1221 /* transfer speed too low, we might have to use both
1222 * HS and VS */
1223 use_hsvs = 1;
1224 else
1225 /* decent transfer speed, we'll always use only VS */
1226 use_hsvs = 0;
1227
1228 if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
1229 /* HS or'ed with VS doesn't work, use the active high
1230 * TE signal based on HNDP / VNDP */
1231 use_ndp = 1;
1232 hs_pol_inv = 0;
1233 vs_pol_inv = 0;
1234 hs = hndp;
1235 vs = vndp;
1236 } else {
1237 /* Use HS or'ed with VS as a TE signal if both are needed
1238 * or VNDP if only vsync is needed. */
1239 use_ndp = 0;
1240 hs = hsw;
1241 vs = vsw;
1242 if (!use_hsvs) {
1243 hs_pol_inv = 0;
1244 vs_pol_inv = 0;
1245 }
1246 }
1247
1248 hs = hs * 1000000 / (pix_clk / 1000); /* ps */
1249 hs *= 1000;
1250
1251 vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */
1252 vs *= 1000;
1253
1254 if (vs <= hs)
1255 return -EDOM;
1256 /* set VS to 120% of HS to minimize VS detection time */
1257 vs = hs * 12 / 10;
1258 /* minimize HS too */
1259 if (hs > 10000)
1260 hs = 10000;
1261
1262 b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
1263 b &= ~0x3;
1264 b |= use_hsvs ? 1 : 0;
1265 b |= (use_ndp && use_hsvs) ? 0 : 2;
1266 blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
1267
1268 blizzard.vsync_only = !use_hsvs;
1269
1270 dev_dbg(blizzard.fbdev->dev,
1271 "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
1272 pix_clk, blizzard.pix_tx_time, blizzard.line_upd_time);
1273 dev_dbg(blizzard.fbdev->dev,
1274 "hs %d ps vs %d ps mode %d vsync_only %d\n",
1275 hs, vs, b & 0x3, !use_hsvs);
1276
1277 return blizzard.extif->setup_tearsync(1, hs, vs,
1278 hs_pol_inv, vs_pol_inv,
1279 extif_div);
1280}
1281
1282static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
1283{
1284 blizzard.int_ctrl->get_caps(plane, caps);
1285 caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
1286 OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE |
1287 OMAPFB_CAPS_WINDOW_SCALE |
1288 OMAPFB_CAPS_WINDOW_OVERLAY;
1289 if (blizzard.te_connected)
1290 caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
1291 caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
1292 (1 << OMAPFB_COLOR_YUV420);
1293}
1294
1295static void _save_regs(struct blizzard_reg_list *list, int cnt)
1296{
1297 int i;
1298
1299 for (i = 0; i < cnt; i++, list++) {
1300 int reg;
1301 for (reg = list->start; reg <= list->end; reg += 2)
1302 blizzard_reg_cache[reg / 2] = blizzard_read_reg(reg);
1303 }
1304}
1305
1306static void _restore_regs(struct blizzard_reg_list *list, int cnt)
1307{
1308 int i;
1309
1310 for (i = 0; i < cnt; i++, list++) {
1311 int reg;
1312 for (reg = list->start; reg <= list->end; reg += 2)
1313 blizzard_write_reg(reg, blizzard_reg_cache[reg / 2]);
1314 }
1315}
1316
1317static void blizzard_save_all_regs(void)
1318{
1319 _save_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
1320 _save_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
1321}
1322
1323static void blizzard_restore_pll_regs(void)
1324{
1325 _restore_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
1326}
1327
1328static void blizzard_restore_gen_regs(void)
1329{
1330 _restore_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
1331}
1332
1333static void blizzard_suspend(void)
1334{
1335 u32 l;
1336 unsigned long tmo;
1337
1338 if (blizzard.last_color_mode) {
1339 update_full_screen();
1340 blizzard_sync();
1341 }
1342 blizzard.update_mode_before_suspend = blizzard.update_mode;
1343 /* the following will disable clocks as well */
1344 blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
1345
1346 blizzard_save_all_regs();
1347
1348 blizzard_stop_sdram();
1349
1350 l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
1351 /* Standby, Sleep. We assume we use an external clock. */
1352 l |= 0x03;
1353 blizzard_write_reg(BLIZZARD_POWER_SAVE, l);
1354
1355 tmo = jiffies + msecs_to_jiffies(100);
1356 while (!(blizzard_read_reg(BLIZZARD_PLL_MODE) & (1 << 1))) {
1357 if (time_after(jiffies, tmo)) {
1358 dev_err(blizzard.fbdev->dev,
1359 "s1d1374x: sleep timeout, stopping PLL manually\n");
1360 l = blizzard_read_reg(BLIZZARD_PLL_MODE);
1361 l &= ~0x03;
1362 /* Disable PLL, counter function */
1363 l |= 0x2;
1364 blizzard_write_reg(BLIZZARD_PLL_MODE, l);
1365 break;
1366 }
1367 msleep(1);
1368 }
1369
1370 if (blizzard.power_down != NULL)
1371 blizzard.power_down(blizzard.fbdev->dev);
1372}
1373
1374static void blizzard_resume(void)
1375{
1376 u32 l;
1377
1378 if (blizzard.power_up != NULL)
1379 blizzard.power_up(blizzard.fbdev->dev);
1380
1381 l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
1382 /* Standby, Sleep */
1383 l &= ~0x03;
1384 blizzard_write_reg(BLIZZARD_POWER_SAVE, l);
1385
1386 blizzard_restore_pll_regs();
1387 l = blizzard_read_reg(BLIZZARD_PLL_MODE);
1388 l &= ~0x03;
1389 /* Enable PLL, counter function */
1390 l |= 0x1;
1391 blizzard_write_reg(BLIZZARD_PLL_MODE, l);
1392
1393 while (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & (1 << 7)))
1394 msleep(1);
1395
1396 blizzard_restart_sdram();
1397
1398 blizzard_restore_gen_regs();
1399
1400 /* Enable display */
1401 blizzard_write_reg(BLIZZARD_DISPLAY_MODE, 0x01);
1402
1403 /* the following will enable clocks as necessary */
1404 blizzard_set_update_mode(blizzard.update_mode_before_suspend);
1405
1406 /* Force a background update */
1407 blizzard.zoom_on = 1;
1408 update_full_screen();
1409 blizzard_sync();
1410}
1411
1412static int blizzard_init(struct omapfb_device *fbdev, int ext_mode,
1413 struct omapfb_mem_desc *req_vram)
1414{
1415 int r = 0, i;
1416 u8 rev, conf;
1417 unsigned long ext_clk;
1418 int extif_div;
1419 unsigned long sys_clk, pix_clk;
1420 struct omapfb_platform_data *omapfb_conf;
1421 struct blizzard_platform_data *ctrl_conf;
1422
1423 blizzard.fbdev = fbdev;
1424
1425 BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
1426
1427 blizzard.fbdev = fbdev;
1428 blizzard.extif = fbdev->ext_if;
1429 blizzard.int_ctrl = fbdev->int_ctrl;
1430
1431 omapfb_conf = fbdev->dev->platform_data;
1432 ctrl_conf = omapfb_conf->ctrl_platform_data;
1433 if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
1434 dev_err(fbdev->dev, "s1d1374x: missing platform data\n");
1435 r = -ENOENT;
1436 goto err1;
1437 }
1438
1439 blizzard.power_down = ctrl_conf->power_down;
1440 blizzard.power_up = ctrl_conf->power_up;
1441
1442 spin_lock_init(&blizzard.req_lock);
1443
1444 if ((r = blizzard.int_ctrl->init(fbdev, 1, req_vram)) < 0)
1445 goto err1;
1446
1447 if ((r = blizzard.extif->init(fbdev)) < 0)
1448 goto err2;
1449
1450 blizzard_ctrl.set_color_key = blizzard.int_ctrl->set_color_key;
1451 blizzard_ctrl.get_color_key = blizzard.int_ctrl->get_color_key;
1452 blizzard_ctrl.setup_mem = blizzard.int_ctrl->setup_mem;
1453 blizzard_ctrl.mmap = blizzard.int_ctrl->mmap;
1454
1455 ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
1456 if ((r = calc_extif_timings(ext_clk, &extif_div)) < 0)
1457 goto err3;
1458
1459 set_extif_timings(&blizzard.reg_timings);
1460
1461 if (blizzard.power_up != NULL)
1462 blizzard.power_up(fbdev->dev);
1463
1464 calc_blizzard_clk_rates(ext_clk, &sys_clk, &pix_clk);
1465
1466 if ((r = calc_extif_timings(sys_clk, &extif_div)) < 0)
1467 goto err3;
1468 set_extif_timings(&blizzard.reg_timings);
1469
1470 if (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x80)) {
1471 dev_err(fbdev->dev,
1472 "controller not initialized by the bootloader\n");
1473 r = -ENODEV;
1474 goto err3;
1475 }
1476
1477 if (ctrl_conf->te_connected) {
1478 if ((r = setup_tearsync(pix_clk, extif_div)) < 0)
1479 goto err3;
1480 blizzard.te_connected = 1;
1481 }
1482
1483 rev = blizzard_read_reg(BLIZZARD_REV_CODE);
1484 conf = blizzard_read_reg(BLIZZARD_CONFIG);
1485
1486 switch (rev & 0xfc) {
1487 case 0x9c:
1488 blizzard.version = BLIZZARD_VERSION_S1D13744;
1489 pr_info("omapfb: s1d13744 LCD controller rev %d "
1490 "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
1491 break;
1492 case 0xa4:
1493 blizzard.version = BLIZZARD_VERSION_S1D13745;
1494 pr_info("omapfb: s1d13745 LCD controller rev %d "
1495 "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
1496 break;
1497 default:
1498 dev_err(fbdev->dev, "invalid s1d1374x revision %02x\n",
1499 rev);
1500 r = -ENODEV;
1501 goto err3;
1502 }
1503
1504 blizzard.max_transmit_size = blizzard.extif->max_transmit_size;
1505
1506 blizzard.update_mode = OMAPFB_UPDATE_DISABLED;
1507
1508 blizzard.auto_update_window.x = 0;
1509 blizzard.auto_update_window.y = 0;
1510 blizzard.auto_update_window.width = fbdev->panel->x_res;
1511 blizzard.auto_update_window.height = fbdev->panel->y_res;
1512 blizzard.auto_update_window.out_x = 0;
1513	blizzard.auto_update_window.out_y = 0;
1514 blizzard.auto_update_window.out_width = fbdev->panel->x_res;
1515 blizzard.auto_update_window.out_height = fbdev->panel->y_res;
1516 blizzard.auto_update_window.format = 0;
1517
1518 blizzard.screen_width = fbdev->panel->x_res;
1519 blizzard.screen_height = fbdev->panel->y_res;
1520
1521 init_timer(&blizzard.auto_update_timer);
1522 blizzard.auto_update_timer.function = blizzard_update_window_auto;
1523 blizzard.auto_update_timer.data = 0;
1524
1525 INIT_LIST_HEAD(&blizzard.free_req_list);
1526 INIT_LIST_HEAD(&blizzard.pending_req_list);
1527 for (i = 0; i < ARRAY_SIZE(blizzard.req_pool); i++)
1528 list_add(&blizzard.req_pool[i].entry, &blizzard.free_req_list);
1529 BUG_ON(i <= IRQ_REQ_POOL_SIZE);
1530 sema_init(&blizzard.req_sema, i - IRQ_REQ_POOL_SIZE);
1531
1532 return 0;
1533err3:
1534 if (blizzard.power_down != NULL)
1535 blizzard.power_down(fbdev->dev);
1536 blizzard.extif->cleanup();
1537err2:
1538 blizzard.int_ctrl->cleanup();
1539err1:
1540 return r;
1541}
1542
1543static void blizzard_cleanup(void)
1544{
1545 blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
1546 blizzard.extif->cleanup();
1547 blizzard.int_ctrl->cleanup();
1548 if (blizzard.power_down != NULL)
1549 blizzard.power_down(blizzard.fbdev->dev);
1550}
1551
1552struct lcd_ctrl blizzard_ctrl = {
1553 .name = "blizzard",
1554 .init = blizzard_init,
1555 .cleanup = blizzard_cleanup,
1556 .bind_client = blizzard_bind_client,
1557 .get_caps = blizzard_get_caps,
1558 .set_update_mode = blizzard_set_update_mode,
1559 .get_update_mode = blizzard_get_update_mode,
1560 .setup_plane = blizzard_setup_plane,
1561 .set_scale = blizzard_set_scale,
1562 .enable_plane = blizzard_enable_plane,
1563 .update_window = blizzard_update_window_async,
1564 .sync = blizzard_sync,
1565 .suspend = blizzard_suspend,
1566 .resume = blizzard_resume,
1567};
1568
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
new file mode 100644
index 000000000000..f4c23434de6f
--- /dev/null
+++ b/drivers/video/omap/dispc.c
@@ -0,0 +1,1502 @@
1/*
2 * OMAP2 display controller support
3 *
4 * Copyright (C) 2005 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#include <linux/kernel.h>
22#include <linux/dma-mapping.h>
23#include <linux/vmalloc.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26
27#include <asm/arch/sram.h>
28#include <asm/arch/omapfb.h>
29#include <asm/arch/board.h>
30
31#include "dispc.h"
32
33#define MODULE_NAME "dispc"
34
35#define DSS_BASE 0x48050000
36#define DSS_SYSCONFIG 0x0010
37
38#define DISPC_BASE 0x48050400
39
40/* DISPC common */
41#define DISPC_REVISION 0x0000
42#define DISPC_SYSCONFIG 0x0010
43#define DISPC_SYSSTATUS 0x0014
44#define DISPC_IRQSTATUS 0x0018
45#define DISPC_IRQENABLE 0x001C
46#define DISPC_CONTROL 0x0040
47#define DISPC_CONFIG 0x0044
48#define DISPC_CAPABLE 0x0048
49#define DISPC_DEFAULT_COLOR0 0x004C
50#define DISPC_DEFAULT_COLOR1 0x0050
51#define DISPC_TRANS_COLOR0 0x0054
52#define DISPC_TRANS_COLOR1 0x0058
53#define DISPC_LINE_STATUS 0x005C
54#define DISPC_LINE_NUMBER 0x0060
55#define DISPC_TIMING_H 0x0064
56#define DISPC_TIMING_V 0x0068
57#define DISPC_POL_FREQ 0x006C
58#define DISPC_DIVISOR 0x0070
59#define DISPC_SIZE_DIG 0x0078
60#define DISPC_SIZE_LCD 0x007C
61
62#define DISPC_DATA_CYCLE1 0x01D4
63#define DISPC_DATA_CYCLE2 0x01D8
64#define DISPC_DATA_CYCLE3 0x01DC
65
66/* DISPC GFX plane */
67#define DISPC_GFX_BA0 0x0080
68#define DISPC_GFX_BA1 0x0084
69#define DISPC_GFX_POSITION 0x0088
70#define DISPC_GFX_SIZE 0x008C
71#define DISPC_GFX_ATTRIBUTES 0x00A0
72#define DISPC_GFX_FIFO_THRESHOLD 0x00A4
73#define DISPC_GFX_FIFO_SIZE_STATUS 0x00A8
74#define DISPC_GFX_ROW_INC 0x00AC
75#define DISPC_GFX_PIXEL_INC 0x00B0
76#define DISPC_GFX_WINDOW_SKIP 0x00B4
77#define DISPC_GFX_TABLE_BA 0x00B8
78
79/* DISPC Video plane 1/2 */
80#define DISPC_VID1_BASE 0x00BC
81#define DISPC_VID2_BASE 0x014C
82
83/* Offsets into DISPC_VID1/2_BASE */
84#define DISPC_VID_BA0 0x0000
85#define DISPC_VID_BA1 0x0004
86#define DISPC_VID_POSITION 0x0008
87#define DISPC_VID_SIZE 0x000C
88#define DISPC_VID_ATTRIBUTES 0x0010
89#define DISPC_VID_FIFO_THRESHOLD 0x0014
90#define DISPC_VID_FIFO_SIZE_STATUS 0x0018
91#define DISPC_VID_ROW_INC 0x001C
92#define DISPC_VID_PIXEL_INC 0x0020
93#define DISPC_VID_FIR 0x0024
94#define DISPC_VID_PICTURE_SIZE 0x0028
95#define DISPC_VID_ACCU0 0x002C
96#define DISPC_VID_ACCU1 0x0030
97
98/* 8 elements in 8 byte increments */
99#define DISPC_VID_FIR_COEF_H0 0x0034
100/* 8 elements in 8 byte increments */
101#define DISPC_VID_FIR_COEF_HV0 0x0038
102/* 5 elements in 4 byte increments */
103#define DISPC_VID_CONV_COEF0 0x0074
104
105#define DISPC_IRQ_FRAMEMASK 0x0001
106#define DISPC_IRQ_VSYNC 0x0002
107#define DISPC_IRQ_EVSYNC_EVEN 0x0004
108#define DISPC_IRQ_EVSYNC_ODD 0x0008
109#define DISPC_IRQ_ACBIAS_COUNT_STAT 0x0010
110#define DISPC_IRQ_PROG_LINE_NUM 0x0020
111#define DISPC_IRQ_GFX_FIFO_UNDERFLOW 0x0040
112#define DISPC_IRQ_GFX_END_WIN 0x0080
113#define DISPC_IRQ_PAL_GAMMA_MASK 0x0100
114#define DISPC_IRQ_OCP_ERR 0x0200
115#define DISPC_IRQ_VID1_FIFO_UNDERFLOW 0x0400
116#define DISPC_IRQ_VID1_END_WIN 0x0800
117#define DISPC_IRQ_VID2_FIFO_UNDERFLOW 0x1000
118#define DISPC_IRQ_VID2_END_WIN 0x2000
119#define DISPC_IRQ_SYNC_LOST 0x4000
120
121#define DISPC_IRQ_MASK_ALL 0x7fff
122
123#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
124 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
125 DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
126 DISPC_IRQ_SYNC_LOST)
127
128#define RFBI_CONTROL 0x48050040
129
130#define MAX_PALETTE_SIZE (256 * 16)
131
132#define FLD_MASK(pos, len) (((1 << len) - 1) << pos)
133
134#define MOD_REG_FLD(reg, mask, val) \
135 dispc_write_reg((reg), (dispc_read_reg(reg) & ~(mask)) | (val));
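
[editor's note] FLD_MASK() builds a mask of len contiguous bits starting at bit pos, and MOD_REG_FLD() performs a read-modify-write of just that field in a DISPC register. The same pattern, sketched against a plain variable instead of a memory-mapped register:

/* Sketch of the field read-modify-write done by MOD_REG_FLD above. */
#include <stdio.h>
#include <stdint.h>

#define FLD_MASK(pos, len)	(((1u << (len)) - 1) << (pos))

int main(void)
{
	uint32_t reg = 0xffffffff;

	/* clear bits 8..9 and write the 2-bit code 0x2 there */
	reg = (reg & ~FLD_MASK(8, 2)) | (0x2u << 8);
	printf("0x%08x\n", reg);	/* prints 0xfffffeff */
	return 0;
}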
136
137#define OMAP2_SRAM_START 0x40200000
138/* Maximum size, in reality this is smaller if SRAM is partially locked. */
139#define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
140
141/* We support the SDRAM / SRAM types. See OMAPFB_PLANE_MEMTYPE_* in omapfb.h */
142#define DISPC_MEMTYPE_NUM 2
143
144#define RESMAP_SIZE(_page_cnt) \
145 ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
146#define RESMAP_PTR(_res_map, _page_nr) \
147 (((_res_map)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
148#define RESMAP_MASK(_page_nr) \
149 (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
150
151struct resmap {
152 unsigned long start;
153 unsigned page_cnt;
154 unsigned long *map;
155};
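
[editor's note] struct resmap tracks which pages of a memory region are reserved, one bit per page; the RESMAP_* macros above compute the map size, the word that holds a given page's bit, and the bit mask within that word. A standalone sketch of the same bookkeeping (the page count is illustrative):

/* Sketch only: user-space version of the page reservation bitmap. */
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * 8)

static void set_page(unsigned long *map, unsigned page_nr)
{
	map[page_nr / BITS_PER_LONG] |= 1UL << (page_nr % BITS_PER_LONG);
}

static int page_is_set(const unsigned long *map, unsigned page_nr)
{
	return !!(map[page_nr / BITS_PER_LONG] & (1UL << (page_nr % BITS_PER_LONG)));
}

int main(void)
{
	unsigned page_cnt = 100;				/* e.g. 100 pages of video memory */
	size_t bytes = (page_cnt + BITS_PER_LONG - 1) / 8;	/* as RESMAP_SIZE() */
	unsigned long *map = calloc(1, bytes);

	set_page(map, 42);
	printf("page 42 reserved: %d, page 43 reserved: %d\n",
	       page_is_set(map, 42), page_is_set(map, 43));
	free(map);
	return 0;
}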
156
157static struct {
158 u32 base;
159
160 struct omapfb_mem_desc mem_desc;
161 struct resmap *res_map[DISPC_MEMTYPE_NUM];
162 atomic_t map_count[OMAPFB_PLANE_NUM];
163
164 dma_addr_t palette_paddr;
165 void *palette_vaddr;
166
167 int ext_mode;
168
169 unsigned long enabled_irqs;
170 void (*irq_callback)(void *);
171 void *irq_callback_data;
172 struct completion frame_done;
173
174 int fir_hinc[OMAPFB_PLANE_NUM];
175 int fir_vinc[OMAPFB_PLANE_NUM];
176
177 struct clk *dss_ick, *dss1_fck;
178 struct clk *dss_54m_fck;
179
180 enum omapfb_update_mode update_mode;
181 struct omapfb_device *fbdev;
182
183 struct omapfb_color_key color_key;
184} dispc;
185
186static void enable_lcd_clocks(int enable);
187
188static inline void dispc_write_reg(int idx, u32 val)
189{
190 __raw_writel(val, dispc.base + idx);
191}
192
193static inline u32 dispc_read_reg(int idx)
194{
195 u32 l = __raw_readl(dispc.base + idx);
196 return l;
197}
198
199/* Select RFBI or bypass mode */
200static void enable_rfbi_mode(int enable)
201{
202 u32 l;
203
204 l = dispc_read_reg(DISPC_CONTROL);
205 /* Enable RFBI, GPIO0/1 */
206 l &= ~((1 << 11) | (1 << 15) | (1 << 16));
207 l |= enable ? (1 << 11) : 0;
208 /* RFBI En: GPIO0/1=10 RFBI Dis: GPIO0/1=11 */
209 l |= 1 << 15;
210 l |= enable ? 0 : (1 << 16);
211 dispc_write_reg(DISPC_CONTROL, l);
212
213 /* Set bypass mode in RFBI module */
214 l = __raw_readl(io_p2v(RFBI_CONTROL));
215 l |= enable ? 0 : (1 << 1);
216 __raw_writel(l, io_p2v(RFBI_CONTROL));
217}
218
219static void set_lcd_data_lines(int data_lines)
220{
221 u32 l;
222 int code = 0;
223
224 switch (data_lines) {
225 case 12:
226 code = 0;
227 break;
228 case 16:
229 code = 1;
230 break;
231 case 18:
232 code = 2;
233 break;
234 case 24:
235 code = 3;
236 break;
237 default:
238 BUG();
239 }
240
241 l = dispc_read_reg(DISPC_CONTROL);
242 l &= ~(0x03 << 8);
243 l |= code << 8;
244 dispc_write_reg(DISPC_CONTROL, l);
245}
246
247static void set_load_mode(int mode)
248{
249 BUG_ON(mode & ~(DISPC_LOAD_CLUT_ONLY | DISPC_LOAD_FRAME_ONLY |
250 DISPC_LOAD_CLUT_ONCE_FRAME));
251 MOD_REG_FLD(DISPC_CONFIG, 0x03 << 1, mode << 1);
252}
253
254void omap_dispc_set_lcd_size(int x, int y)
255{
256 BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
257 enable_lcd_clocks(1);
258 MOD_REG_FLD(DISPC_SIZE_LCD, FLD_MASK(16, 11) | FLD_MASK(0, 11),
259 ((y - 1) << 16) | (x - 1));
260 enable_lcd_clocks(0);
261}
262EXPORT_SYMBOL(omap_dispc_set_lcd_size);
263
264void omap_dispc_set_digit_size(int x, int y)
265{
266 BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
267 enable_lcd_clocks(1);
268 MOD_REG_FLD(DISPC_SIZE_DIG, FLD_MASK(16, 11) | FLD_MASK(0, 11),
269 ((y - 1) << 16) | (x - 1));
270 enable_lcd_clocks(0);
271}
272EXPORT_SYMBOL(omap_dispc_set_digit_size);
273
274static void setup_plane_fifo(int plane, int ext_mode)
275{
276 const u32 ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
277 DISPC_VID1_BASE + DISPC_VID_FIFO_THRESHOLD,
278 DISPC_VID2_BASE + DISPC_VID_FIFO_THRESHOLD };
279 const u32 fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
280 DISPC_VID1_BASE + DISPC_VID_FIFO_SIZE_STATUS,
281 DISPC_VID2_BASE + DISPC_VID_FIFO_SIZE_STATUS };
282 int low, high;
283 u32 l;
284
285 BUG_ON(plane > 2);
286
287 l = dispc_read_reg(fsz_reg[plane]);
288 l &= FLD_MASK(0, 9);
289 if (ext_mode) {
290 low = l * 3 / 4;
291 high = l;
292 } else {
293 low = l / 4;
294 high = l * 3 / 4;
295 }
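	/*
	 * Program the low/high FIFO watermarks: the plane FIFO is refilled
	 * once its fill level drops below the low threshold and the refill
	 * stops at the high threshold. In RFBI (ext) mode the FIFO is kept
	 * fuller (3/4 .. full) than in bypass mode (1/4 .. 3/4).
	 */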
296 MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 9) | FLD_MASK(0, 9),
297 (high << 16) | low);
298}
299
300void omap_dispc_enable_lcd_out(int enable)
301{
302 enable_lcd_clocks(1);
303 MOD_REG_FLD(DISPC_CONTROL, 1, enable ? 1 : 0);
304 enable_lcd_clocks(0);
305}
306EXPORT_SYMBOL(omap_dispc_enable_lcd_out);
307
308void omap_dispc_enable_digit_out(int enable)
309{
310 enable_lcd_clocks(1);
311 MOD_REG_FLD(DISPC_CONTROL, 1 << 1, enable ? 1 << 1 : 0);
312 enable_lcd_clocks(0);
313}
314EXPORT_SYMBOL(omap_dispc_enable_digit_out);
315
316static inline int _setup_plane(int plane, int channel_out,
317 u32 paddr, int screen_width,
318 int pos_x, int pos_y, int width, int height,
319 int color_mode)
320{
321 const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
322 DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
323 DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
324 const u32 ba_reg[] = { DISPC_GFX_BA0, DISPC_VID1_BASE + DISPC_VID_BA0,
325 DISPC_VID2_BASE + DISPC_VID_BA0 };
326 const u32 ps_reg[] = { DISPC_GFX_POSITION,
327 DISPC_VID1_BASE + DISPC_VID_POSITION,
328 DISPC_VID2_BASE + DISPC_VID_POSITION };
329 const u32 sz_reg[] = { DISPC_GFX_SIZE,
330 DISPC_VID1_BASE + DISPC_VID_PICTURE_SIZE,
331 DISPC_VID2_BASE + DISPC_VID_PICTURE_SIZE };
332 const u32 ri_reg[] = { DISPC_GFX_ROW_INC,
333 DISPC_VID1_BASE + DISPC_VID_ROW_INC,
334 DISPC_VID2_BASE + DISPC_VID_ROW_INC };
335 const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
336 DISPC_VID2_BASE + DISPC_VID_SIZE };
337
338 int chout_shift, burst_shift;
339 int chout_val;
340 int color_code;
341 int bpp;
342 int cconv_en;
343 int set_vsize;
344 u32 l;
345
346#ifdef VERBOSE
347 dev_dbg(dispc.fbdev->dev, "plane %d channel %d paddr %#08x scr_width %d"
348 " pos_x %d pos_y %d width %d height %d color_mode %d\n",
349 plane, channel_out, paddr, screen_width, pos_x, pos_y,
350 width, height, color_mode);
351#endif
352
353 set_vsize = 0;
354 switch (plane) {
355 case OMAPFB_PLANE_GFX:
356 burst_shift = 6;
357 chout_shift = 8;
358 break;
359 case OMAPFB_PLANE_VID1:
360 case OMAPFB_PLANE_VID2:
361 burst_shift = 14;
362 chout_shift = 16;
363 set_vsize = 1;
364 break;
365 default:
366 return -EINVAL;
367 }
368
369 switch (channel_out) {
370 case OMAPFB_CHANNEL_OUT_LCD:
371 chout_val = 0;
372 break;
373 case OMAPFB_CHANNEL_OUT_DIGIT:
374 chout_val = 1;
375 break;
376 default:
377 return -EINVAL;
378 }
379
380 cconv_en = 0;
381 switch (color_mode) {
382 case OMAPFB_COLOR_RGB565:
383 color_code = DISPC_RGB_16_BPP;
384 bpp = 16;
385 break;
386 case OMAPFB_COLOR_YUV422:
387 if (plane == 0)
388 return -EINVAL;
389 color_code = DISPC_UYVY_422;
390 cconv_en = 1;
391 bpp = 16;
392 break;
393 case OMAPFB_COLOR_YUY422:
394 if (plane == 0)
395 return -EINVAL;
396 color_code = DISPC_YUV2_422;
397 cconv_en = 1;
398 bpp = 16;
399 break;
400 default:
401 return -EINVAL;
402 }
403
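	/*
	 * Program the plane ATTRIBUTES register: the pixel format code goes
	 * into bits 4:1, the color-space conversion enable into bit 9, the
	 * DMA burst size into the plane-specific burst field, and the
	 * LCD/digit output selection into the channel-out bit.
	 */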
404 l = dispc_read_reg(at_reg[plane]);
405
406 l &= ~(0x0f << 1);
407 l |= color_code << 1;
408 l &= ~(1 << 9);
409 l |= cconv_en << 9;
410
411 l &= ~(0x03 << burst_shift);
412 l |= DISPC_BURST_8x32 << burst_shift;
413
414 l &= ~(1 << chout_shift);
415 l |= chout_val << chout_shift;
416
417 dispc_write_reg(at_reg[plane], l);
418
419 dispc_write_reg(ba_reg[plane], paddr);
420 MOD_REG_FLD(ps_reg[plane],
421 FLD_MASK(16, 11) | FLD_MASK(0, 11), (pos_y << 16) | pos_x);
422
423 MOD_REG_FLD(sz_reg[plane], FLD_MASK(16, 11) | FLD_MASK(0, 11),
424 ((height - 1) << 16) | (width - 1));
425
426 if (set_vsize) {
427 /* Set video size if set_scale hasn't set it */
428 if (!dispc.fir_vinc[plane])
429 MOD_REG_FLD(vs_reg[plane],
430 FLD_MASK(16, 11), (height - 1) << 16);
431 if (!dispc.fir_hinc[plane])
432 MOD_REG_FLD(vs_reg[plane],
433 FLD_MASK(0, 11), width - 1);
434 }
435
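	/*
	 * ROW_INC is expressed in bytes + 1: it skips the part of each
	 * scanline that lies outside the plane window when walking the
	 * framebuffer.
	 */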
436 dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);
437
438 return height * screen_width * bpp / 8;
439}
440
441static int omap_dispc_setup_plane(int plane, int channel_out,
442 unsigned long offset,
443 int screen_width,
444 int pos_x, int pos_y, int width, int height,
445 int color_mode)
446{
447 u32 paddr;
448 int r;
449
450 if ((unsigned)plane >= dispc.mem_desc.region_cnt)
451 return -EINVAL;
452 paddr = dispc.mem_desc.region[plane].paddr + offset;
453 enable_lcd_clocks(1);
454 r = _setup_plane(plane, channel_out, paddr,
455 screen_width,
456 pos_x, pos_y, width, height, color_mode);
457 enable_lcd_clocks(0);
458 return r;
459}
460
461static void write_firh_reg(int plane, int reg, u32 value)
462{
463 u32 base;
464
465 if (plane == 1)
466 base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_H0;
467 else
468 base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_H0;
469 dispc_write_reg(base + reg * 8, value);
470}
471
472static void write_firhv_reg(int plane, int reg, u32 value)
473{
474 u32 base;
475
476 if (plane == 1)
477 base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_HV0;
478 else
479 base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_HV0;
480 dispc_write_reg(base + reg * 8, value);
481}
482
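/*
 * Load the FIR coefficients used for upsampling: eight phase entries, with
 * the horizontal coefficients going into FIR_COEF_H0..H7 and the combined
 * horizontal/vertical coefficients into FIR_COEF_HV0..HV7, both at 8-byte
 * register strides.
 */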
483static void set_upsampling_coef_table(int plane)
484{
485 const u32 coef[][2] = {
486 { 0x00800000, 0x00800000 },
487 { 0x0D7CF800, 0x037B02FF },
488 { 0x1E70F5FF, 0x0C6F05FE },
489 { 0x335FF5FE, 0x205907FB },
490 { 0xF74949F7, 0x00404000 },
491 { 0xF55F33FB, 0x075920FE },
492 { 0xF5701EFE, 0x056F0CFF },
493 { 0xF87C0DFF, 0x027B0300 },
494 };
495 int i;
496
497 for (i = 0; i < 8; i++) {
498 write_firh_reg(plane, i, coef[i][0]);
499 write_firhv_reg(plane, i, coef[i][1]);
500 }
501}
502
503static int omap_dispc_set_scale(int plane,
504 int orig_width, int orig_height,
505 int out_width, int out_height)
506{
507 const u32 at_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
508 DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
509 const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
510 DISPC_VID2_BASE + DISPC_VID_SIZE };
511 const u32 fir_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_FIR,
512 DISPC_VID2_BASE + DISPC_VID_FIR };
513
514 u32 l;
515 int fir_hinc;
516 int fir_vinc;
517
518 if ((unsigned)plane >= OMAPFB_PLANE_NUM)
519 return -ENODEV;
520
521 if (plane == OMAPFB_PLANE_GFX &&
522 (out_width != orig_width || out_height != orig_height))
523 return -EINVAL;
524
525 enable_lcd_clocks(1);
526 if (orig_width < out_width) {
527 /*
528 * Upsampling.
529 * Currently both dimensions can only be scaled in the same direction.
530 */
531 if (orig_height > out_height ||
532 orig_width * 8 < out_width ||
533 orig_height * 8 < out_height) {
534 enable_lcd_clocks(0);
535 return -EINVAL;
536 }
537 set_upsampling_coef_table(plane);
538 } else if (orig_width > out_width) {
539 /* Downsampling is not yet supported */
541
542 enable_lcd_clocks(0);
543 return -EINVAL;
544 }
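	/*
	 * The FIR increments are 1024 * input / output size, i.e. 10-bit
	 * fixed point where 1024 corresponds to a 1:1 ratio; a value of 0
	 * disables scaling for that dimension.
	 */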
545 if (!orig_width || orig_width == out_width)
546 fir_hinc = 0;
547 else
548 fir_hinc = 1024 * orig_width / out_width;
549 if (!orig_height || orig_height == out_height)
550 fir_vinc = 0;
551 else
552 fir_vinc = 1024 * orig_height / out_height;
553 dispc.fir_hinc[plane] = fir_hinc;
554 dispc.fir_vinc[plane] = fir_vinc;
555
556 MOD_REG_FLD(fir_reg[plane],
557 FLD_MASK(16, 12) | FLD_MASK(0, 12),
558 ((fir_vinc & 4095) << 16) |
559 (fir_hinc & 4095));
560
561 dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
562 "orig_height %d fir_hinc %d fir_vinc %d\n",
563 out_width, out_height, orig_width, orig_height,
564 fir_hinc, fir_vinc);
565
566 MOD_REG_FLD(vs_reg[plane],
567 FLD_MASK(16, 11) | FLD_MASK(0, 11),
568 ((out_height - 1) << 16) | (out_width - 1));
569
570 l = dispc_read_reg(at_reg[plane]);
571 l &= ~(0x03 << 5);
572 l |= fir_hinc ? (1 << 5) : 0;
573 l |= fir_vinc ? (1 << 6) : 0;
574 dispc_write_reg(at_reg[plane], l);
575
576 enable_lcd_clocks(0);
577 return 0;
578}
579
580static int omap_dispc_enable_plane(int plane, int enable)
581{
582 const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
583 DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
584 DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
585 if ((unsigned int)plane >= dispc.mem_desc.region_cnt)
586 return -EINVAL;
587
588 enable_lcd_clocks(1);
589 MOD_REG_FLD(at_reg[plane], 1, enable ? 1 : 0);
590 enable_lcd_clocks(0);
591
592 return 0;
593}
594
595static int omap_dispc_set_color_key(struct omapfb_color_key *ck)
596{
597 u32 df_reg, tr_reg;
598 int shift, val;
599
600 switch (ck->channel_out) {
601 case OMAPFB_CHANNEL_OUT_LCD:
602 df_reg = DISPC_DEFAULT_COLOR0;
603 tr_reg = DISPC_TRANS_COLOR0;
604 shift = 10;
605 break;
606 case OMAPFB_CHANNEL_OUT_DIGIT:
607 df_reg = DISPC_DEFAULT_COLOR1;
608 tr_reg = DISPC_TRANS_COLOR1;
609 shift = 12;
610 break;
611 default:
612 return -EINVAL;
613 }
614 switch (ck->key_type) {
615 case OMAPFB_COLOR_KEY_DISABLED:
616 val = 0;
617 break;
618 case OMAPFB_COLOR_KEY_GFX_DST:
619 val = 1;
620 break;
621 case OMAPFB_COLOR_KEY_VID_SRC:
622 val = 3;
623 break;
624 default:
625 return -EINVAL;
626 }
627 enable_lcd_clocks(1);
628 MOD_REG_FLD(DISPC_CONFIG, FLD_MASK(shift, 2), val << shift);
629
630 if (val != 0)
631 dispc_write_reg(tr_reg, ck->trans_key);
632 dispc_write_reg(df_reg, ck->background);
633 enable_lcd_clocks(0);
634
635 dispc.color_key = *ck;
636
637 return 0;
638}
639
640static int omap_dispc_get_color_key(struct omapfb_color_key *ck)
641{
642 *ck = dispc.color_key;
643 return 0;
644}
645
646static void load_palette(void)
647{
648}
649
650static int omap_dispc_set_update_mode(enum omapfb_update_mode mode)
651{
652 int r = 0;
653
654 if (mode != dispc.update_mode) {
655 switch (mode) {
656 case OMAPFB_AUTO_UPDATE:
657 case OMAPFB_MANUAL_UPDATE:
658 enable_lcd_clocks(1);
659 omap_dispc_enable_lcd_out(1);
660 dispc.update_mode = mode;
661 break;
662 case OMAPFB_UPDATE_DISABLED:
663 init_completion(&dispc.frame_done);
664 omap_dispc_enable_lcd_out(0);
665 if (!wait_for_completion_timeout(&dispc.frame_done,
666 msecs_to_jiffies(500))) {
667 dev_err(dispc.fbdev->dev,
668 "timeout waiting for FRAME DONE\n");
669 }
670 dispc.update_mode = mode;
671 enable_lcd_clocks(0);
672 break;
673 default:
674 r = -EINVAL;
675 }
676 }
677
678 return r;
679}
680
681static void omap_dispc_get_caps(int plane, struct omapfb_caps *caps)
682{
683 caps->ctrl |= OMAPFB_CAPS_PLANE_RELOCATE_MEM;
684 if (plane > 0)
685 caps->ctrl |= OMAPFB_CAPS_PLANE_SCALE;
686 caps->plane_color |= (1 << OMAPFB_COLOR_RGB565) |
687 (1 << OMAPFB_COLOR_YUV422) |
688 (1 << OMAPFB_COLOR_YUY422);
689 if (plane == 0)
690 caps->plane_color |= (1 << OMAPFB_COLOR_CLUT_8BPP) |
691 (1 << OMAPFB_COLOR_CLUT_4BPP) |
692 (1 << OMAPFB_COLOR_CLUT_2BPP) |
693 (1 << OMAPFB_COLOR_CLUT_1BPP) |
694 (1 << OMAPFB_COLOR_RGB444);
695}
696
697static enum omapfb_update_mode omap_dispc_get_update_mode(void)
698{
699 return dispc.update_mode;
700}
701
702static void setup_color_conv_coef(void)
703{
704 u32 mask = FLD_MASK(16, 11) | FLD_MASK(0, 11);
705 int cf1_reg = DISPC_VID1_BASE + DISPC_VID_CONV_COEF0;
706 int cf2_reg = DISPC_VID2_BASE + DISPC_VID_CONV_COEF0;
707 int at1_reg = DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES;
708 int at2_reg = DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES;
709 const struct color_conv_coef {
710 int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
711 int full_range;
712 } ctbl_bt601_5 = {
713 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
714 };
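	/*
	 * These are the usual ITU-R BT.601 (limited range) YCbCr -> RGB
	 * conversion factors scaled by 256, e.g. R = (298*Y' + 409*Cr) / 256
	 * after bias removal.
	 */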
715 const struct color_conv_coef *ct;
716#define CVAL(x, y) ((((x) & 2047) << 16) | ((y) & 2047))
717
718 ct = &ctbl_bt601_5;
719
720 MOD_REG_FLD(cf1_reg, mask, CVAL(ct->rcr, ct->ry));
721 MOD_REG_FLD(cf1_reg + 4, mask, CVAL(ct->gy, ct->rcb));
722 MOD_REG_FLD(cf1_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
723 MOD_REG_FLD(cf1_reg + 12, mask, CVAL(ct->bcr, ct->by));
724 MOD_REG_FLD(cf1_reg + 16, mask, CVAL(0, ct->bcb));
725
726 MOD_REG_FLD(cf2_reg, mask, CVAL(ct->rcr, ct->ry));
727 MOD_REG_FLD(cf2_reg + 4, mask, CVAL(ct->gy, ct->rcb));
728 MOD_REG_FLD(cf2_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
729 MOD_REG_FLD(cf2_reg + 12, mask, CVAL(ct->bcr, ct->by));
730 MOD_REG_FLD(cf2_reg + 16, mask, CVAL(0, ct->bcb));
731#undef CVAL
732
733 MOD_REG_FLD(at1_reg, (1 << 11), ct->full_range);
734 MOD_REG_FLD(at2_reg, (1 << 11), ct->full_range);
735}
736
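/*
 * Choose the logic and pixel clock dividers so that pixel clock =
 * dss1_fck / lck_div / pck_div comes as close as possible to, but not
 * above, the requested rate. TFT panels need pck_div >= 2, passive panels
 * pck_div >= 3, and both dividers saturate at 255.
 */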
737static void calc_ck_div(int is_tft, int pck, int *lck_div, int *pck_div)
738{
739 unsigned long fck, lck;
740
741 *lck_div = 1;
742 pck = max(1, pck);
743 fck = clk_get_rate(dispc.dss1_fck);
744 lck = fck;
745 *pck_div = (lck + pck - 1) / pck;
746 if (is_tft)
747 *pck_div = max(2, *pck_div);
748 else
749 *pck_div = max(3, *pck_div);
750 if (*pck_div > 255) {
751 *pck_div = 255;
752 lck = pck * *pck_div;
753 *lck_div = fck / lck;
754 BUG_ON(*lck_div < 1);
755 if (*lck_div > 255) {
756 *lck_div = 255;
757 dev_warn(dispc.fbdev->dev, "pixclock %d kHz too low.\n",
758 pck / 1000);
759 }
760 }
761}
762
763static void set_lcd_tft_mode(int enable)
764{
765 u32 mask;
766
767 mask = 1 << 3;
768 MOD_REG_FLD(DISPC_CONTROL, mask, enable ? mask : 0);
769}
770
771static void set_lcd_timings(void)
772{
773 u32 l;
774 int lck_div, pck_div;
775 struct lcd_panel *panel = dispc.fbdev->panel;
776 int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
777 unsigned long fck;
778
779 l = dispc_read_reg(DISPC_TIMING_H);
780 l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
781 l |= ( max(1, (min(64, panel->hsw))) - 1 ) << 0;
782 l |= ( max(1, (min(256, panel->hfp))) - 1 ) << 8;
783 l |= ( max(1, (min(256, panel->hbp))) - 1 ) << 20;
784 dispc_write_reg(DISPC_TIMING_H, l);
785
786 l = dispc_read_reg(DISPC_TIMING_V);
787 l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
788 l |= ( max(1, (min(64, panel->vsw))) - 1 ) << 0;
789 l |= ( max(0, (min(255, panel->vfp))) - 0 ) << 8;
790 l |= ( max(0, (min(255, panel->vbp))) - 0 ) << 20;
791 dispc_write_reg(DISPC_TIMING_V, l);
792
793 l = dispc_read_reg(DISPC_POL_FREQ);
794 l &= ~FLD_MASK(12, 6);
795 l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 12;
796 l |= panel->acb & 0xff;
797 dispc_write_reg(DISPC_POL_FREQ, l);
798
799 calc_ck_div(is_tft, panel->pixel_clock * 1000, &lck_div, &pck_div);
800
801 l = dispc_read_reg(DISPC_DIVISOR);
802 l &= ~(FLD_MASK(16, 8) | FLD_MASK(0, 8));
803 l |= (lck_div << 16) | (pck_div << 0);
804 dispc_write_reg(DISPC_DIVISOR, l);
805
806 /* update panel info with the exact clock */
807 fck = clk_get_rate(dispc.dss1_fck);
808 panel->pixel_clock = fck / lck_div / pck_div / 1000;
809}
810
811int omap_dispc_request_irq(void (*callback)(void *data), void *data)
812{
813 int r = 0;
814
815 BUG_ON(callback == NULL);
816
817 if (dispc.irq_callback)
818 r = -EBUSY;
819 else {
820 dispc.irq_callback = callback;
821 dispc.irq_callback_data = data;
822 }
823
824 return r;
825}
826EXPORT_SYMBOL(omap_dispc_request_irq);
827
828void omap_dispc_enable_irqs(int irq_mask)
829{
830 enable_lcd_clocks(1);
831 dispc.enabled_irqs = irq_mask;
832 irq_mask |= DISPC_IRQ_MASK_ERROR;
833 MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
834 enable_lcd_clocks(0);
835}
836EXPORT_SYMBOL(omap_dispc_enable_irqs);
837
838void omap_dispc_disable_irqs(int irq_mask)
839{
840 enable_lcd_clocks(1);
841 dispc.enabled_irqs &= ~irq_mask;
842 irq_mask &= ~DISPC_IRQ_MASK_ERROR;
843 MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
844 enable_lcd_clocks(0);
845}
846EXPORT_SYMBOL(omap_dispc_disable_irqs);
847
848void omap_dispc_free_irq(void)
849{
850 enable_lcd_clocks(1);
851 omap_dispc_disable_irqs(DISPC_IRQ_MASK_ALL);
852 dispc.irq_callback = NULL;
853 dispc.irq_callback_data = NULL;
854 enable_lcd_clocks(0);
855}
856EXPORT_SYMBOL(omap_dispc_free_irq);
857
858static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
859{
860 u32 stat = dispc_read_reg(DISPC_IRQSTATUS);
861
862 if (stat & DISPC_IRQ_FRAMEMASK)
863 complete(&dispc.frame_done);
864
865 if (stat & DISPC_IRQ_MASK_ERROR) {
866 if (printk_ratelimit()) {
867 dev_err(dispc.fbdev->dev, "irq error status %04x\n",
868 stat & 0x7fff);
869 }
870 }
871
872 if ((stat & dispc.enabled_irqs) && dispc.irq_callback)
873 dispc.irq_callback(dispc.irq_callback_data);
874
875 dispc_write_reg(DISPC_IRQSTATUS, stat);
876
877 return IRQ_HANDLED;
878}
879
880static int get_dss_clocks(void)
881{
882 if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) {
883 dev_err(dispc.fbdev->dev, "can't get dss_ick");
884 return PTR_ERR(dispc.dss_ick);
885 }
886
887 if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) {
888 dev_err(dispc.fbdev->dev, "can't get dss1_fck");
889 clk_put(dispc.dss_ick);
890 return PTR_ERR(dispc.dss1_fck);
891 }
892
893 if (IS_ERR((dispc.dss_54m_fck =
894 clk_get(dispc.fbdev->dev, "dss_54m_fck")))) {
895 dev_err(dispc.fbdev->dev, "can't get dss_54m_fck");
896 clk_put(dispc.dss_ick);
897 clk_put(dispc.dss1_fck);
898 return PTR_ERR(dispc.dss_54m_fck);
899 }
900
901 return 0;
902}
903
904static void put_dss_clocks(void)
905{
906 clk_put(dispc.dss_54m_fck);
907 clk_put(dispc.dss1_fck);
908 clk_put(dispc.dss_ick);
909}
910
911static void enable_lcd_clocks(int enable)
912{
913 if (enable)
914 clk_enable(dispc.dss1_fck);
915 else
916 clk_disable(dispc.dss1_fck);
917}
918
919static void enable_interface_clocks(int enable)
920{
921 if (enable)
922 clk_enable(dispc.dss_ick);
923 else
924 clk_disable(dispc.dss_ick);
925}
926
927static void enable_digit_clocks(int enable)
928{
929 if (enable)
930 clk_enable(dispc.dss_54m_fck);
931 else
932 clk_disable(dispc.dss_54m_fck);
933}
934
935static void omap_dispc_suspend(void)
936{
937 if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
938 init_completion(&dispc.frame_done);
939 omap_dispc_enable_lcd_out(0);
940 if (!wait_for_completion_timeout(&dispc.frame_done,
941 msecs_to_jiffies(500))) {
942 dev_err(dispc.fbdev->dev,
943 "timeout waiting for FRAME DONE\n");
944 }
945 enable_lcd_clocks(0);
946 }
947}
948
949static void omap_dispc_resume(void)
950{
951 if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
952 enable_lcd_clocks(1);
953 if (!dispc.ext_mode) {
954 set_lcd_timings();
955 load_palette();
956 }
957 omap_dispc_enable_lcd_out(1);
958 }
959}
960
961
962static int omap_dispc_update_window(struct fb_info *fbi,
963 struct omapfb_update_window *win,
964 void (*complete_callback)(void *arg),
965 void *complete_callback_data)
966{
967 return dispc.update_mode == OMAPFB_UPDATE_DISABLED ? -ENODEV : 0;
968}
969
970static int mmap_kern(struct omapfb_mem_region *region)
971{
972 struct vm_struct *kvma;
973 struct vm_area_struct vma;
974 pgprot_t pgprot;
975 unsigned long vaddr;
976
977 kvma = get_vm_area(region->size, VM_IOREMAP);
978 if (kvma == NULL) {
979 dev_err(dispc.fbdev->dev, "can't get kernel vm area\n");
980 return -ENOMEM;
981 }
982 vma.vm_mm = &init_mm;
983
984 vaddr = (unsigned long)kvma->addr;
985
986 pgprot = pgprot_writecombine(pgprot_kernel);
987 vma.vm_start = vaddr;
988 vma.vm_end = vaddr + region->size;
989 if (io_remap_pfn_range(&vma, vaddr, region->paddr >> PAGE_SHIFT,
990 region->size, pgprot) < 0) {
991 dev_err(dispc.fbdev->dev, "kernel mmap for FBMEM failed\n");
992 return -EAGAIN;
993 }
994 region->vaddr = (void *)vaddr;
995
996 return 0;
997}
998
999static void mmap_user_open(struct vm_area_struct *vma)
1000{
1001 int plane = (int)vma->vm_private_data;
1002
1003 atomic_inc(&dispc.map_count[plane]);
1004}
1005
1006static void mmap_user_close(struct vm_area_struct *vma)
1007{
1008 int plane = (int)vma->vm_private_data;
1009
1010 atomic_dec(&dispc.map_count[plane]);
1011}
1012
1013static struct vm_operations_struct mmap_user_ops = {
1014 .open = mmap_user_open,
1015 .close = mmap_user_close,
1016};
1017
1018static int omap_dispc_mmap_user(struct fb_info *info,
1019 struct vm_area_struct *vma)
1020{
1021 struct omapfb_plane_struct *plane = info->par;
1022 unsigned long off;
1023 unsigned long start;
1024 u32 len;
1025
1026 if (vma->vm_end - vma->vm_start == 0)
1027 return 0;
1028 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
1029 return -EINVAL;
1030 off = vma->vm_pgoff << PAGE_SHIFT;
1031
1032 start = info->fix.smem_start;
1033 len = info->fix.smem_len;
1034 if (off >= len)
1035 return -EINVAL;
1036 if ((vma->vm_end - vma->vm_start + off) > len)
1037 return -EINVAL;
1038 off += start;
1039 vma->vm_pgoff = off >> PAGE_SHIFT;
1040 vma->vm_flags |= VM_IO | VM_RESERVED;
1041 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1042 vma->vm_ops = &mmap_user_ops;
1043 vma->vm_private_data = (void *)plane->idx;
1044 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
1045 vma->vm_end - vma->vm_start, vma->vm_page_prot))
1046 return -EAGAIN;
1047 /* vm_ops.open won't be called for mmap itself. */
1048 atomic_inc(&dispc.map_count[plane->idx]);
1049 return 0;
1050}
1051
1052static void unmap_kern(struct omapfb_mem_region *region)
1053{
1054 vunmap(region->vaddr);
1055}
1056
1057static int alloc_palette_ram(void)
1058{
1059 dispc.palette_vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
1060 MAX_PALETTE_SIZE, &dispc.palette_paddr, GFP_KERNEL);
1061 if (dispc.palette_vaddr == NULL) {
1062 dev_err(dispc.fbdev->dev, "failed to alloc palette memory\n");
1063 return -ENOMEM;
1064 }
1065
1066 return 0;
1067}
1068
1069static void free_palette_ram(void)
1070{
1071 dma_free_writecombine(dispc.fbdev->dev, MAX_PALETTE_SIZE,
1072 dispc.palette_vaddr, dispc.palette_paddr);
1073}
1074
1075static int alloc_fbmem(struct omapfb_mem_region *region)
1076{
1077 region->vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
1078 region->size, &region->paddr, GFP_KERNEL);
1079
1080 if (region->vaddr == NULL) {
1081 dev_err(dispc.fbdev->dev, "unable to allocate FB DMA memory\n");
1082 return -ENOMEM;
1083 }
1084
1085 return 0;
1086}
1087
1088static void free_fbmem(struct omapfb_mem_region *region)
1089{
1090 dma_free_writecombine(dispc.fbdev->dev, region->size,
1091 region->vaddr, region->paddr);
1092}
1093
1094static struct resmap *init_resmap(unsigned long start, size_t size)
1095{
1096 unsigned page_cnt;
1097 struct resmap *res_map;
1098
1099 page_cnt = PAGE_ALIGN(size) / PAGE_SIZE;
1100 res_map =
1101 kzalloc(sizeof(struct resmap) + RESMAP_SIZE(page_cnt), GFP_KERNEL);
1102 if (res_map == NULL)
1103 return NULL;
1104 res_map->start = start;
1105 res_map->page_cnt = page_cnt;
1106 res_map->map = (unsigned long *)(res_map + 1);
1107 return res_map;
1108}
1109
1110static void cleanup_resmap(struct resmap *res_map)
1111{
1112 kfree(res_map);
1113}
1114
1115static inline int resmap_mem_type(unsigned long start)
1116{
1117 if (start >= OMAP2_SRAM_START &&
1118 start < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
1119 return OMAPFB_MEMTYPE_SRAM;
1120 else
1121 return OMAPFB_MEMTYPE_SDRAM;
1122}
1123
1124static inline int resmap_page_reserved(struct resmap *res_map, unsigned page_nr)
1125{
1126 return *RESMAP_PTR(res_map, page_nr) & RESMAP_MASK(page_nr) ? 1 : 0;
1127}
1128
1129static inline void resmap_reserve_page(struct resmap *res_map, unsigned page_nr)
1130{
1131 BUG_ON(resmap_page_reserved(res_map, page_nr));
1132 *RESMAP_PTR(res_map, page_nr) |= RESMAP_MASK(page_nr);
1133}
1134
1135static inline void resmap_free_page(struct resmap *res_map, unsigned page_nr)
1136{
1137 BUG_ON(!resmap_page_reserved(res_map, page_nr));
1138 *RESMAP_PTR(res_map, page_nr) &= ~RESMAP_MASK(page_nr);
1139}
1140
1141static void resmap_reserve_region(unsigned long start, size_t size)
1142{
1143
1144 struct resmap *res_map;
1145 unsigned start_page;
1146 unsigned end_page;
1147 int mtype;
1148 unsigned i;
1149
1150 mtype = resmap_mem_type(start);
1151 res_map = dispc.res_map[mtype];
1152 dev_dbg(dispc.fbdev->dev, "reserve mem type %d start %08lx size %d\n",
1153 mtype, start, size);
1154 start_page = (start - res_map->start) / PAGE_SIZE;
1155 end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
1156 for (i = start_page; i < end_page; i++)
1157 resmap_reserve_page(res_map, i);
1158}
1159
1160static void resmap_free_region(unsigned long start, size_t size)
1161{
1162 struct resmap *res_map;
1163 unsigned start_page;
1164 unsigned end_page;
1165 unsigned i;
1166 int mtype;
1167
1168 mtype = resmap_mem_type(start);
1169 res_map = dispc.res_map[mtype];
1170 dev_dbg(dispc.fbdev->dev, "free mem type %d start %08lx size %d\n",
1171 mtype, start, size);
1172 start_page = (start - res_map->start) / PAGE_SIZE;
1173 end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
1174 for (i = start_page; i < end_page; i++)
1175 resmap_free_page(res_map, i);
1176}
1177
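/*
 * First-fit allocator over the page bitmap: scan for a run of enough free
 * pages, reserve it and return its start address, or 0 if no large enough
 * run exists.
 */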
1178static unsigned long resmap_alloc_region(int mtype, size_t size)
1179{
1180 unsigned i;
1181 unsigned total;
1182 unsigned start_page;
1183 unsigned long start;
1184 struct resmap *res_map = dispc.res_map[mtype];
1185
1186 BUG_ON(mtype >= DISPC_MEMTYPE_NUM || res_map == NULL || !size);
1187
1188 size = PAGE_ALIGN(size) / PAGE_SIZE;
1189 start_page = 0;
1190 total = 0;
1191 for (i = 0; i < res_map->page_cnt; i++) {
1192 if (resmap_page_reserved(res_map, i)) {
1193 start_page = i + 1;
1194 total = 0;
1195 } else if (++total == size)
1196 break;
1197 }
1198 if (total < size)
1199 return 0;
1200
1201 start = res_map->start + start_page * PAGE_SIZE;
1202 resmap_reserve_region(start, size * PAGE_SIZE);
1203
1204 return start;
1205}
1206
1207/* Note that this only works for user mappings; kernel mappings are not
1208 * handled here, so fbcon will keep using the old region.
1209 */
1210static int omap_dispc_setup_mem(int plane, size_t size, int mem_type,
1211 unsigned long *paddr)
1212{
1213 struct omapfb_mem_region *rg;
1214 unsigned long new_addr = 0;
1215
1216 if ((unsigned)plane >= dispc.mem_desc.region_cnt)
1217 return -EINVAL;
1218 if (mem_type >= DISPC_MEMTYPE_NUM)
1219 return -EINVAL;
1220 if (dispc.res_map[mem_type] == NULL)
1221 return -ENOMEM;
1222 rg = &dispc.mem_desc.region[plane];
1223 if (size == rg->size && mem_type == rg->type)
1224 return 0;
1225 if (atomic_read(&dispc.map_count[plane]))
1226 return -EBUSY;
1227 if (rg->size != 0)
1228 resmap_free_region(rg->paddr, rg->size);
1229 if (size != 0) {
1230 new_addr = resmap_alloc_region(mem_type, size);
1231 if (!new_addr) {
1232 /* Reallocate old region. */
1233 resmap_reserve_region(rg->paddr, rg->size);
1234 return -ENOMEM;
1235 }
1236 }
1237 rg->paddr = new_addr;
1238 rg->size = size;
1239 rg->type = mem_type;
1240
1241 *paddr = new_addr;
1242
1243 return 0;
1244}
1245
1246static int setup_fbmem(struct omapfb_mem_desc *req_md)
1247{
1248 struct omapfb_mem_region *rg;
1249 int i;
1250 int r;
1251 unsigned long mem_start[DISPC_MEMTYPE_NUM];
1252 unsigned long mem_end[DISPC_MEMTYPE_NUM];
1253
1254 if (!req_md->region_cnt) {
1255 dev_err(dispc.fbdev->dev, "no memory regions defined\n");
1256 return -ENOENT;
1257 }
1258
1259 rg = &req_md->region[0];
1260 memset(mem_start, 0xff, sizeof(mem_start));
1261 memset(mem_end, 0, sizeof(mem_end));
1262
1263 for (i = 0; i < req_md->region_cnt; i++, rg++) {
1264 int mtype;
1265 if (rg->paddr) {
1266 rg->alloc = 0;
1267 if (rg->vaddr == NULL) {
1268 rg->map = 1;
1269 if ((r = mmap_kern(rg)) < 0)
1270 return r;
1271 }
1272 } else {
1273 if (rg->type != OMAPFB_MEMTYPE_SDRAM) {
1274 dev_err(dispc.fbdev->dev,
1275 "unsupported memory type\n");
1276 return -EINVAL;
1277 }
1278 rg->alloc = rg->map = 1;
1279 if ((r = alloc_fbmem(rg)) < 0)
1280 return r;
1281 }
1282 mtype = rg->type;
1283
1284 if (rg->paddr < mem_start[mtype])
1285 mem_start[mtype] = rg->paddr;
1286 if (rg->paddr + rg->size > mem_end[mtype])
1287 mem_end[mtype] = rg->paddr + rg->size;
1288 }
1289
1290 for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
1291 unsigned long start;
1292 size_t size;
1293 if (mem_end[i] == 0)
1294 continue;
1295 start = mem_start[i];
1296 size = mem_end[i] - start;
1297 dispc.res_map[i] = init_resmap(start, size);
1298 r = -ENOMEM;
1299 if (dispc.res_map[i] == NULL)
1300 goto fail;
1301 /* Initial state is that everything is reserved. This
1302 * includes possible holes as well, which will never be
1303 * freed.
1304 */
1305 resmap_reserve_region(start, size);
1306 }
1307
1308 dispc.mem_desc = *req_md;
1309
1310 return 0;
1311fail:
1312 for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
1313 if (dispc.res_map[i] != NULL)
1314 cleanup_resmap(dispc.res_map[i]);
1315 }
1316 return r;
1317}
1318
1319static void cleanup_fbmem(void)
1320{
1321 struct omapfb_mem_region *rg;
1322 int i;
1323
1324 for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
1325 if (dispc.res_map[i] != NULL)
1326 cleanup_resmap(dispc.res_map[i]);
1327 }
1328 rg = &dispc.mem_desc.region[0];
1329 for (i = 0; i < dispc.mem_desc.region_cnt; i++, rg++) {
1330 if (rg->alloc)
1331 free_fbmem(rg);
1332 else {
1333 if (rg->map)
1334 unmap_kern(rg);
1335 }
1336 }
1337}
1338
1339static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
1340 struct omapfb_mem_desc *req_vram)
1341{
1342 int r;
1343 u32 l;
1344 struct lcd_panel *panel = fbdev->panel;
1345 int tmo = 10000;
1346 int skip_init = 0;
1347 int i;
1348
1349 memset(&dispc, 0, sizeof(dispc));
1350
1351 dispc.base = io_p2v(DISPC_BASE);
1352 dispc.fbdev = fbdev;
1353 dispc.ext_mode = ext_mode;
1354
1355 init_completion(&dispc.frame_done);
1356
1357 if ((r = get_dss_clocks()) < 0)
1358 return r;
1359
1360 enable_interface_clocks(1);
1361 enable_lcd_clocks(1);
1362
1363#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
1364 l = dispc_read_reg(DISPC_CONTROL);
1365 /* LCD enabled ? */
1366 if (l & 1) {
1367 pr_info("omapfb: skipping hardware initialization\n");
1368 skip_init = 1;
1369 }
1370#endif
1371
1372 if (!skip_init) {
1373 /* Reset monitoring works only with the 54 MHz clock */
1374 enable_digit_clocks(1);
1375
1376 /* Soft reset */
1377 MOD_REG_FLD(DISPC_SYSCONFIG, 1 << 1, 1 << 1);
1378
1379 while (!(dispc_read_reg(DISPC_SYSSTATUS) & 1)) {
1380 if (!--tmo) {
1381 dev_err(dispc.fbdev->dev, "soft reset failed\n");
1382 r = -ENODEV;
1383 enable_digit_clocks(0);
1384 goto fail1;
1385 }
1386 }
1387
1388 enable_digit_clocks(0);
1389 }
1390
1391 /* Enable smart idle and autoidle */
1392 l = dispc_read_reg(DISPC_SYSCONFIG);
1393 l &= ~((3 << 12) | (3 << 3));
1394 l |= (2 << 12) | (2 << 3) | (1 << 0);
1395 dispc_write_reg(DISPC_SYSCONFIG, l);
1396 omap_writel(1 << 0, DSS_BASE + DSS_SYSCONFIG);
1397
1398 /* Set functional clock autogating */
1399 l = dispc_read_reg(DISPC_CONFIG);
1400 l |= 1 << 9;
1401 dispc_write_reg(DISPC_CONFIG, l);
1402
1403 l = dispc_read_reg(DISPC_IRQSTATUS);
1404 dispc_write_reg(DISPC_IRQSTATUS, l);
1405
1406 /* Enable those that we handle always */
1407 omap_dispc_enable_irqs(DISPC_IRQ_FRAMEMASK);
1408
1409 if ((r = request_irq(INT_24XX_DSS_IRQ, omap_dispc_irq_handler,
1410 0, MODULE_NAME, fbdev)) < 0) {
1411 dev_err(dispc.fbdev->dev, "can't get DSS IRQ\n");
1412 goto fail1;
1413 }
1414
1415 /* L3 firewall setting: enable access to OCM RAM */
1416 __raw_writel(0x402000b0, io_p2v(0x680050a0));
1417
1418 if ((r = alloc_palette_ram()) < 0)
1419 goto fail2;
1420
1421 if ((r = setup_fbmem(req_vram)) < 0)
1422 goto fail3;
1423
1424 if (!skip_init) {
1425 for (i = 0; i < dispc.mem_desc.region_cnt; i++) {
1426 memset(dispc.mem_desc.region[i].vaddr, 0,
1427 dispc.mem_desc.region[i].size);
1428 }
1429
1430 /* Set logic clock to fck, pixel clock to fck/2 for now */
1431 MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(16, 8), 1 << 16);
1432 MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(0, 8), 2 << 0);
1433
1434 setup_plane_fifo(0, ext_mode);
1435 setup_plane_fifo(1, ext_mode);
1436 setup_plane_fifo(2, ext_mode);
1437
1438 setup_color_conv_coef();
1439
1440 set_lcd_tft_mode(panel->config & OMAP_LCDC_PANEL_TFT);
1441 set_load_mode(DISPC_LOAD_FRAME_ONLY);
1442
1443 if (!ext_mode) {
1444 set_lcd_data_lines(panel->data_lines);
1445 omap_dispc_set_lcd_size(panel->x_res, panel->y_res);
1446 set_lcd_timings();
1447 } else
1448 set_lcd_data_lines(panel->bpp);
1449 enable_rfbi_mode(ext_mode);
1450 }
1451
1452 l = dispc_read_reg(DISPC_REVISION);
1453 pr_info("omapfb: DISPC version %d.%d initialized\n",
1454 l >> 4 & 0x0f, l & 0x0f);
1455 enable_lcd_clocks(0);
1456
1457 return 0;
1458fail3:
1459 free_palette_ram();
1460fail2:
1461 free_irq(INT_24XX_DSS_IRQ, fbdev);
1462fail1:
1463 enable_lcd_clocks(0);
1464 enable_interface_clocks(0);
1465 put_dss_clocks();
1466
1467 return r;
1468}
1469
1470static void omap_dispc_cleanup(void)
1471{
1472 int i;
1473
1474 omap_dispc_set_update_mode(OMAPFB_UPDATE_DISABLED);
1475 /* This will also disable clocks that are on */
1476 for (i = 0; i < dispc.mem_desc.region_cnt; i++)
1477 omap_dispc_enable_plane(i, 0);
1478 cleanup_fbmem();
1479 free_palette_ram();
1480 free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
1481 enable_interface_clocks(0);
1482 put_dss_clocks();
1483}
1484
1485const struct lcd_ctrl omap2_int_ctrl = {
1486 .name = "internal",
1487 .init = omap_dispc_init,
1488 .cleanup = omap_dispc_cleanup,
1489 .get_caps = omap_dispc_get_caps,
1490 .set_update_mode = omap_dispc_set_update_mode,
1491 .get_update_mode = omap_dispc_get_update_mode,
1492 .update_window = omap_dispc_update_window,
1493 .suspend = omap_dispc_suspend,
1494 .resume = omap_dispc_resume,
1495 .setup_plane = omap_dispc_setup_plane,
1496 .setup_mem = omap_dispc_setup_mem,
1497 .set_scale = omap_dispc_set_scale,
1498 .enable_plane = omap_dispc_enable_plane,
1499 .set_color_key = omap_dispc_set_color_key,
1500 .get_color_key = omap_dispc_get_color_key,
1501 .mmap = omap_dispc_mmap_user,
1502};
diff --git a/drivers/video/omap/dispc.h b/drivers/video/omap/dispc.h
new file mode 100644
index 000000000000..eb1512b56ce8
--- /dev/null
+++ b/drivers/video/omap/dispc.h
@@ -0,0 +1,43 @@
1#ifndef _DISPC_H
2#define _DISPC_H
3
4#include <linux/interrupt.h>
5
6#define DISPC_PLANE_GFX 0
7#define DISPC_PLANE_VID1 1
8#define DISPC_PLANE_VID2 2
9
10#define DISPC_RGB_1_BPP 0x00
11#define DISPC_RGB_2_BPP 0x01
12#define DISPC_RGB_4_BPP 0x02
13#define DISPC_RGB_8_BPP 0x03
14#define DISPC_RGB_12_BPP 0x04
15#define DISPC_RGB_16_BPP 0x06
16#define DISPC_RGB_24_BPP 0x08
17#define DISPC_RGB_24_BPP_UNPACK_32 0x09
18#define DISPC_YUV2_422 0x0a
19#define DISPC_UYVY_422 0x0b
20
21#define DISPC_BURST_4x32 0
22#define DISPC_BURST_8x32 1
23#define DISPC_BURST_16x32 2
24
25#define DISPC_LOAD_CLUT_AND_FRAME 0x00
26#define DISPC_LOAD_CLUT_ONLY 0x01
27#define DISPC_LOAD_FRAME_ONLY 0x02
28#define DISPC_LOAD_CLUT_ONCE_FRAME 0x03
29
30#define DISPC_TFT_DATA_LINES_12 0
31#define DISPC_TFT_DATA_LINES_16 1
32#define DISPC_TFT_DATA_LINES_18 2
33#define DISPC_TFT_DATA_LINES_24 3
34
35extern void omap_dispc_set_lcd_size(int width, int height);
36
37extern void omap_dispc_enable_lcd_out(int enable);
38extern void omap_dispc_enable_digit_out(int enable);
39
40extern int omap_dispc_request_irq(void (*callback)(void *data), void *data);
41extern void omap_dispc_free_irq(void);
42
43#endif
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
new file mode 100644
index 000000000000..dc48e02f215c
--- /dev/null
+++ b/drivers/video/omap/hwa742.c
@@ -0,0 +1,1077 @@
1/*
2 * Epson HWA742 LCD controller driver
3 *
4 * Copyright (C) 2004-2005 Nokia Corporation
5 * Authors: Juha Yrjölä <juha.yrjola@nokia.com>
6 * Imre Deak <imre.deak@nokia.com>
7 * YUV support: Jussi Laako <jussi.laako@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/fb.h>
26#include <linux/delay.h>
27#include <linux/clk.h>
28
29#include <asm/arch/dma.h>
30#include <asm/arch/omapfb.h>
31#include <asm/arch/hwa742.h>
32
33#define HWA742_REV_CODE_REG 0x0
34#define HWA742_CONFIG_REG 0x2
35#define HWA742_PLL_DIV_REG 0x4
36#define HWA742_PLL_0_REG 0x6
37#define HWA742_PLL_1_REG 0x8
38#define HWA742_PLL_2_REG 0xa
39#define HWA742_PLL_3_REG 0xc
40#define HWA742_PLL_4_REG 0xe
41#define HWA742_CLK_SRC_REG 0x12
42#define HWA742_PANEL_TYPE_REG 0x14
43#define HWA742_H_DISP_REG 0x16
44#define HWA742_H_NDP_REG 0x18
45#define HWA742_V_DISP_1_REG 0x1a
46#define HWA742_V_DISP_2_REG 0x1c
47#define HWA742_V_NDP_REG 0x1e
48#define HWA742_HS_W_REG 0x20
49#define HWA742_HP_S_REG 0x22
50#define HWA742_VS_W_REG 0x24
51#define HWA742_VP_S_REG 0x26
52#define HWA742_PCLK_POL_REG 0x28
53#define HWA742_INPUT_MODE_REG 0x2a
54#define HWA742_TRANSL_MODE_REG1 0x2e
55#define HWA742_DISP_MODE_REG 0x34
56#define HWA742_WINDOW_TYPE 0x36
57#define HWA742_WINDOW_X_START_0 0x38
58#define HWA742_WINDOW_X_START_1 0x3a
59#define HWA742_WINDOW_Y_START_0 0x3c
60#define HWA742_WINDOW_Y_START_1 0x3e
61#define HWA742_WINDOW_X_END_0 0x40
62#define HWA742_WINDOW_X_END_1 0x42
63#define HWA742_WINDOW_Y_END_0 0x44
64#define HWA742_WINDOW_Y_END_1 0x46
65#define HWA742_MEMORY_WRITE_LSB 0x48
66#define HWA742_MEMORY_WRITE_MSB 0x49
67#define HWA742_MEMORY_READ_0 0x4a
68#define HWA742_MEMORY_READ_1 0x4c
69#define HWA742_MEMORY_READ_2 0x4e
70#define HWA742_POWER_SAVE 0x56
71#define HWA742_NDP_CTRL 0x58
72
73#define HWA742_AUTO_UPDATE_TIME (HZ / 20)
74
75/* Reserve 4 request slots for requests in irq context */
76#define REQ_POOL_SIZE 24
77#define IRQ_REQ_POOL_SIZE 4
78
79#define REQ_FROM_IRQ_POOL 0x01
80
81#define REQ_COMPLETE 0
82#define REQ_PENDING 1
83
84struct update_param {
85 int x, y, width, height;
86 int color_mode;
87 int flags;
88};
89
90struct hwa742_request {
91 struct list_head entry;
92 unsigned int flags;
93
94 int (*handler)(struct hwa742_request *req);
95 void (*complete)(void *data);
96 void *complete_data;
97
98 union {
99 struct update_param update;
100 struct completion *sync;
101 } par;
102};
103
104struct {
105 enum omapfb_update_mode update_mode;
106 enum omapfb_update_mode update_mode_before_suspend;
107
108 struct timer_list auto_update_timer;
109 int stop_auto_update;
110 struct omapfb_update_window auto_update_window;
111 unsigned te_connected:1;
112 unsigned vsync_only:1;
113
114 struct hwa742_request req_pool[REQ_POOL_SIZE];
115 struct list_head pending_req_list;
116 struct list_head free_req_list;
117 struct semaphore req_sema;
118 spinlock_t req_lock;
119
120 struct extif_timings reg_timings, lut_timings;
121
122 int prev_color_mode;
123 int prev_flags;
124 int window_type;
125
126 u32 max_transmit_size;
127 u32 extif_clk_period;
128 unsigned long pix_tx_time;
129 unsigned long line_upd_time;
130
131
132 struct omapfb_device *fbdev;
133 struct lcd_ctrl_extif *extif;
134 struct lcd_ctrl *int_ctrl;
135
136 void (*power_up)(struct device *dev);
137 void (*power_down)(struct device *dev);
138} hwa742;
139
140struct lcd_ctrl hwa742_ctrl;
141
142static u8 hwa742_read_reg(u8 reg)
143{
144 u8 data;
145
146 hwa742.extif->set_bits_per_cycle(8);
147 hwa742.extif->write_command(&reg, 1);
148 hwa742.extif->read_data(&data, 1);
149
150 return data;
151}
152
153static void hwa742_write_reg(u8 reg, u8 data)
154{
155 hwa742.extif->set_bits_per_cycle(8);
156 hwa742.extif->write_command(&reg, 1);
157 hwa742.extif->write_data(&data, 1);
158}
159
160static void set_window_regs(int x_start, int y_start, int x_end, int y_end)
161{
162 u8 tmp[8];
163 u8 cmd;
164
165 x_end--;
166 y_end--;
167 tmp[0] = x_start;
168 tmp[1] = x_start >> 8;
169 tmp[2] = y_start;
170 tmp[3] = y_start >> 8;
171 tmp[4] = x_end;
172 tmp[5] = x_end >> 8;
173 tmp[6] = y_end;
174 tmp[7] = y_end >> 8;
175
176 hwa742.extif->set_bits_per_cycle(8);
177 cmd = HWA742_WINDOW_X_START_0;
178
179 hwa742.extif->write_command(&cmd, 1);
180
181 hwa742.extif->write_data(tmp, 8);
182}
183
184static void set_format_regs(int conv, int transl, int flags)
185{
186 if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
187 hwa742.window_type = ((hwa742.window_type & 0xfc) | 0x01);
188#ifdef VERBOSE
189 dev_dbg(hwa742.fbdev->dev, "hwa742: enabled pixel doubling\n");
190#endif
191 } else {
192 hwa742.window_type = (hwa742.window_type & 0xfc);
193#ifdef VERBOSE
194 dev_dbg(hwa742.fbdev->dev, "hwa742: disabled pixel doubling\n");
195#endif
196 }
197
198 hwa742_write_reg(HWA742_INPUT_MODE_REG, conv);
199 hwa742_write_reg(HWA742_TRANSL_MODE_REG1, transl);
200 hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type);
201}
202
203static void enable_tearsync(int y, int width, int height, int screen_height,
204 int force_vsync)
205{
206 u8 b;
207
208 b = hwa742_read_reg(HWA742_NDP_CTRL);
209 b |= 1 << 2;
210 hwa742_write_reg(HWA742_NDP_CTRL, b);
211
212 if (likely(hwa742.vsync_only || force_vsync)) {
213 hwa742.extif->enable_tearsync(1, 0);
214 return;
215 }
216
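	/*
	 * If a full output line can be transferred faster than the panel
	 * scans one line, or the whole transfer finishes before the panel
	 * reaches the bottom of the updated area, syncing to vsync alone is
	 * sufficient; otherwise sync near the start line of the window
	 * (y + 1).
	 */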
217 if (width * hwa742.pix_tx_time < hwa742.line_upd_time) {
218 hwa742.extif->enable_tearsync(1, 0);
219 return;
220 }
221
222 if ((width * hwa742.pix_tx_time / 1000) * height <
223 (y + height) * (hwa742.line_upd_time / 1000)) {
224 hwa742.extif->enable_tearsync(1, 0);
225 return;
226 }
227
228 hwa742.extif->enable_tearsync(1, y + 1);
229}
230
231static void disable_tearsync(void)
232{
233 u8 b;
234
235 hwa742.extif->enable_tearsync(0, 0);
236
237 b = hwa742_read_reg(HWA742_NDP_CTRL);
238 b &= ~(1 << 2);
239 hwa742_write_reg(HWA742_NDP_CTRL, b);
240}
241
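/*
 * Get a request from the pool. Callers in process context block on
 * req_sema when the pool runs low; irq-context callers take an entry from
 * the free list directly, relying on the slots reserved for irq use (see
 * REQ_POOL_SIZE / IRQ_REQ_POOL_SIZE above).
 */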
242static inline struct hwa742_request *alloc_req(void)
243{
244 unsigned long flags;
245 struct hwa742_request *req;
246 int req_flags = 0;
247
248 if (!in_interrupt())
249 down(&hwa742.req_sema);
250 else
251 req_flags = REQ_FROM_IRQ_POOL;
252
253 spin_lock_irqsave(&hwa742.req_lock, flags);
254 BUG_ON(list_empty(&hwa742.free_req_list));
255 req = list_entry(hwa742.free_req_list.next,
256 struct hwa742_request, entry);
257 list_del(&req->entry);
258 spin_unlock_irqrestore(&hwa742.req_lock, flags);
259
260 INIT_LIST_HEAD(&req->entry);
261 req->flags = req_flags;
262
263 return req;
264}
265
266static inline void free_req(struct hwa742_request *req)
267{
268 unsigned long flags;
269
270 spin_lock_irqsave(&hwa742.req_lock, flags);
271
272 list_del(&req->entry);
273 list_add(&req->entry, &hwa742.free_req_list);
274 if (!(req->flags & REQ_FROM_IRQ_POOL))
275 up(&hwa742.req_sema);
276
277 spin_unlock_irqrestore(&hwa742.req_lock, flags);
278}
279
280static void process_pending_requests(void)
281{
282 unsigned long flags;
283
284 spin_lock_irqsave(&hwa742.req_lock, flags);
285
286 while (!list_empty(&hwa742.pending_req_list)) {
287 struct hwa742_request *req;
288 void (*complete)(void *);
289 void *complete_data;
290
291 req = list_entry(hwa742.pending_req_list.next,
292 struct hwa742_request, entry);
293 spin_unlock_irqrestore(&hwa742.req_lock, flags);
294
295 if (req->handler(req) == REQ_PENDING)
296 return;
297
298 complete = req->complete;
299 complete_data = req->complete_data;
300 free_req(req);
301
302 if (complete)
303 complete(complete_data);
304
305 spin_lock_irqsave(&hwa742.req_lock, flags);
306 }
307
308 spin_unlock_irqrestore(&hwa742.req_lock, flags);
309}
310
311static void submit_req_list(struct list_head *head)
312{
313 unsigned long flags;
314 int process = 1;
315
316 spin_lock_irqsave(&hwa742.req_lock, flags);
317 if (likely(!list_empty(&hwa742.pending_req_list)))
318 process = 0;
319 list_splice_init(head, hwa742.pending_req_list.prev);
320 spin_unlock_irqrestore(&hwa742.req_lock, flags);
321
322 if (process)
323 process_pending_requests();
324}
325
326static void request_complete(void *data)
327{
328 struct hwa742_request *req = (struct hwa742_request *)data;
329 void (*complete)(void *);
330 void *complete_data;
331
332 complete = req->complete;
333 complete_data = req->complete_data;
334
335 free_req(req);
336
337 if (complete)
338 complete(complete_data);
339
340 process_pending_requests();
341}
342
343static int send_frame_handler(struct hwa742_request *req)
344{
345 struct update_param *par = &req->par.update;
346 int x = par->x;
347 int y = par->y;
348 int w = par->width;
349 int h = par->height;
350 int bpp;
351 int conv, transl;
352 unsigned long offset;
353 int color_mode = par->color_mode;
354 int flags = par->flags;
355 int scr_width = hwa742.fbdev->panel->x_res;
356 int scr_height = hwa742.fbdev->panel->y_res;
357
358#ifdef VERBOSE
359 dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d "
360 "color_mode %d flags %d\n",
361 x, y, w, h, scr_width, color_mode, flags);
362#endif
363
364 switch (color_mode) {
365 case OMAPFB_COLOR_YUV422:
366 bpp = 16;
367 conv = 0x08;
368 transl = 0x25;
369 break;
370 case OMAPFB_COLOR_YUV420:
371 bpp = 12;
372 conv = 0x09;
373 transl = 0x25;
374 break;
375 case OMAPFB_COLOR_RGB565:
376 bpp = 16;
377 conv = 0x01;
378 transl = 0x05;
379 break;
380 default:
381 return -EINVAL;
382 }
383
384 if (hwa742.prev_flags != flags ||
385 hwa742.prev_color_mode != color_mode) {
386 set_format_regs(conv, transl, flags);
387 hwa742.prev_color_mode = color_mode;
388 hwa742.prev_flags = flags;
389 }
390 flags = req->par.update.flags;
391 if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
392 enable_tearsync(y, scr_width, h, scr_height,
393 flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
394 else
395 disable_tearsync();
396
397 set_window_regs(x, y, x + w, y + h);
398
399 offset = (scr_width * y + x) * bpp / 8;
400
401 hwa742.int_ctrl->setup_plane(OMAPFB_PLANE_GFX,
402 OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h,
403 color_mode);
404
405 hwa742.extif->set_bits_per_cycle(16);
406
407 hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
408 hwa742.extif->transfer_area(w, h, request_complete, req);
409
410 return REQ_PENDING;
411}
412
413static void send_frame_complete(void *data)
414{
415 hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0);
416}
417
418#define ADD_PREQ(_x, _y, _w, _h) do { \
419 req = alloc_req(); \
420 req->handler = send_frame_handler; \
421 req->complete = send_frame_complete; \
422 req->par.update.x = _x; \
423 req->par.update.y = _y; \
424 req->par.update.width = _w; \
425 req->par.update.height = _h; \
426 req->par.update.color_mode = color_mode;\
427 req->par.update.flags = flags; \
428 list_add_tail(&req->entry, req_head); \
429} while(0)
430
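/*
 * Split an update window into transfer requests: an odd leading column is
 * sent as a separate one-pixel-wide request, the even-width main area is
 * split further if it exceeds max_transmit_size, and a possible trailing
 * odd column is sent last. Tearsync is only kept for the first request.
 */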
431static void create_req_list(struct omapfb_update_window *win,
432 struct list_head *req_head)
433{
434 struct hwa742_request *req;
435 int x = win->x;
436 int y = win->y;
437 int width = win->width;
438 int height = win->height;
439 int color_mode;
440 int flags;
441
442 flags = win->format & ~OMAPFB_FORMAT_MASK;
443 color_mode = win->format & OMAPFB_FORMAT_MASK;
444
445 if (x & 1) {
446 ADD_PREQ(x, y, 1, height);
447 width--;
448 x++;
449 flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
450 }
451 if (width & ~1) {
452 unsigned int xspan = width & ~1;
453 unsigned int ystart = y;
454 unsigned int yspan = height;
455
456 if (xspan * height * 2 > hwa742.max_transmit_size) {
457 yspan = hwa742.max_transmit_size / (xspan * 2);
458 ADD_PREQ(x, ystart, xspan, yspan);
459 ystart += yspan;
460 yspan = height - yspan;
461 flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
462 }
463
464 ADD_PREQ(x, ystart, xspan, yspan);
465 x += xspan;
466 width -= xspan;
467 flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
468 }
469 if (width)
470 ADD_PREQ(x, y, 1, height);
471}
472
473static void auto_update_complete(void *data)
474{
475 if (!hwa742.stop_auto_update)
476 mod_timer(&hwa742.auto_update_timer,
477 jiffies + HWA742_AUTO_UPDATE_TIME);
478}
479
480static void hwa742_update_window_auto(unsigned long arg)
481{
482 LIST_HEAD(req_list);
483 struct hwa742_request *last;
484
485 create_req_list(&hwa742.auto_update_window, &req_list);
486 last = list_entry(req_list.prev, struct hwa742_request, entry);
487
488 last->complete = auto_update_complete;
489 last->complete_data = NULL;
490
491 submit_req_list(&req_list);
492}
493
494int hwa742_update_window_async(struct fb_info *fbi,
495 struct omapfb_update_window *win,
496 void (*complete_callback)(void *arg),
497 void *complete_callback_data)
498{
499 LIST_HEAD(req_list);
500 struct hwa742_request *last;
501 int r = 0;
502
503 if (hwa742.update_mode != OMAPFB_MANUAL_UPDATE) {
504 dev_dbg(hwa742.fbdev->dev, "invalid update mode\n");
505 r = -EINVAL;
506 goto out;
507 }
508 if (unlikely(win->format &
509 ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE |
510 OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) {
511 dev_dbg(hwa742.fbdev->dev, "invalid window flag\n");
512 r = -EINVAL;
513 goto out;
514 }
515
516 create_req_list(win, &req_list);
517 last = list_entry(req_list.prev, struct hwa742_request, entry);
518
519 last->complete = complete_callback;
520 last->complete_data = (void *)complete_callback_data;
521
522 submit_req_list(&req_list);
523
524out:
525 return r;
526}
527EXPORT_SYMBOL(hwa742_update_window_async);
528
529static int hwa742_setup_plane(int plane, int channel_out,
530 unsigned long offset, int screen_width,
531 int pos_x, int pos_y, int width, int height,
532 int color_mode)
533{
534 if (plane != OMAPFB_PLANE_GFX ||
535 channel_out != OMAPFB_CHANNEL_OUT_LCD)
536 return -EINVAL;
537
538 return 0;
539}
540
541static int hwa742_enable_plane(int plane, int enable)
542{
543 if (plane != 0)
544 return -EINVAL;
545
546 hwa742.int_ctrl->enable_plane(plane, enable);
547
548 return 0;
549}
550
551static int sync_handler(struct hwa742_request *req)
552{
553 complete(req->par.sync);
554 return REQ_COMPLETE;
555}
556
557static void hwa742_sync(void)
558{
559 LIST_HEAD(req_list);
560 struct hwa742_request *req;
561 struct completion comp;
562
563 req = alloc_req();
564
565 req->handler = sync_handler;
566 req->complete = NULL;
567 init_completion(&comp);
568 req->par.sync = &comp;
569
570 list_add(&req->entry, &req_list);
571 submit_req_list(&req_list);
572
573 wait_for_completion(&comp);
574}
575
576static void hwa742_bind_client(struct omapfb_notifier_block *nb)
577{
578 dev_dbg(hwa742.fbdev->dev, "update_mode %d\n", hwa742.update_mode);
579 if (hwa742.update_mode == OMAPFB_MANUAL_UPDATE) {
580 omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
581 }
582}
583
584static int hwa742_set_update_mode(enum omapfb_update_mode mode)
585{
586 if (mode != OMAPFB_MANUAL_UPDATE && mode != OMAPFB_AUTO_UPDATE &&
587 mode != OMAPFB_UPDATE_DISABLED)
588 return -EINVAL;
589
590 if (mode == hwa742.update_mode)
591 return 0;
592
593 dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n",
594 mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
595 (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
596
597 switch (hwa742.update_mode) {
598 case OMAPFB_MANUAL_UPDATE:
599 omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_DISABLED);
600 break;
601 case OMAPFB_AUTO_UPDATE:
602 hwa742.stop_auto_update = 1;
603 del_timer_sync(&hwa742.auto_update_timer);
604 break;
605 case OMAPFB_UPDATE_DISABLED:
606 break;
607 }
608
609 hwa742.update_mode = mode;
610 hwa742_sync();
611 hwa742.stop_auto_update = 0;
612
613 switch (mode) {
614 case OMAPFB_MANUAL_UPDATE:
615 omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
616 break;
617 case OMAPFB_AUTO_UPDATE:
618 hwa742_update_window_auto(0);
619 break;
620 case OMAPFB_UPDATE_DISABLED:
621 break;
622 }
623
624 return 0;
625}
626
627static enum omapfb_update_mode hwa742_get_update_mode(void)
628{
629 return hwa742.update_mode;
630}
631
632static unsigned long round_to_extif_ticks(unsigned long ps, int div)
633{
634 int bus_tick = hwa742.extif_clk_period * div;
635 return (ps + bus_tick - 1) / bus_tick * bus_tick;
636}
637
638static int calc_reg_timing(unsigned long sysclk, int div)
639{
640 struct extif_timings *t;
641 unsigned long systim;
642
643 /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
644 * AccessTime 2 ns + 12.2 ns (regs),
645 * WEOffTime = WEOnTime + 1 ns,
646 * REOffTime = REOnTime + 16 ns (regs),
647 * CSOffTime = REOffTime + 1 ns
648 * ReadCycle = 2ns + 2*SYSCLK (regs),
649 * WriteCycle = 2*SYSCLK + 2 ns,
650 * CSPulseWidth = 10 ns */
651 systim = 1000000000 / (sysclk / 1000);
652 dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps "
653 "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);
654
655 t = &hwa742.reg_timings;
656 memset(t, 0, sizeof(*t));
657 t->clk_div = div;
658 t->cs_on_time = 0;
659 t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
660 t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
661 t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
662 t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
663 t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div);
664 t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
665 t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
666 if (t->we_cycle_time < t->we_off_time)
667 t->we_cycle_time = t->we_off_time;
668 t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
669 if (t->re_cycle_time < t->re_off_time)
670 t->re_cycle_time = t->re_off_time;
671 t->cs_pulse_width = 0;
672
673 dev_dbg(hwa742.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
674 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
675 dev_dbg(hwa742.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
676 t->we_on_time, t->we_off_time, t->re_cycle_time,
677 t->we_cycle_time);
678 dev_dbg(hwa742.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
679 t->access_time, t->cs_pulse_width);
680
681 return hwa742.extif->convert_timings(t);
682}
683
684static int calc_lut_timing(unsigned long sysclk, int div)
685{
686 struct extif_timings *t;
687 unsigned long systim;
688
689 /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
690 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
691 * WEOffTime = WEOnTime + 1 ns,
692 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
693 * CSOffTime = REOffTime + 1 ns
694 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
695 * WriteCycle = 2*SYSCLK + 2 ns,
696 * CSPulseWidth = 10 ns
697 */
698 systim = 1000000000 / (sysclk / 1000);
699 dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps "
700 "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);
701
702 t = &hwa742.lut_timings;
703 memset(t, 0, sizeof(*t));
704
705 t->clk_div = div;
706
707 t->cs_on_time = 0;
708 t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
709 t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
710 t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
711 26000, div);
712 t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
713 t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
714 26000, div);
715 t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
716 t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
717 if (t->we_cycle_time < t->we_off_time)
718 t->we_cycle_time = t->we_off_time;
719 t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
720 if (t->re_cycle_time < t->re_off_time)
721 t->re_cycle_time = t->re_off_time;
722 t->cs_pulse_width = 0;
723
724 dev_dbg(hwa742.fbdev->dev, "[lut]cson %d csoff %d reon %d reoff %d\n",
725 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
726 dev_dbg(hwa742.fbdev->dev, "[lut]weon %d weoff %d recyc %d wecyc %d\n",
727 t->we_on_time, t->we_off_time, t->re_cycle_time,
728 t->we_cycle_time);
729 dev_dbg(hwa742.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
730 t->access_time, t->cs_pulse_width);
731
732 return hwa742.extif->convert_timings(t);
733}
734
735static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
736{
737 int max_clk_div;
738 int div;
739
740 hwa742.extif->get_clk_info(&hwa742.extif_clk_period, &max_clk_div);
 741        for (div = 1; div <= max_clk_div; div++) {
742 if (calc_reg_timing(sysclk, div) == 0)
743 break;
744 }
745 if (div > max_clk_div)
746 goto err;
747
748 *extif_mem_div = div;
749
 750        for (div = 1; div <= max_clk_div; div++) {
751 if (calc_lut_timing(sysclk, div) == 0)
752 break;
753 }
754
755 if (div > max_clk_div)
756 goto err;
757
758 return 0;
759
760err:
761 dev_err(hwa742.fbdev->dev, "can't setup timings\n");
762 return -1;
763}
764
765static void calc_hwa742_clk_rates(unsigned long ext_clk,
766 unsigned long *sys_clk, unsigned long *pix_clk)
767{
768 int pix_clk_src;
769 int sys_div = 0, sys_mul = 0;
770 int pix_div;
771
772 pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG);
773 pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
774 if ((pix_clk_src & (0x3 << 1)) == 0) {
775 /* Source is the PLL */
776 sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1;
777 sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1;
778 *sys_clk = ext_clk * sys_mul / sys_div;
 779        } else /* source is ext clk, or oscillator */
780 *sys_clk = ext_clk;
781
782 *pix_clk = *sys_clk / pix_div; /* HZ */
783 dev_dbg(hwa742.fbdev->dev,
784 "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
785 ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
786 dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
787 *sys_clk, *pix_clk);
788}
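/*
 * Illustration of the clock derivation above (register field values are
 * assumed, not read from real hardware): with ext_clk = 12 MHz, a PLL
 * divider field of 2 (sys_div = 3) and a multiplier field of 23
 * (sys_mul = 24), sys_clk = 12 MHz * 24 / 3 = 96 MHz; a pix_div of 6 then
 * gives pix_clk = 16 MHz. When bits 1-2 of HWA742_CLK_SRC_REG are non-zero
 * the pixel clock source is the external clock or oscillator and sys_clk
 * is simply ext_clk.
 */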
789
790
791static int setup_tearsync(unsigned long pix_clk, int extif_div)
792{
793 int hdisp, vdisp;
794 int hndp, vndp;
795 int hsw, vsw;
796 int hs, vs;
797 int hs_pol_inv, vs_pol_inv;
798 int use_hsvs, use_ndp;
799 u8 b;
800
801 hsw = hwa742_read_reg(HWA742_HS_W_REG);
802 vsw = hwa742_read_reg(HWA742_VS_W_REG);
803 hs_pol_inv = !(hsw & 0x80);
804 vs_pol_inv = !(vsw & 0x80);
805 hsw = hsw & 0x7f;
806 vsw = vsw & 0x3f;
807
808 hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8;
809 vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) +
810 ((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8);
811
812 hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f;
813 vndp = hwa742_read_reg(HWA742_V_NDP_REG);
814
815 /* time to transfer one pixel (16bpp) in ps */
816 hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time;
817 if (hwa742.extif->get_max_tx_rate != NULL) {
818 /*
819 * The external interface might have a rate limitation,
 820         * if so, make sure we don't exceed it.
821 */
822 unsigned long min_tx_time;
823 unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate();
824
825 dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n",
826 max_tx_rate);
827 min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */
828 if (hwa742.pix_tx_time < min_tx_time)
829 hwa742.pix_tx_time = min_tx_time;
830 }
831
832 /* time to update one line in ps */
833 hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
834 hwa742.line_upd_time *= 1000;
835 if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time)
836 /*
837 * transfer speed too low, we might have to use both
838 * HS and VS
839 */
840 use_hsvs = 1;
841 else
842 /* decent transfer speed, we'll always use only VS */
843 use_hsvs = 0;
844
845 if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
846 /*
847 * HS or'ed with VS doesn't work, use the active high
848 * TE signal based on HNDP / VNDP
849 */
850 use_ndp = 1;
851 hs_pol_inv = 0;
852 vs_pol_inv = 0;
853 hs = hndp;
854 vs = vndp;
855 } else {
856 /*
857 * Use HS or'ed with VS as a TE signal if both are needed
858 * or VNDP if only vsync is needed.
859 */
860 use_ndp = 0;
861 hs = hsw;
862 vs = vsw;
863 if (!use_hsvs) {
864 hs_pol_inv = 0;
865 vs_pol_inv = 0;
866 }
867 }
868
869 hs = hs * 1000000 / (pix_clk / 1000); /* ps */
870 hs *= 1000;
871
872 vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */
873 vs *= 1000;
874
875 if (vs <= hs)
876 return -EDOM;
877 /* set VS to 120% of HS to minimize VS detection time */
878 vs = hs * 12 / 10;
879 /* minimize HS too */
880 hs = 10000;
881
882 b = hwa742_read_reg(HWA742_NDP_CTRL);
883 b &= ~0x3;
884 b |= use_hsvs ? 1 : 0;
885 b |= (use_ndp && use_hsvs) ? 0 : 2;
886 hwa742_write_reg(HWA742_NDP_CTRL, b);
887
888 hwa742.vsync_only = !use_hsvs;
889
890 dev_dbg(hwa742.fbdev->dev,
891 "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
892 pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time);
893 dev_dbg(hwa742.fbdev->dev,
894 "hs %d ps vs %d ps mode %d vsync_only %d\n",
895 hs, vs, (b & 0x3), !use_hsvs);
896
897 return hwa742.extif->setup_tearsync(1, hs, vs,
898 hs_pol_inv, vs_pol_inv, extif_div);
899}
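/*
 * Rough numeric example of the decision above (panel and bus numbers are
 * assumed, not taken from a real configuration): with hdisp = 240,
 * hndp = 20 and pix_clk = 16 MHz, one display line takes
 * line_upd_time = (240 + 20) * 10^12 / 16,000,000 = 16,250,000 ps. If the
 * bus can push a pixel every pix_tx_time = 60,000 ps, refilling a line
 * costs 240 * 60,000 = 14,400,000 ps, which fits within one line period,
 * so VS alone is used (use_hsvs = 0); a slower bus would force the
 * HS-or-VS (or HNDP/VNDP based) variants handled above.
 */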
900
901static void hwa742_get_caps(int plane, struct omapfb_caps *caps)
902{
903 hwa742.int_ctrl->get_caps(plane, caps);
904 caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
905 OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE;
906 if (hwa742.te_connected)
907 caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
908 caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
909 (1 << OMAPFB_COLOR_YUV420);
910}
911
912static void hwa742_suspend(void)
913{
914 hwa742.update_mode_before_suspend = hwa742.update_mode;
915 hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
916 /* Enable sleep mode */
917 hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
918 if (hwa742.power_down != NULL)
919 hwa742.power_down(hwa742.fbdev->dev);
920}
921
922static void hwa742_resume(void)
923{
924 if (hwa742.power_up != NULL)
925 hwa742.power_up(hwa742.fbdev->dev);
926 /* Disable sleep mode */
927 hwa742_write_reg(HWA742_POWER_SAVE, 0);
928 while (1) {
929 /* Loop until PLL output is stabilized */
930 if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
931 break;
932 set_current_state(TASK_UNINTERRUPTIBLE);
933 schedule_timeout(msecs_to_jiffies(5));
934 }
935 hwa742_set_update_mode(hwa742.update_mode_before_suspend);
936}
937
938static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
939 struct omapfb_mem_desc *req_vram)
940{
941 int r = 0, i;
942 u8 rev, conf;
943 unsigned long ext_clk;
944 unsigned long sys_clk, pix_clk;
945 int extif_mem_div;
946 struct omapfb_platform_data *omapfb_conf;
947 struct hwa742_platform_data *ctrl_conf;
948
949 BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
950
951 hwa742.fbdev = fbdev;
952 hwa742.extif = fbdev->ext_if;
953 hwa742.int_ctrl = fbdev->int_ctrl;
954
955 omapfb_conf = fbdev->dev->platform_data;
956 ctrl_conf = omapfb_conf->ctrl_platform_data;
957
958 if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
959 dev_err(fbdev->dev, "HWA742: missing platform data\n");
960 r = -ENOENT;
961 goto err1;
962 }
963
964 hwa742.power_down = ctrl_conf->power_down;
965 hwa742.power_up = ctrl_conf->power_up;
966
967 spin_lock_init(&hwa742.req_lock);
968
969 if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0)
970 goto err1;
971
972 if ((r = hwa742.extif->init(fbdev)) < 0)
973 goto err2;
974
975 ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
976 if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
977 goto err3;
978 hwa742.extif->set_timings(&hwa742.reg_timings);
979 if (hwa742.power_up != NULL)
980 hwa742.power_up(fbdev->dev);
981
982 calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
983 if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
984 goto err4;
985 hwa742.extif->set_timings(&hwa742.reg_timings);
986
987 rev = hwa742_read_reg(HWA742_REV_CODE_REG);
988 if ((rev & 0xfc) != 0x80) {
989 dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev);
990 r = -ENODEV;
991 goto err4;
992 }
993
994
995 if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) {
996 dev_err(fbdev->dev,
997 "HWA742: controller not initialized by the bootloader\n");
998 r = -ENODEV;
999 goto err4;
1000 }
1001
1002 if (ctrl_conf->te_connected) {
1003 if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) {
1004 dev_err(hwa742.fbdev->dev,
1005 "HWA742: can't setup tearing synchronization\n");
1006 goto err4;
1007 }
1008 hwa742.te_connected = 1;
1009 }
1010
1011 hwa742.max_transmit_size = hwa742.extif->max_transmit_size;
1012
1013 hwa742.update_mode = OMAPFB_UPDATE_DISABLED;
1014
1015 hwa742.auto_update_window.x = 0;
1016 hwa742.auto_update_window.y = 0;
1017 hwa742.auto_update_window.width = fbdev->panel->x_res;
1018 hwa742.auto_update_window.height = fbdev->panel->y_res;
1019 hwa742.auto_update_window.format = 0;
1020
1021 init_timer(&hwa742.auto_update_timer);
1022 hwa742.auto_update_timer.function = hwa742_update_window_auto;
1023 hwa742.auto_update_timer.data = 0;
1024
1025 hwa742.prev_color_mode = -1;
1026 hwa742.prev_flags = 0;
1027
1028 hwa742.fbdev = fbdev;
1029
1030 INIT_LIST_HEAD(&hwa742.free_req_list);
1031 INIT_LIST_HEAD(&hwa742.pending_req_list);
1032 for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++)
1033 list_add(&hwa742.req_pool[i].entry, &hwa742.free_req_list);
1034 BUG_ON(i <= IRQ_REQ_POOL_SIZE);
1035 sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE);
1036
1037 conf = hwa742_read_reg(HWA742_CONFIG_REG);
 1038        dev_info(fbdev->dev, "Epson HWA742 LCD controller rev %d "
1039 "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
1040
1041 return 0;
1042err4:
1043 if (hwa742.power_down != NULL)
1044 hwa742.power_down(fbdev->dev);
1045err3:
1046 hwa742.extif->cleanup();
1047err2:
1048 hwa742.int_ctrl->cleanup();
1049err1:
1050 return r;
1051}
1052
1053static void hwa742_cleanup(void)
1054{
1055 hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
1056 hwa742.extif->cleanup();
1057 hwa742.int_ctrl->cleanup();
1058 if (hwa742.power_down != NULL)
1059 hwa742.power_down(hwa742.fbdev->dev);
1060}
1061
1062struct lcd_ctrl hwa742_ctrl = {
1063 .name = "hwa742",
1064 .init = hwa742_init,
1065 .cleanup = hwa742_cleanup,
1066 .bind_client = hwa742_bind_client,
1067 .get_caps = hwa742_get_caps,
1068 .set_update_mode = hwa742_set_update_mode,
1069 .get_update_mode = hwa742_get_update_mode,
1070 .setup_plane = hwa742_setup_plane,
1071 .enable_plane = hwa742_enable_plane,
1072 .update_window = hwa742_update_window_async,
1073 .sync = hwa742_sync,
1074 .suspend = hwa742_suspend,
1075 .resume = hwa742_resume,
1076};
1077
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
new file mode 100644
index 000000000000..51807b4e26d1
--- /dev/null
+++ b/drivers/video/omap/lcd_h3.c
@@ -0,0 +1,141 @@
1/*
2 * LCD panel support for the TI OMAP H3 board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24
25#include <asm/arch/gpio.h>
26#include <asm/arch/tps65010.h>
27#include <asm/arch/omapfb.h>
28
29#define MODULE_NAME "omapfb-lcd_h3"
30
31#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
32
33static int h3_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
34{
35 return 0;
36}
37
38static void h3_panel_cleanup(struct lcd_panel *panel)
39{
40}
41
42static int h3_panel_enable(struct lcd_panel *panel)
43{
44 int r = 0;
45
46 /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
47 r = tps65010_set_gpio_out_value(GPIO1, HIGH);
48 if (!r)
49 r = tps65010_set_gpio_out_value(GPIO2, HIGH);
50 if (r)
51 pr_err("Unable to turn on LCD panel\n");
52
53 return r;
54}
55
56static void h3_panel_disable(struct lcd_panel *panel)
57{
58 int r = 0;
59
60 /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
61 r = tps65010_set_gpio_out_value(GPIO1, LOW);
62 if (!r)
 63                r = tps65010_set_gpio_out_value(GPIO2, LOW);
64 if (r)
65 pr_err("Unable to turn off LCD panel\n");
66}
67
68static unsigned long h3_panel_get_caps(struct lcd_panel *panel)
69{
70 return 0;
71}
72
73struct lcd_panel h3_panel = {
74 .name = "h3",
75 .config = OMAP_LCDC_PANEL_TFT,
76
77 .data_lines = 16,
78 .bpp = 16,
79 .x_res = 240,
80 .y_res = 320,
81 .pixel_clock = 12000,
82 .hsw = 12,
83 .hfp = 14,
84 .hbp = 72 - 12,
85 .vsw = 1,
86 .vfp = 1,
87 .vbp = 0,
88 .pcd = 0,
89
90 .init = h3_panel_init,
91 .cleanup = h3_panel_cleanup,
92 .enable = h3_panel_enable,
93 .disable = h3_panel_disable,
94 .get_caps = h3_panel_get_caps,
95};
96
97static int h3_panel_probe(struct platform_device *pdev)
98{
99 omapfb_register_panel(&h3_panel);
100 return 0;
101}
102
103static int h3_panel_remove(struct platform_device *pdev)
104{
105 return 0;
106}
107
108static int h3_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
109{
110 return 0;
111}
112
113static int h3_panel_resume(struct platform_device *pdev)
114{
115 return 0;
116}
117
118struct platform_driver h3_panel_driver = {
119 .probe = h3_panel_probe,
120 .remove = h3_panel_remove,
121 .suspend = h3_panel_suspend,
122 .resume = h3_panel_resume,
123 .driver = {
124 .name = "lcd_h3",
125 .owner = THIS_MODULE,
126 },
127};
128
129static int h3_panel_drv_init(void)
130{
131 return platform_driver_register(&h3_panel_driver);
132}
133
134static void h3_panel_drv_cleanup(void)
135{
136 platform_driver_unregister(&h3_panel_driver);
137}
138
139module_init(h3_panel_drv_init);
140module_exit(h3_panel_drv_cleanup);
141
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
new file mode 100644
index 000000000000..fd6f0eb16de1
--- /dev/null
+++ b/drivers/video/omap/lcd_h4.c
@@ -0,0 +1,117 @@
1/*
2 * LCD panel support for the TI OMAP H4 board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24
25#include <asm/arch/omapfb.h>
26
27static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
28{
29 return 0;
30}
31
32static void h4_panel_cleanup(struct lcd_panel *panel)
33{
34}
35
36static int h4_panel_enable(struct lcd_panel *panel)
37{
38 return 0;
39}
40
41static void h4_panel_disable(struct lcd_panel *panel)
42{
43}
44
45static unsigned long h4_panel_get_caps(struct lcd_panel *panel)
46{
47 return 0;
48}
49
50struct lcd_panel h4_panel = {
51 .name = "h4",
52 .config = OMAP_LCDC_PANEL_TFT,
53
54 .bpp = 16,
55 .data_lines = 16,
56 .x_res = 240,
57 .y_res = 320,
58 .pixel_clock = 6250,
59 .hsw = 15,
60 .hfp = 15,
61 .hbp = 60,
62 .vsw = 1,
63 .vfp = 1,
64 .vbp = 1,
65
66 .init = h4_panel_init,
67 .cleanup = h4_panel_cleanup,
68 .enable = h4_panel_enable,
69 .disable = h4_panel_disable,
70 .get_caps = h4_panel_get_caps,
71};
72
73static int h4_panel_probe(struct platform_device *pdev)
74{
75 omapfb_register_panel(&h4_panel);
76 return 0;
77}
78
79static int h4_panel_remove(struct platform_device *pdev)
80{
81 return 0;
82}
83
84static int h4_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
85{
86 return 0;
87}
88
89static int h4_panel_resume(struct platform_device *pdev)
90{
91 return 0;
92}
93
94struct platform_driver h4_panel_driver = {
95 .probe = h4_panel_probe,
96 .remove = h4_panel_remove,
97 .suspend = h4_panel_suspend,
98 .resume = h4_panel_resume,
99 .driver = {
100 .name = "lcd_h4",
101 .owner = THIS_MODULE,
102 },
103};
104
105static int h4_panel_drv_init(void)
106{
107 return platform_driver_register(&h4_panel_driver);
108}
109
110static void h4_panel_drv_cleanup(void)
111{
112 platform_driver_unregister(&h4_panel_driver);
113}
114
115module_init(h4_panel_drv_init);
116module_exit(h4_panel_drv_cleanup);
117
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
new file mode 100644
index 000000000000..551f385861d1
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -0,0 +1,124 @@
1/*
2 * LCD panel support for the TI OMAP1510 Innovator board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/io.h>
25
26#include <asm/arch/fpga.h>
27#include <asm/arch/omapfb.h>
28
29static int innovator1510_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev)
31{
32 return 0;
33}
34
35static void innovator1510_panel_cleanup(struct lcd_panel *panel)
36{
37}
38
39static int innovator1510_panel_enable(struct lcd_panel *panel)
40{
41 fpga_write(0x7, OMAP1510_FPGA_LCD_PANEL_CONTROL);
42 return 0;
43}
44
45static void innovator1510_panel_disable(struct lcd_panel *panel)
46{
47 fpga_write(0x0, OMAP1510_FPGA_LCD_PANEL_CONTROL);
48}
49
50static unsigned long innovator1510_panel_get_caps(struct lcd_panel *panel)
51{
52 return 0;
53}
54
55struct lcd_panel innovator1510_panel = {
56 .name = "inn1510",
57 .config = OMAP_LCDC_PANEL_TFT,
58
59 .bpp = 16,
60 .data_lines = 16,
61 .x_res = 240,
62 .y_res = 320,
63 .pixel_clock = 12500,
64 .hsw = 40,
65 .hfp = 40,
66 .hbp = 72,
67 .vsw = 1,
68 .vfp = 1,
69 .vbp = 0,
70 .pcd = 12,
71
72 .init = innovator1510_panel_init,
73 .cleanup = innovator1510_panel_cleanup,
74 .enable = innovator1510_panel_enable,
75 .disable = innovator1510_panel_disable,
76 .get_caps = innovator1510_panel_get_caps,
77};
78
79static int innovator1510_panel_probe(struct platform_device *pdev)
80{
81 omapfb_register_panel(&innovator1510_panel);
82 return 0;
83}
84
85static int innovator1510_panel_remove(struct platform_device *pdev)
86{
87 return 0;
88}
89
90static int innovator1510_panel_suspend(struct platform_device *pdev,
91 pm_message_t mesg)
92{
93 return 0;
94}
95
96static int innovator1510_panel_resume(struct platform_device *pdev)
97{
98 return 0;
99}
100
101struct platform_driver innovator1510_panel_driver = {
102 .probe = innovator1510_panel_probe,
103 .remove = innovator1510_panel_remove,
104 .suspend = innovator1510_panel_suspend,
105 .resume = innovator1510_panel_resume,
106 .driver = {
107 .name = "lcd_inn1510",
108 .owner = THIS_MODULE,
109 },
110};
111
112static int innovator1510_panel_drv_init(void)
113{
114 return platform_driver_register(&innovator1510_panel_driver);
115}
116
117static void innovator1510_panel_drv_cleanup(void)
118{
119 platform_driver_unregister(&innovator1510_panel_driver);
120}
121
122module_init(innovator1510_panel_drv_init);
123module_exit(innovator1510_panel_drv_cleanup);
124
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
new file mode 100644
index 000000000000..95604ca43301
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -0,0 +1,150 @@
1/*
2 * LCD panel support for the TI OMAP1610 Innovator board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24
25#include <asm/arch/gpio.h>
26#include <asm/arch/omapfb.h>
27
 28#define MODULE_NAME "omapfb-lcd_inn1610"
29
30#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
31
32static int innovator1610_panel_init(struct lcd_panel *panel,
33 struct omapfb_device *fbdev)
34{
35 int r = 0;
36
37 if (omap_request_gpio(14)) {
38 pr_err("can't request GPIO 14\n");
39 r = -1;
40 goto exit;
41 }
42 if (omap_request_gpio(15)) {
43 pr_err("can't request GPIO 15\n");
44 omap_free_gpio(14);
45 r = -1;
46 goto exit;
47 }
48 /* configure GPIO(14, 15) as outputs */
49 omap_set_gpio_direction(14, 0);
50 omap_set_gpio_direction(15, 0);
51exit:
52 return r;
53}
54
55static void innovator1610_panel_cleanup(struct lcd_panel *panel)
56{
57 omap_free_gpio(15);
58 omap_free_gpio(14);
59}
60
61static int innovator1610_panel_enable(struct lcd_panel *panel)
62{
63 /* set GPIO14 and GPIO15 high */
64 omap_set_gpio_dataout(14, 1);
65 omap_set_gpio_dataout(15, 1);
66 return 0;
67}
68
69static void innovator1610_panel_disable(struct lcd_panel *panel)
70{
 71        /* set GPIO14 and GPIO15 low */
72 omap_set_gpio_dataout(14, 0);
73 omap_set_gpio_dataout(15, 0);
74}
75
76static unsigned long innovator1610_panel_get_caps(struct lcd_panel *panel)
77{
78 return 0;
79}
80
81struct lcd_panel innovator1610_panel = {
82 .name = "inn1610",
83 .config = OMAP_LCDC_PANEL_TFT,
84
85 .bpp = 16,
86 .data_lines = 16,
87 .x_res = 320,
88 .y_res = 240,
89 .pixel_clock = 12500,
90 .hsw = 40,
91 .hfp = 40,
92 .hbp = 72,
93 .vsw = 1,
94 .vfp = 1,
95 .vbp = 0,
96 .pcd = 12,
97
98 .init = innovator1610_panel_init,
99 .cleanup = innovator1610_panel_cleanup,
100 .enable = innovator1610_panel_enable,
101 .disable = innovator1610_panel_disable,
102 .get_caps = innovator1610_panel_get_caps,
103};
104
105static int innovator1610_panel_probe(struct platform_device *pdev)
106{
107 omapfb_register_panel(&innovator1610_panel);
108 return 0;
109}
110
111static int innovator1610_panel_remove(struct platform_device *pdev)
112{
113 return 0;
114}
115
116static int innovator1610_panel_suspend(struct platform_device *pdev,
117 pm_message_t mesg)
118{
119 return 0;
120}
121
122static int innovator1610_panel_resume(struct platform_device *pdev)
123{
124 return 0;
125}
126
127struct platform_driver innovator1610_panel_driver = {
128 .probe = innovator1610_panel_probe,
129 .remove = innovator1610_panel_remove,
130 .suspend = innovator1610_panel_suspend,
131 .resume = innovator1610_panel_resume,
132 .driver = {
133 .name = "lcd_inn1610",
134 .owner = THIS_MODULE,
135 },
136};
137
138static int innovator1610_panel_drv_init(void)
139{
140 return platform_driver_register(&innovator1610_panel_driver);
141}
142
143static void innovator1610_panel_drv_cleanup(void)
144{
145 platform_driver_unregister(&innovator1610_panel_driver);
146}
147
148module_init(innovator1610_panel_drv_init);
149module_exit(innovator1610_panel_drv_cleanup);
150
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
new file mode 100644
index 000000000000..a38038840fd6
--- /dev/null
+++ b/drivers/video/omap/lcd_osk.c
@@ -0,0 +1,144 @@
1/*
2 * LCD panel support for the TI OMAP OSK board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 * Adapted for OSK by <dirk.behme@de.bosch.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/module.h>
24#include <linux/platform_device.h>
25
26#include <asm/arch/gpio.h>
27#include <asm/arch/mux.h>
28#include <asm/arch/omapfb.h>
29
30static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
31{
32 return 0;
33}
34
35static void osk_panel_cleanup(struct lcd_panel *panel)
36{
37}
38
39static int osk_panel_enable(struct lcd_panel *panel)
40{
41 /* configure PWL pin */
42 omap_cfg_reg(PWL);
43
44 /* Enable PWL unit */
45 omap_writeb(0x01, OMAP_PWL_CLK_ENABLE);
46
47 /* Set PWL level */
48 omap_writeb(0xFF, OMAP_PWL_ENABLE);
49
50 /* configure GPIO2 as output */
51 omap_set_gpio_direction(2, 0);
52
53 /* set GPIO2 high */
54 omap_set_gpio_dataout(2, 1);
55
56 return 0;
57}
58
59static void osk_panel_disable(struct lcd_panel *panel)
60{
61 /* Set PWL level to zero */
62 omap_writeb(0x00, OMAP_PWL_ENABLE);
63
64 /* Disable PWL unit */
65 omap_writeb(0x00, OMAP_PWL_CLK_ENABLE);
66
67 /* set GPIO2 low */
68 omap_set_gpio_dataout(2, 0);
69}
70
71static unsigned long osk_panel_get_caps(struct lcd_panel *panel)
72{
73 return 0;
74}
75
76struct lcd_panel osk_panel = {
77 .name = "osk",
78 .config = OMAP_LCDC_PANEL_TFT,
79
80 .bpp = 16,
81 .data_lines = 16,
82 .x_res = 240,
83 .y_res = 320,
84 .pixel_clock = 12500,
85 .hsw = 40,
86 .hfp = 40,
87 .hbp = 72,
88 .vsw = 1,
89 .vfp = 1,
90 .vbp = 0,
91 .pcd = 12,
92
93 .init = osk_panel_init,
94 .cleanup = osk_panel_cleanup,
95 .enable = osk_panel_enable,
96 .disable = osk_panel_disable,
97 .get_caps = osk_panel_get_caps,
98};
99
100static int osk_panel_probe(struct platform_device *pdev)
101{
102 omapfb_register_panel(&osk_panel);
103 return 0;
104}
105
106static int osk_panel_remove(struct platform_device *pdev)
107{
108 return 0;
109}
110
111static int osk_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
112{
113 return 0;
114}
115
116static int osk_panel_resume(struct platform_device *pdev)
117{
118 return 0;
119}
120
121struct platform_driver osk_panel_driver = {
122 .probe = osk_panel_probe,
123 .remove = osk_panel_remove,
124 .suspend = osk_panel_suspend,
125 .resume = osk_panel_resume,
126 .driver = {
127 .name = "lcd_osk",
128 .owner = THIS_MODULE,
129 },
130};
131
132static int osk_panel_drv_init(void)
133{
134 return platform_driver_register(&osk_panel_driver);
135}
136
137static void osk_panel_drv_cleanup(void)
138{
139 platform_driver_unregister(&osk_panel_driver);
140}
141
142module_init(osk_panel_drv_init);
143module_exit(osk_panel_drv_cleanup);
144
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
new file mode 100644
index 000000000000..52bdfdac42c9
--- /dev/null
+++ b/drivers/video/omap/lcd_palmte.c
@@ -0,0 +1,123 @@
1/*
2 * LCD panel support for the Palm Tungsten E
3 *
4 * Original version : Romain Goyet <r.goyet@gmail.com>
5 * Current version : Laurent Gonzalez <palmte.linux@free.fr>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/io.h>
25
26#include <asm/arch/fpga.h>
27#include <asm/arch/omapfb.h>
28
29static int palmte_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev)
31{
32 return 0;
33}
34
35static void palmte_panel_cleanup(struct lcd_panel *panel)
36{
37}
38
39static int palmte_panel_enable(struct lcd_panel *panel)
40{
41 return 0;
42}
43
44static void palmte_panel_disable(struct lcd_panel *panel)
45{
46}
47
48static unsigned long palmte_panel_get_caps(struct lcd_panel *panel)
49{
50 return 0;
51}
52
53struct lcd_panel palmte_panel = {
54 .name = "palmte",
55 .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
56 OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
57 OMAP_LCDC_HSVS_OPPOSITE,
58
59 .data_lines = 16,
60 .bpp = 8,
61 .pixel_clock = 12000,
62 .x_res = 320,
63 .y_res = 320,
64 .hsw = 4,
65 .hfp = 8,
66 .hbp = 28,
67 .vsw = 1,
68 .vfp = 8,
69 .vbp = 7,
70 .pcd = 0,
71
72 .init = palmte_panel_init,
73 .cleanup = palmte_panel_cleanup,
74 .enable = palmte_panel_enable,
75 .disable = palmte_panel_disable,
76 .get_caps = palmte_panel_get_caps,
77};
78
79static int palmte_panel_probe(struct platform_device *pdev)
80{
81 omapfb_register_panel(&palmte_panel);
82 return 0;
83}
84
85static int palmte_panel_remove(struct platform_device *pdev)
86{
87 return 0;
88}
89
90static int palmte_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
91{
92 return 0;
93}
94
95static int palmte_panel_resume(struct platform_device *pdev)
96{
97 return 0;
98}
99
100struct platform_driver palmte_panel_driver = {
101 .probe = palmte_panel_probe,
102 .remove = palmte_panel_remove,
103 .suspend = palmte_panel_suspend,
104 .resume = palmte_panel_resume,
105 .driver = {
106 .name = "lcd_palmte",
107 .owner = THIS_MODULE,
108 },
109};
110
111static int palmte_panel_drv_init(void)
112{
113 return platform_driver_register(&palmte_panel_driver);
114}
115
116static void palmte_panel_drv_cleanup(void)
117{
118 platform_driver_unregister(&palmte_panel_driver);
119}
120
121module_init(palmte_panel_drv_init);
122module_exit(palmte_panel_drv_cleanup);
123
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
new file mode 100644
index 000000000000..4bb349f54356
--- /dev/null
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -0,0 +1,127 @@
1/*
2 * LCD panel support for Palm Tungsten|T
3 * Current version : Marek Vasut <marek.vasut@gmail.com>
4 *
5 * Modified from lcd_inn1510.c
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
 22/*
 23 * GPIO11 - backlight
 24 * GPIO12 - screen blanking
 25 * GPIO13 - screen blanking
 26 */
27
28#include <linux/platform_device.h>
29#include <linux/module.h>
30#include <linux/io.h>
31
32#include <asm/arch/gpio.h>
33#include <asm/arch/omapfb.h>
34
35static int palmtt_panel_init(struct lcd_panel *panel,
36 struct omapfb_device *fbdev)
37{
38 return 0;
39}
40
41static void palmtt_panel_cleanup(struct lcd_panel *panel)
42{
43}
44
45static int palmtt_panel_enable(struct lcd_panel *panel)
46{
47 return 0;
48}
49
50static void palmtt_panel_disable(struct lcd_panel *panel)
51{
52}
53
54static unsigned long palmtt_panel_get_caps(struct lcd_panel *panel)
55{
56 return OMAPFB_CAPS_SET_BACKLIGHT;
57}
58
59struct lcd_panel palmtt_panel = {
60 .name = "palmtt",
61 .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
62 OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
63 OMAP_LCDC_HSVS_OPPOSITE,
64 .bpp = 16,
65 .data_lines = 16,
66 .x_res = 320,
67 .y_res = 320,
68 .pixel_clock = 10000,
69 .hsw = 4,
70 .hfp = 8,
71 .hbp = 28,
72 .vsw = 1,
73 .vfp = 8,
74 .vbp = 7,
75 .pcd = 0,
76
77 .init = palmtt_panel_init,
78 .cleanup = palmtt_panel_cleanup,
79 .enable = palmtt_panel_enable,
80 .disable = palmtt_panel_disable,
81 .get_caps = palmtt_panel_get_caps,
82};
83
84static int palmtt_panel_probe(struct platform_device *pdev)
85{
86 omapfb_register_panel(&palmtt_panel);
87 return 0;
88}
89
90static int palmtt_panel_remove(struct platform_device *pdev)
91{
92 return 0;
93}
94
95static int palmtt_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
96{
97 return 0;
98}
99
100static int palmtt_panel_resume(struct platform_device *pdev)
101{
102 return 0;
103}
104
105struct platform_driver palmtt_panel_driver = {
106 .probe = palmtt_panel_probe,
107 .remove = palmtt_panel_remove,
108 .suspend = palmtt_panel_suspend,
109 .resume = palmtt_panel_resume,
110 .driver = {
111 .name = "lcd_palmtt",
112 .owner = THIS_MODULE,
113 },
114};
115
116static int palmtt_panel_drv_init(void)
117{
118 return platform_driver_register(&palmtt_panel_driver);
119}
120
121static void palmtt_panel_drv_cleanup(void)
122{
123 platform_driver_unregister(&palmtt_panel_driver);
124}
125
126module_init(palmtt_panel_drv_init);
127module_exit(palmtt_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
new file mode 100644
index 000000000000..ea6170ddff35
--- /dev/null
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -0,0 +1,123 @@
1/*
2 * LCD panel support for the Palm Zire71
3 *
4 * Original version : Romain Goyet
5 * Current version : Laurent Gonzalez
6 * Modified for zire71 : Marek Vasut
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/io.h>
26
27#include <asm/arch/omapfb.h>
28
29static int palmz71_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev)
31{
32 return 0;
33}
34
35static void palmz71_panel_cleanup(struct lcd_panel *panel)
36{
37
38}
39
40static int palmz71_panel_enable(struct lcd_panel *panel)
41{
42 return 0;
43}
44
45static void palmz71_panel_disable(struct lcd_panel *panel)
46{
47}
48
49static unsigned long palmz71_panel_get_caps(struct lcd_panel *panel)
50{
51 return OMAPFB_CAPS_SET_BACKLIGHT;
52}
53
54struct lcd_panel palmz71_panel = {
55 .name = "palmz71",
56 .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
57 OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
58 OMAP_LCDC_HSVS_OPPOSITE,
59 .data_lines = 16,
60 .bpp = 16,
61 .pixel_clock = 24000,
62 .x_res = 320,
63 .y_res = 320,
64 .hsw = 4,
65 .hfp = 8,
66 .hbp = 28,
67 .vsw = 1,
68 .vfp = 8,
69 .vbp = 7,
70 .pcd = 0,
71
72 .init = palmz71_panel_init,
73 .cleanup = palmz71_panel_cleanup,
74 .enable = palmz71_panel_enable,
75 .disable = palmz71_panel_disable,
76 .get_caps = palmz71_panel_get_caps,
77};
78
79static int palmz71_panel_probe(struct platform_device *pdev)
80{
81 omapfb_register_panel(&palmz71_panel);
82 return 0;
83}
84
85static int palmz71_panel_remove(struct platform_device *pdev)
86{
87 return 0;
88}
89
90static int palmz71_panel_suspend(struct platform_device *pdev,
91 pm_message_t mesg)
92{
93 return 0;
94}
95
96static int palmz71_panel_resume(struct platform_device *pdev)
97{
98 return 0;
99}
100
101struct platform_driver palmz71_panel_driver = {
102 .probe = palmz71_panel_probe,
103 .remove = palmz71_panel_remove,
104 .suspend = palmz71_panel_suspend,
105 .resume = palmz71_panel_resume,
106 .driver = {
107 .name = "lcd_palmz71",
108 .owner = THIS_MODULE,
109 },
110};
111
112static int palmz71_panel_drv_init(void)
113{
114 return platform_driver_register(&palmz71_panel_driver);
115}
116
117static void palmz71_panel_drv_cleanup(void)
118{
119 platform_driver_unregister(&palmz71_panel_driver);
120}
121
122module_init(palmz71_panel_drv_init);
123module_exit(palmz71_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c
new file mode 100644
index 000000000000..c4f306a4e5c9
--- /dev/null
+++ b/drivers/video/omap/lcd_sx1.c
@@ -0,0 +1,334 @@
1/*
2 * LCD panel support for the Siemens SX1 mobile phone
3 *
4 * Current version : Vovan888@gmail.com, great help from FCA00000
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/delay.h>
24#include <linux/io.h>
25
26#include <asm/arch/gpio.h>
27#include <asm/arch/omapfb.h>
28#include <asm/arch/mcbsp.h>
29#include <asm/arch/mux.h>
30
31/*
32 * OMAP310 GPIO registers
33 */
34#define GPIO_DATA_INPUT 0xfffce000
35#define GPIO_DATA_OUTPUT 0xfffce004
36#define GPIO_DIR_CONTROL 0xfffce008
37#define GPIO_INT_CONTROL 0xfffce00c
38#define GPIO_INT_MASK 0xfffce010
39#define GPIO_INT_STATUS 0xfffce014
40#define GPIO_PIN_CONTROL 0xfffce018
41
42
43#define A_LCD_SSC_RD 3
44#define A_LCD_SSC_SD 7
45#define _A_LCD_RESET 9
46#define _A_LCD_SSC_CS 12
47#define _A_LCD_SSC_A0 13
48
49#define DSP_REG 0xE1017024
50
51const unsigned char INIT_1[12] = {
52 0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00
53};
54
55const unsigned char INIT_2[127] = {
56 0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00,
57 0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00,
58 0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00,
59 0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01,
60 0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01,
61 0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01,
62 0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01,
63 0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01,
64 0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01,
65 0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01,
66 0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01,
67 0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01,
68 0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01,
69 0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01,
70 0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02,
71 0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00
72};
73
74const unsigned char INIT_3[15] = {
75 0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59,
76 0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF
77};
78
79static void epson_sendbyte(int flag, unsigned char byte)
80{
81 int i, shifter = 0x80;
82
83 if (!flag)
84 omap_set_gpio_dataout(_A_LCD_SSC_A0, 0);
85 mdelay(2);
86 omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
87
88 omap_set_gpio_dataout(A_LCD_SSC_SD, flag);
89
90 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
91 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
92 for (i = 0; i < 8; i++) {
93 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
94 omap_set_gpio_dataout(A_LCD_SSC_SD, shifter & byte);
95 OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
96 shifter >>= 1;
97 }
98 omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
99}
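/*
 * Rough reading of the transfer above: A0 is driven low first for a
 * command byte (flag == 0) and left high for a data byte, the eight bits
 * are then shifted out MSB first on the SD line, and the paired PCR0
 * writes per bit appear to toggle the McBSP3 clock pin to strobe each bit
 * into the Epson controller.
 */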
100
101static void init_system(void)
102{
103 omap_mcbsp_request(OMAP_MCBSP3);
104 omap_mcbsp_stop(OMAP_MCBSP3);
105}
106
107static void setup_GPIO(void)
108{
109 /* new wave */
110 omap_request_gpio(A_LCD_SSC_RD);
111 omap_request_gpio(A_LCD_SSC_SD);
112 omap_request_gpio(_A_LCD_RESET);
113 omap_request_gpio(_A_LCD_SSC_CS);
114 omap_request_gpio(_A_LCD_SSC_A0);
115
116 /* set all GPIOs to output */
117 omap_set_gpio_direction(A_LCD_SSC_RD, 0);
118 omap_set_gpio_direction(A_LCD_SSC_SD, 0);
119 omap_set_gpio_direction(_A_LCD_RESET, 0);
120 omap_set_gpio_direction(_A_LCD_SSC_CS, 0);
121 omap_set_gpio_direction(_A_LCD_SSC_A0, 0);
122
123 /* set GPIO data */
124 omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
125 omap_set_gpio_dataout(A_LCD_SSC_SD, 0);
126 omap_set_gpio_dataout(_A_LCD_RESET, 0);
127 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
128 omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
129}
130
131static void display_init(void)
132{
133 int i;
134
135 omap_cfg_reg(MCBSP3_CLKX);
136
137 mdelay(2);
138 setup_GPIO();
139 mdelay(2);
140
141 /* reset LCD */
142 omap_set_gpio_dataout(A_LCD_SSC_SD, 1);
143 epson_sendbyte(0, 0x25);
144
145 omap_set_gpio_dataout(_A_LCD_RESET, 0);
146 mdelay(10);
147 omap_set_gpio_dataout(_A_LCD_RESET, 1);
148
149 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
150 mdelay(2);
151 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
152
153 /* init LCD, phase 1 */
154 epson_sendbyte(0, 0xCA);
155 for (i = 0; i < 10; i++)
156 epson_sendbyte(1, INIT_1[i]);
157 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
158 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
159
160 /* init LCD phase 2 */
161 epson_sendbyte(0, 0xCB);
162 for (i = 0; i < 125; i++)
163 epson_sendbyte(1, INIT_2[i]);
164 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
165 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
166
167 /* init LCD phase 2a */
168 epson_sendbyte(0, 0xCC);
169 for (i = 0; i < 14; i++)
170 epson_sendbyte(1, INIT_3[i]);
171 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
172 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
173
174 /* init LCD phase 3 */
175 epson_sendbyte(0, 0xBC);
176 epson_sendbyte(1, 0x08);
177 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
178 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
179
180 /* init LCD phase 4 */
181 epson_sendbyte(0, 0x07);
182 epson_sendbyte(1, 0x05);
183 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
184 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
185
186 /* init LCD phase 5 */
187 epson_sendbyte(0, 0x94);
188 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
189 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
190
191 /* init LCD phase 6 */
192 epson_sendbyte(0, 0xC6);
193 epson_sendbyte(1, 0x80);
194 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
195 mdelay(100); /* used to be 1000 */
196 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
197
198 /* init LCD phase 7 */
199 epson_sendbyte(0, 0x16);
200 epson_sendbyte(1, 0x02);
201 epson_sendbyte(1, 0x00);
202 epson_sendbyte(1, 0xB1);
203 epson_sendbyte(1, 0x00);
204 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
205 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
206
207 /* init LCD phase 8 */
208 epson_sendbyte(0, 0x76);
209 epson_sendbyte(1, 0x00);
210 epson_sendbyte(1, 0x00);
211 epson_sendbyte(1, 0xDB);
212 epson_sendbyte(1, 0x00);
213 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
214 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
215
216 /* init LCD phase 9 */
217 epson_sendbyte(0, 0xAF);
218 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
219}
220
221static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
222{
223 return 0;
224}
225
226static void sx1_panel_cleanup(struct lcd_panel *panel)
227{
228}
229
230static void sx1_panel_disable(struct lcd_panel *panel)
231{
 232        printk(KERN_INFO "lcd_sx1: LCD panel disable\n");
233 sx1_setmmipower(0);
234 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
235
236 epson_sendbyte(0, 0x25);
237 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
238
239 epson_sendbyte(0, 0xAE);
240 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
241 mdelay(100);
242 omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
243
244 epson_sendbyte(0, 0x95);
245 omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
246}
247
248static int sx1_panel_enable(struct lcd_panel *panel)
249{
250 printk(KERN_INFO "lcd_sx1: LCD panel enable\n");
251 init_system();
252 display_init();
253
254 sx1_setmmipower(1);
255 sx1_setbacklight(0x18);
 256        sx1_setkeylight(0x06);
257 return 0;
258}
259
260
261static unsigned long sx1_panel_get_caps(struct lcd_panel *panel)
262{
263 return 0;
264}
265
266struct lcd_panel sx1_panel = {
267 .name = "sx1",
268 .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
269 OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK |
270 OMAP_LCDC_INV_OUTPUT_EN,
271
272 .x_res = 176,
273 .y_res = 220,
274 .data_lines = 16,
275 .bpp = 16,
276 .hsw = 5,
277 .hfp = 5,
278 .hbp = 5,
279 .vsw = 2,
280 .vfp = 1,
281 .vbp = 1,
282 .pixel_clock = 1500,
283
284 .init = sx1_panel_init,
285 .cleanup = sx1_panel_cleanup,
286 .enable = sx1_panel_enable,
287 .disable = sx1_panel_disable,
288 .get_caps = sx1_panel_get_caps,
289};
290
291static int sx1_panel_probe(struct platform_device *pdev)
292{
293 omapfb_register_panel(&sx1_panel);
294 return 0;
295}
296
297static int sx1_panel_remove(struct platform_device *pdev)
298{
299 return 0;
300}
301
302static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
303{
304 return 0;
305}
306
307static int sx1_panel_resume(struct platform_device *pdev)
308{
309 return 0;
310}
311
312struct platform_driver sx1_panel_driver = {
313 .probe = sx1_panel_probe,
314 .remove = sx1_panel_remove,
315 .suspend = sx1_panel_suspend,
316 .resume = sx1_panel_resume,
317 .driver = {
318 .name = "lcd_sx1",
319 .owner = THIS_MODULE,
320 },
321};
322
323static int sx1_panel_drv_init(void)
324{
325 return platform_driver_register(&sx1_panel_driver);
326}
327
328static void sx1_panel_drv_cleanup(void)
329{
330 platform_driver_unregister(&sx1_panel_driver);
331}
332
333module_init(sx1_panel_drv_init);
334module_exit(sx1_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
new file mode 100644
index 000000000000..9085188d815e
--- /dev/null
+++ b/drivers/video/omap/lcdc.c
@@ -0,0 +1,893 @@
1/*
2 * OMAP1 internal LCD controller
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#include <linux/module.h>
22#include <linux/device.h>
23#include <linux/interrupt.h>
24#include <linux/spinlock.h>
25#include <linux/err.h>
26#include <linux/mm.h>
27#include <linux/fb.h>
28#include <linux/dma-mapping.h>
29#include <linux/vmalloc.h>
30#include <linux/clk.h>
31
32#include <asm/arch/dma.h>
33#include <asm/arch/omapfb.h>
34
35#include <asm/mach-types.h>
36
37#define MODULE_NAME "lcdc"
38
39#define OMAP_LCDC_BASE 0xfffec000
40#define OMAP_LCDC_SIZE 256
41#define OMAP_LCDC_IRQ INT_LCD_CTRL
42
43#define OMAP_LCDC_CONTROL (OMAP_LCDC_BASE + 0x00)
44#define OMAP_LCDC_TIMING0 (OMAP_LCDC_BASE + 0x04)
45#define OMAP_LCDC_TIMING1 (OMAP_LCDC_BASE + 0x08)
46#define OMAP_LCDC_TIMING2 (OMAP_LCDC_BASE + 0x0c)
47#define OMAP_LCDC_STATUS (OMAP_LCDC_BASE + 0x10)
48#define OMAP_LCDC_SUBPANEL (OMAP_LCDC_BASE + 0x14)
49#define OMAP_LCDC_LINE_INT (OMAP_LCDC_BASE + 0x18)
50#define OMAP_LCDC_DISPLAY_STATUS (OMAP_LCDC_BASE + 0x1c)
51
52#define OMAP_LCDC_STAT_DONE (1 << 0)
53#define OMAP_LCDC_STAT_VSYNC (1 << 1)
54#define OMAP_LCDC_STAT_SYNC_LOST (1 << 2)
55#define OMAP_LCDC_STAT_ABC (1 << 3)
56#define OMAP_LCDC_STAT_LINE_INT (1 << 4)
57#define OMAP_LCDC_STAT_FUF (1 << 5)
58#define OMAP_LCDC_STAT_LOADED_PALETTE (1 << 6)
59
60#define OMAP_LCDC_CTRL_LCD_EN (1 << 0)
61#define OMAP_LCDC_CTRL_LCD_TFT (1 << 7)
62#define OMAP_LCDC_CTRL_LINE_IRQ_CLR_SEL (1 << 10)
63
64#define OMAP_LCDC_IRQ_VSYNC (1 << 2)
65#define OMAP_LCDC_IRQ_DONE (1 << 3)
66#define OMAP_LCDC_IRQ_LOADED_PALETTE (1 << 4)
67#define OMAP_LCDC_IRQ_LINE_NIRQ (1 << 5)
68#define OMAP_LCDC_IRQ_LINE (1 << 6)
69#define OMAP_LCDC_IRQ_MASK (((1 << 5) - 1) << 2)
70
71#define MAX_PALETTE_SIZE PAGE_SIZE
72
73enum lcdc_load_mode {
74 OMAP_LCDC_LOAD_PALETTE,
75 OMAP_LCDC_LOAD_FRAME,
76 OMAP_LCDC_LOAD_PALETTE_AND_FRAME
77};
78
79static struct omap_lcd_controller {
80 enum omapfb_update_mode update_mode;
81 int ext_mode;
82
83 unsigned long frame_offset;
84 int screen_width;
85 int xres;
86 int yres;
87
88 enum omapfb_color_format color_mode;
89 int bpp;
90 void *palette_virt;
91 dma_addr_t palette_phys;
92 int palette_code;
93 int palette_size;
94
95 unsigned int irq_mask;
96 struct completion last_frame_complete;
97 struct completion palette_load_complete;
98 struct clk *lcd_ck;
99 struct omapfb_device *fbdev;
100
101 void (*dma_callback)(void *data);
102 void *dma_callback_data;
103
104 int fbmem_allocated;
105 dma_addr_t vram_phys;
106 void *vram_virt;
107 unsigned long vram_size;
108} lcdc;
109
 110static inline void enable_irqs(int mask)
111{
112 lcdc.irq_mask |= mask;
113}
114
 115static inline void disable_irqs(int mask)
116{
117 lcdc.irq_mask &= ~mask;
118}
119
120static void set_load_mode(enum lcdc_load_mode mode)
121{
122 u32 l;
123
124 l = omap_readl(OMAP_LCDC_CONTROL);
125 l &= ~(3 << 20);
126 switch (mode) {
127 case OMAP_LCDC_LOAD_PALETTE:
128 l |= 1 << 20;
129 break;
130 case OMAP_LCDC_LOAD_FRAME:
131 l |= 2 << 20;
132 break;
133 case OMAP_LCDC_LOAD_PALETTE_AND_FRAME:
134 break;
135 default:
136 BUG();
137 }
138 omap_writel(l, OMAP_LCDC_CONTROL);
139}
140
141static void enable_controller(void)
142{
143 u32 l;
144
145 l = omap_readl(OMAP_LCDC_CONTROL);
146 l |= OMAP_LCDC_CTRL_LCD_EN;
147 l &= ~OMAP_LCDC_IRQ_MASK;
148 l |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE; /* enabled IRQs */
149 omap_writel(l, OMAP_LCDC_CONTROL);
150}
151
152static void disable_controller_async(void)
153{
154 u32 l;
155 u32 mask;
156
157 l = omap_readl(OMAP_LCDC_CONTROL);
158 mask = OMAP_LCDC_CTRL_LCD_EN | OMAP_LCDC_IRQ_MASK;
159 /*
160 * Preserve the DONE mask, since we still want to get the
161 * final DONE irq. It will be disabled in the IRQ handler.
162 */
163 mask &= ~OMAP_LCDC_IRQ_DONE;
164 l &= ~mask;
165 omap_writel(l, OMAP_LCDC_CONTROL);
166}
167
168static void disable_controller(void)
169{
170 init_completion(&lcdc.last_frame_complete);
171 disable_controller_async();
172 if (!wait_for_completion_timeout(&lcdc.last_frame_complete,
173 msecs_to_jiffies(500)))
174 dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
175}
176
177static void reset_controller(u32 status)
178{
179 static unsigned long reset_count;
180 static unsigned long last_jiffies;
181
182 disable_controller_async();
183 reset_count++;
184 if (reset_count == 1 || time_after(jiffies, last_jiffies + HZ)) {
185 dev_err(lcdc.fbdev->dev,
186 "resetting (status %#010x,reset count %lu)\n",
187 status, reset_count);
188 last_jiffies = jiffies;
189 }
190 if (reset_count < 100) {
191 enable_controller();
192 } else {
193 reset_count = 0;
194 dev_err(lcdc.fbdev->dev,
195 "too many reset attempts, giving up.\n");
196 }
197}
198
199/*
200 * Configure the LCD DMA according to the current mode specified by parameters
201 * in lcdc.fbdev and fbdev->var.
202 */
203static void setup_lcd_dma(void)
204{
205 static const int dma_elem_type[] = {
206 0,
207 OMAP_DMA_DATA_TYPE_S8,
208 OMAP_DMA_DATA_TYPE_S16,
209 0,
210 OMAP_DMA_DATA_TYPE_S32,
211 };
212 struct omapfb_plane_struct *plane = lcdc.fbdev->fb_info[0]->par;
213 struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
214 unsigned long src;
215 int esize, xelem, yelem;
216
217 src = lcdc.vram_phys + lcdc.frame_offset;
218
219 switch (var->rotate) {
220 case 0:
221 if (plane->info.mirror || (src & 3) ||
222 lcdc.color_mode == OMAPFB_COLOR_YUV420 ||
223 (lcdc.xres & 1))
224 esize = 2;
225 else
226 esize = 4;
227 xelem = lcdc.xres * lcdc.bpp / 8 / esize;
228 yelem = lcdc.yres;
229 break;
230 case 90:
231 case 180:
232 case 270:
233 if (cpu_is_omap15xx()) {
234 BUG();
235 }
236 esize = 2;
237 xelem = lcdc.yres * lcdc.bpp / 16;
238 yelem = lcdc.xres;
239 break;
240 default:
241 BUG();
242 return;
243 }
244#ifdef VERBOSE
245 dev_dbg(lcdc.fbdev->dev,
246 "setup_dma: src %#010lx esize %d xelem %d yelem %d\n",
247 src, esize, xelem, yelem);
248#endif
249 omap_set_lcd_dma_b1(src, xelem, yelem, dma_elem_type[esize]);
250 if (!cpu_is_omap15xx()) {
251 int bpp = lcdc.bpp;
252
253 /*
254 * YUV support is only for external mode when we have the
255 * YUV window embedded in a 16bpp frame buffer.
256 */
257 if (lcdc.color_mode == OMAPFB_COLOR_YUV420)
258 bpp = 16;
259 /* Set virtual xres elem size */
260 omap_set_lcd_dma_b1_vxres(
261 lcdc.screen_width * bpp / 8 / esize);
262 /* Setup transformations */
263 omap_set_lcd_dma_b1_rotation(var->rotate);
264 omap_set_lcd_dma_b1_mirror(plane->info.mirror);
265 }
266 omap_setup_lcd_dma();
267}
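/*
 * Example of the element-size selection above (frame geometry assumed for
 * illustration): for an unrotated, unmirrored 320x240 16bpp frame whose
 * start address is word aligned, esize = 4, so xelem = 320 * 16 / 8 / 4 =
 * 160 32-bit elements per line and yelem = 240 lines. A rotated frame, a
 * mirrored plane, a YUV420 window or an odd xres forces esize = 2 so the
 * DMA still walks the buffer in properly aligned units.
 */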
268
269static irqreturn_t lcdc_irq_handler(int irq, void *dev_id)
270{
271 u32 status;
272
273 status = omap_readl(OMAP_LCDC_STATUS);
274
275 if (status & (OMAP_LCDC_STAT_FUF | OMAP_LCDC_STAT_SYNC_LOST))
276 reset_controller(status);
277 else {
278 if (status & OMAP_LCDC_STAT_DONE) {
279 u32 l;
280
281 /*
282 * Disable IRQ_DONE. The status bit will be cleared
283 * only when the controller is reenabled and we don't
284 * want to get more interrupts.
285 */
286 l = omap_readl(OMAP_LCDC_CONTROL);
287 l &= ~OMAP_LCDC_IRQ_DONE;
288 omap_writel(l, OMAP_LCDC_CONTROL);
289 complete(&lcdc.last_frame_complete);
290 }
291 if (status & OMAP_LCDC_STAT_LOADED_PALETTE) {
292 disable_controller_async();
293 complete(&lcdc.palette_load_complete);
294 }
295 }
296
297 /*
298 * Clear these interrupt status bits.
299 * Sync_lost, FUF bits were cleared by disabling the LCD controller
300 * LOADED_PALETTE can be cleared this way only in palette only
301 * load mode. In other load modes it's cleared by disabling the
302 * controller.
303 */
304 status &= ~(OMAP_LCDC_STAT_VSYNC |
305 OMAP_LCDC_STAT_LOADED_PALETTE |
306 OMAP_LCDC_STAT_ABC |
307 OMAP_LCDC_STAT_LINE_INT);
308 omap_writel(status, OMAP_LCDC_STATUS);
309 return IRQ_HANDLED;
310}
311
312/*
313 * Change to a new video mode. We defer the actual switch to avoid any
314 * flicker and to keep the current LCD DMA context intact. For this we
315 * disable the LCD controller, which generates a DONE irq after the last
316 * frame has been transferred. Then it's safe to reconfigure both the
317 * LCD controller and the LCD DMA.
318 */
319static int omap_lcdc_setup_plane(int plane, int channel_out,
320 unsigned long offset, int screen_width,
321 int pos_x, int pos_y, int width, int height,
322 int color_mode)
323{
324 struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
325 struct lcd_panel *panel = lcdc.fbdev->panel;
326 int rot_x, rot_y;
327
328 if (var->rotate == 0) {
329 rot_x = panel->x_res;
330 rot_y = panel->y_res;
331 } else {
332 rot_x = panel->y_res;
333 rot_y = panel->x_res;
334 }
335 if (plane != 0 || channel_out != 0 || pos_x != 0 || pos_y != 0 ||
336 width > rot_x || height > rot_y) {
337#ifdef VERBOSE
338 dev_dbg(lcdc.fbdev->dev,
339 "invalid plane params plane %d pos_x %d pos_y %d "
340 "w %d h %d\n", plane, pos_x, pos_y, width, height);
341#endif
342 return -EINVAL;
343 }
344
345 lcdc.frame_offset = offset;
346 lcdc.xres = width;
347 lcdc.yres = height;
348 lcdc.screen_width = screen_width;
349 lcdc.color_mode = color_mode;
350
351 switch (color_mode) {
352 case OMAPFB_COLOR_CLUT_8BPP:
353 lcdc.bpp = 8;
354 lcdc.palette_code = 0x3000;
355 lcdc.palette_size = 512;
356 break;
357 case OMAPFB_COLOR_RGB565:
358 lcdc.bpp = 16;
359 lcdc.palette_code = 0x4000;
360 lcdc.palette_size = 32;
361 break;
362 case OMAPFB_COLOR_RGB444:
363 lcdc.bpp = 16;
364 lcdc.palette_code = 0x4000;
365 lcdc.palette_size = 32;
366 break;
367 case OMAPFB_COLOR_YUV420:
368 if (lcdc.ext_mode) {
369 lcdc.bpp = 12;
370 break;
371 }
372 /* fallthrough */
373 case OMAPFB_COLOR_YUV422:
374 if (lcdc.ext_mode) {
375 lcdc.bpp = 16;
376 break;
377 }
378 /* fallthrough */
379 default:
380 /* FIXME: other BPPs.
381 * bpp1: code 0, size 256
382 * bpp2: code 0x1000 size 256
383 * bpp4: code 0x2000 size 256
384 * bpp12: code 0x4000 size 32
385 */
386 dev_dbg(lcdc.fbdev->dev, "invalid color mode %d\n", color_mode);
387 BUG();
388 return -1;
389 }
390
391 if (lcdc.ext_mode) {
392 setup_lcd_dma();
393 return 0;
394 }
395
396 if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
397 disable_controller();
398 omap_stop_lcd_dma();
399 setup_lcd_dma();
400 enable_controller();
401 }
402
403 return 0;
404}
405
406static int omap_lcdc_enable_plane(int plane, int enable)
407{
408 dev_dbg(lcdc.fbdev->dev,
409 "plane %d enable %d update_mode %d ext_mode %d\n",
410 plane, enable, lcdc.update_mode, lcdc.ext_mode);
411 if (plane != OMAPFB_PLANE_GFX)
412 return -EINVAL;
413
414 return 0;
415}
416
417/*
418 * Configure the LCD DMA for a palette load operation and do the palette
419 * downloading synchronously. We don't use the frame+palette load mode of
420 * the controller, since the palette can always be downloaded separately.
421 */
422static void load_palette(void)
423{
424 u16 *palette;
425
426 palette = (u16 *)lcdc.palette_virt;
427
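	/*
	 * Merge the pixel-depth code (lcdc.palette_code) into the top bits
	 * of the first palette entry; its low 12 bits keep the colour value.
	 */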
428 *(u16 *)palette &= 0x0fff;
429 *(u16 *)palette |= lcdc.palette_code;
430
431 omap_set_lcd_dma_b1(lcdc.palette_phys,
432 lcdc.palette_size / 4 + 1, 1, OMAP_DMA_DATA_TYPE_S32);
433
434 omap_set_lcd_dma_single_transfer(1);
435 omap_setup_lcd_dma();
436
437 init_completion(&lcdc.palette_load_complete);
438 enable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
439 set_load_mode(OMAP_LCDC_LOAD_PALETTE);
440 enable_controller();
441 if (!wait_for_completion_timeout(&lcdc.palette_load_complete,
442 msecs_to_jiffies(500)))
443		dev_err(lcdc.fbdev->dev, "timeout waiting for palette load\n");
444 /* The controller gets disabled in the irq handler */
445 disable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
446 omap_stop_lcd_dma();
447
448 omap_set_lcd_dma_single_transfer(lcdc.ext_mode);
449}
450
451/* Used only in internal controller mode */
452static int omap_lcdc_setcolreg(u_int regno, u16 red, u16 green, u16 blue,
453 u16 transp, int update_hw_pal)
454{
455 u16 *palette;
456
457 if (lcdc.color_mode != OMAPFB_COLOR_CLUT_8BPP || regno > 255)
458 return -EINVAL;
459
460 palette = (u16 *)lcdc.palette_virt;
461
462 palette[regno] &= ~0x0fff;
463	palette[regno] |= ((red >> 12) << 8) | ((green >> 12) << 4) |
464 (blue >> 12);
465
466 if (update_hw_pal) {
467 disable_controller();
468 omap_stop_lcd_dma();
469 load_palette();
470 setup_lcd_dma();
471 set_load_mode(OMAP_LCDC_LOAD_FRAME);
472 enable_controller();
473 }
474
475 return 0;
476}
477
478static void calc_ck_div(int is_tft, int pck, int *pck_div)
479{
480 unsigned long lck;
481
482 pck = max(1, pck);
483 lck = clk_get_rate(lcdc.lcd_ck);
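	/*
	 * Round the divider up so the resulting pixel clock never exceeds
	 * the requested rate; TFT panels need a divider of at least 2,
	 * other panels at least 3.
	 */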
484 *pck_div = (lck + pck - 1) / pck;
485 if (is_tft)
486 *pck_div = max(2, *pck_div);
487 else
488 *pck_div = max(3, *pck_div);
489 if (*pck_div > 255) {
490 /* FIXME: try to adjust logic clock divider as well */
491 *pck_div = 255;
492 dev_warn(lcdc.fbdev->dev, "pixclock %d kHz too low.\n",
493 pck / 1000);
494 }
495}
496
497static inline void setup_regs(void)
498{
499 u32 l;
500 struct lcd_panel *panel = lcdc.fbdev->panel;
501 int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
502 unsigned long lck;
503 int pcd;
504
505 l = omap_readl(OMAP_LCDC_CONTROL);
506 l &= ~OMAP_LCDC_CTRL_LCD_TFT;
507 l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0;
508#ifdef CONFIG_MACH_OMAP_PALMTE
509/* FIXME:if (machine_is_omap_palmte()) { */
510 /* PalmTE uses alternate TFT setting in 8BPP mode */
511 l |= (is_tft && panel->bpp == 8) ? 0x810000 : 0;
512/* } */
513#endif
514 omap_writel(l, OMAP_LCDC_CONTROL);
515
516 l = omap_readl(OMAP_LCDC_TIMING2);
517 l &= ~(((1 << 6) - 1) << 20);
518 l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20;
519 omap_writel(l, OMAP_LCDC_TIMING2);
520
521 l = panel->x_res - 1;
522 l |= (panel->hsw - 1) << 10;
523 l |= (panel->hfp - 1) << 16;
524 l |= (panel->hbp - 1) << 24;
525 omap_writel(l, OMAP_LCDC_TIMING0);
526
527 l = panel->y_res - 1;
528 l |= (panel->vsw - 1) << 10;
529 l |= panel->vfp << 16;
530 l |= panel->vbp << 24;
531 omap_writel(l, OMAP_LCDC_TIMING1);
532
533 l = omap_readl(OMAP_LCDC_TIMING2);
534 l &= ~0xff;
535
536 lck = clk_get_rate(lcdc.lcd_ck);
537
538 if (!panel->pcd)
539 calc_ck_div(is_tft, panel->pixel_clock * 1000, &pcd);
540 else {
541 dev_warn(lcdc.fbdev->dev,
542 "Pixel clock divider value is obsolete.\n"
543 "Try to set pixel_clock to %lu and pcd to 0 "
544 "in drivers/video/omap/lcd_%s.c and submit a patch.\n",
545 lck / panel->pcd / 1000, panel->name);
546
547 pcd = panel->pcd;
548 }
549 l |= pcd & 0xff;
550 l |= panel->acb << 8;
551 omap_writel(l, OMAP_LCDC_TIMING2);
552
553 /* update panel info with the exact clock */
554 panel->pixel_clock = lck / pcd / 1000;
555}
556
557/*
558 * Configure the LCD controller, download the color palette and start a looped
559 * DMA transfer of the frame image data. Called only in internal
560 * controller mode.
561 */
562static int omap_lcdc_set_update_mode(enum omapfb_update_mode mode)
563{
564 int r = 0;
565
566 if (mode != lcdc.update_mode) {
567 switch (mode) {
568 case OMAPFB_AUTO_UPDATE:
569 setup_regs();
570 load_palette();
571
572 /* Setup and start LCD DMA */
573 setup_lcd_dma();
574
575 set_load_mode(OMAP_LCDC_LOAD_FRAME);
576 enable_irqs(OMAP_LCDC_IRQ_DONE);
577 /* This will start the actual DMA transfer */
578 enable_controller();
579 lcdc.update_mode = mode;
580 break;
581 case OMAPFB_UPDATE_DISABLED:
582 disable_controller();
583 omap_stop_lcd_dma();
584 lcdc.update_mode = mode;
585 break;
586 default:
587 r = -EINVAL;
588 }
589 }
590
591 return r;
592}
593
594static enum omapfb_update_mode omap_lcdc_get_update_mode(void)
595{
596 return lcdc.update_mode;
597}
598
599/* PM code called only in internal controller mode */
600static void omap_lcdc_suspend(void)
601{
602 if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
603 disable_controller();
604 omap_stop_lcd_dma();
605 }
606}
607
608static void omap_lcdc_resume(void)
609{
610 if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
611 setup_regs();
612 load_palette();
613 setup_lcd_dma();
614 set_load_mode(OMAP_LCDC_LOAD_FRAME);
615 enable_irqs(OMAP_LCDC_IRQ_DONE);
616 enable_controller();
617 }
618}
619
620static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps)
621{
622 return;
623}
624
625int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data)
626{
627 BUG_ON(callback == NULL);
628
629 if (lcdc.dma_callback)
630 return -EBUSY;
631 else {
632 lcdc.dma_callback = callback;
633 lcdc.dma_callback_data = data;
634 }
635 return 0;
636}
637EXPORT_SYMBOL(omap_lcdc_set_dma_callback);
638
639void omap_lcdc_free_dma_callback(void)
640{
641 lcdc.dma_callback = NULL;
642}
643EXPORT_SYMBOL(omap_lcdc_free_dma_callback);
644
645static void lcdc_dma_handler(u16 status, void *data)
646{
647 if (lcdc.dma_callback)
648 lcdc.dma_callback(lcdc.dma_callback_data);
649}
650
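/*
 * Map an externally reserved frame buffer area into the kernel address
 * space with write-combining attributes, so that it can be accessed
 * through lcdc.vram_virt.
 */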
651static int mmap_kern(void)
652{
653 struct vm_struct *kvma;
654 struct vm_area_struct vma;
655 pgprot_t pgprot;
656 unsigned long vaddr;
657
658 kvma = get_vm_area(lcdc.vram_size, VM_IOREMAP);
659 if (kvma == NULL) {
660 dev_err(lcdc.fbdev->dev, "can't get kernel vm area\n");
661 return -ENOMEM;
662 }
663 vma.vm_mm = &init_mm;
664
665 vaddr = (unsigned long)kvma->addr;
666 vma.vm_start = vaddr;
667 vma.vm_end = vaddr + lcdc.vram_size;
668
669 pgprot = pgprot_writecombine(pgprot_kernel);
670 if (io_remap_pfn_range(&vma, vaddr,
671 lcdc.vram_phys >> PAGE_SHIFT,
672 lcdc.vram_size, pgprot) < 0) {
673 dev_err(lcdc.fbdev->dev, "kernel mmap for FB memory failed\n");
674 return -EAGAIN;
675 }
676
677 lcdc.vram_virt = (void *)vaddr;
678
679 return 0;
680}
681
682static void unmap_kern(void)
683{
684 vunmap(lcdc.vram_virt);
685}
686
687static int alloc_palette_ram(void)
688{
689 lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
690 MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
691 if (lcdc.palette_virt == NULL) {
692 dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
693 return -ENOMEM;
694 }
695 memset(lcdc.palette_virt, 0, MAX_PALETTE_SIZE);
696
697 return 0;
698}
699
700static void free_palette_ram(void)
701{
702 dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
703 lcdc.palette_virt, lcdc.palette_phys);
704}
705
706static int alloc_fbmem(struct omapfb_mem_region *region)
707{
708 int bpp;
709 int frame_size;
710 struct lcd_panel *panel = lcdc.fbdev->panel;
711
712 bpp = panel->bpp;
713 if (bpp == 12)
714 bpp = 16;
715 frame_size = PAGE_ALIGN(panel->x_res * bpp / 8 * panel->y_res);
716 if (region->size > frame_size)
717 frame_size = region->size;
718 lcdc.vram_size = frame_size;
719 lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
720 lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
721 if (lcdc.vram_virt == NULL) {
722 dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
723 return -ENOMEM;
724 }
725 region->size = frame_size;
726 region->paddr = lcdc.vram_phys;
727 region->vaddr = lcdc.vram_virt;
728 region->alloc = 1;
729
730 memset(lcdc.vram_virt, 0, lcdc.vram_size);
731
732 return 0;
733}
734
735static void free_fbmem(void)
736{
737 dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
738 lcdc.vram_virt, lcdc.vram_phys);
739}
740
741static int setup_fbmem(struct omapfb_mem_desc *req_md)
742{
743 int r;
744
745 if (!req_md->region_cnt) {
746 dev_err(lcdc.fbdev->dev, "no memory regions defined\n");
747 return -EINVAL;
748 }
749
750 if (req_md->region_cnt > 1) {
751 dev_err(lcdc.fbdev->dev, "only one plane is supported\n");
752 req_md->region_cnt = 1;
753 }
754
755 if (req_md->region[0].paddr == 0) {
756 lcdc.fbmem_allocated = 1;
757 if ((r = alloc_fbmem(&req_md->region[0])) < 0)
758 return r;
759 return 0;
760 }
761
762 lcdc.vram_phys = req_md->region[0].paddr;
763 lcdc.vram_size = req_md->region[0].size;
764
765 if ((r = mmap_kern()) < 0)
766 return r;
767
768 dev_dbg(lcdc.fbdev->dev, "vram at %08x size %08lx mapped to 0x%p\n",
769 lcdc.vram_phys, lcdc.vram_size, lcdc.vram_virt);
770
771 return 0;
772}
773
774static void cleanup_fbmem(void)
775{
776 if (lcdc.fbmem_allocated)
777 free_fbmem();
778 else
779 unmap_kern();
780}
781
782static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
783 struct omapfb_mem_desc *req_vram)
784{
785 int r;
786 u32 l;
787 int rate;
788 struct clk *tc_ck;
789
790 lcdc.irq_mask = 0;
791
792 lcdc.fbdev = fbdev;
793 lcdc.ext_mode = ext_mode;
794
795 l = 0;
796 omap_writel(l, OMAP_LCDC_CONTROL);
797
798 /* FIXME:
799	 * According to errata, some platforms have a clock rate limitation.
800 */
801 lcdc.lcd_ck = clk_get(NULL, "lcd_ck");
802 if (IS_ERR(lcdc.lcd_ck)) {
803 dev_err(fbdev->dev, "unable to access LCD clock\n");
804 r = PTR_ERR(lcdc.lcd_ck);
805 goto fail0;
806 }
807
808 tc_ck = clk_get(NULL, "tc_ck");
809 if (IS_ERR(tc_ck)) {
810 dev_err(fbdev->dev, "unable to access TC clock\n");
811 r = PTR_ERR(tc_ck);
812 goto fail1;
813 }
814
815 rate = clk_get_rate(tc_ck);
816 clk_put(tc_ck);
817
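	/*
	 * The LCD clock rate is based on the TC clock rate; some boards
	 * need it reduced (see the errata note above).
	 */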
818 if (machine_is_ams_delta())
819 rate /= 4;
820 if (machine_is_omap_h3())
821 rate /= 3;
822 r = clk_set_rate(lcdc.lcd_ck, rate);
823 if (r) {
824 dev_err(fbdev->dev, "failed to adjust LCD rate\n");
825 goto fail1;
826 }
827 clk_enable(lcdc.lcd_ck);
828
829 r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
830 if (r) {
831 dev_err(fbdev->dev, "unable to get IRQ\n");
832 goto fail2;
833 }
834
835 r = omap_request_lcd_dma(lcdc_dma_handler, NULL);
836 if (r) {
837 dev_err(fbdev->dev, "unable to get LCD DMA\n");
838 goto fail3;
839 }
840
841 omap_set_lcd_dma_single_transfer(ext_mode);
842 omap_set_lcd_dma_ext_controller(ext_mode);
843
844 if (!ext_mode)
845 if ((r = alloc_palette_ram()) < 0)
846 goto fail4;
847
848 if ((r = setup_fbmem(req_vram)) < 0)
849 goto fail5;
850
851 pr_info("omapfb: LCDC initialized\n");
852
853 return 0;
854fail5:
855 if (!ext_mode)
856 free_palette_ram();
857fail4:
858 omap_free_lcd_dma();
859fail3:
860 free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
861fail2:
862 clk_disable(lcdc.lcd_ck);
863fail1:
864 clk_put(lcdc.lcd_ck);
865fail0:
866 return r;
867}
868
869static void omap_lcdc_cleanup(void)
870{
871 if (!lcdc.ext_mode)
872 free_palette_ram();
873 cleanup_fbmem();
874 omap_free_lcd_dma();
875 free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
876 clk_disable(lcdc.lcd_ck);
877 clk_put(lcdc.lcd_ck);
878}
879
880const struct lcd_ctrl omap1_int_ctrl = {
881 .name = "internal",
882 .init = omap_lcdc_init,
883 .cleanup = omap_lcdc_cleanup,
884 .get_caps = omap_lcdc_get_caps,
885 .set_update_mode = omap_lcdc_set_update_mode,
886 .get_update_mode = omap_lcdc_get_update_mode,
887 .update_window = NULL,
888 .suspend = omap_lcdc_suspend,
889 .resume = omap_lcdc_resume,
890 .setup_plane = omap_lcdc_setup_plane,
891 .enable_plane = omap_lcdc_enable_plane,
892 .setcolreg = omap_lcdc_setcolreg,
893};
diff --git a/drivers/video/omap/lcdc.h b/drivers/video/omap/lcdc.h
new file mode 100644
index 000000000000..adb731e5314a
--- /dev/null
+++ b/drivers/video/omap/lcdc.h
@@ -0,0 +1,7 @@
1#ifndef LCDC_H
2#define LCDC_H
3
4int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data);
5void omap_lcdc_free_dma_callback(void);
6
7#endif
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
new file mode 100644
index 000000000000..14d0f7a11145
--- /dev/null
+++ b/drivers/video/omap/omapfb_main.c
@@ -0,0 +1,1941 @@
1/*
2 * Framebuffer driver for TI OMAP boards
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * Acknowledgements:
8 * Alex McMains <aam@ridgerun.com> - Original driver
9 * Juha Yrjola <juha.yrjola@nokia.com> - Original driver and improvements
10 * Dirk Behme <dirk.behme@de.bosch.com> - changes for 2.6 kernel API
11 * Texas Instruments - H3 support
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */
27#include <linux/platform_device.h>
28#include <linux/uaccess.h>
29
30#include <asm/mach-types.h>
31#include <asm/arch/dma.h>
32#include <asm/arch/omapfb.h>
33
34#define MODULE_NAME "omapfb"
35
36static unsigned int def_accel;
37static unsigned long def_vram[OMAPFB_PLANE_NUM];
38static int def_vram_cnt;
39static unsigned long def_vxres;
40static unsigned long def_vyres;
41static unsigned int def_rotate;
42static unsigned int def_mirror;
43
44#ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
45static int manual_update = 1;
46#else
47static int manual_update;
48#endif
49
50static struct platform_device *fbdev_pdev;
51static struct lcd_panel *fbdev_panel;
52static struct omapfb_device *omapfb_dev;
53
54struct caps_table_struct {
55 unsigned long flag;
56 const char *name;
57};
58
59static struct caps_table_struct ctrl_caps[] = {
60 { OMAPFB_CAPS_MANUAL_UPDATE, "manual update" },
61 { OMAPFB_CAPS_TEARSYNC, "tearing synchronization" },
62 { OMAPFB_CAPS_PLANE_RELOCATE_MEM, "relocate plane memory" },
63 { OMAPFB_CAPS_PLANE_SCALE, "scale plane" },
64 { OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE, "pixel double window" },
65 { OMAPFB_CAPS_WINDOW_SCALE, "scale window" },
66 { OMAPFB_CAPS_WINDOW_OVERLAY, "overlay window" },
67 { OMAPFB_CAPS_SET_BACKLIGHT, "backlight setting" },
68};
69
70static struct caps_table_struct color_caps[] = {
71 { 1 << OMAPFB_COLOR_RGB565, "RGB565", },
72 { 1 << OMAPFB_COLOR_YUV422, "YUV422", },
73 { 1 << OMAPFB_COLOR_YUV420, "YUV420", },
74 { 1 << OMAPFB_COLOR_CLUT_8BPP, "CLUT8", },
75 { 1 << OMAPFB_COLOR_CLUT_4BPP, "CLUT4", },
76 { 1 << OMAPFB_COLOR_CLUT_2BPP, "CLUT2", },
77 { 1 << OMAPFB_COLOR_CLUT_1BPP, "CLUT1", },
78 { 1 << OMAPFB_COLOR_RGB444, "RGB444", },
79 { 1 << OMAPFB_COLOR_YUY422, "YUY422", },
80};
81
82/*
83 * ---------------------------------------------------------------------------
84 * LCD panel
85 * ---------------------------------------------------------------------------
86 */
87extern struct lcd_ctrl omap1_int_ctrl;
88extern struct lcd_ctrl omap2_int_ctrl;
89extern struct lcd_ctrl hwa742_ctrl;
90extern struct lcd_ctrl blizzard_ctrl;
91
92static struct lcd_ctrl *ctrls[] = {
93#ifdef CONFIG_ARCH_OMAP1
94 &omap1_int_ctrl,
95#else
96 &omap2_int_ctrl,
97#endif
98
99#ifdef CONFIG_FB_OMAP_LCDC_HWA742
100 &hwa742_ctrl,
101#endif
102#ifdef CONFIG_FB_OMAP_LCDC_BLIZZARD
103 &blizzard_ctrl,
104#endif
105};
106
107#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
108#ifdef CONFIG_ARCH_OMAP1
109extern struct lcd_ctrl_extif omap1_ext_if;
110#else
111extern struct lcd_ctrl_extif omap2_ext_if;
112#endif
113#endif
114
115static void omapfb_rqueue_lock(struct omapfb_device *fbdev)
116{
117 mutex_lock(&fbdev->rqueue_mutex);
118}
119
120static void omapfb_rqueue_unlock(struct omapfb_device *fbdev)
121{
122 mutex_unlock(&fbdev->rqueue_mutex);
123}
124
125/*
126 * ---------------------------------------------------------------------------
127 * LCD controller and LCD DMA
128 * ---------------------------------------------------------------------------
129 */
130/* Lookup table to map elem size to elem type. */
131static const int dma_elem_type[] = {
132 0,
133 OMAP_DMA_DATA_TYPE_S8,
134 OMAP_DMA_DATA_TYPE_S16,
135 0,
136 OMAP_DMA_DATA_TYPE_S32,
137};
138
139/*
140 * Allocate resources needed for LCD controller and LCD DMA operations. Video
141 * memory is allocated from system memory according to the virtual display
142 * size, except if a bigger memory size is specified explicitly as a kernel
143 * parameter.
144 */
145static int ctrl_init(struct omapfb_device *fbdev)
146{
147 int r;
148 int i;
149
150 /* kernel/module vram parameters override boot tags/board config */
151 if (def_vram_cnt) {
152 for (i = 0; i < def_vram_cnt; i++)
153 fbdev->mem_desc.region[i].size =
154 PAGE_ALIGN(def_vram[i]);
155 fbdev->mem_desc.region_cnt = i;
156 } else {
157 struct omapfb_platform_data *conf;
158
159 conf = fbdev->dev->platform_data;
160 fbdev->mem_desc = conf->mem_desc;
161 }
162
163 if (!fbdev->mem_desc.region_cnt) {
164 struct lcd_panel *panel = fbdev->panel;
165 int def_size;
166 int bpp = panel->bpp;
167
168 /* 12 bpp is packed in 16 bits */
169 if (bpp == 12)
170 bpp = 16;
171 def_size = def_vxres * def_vyres * bpp / 8;
172 fbdev->mem_desc.region_cnt = 1;
173 fbdev->mem_desc.region[0].size = PAGE_ALIGN(def_size);
174 }
175 r = fbdev->ctrl->init(fbdev, 0, &fbdev->mem_desc);
176 if (r < 0) {
177 dev_err(fbdev->dev, "controller initialization failed (%d)\n",
178 r);
179 return r;
180 }
181
182#ifdef DEBUG
183 for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
184 dev_dbg(fbdev->dev, "region%d phys %08x virt %p size=%lu\n",
185 i,
186 fbdev->mem_desc.region[i].paddr,
187 fbdev->mem_desc.region[i].vaddr,
188 fbdev->mem_desc.region[i].size);
189 }
190#endif
191 return 0;
192}
193
194static void ctrl_cleanup(struct omapfb_device *fbdev)
195{
196 fbdev->ctrl->cleanup();
197}
198
199/* Must be called with fbdev->rqueue_mutex held. */
200static int ctrl_change_mode(struct fb_info *fbi)
201{
202 int r;
203 unsigned long offset;
204 struct omapfb_plane_struct *plane = fbi->par;
205 struct omapfb_device *fbdev = plane->fbdev;
206 struct fb_var_screeninfo *var = &fbi->var;
207
208 offset = var->yoffset * fbi->fix.line_length +
209 var->xoffset * var->bits_per_pixel / 8;
210
211 if (fbdev->ctrl->sync)
212 fbdev->ctrl->sync();
213 r = fbdev->ctrl->setup_plane(plane->idx, plane->info.channel_out,
214 offset, var->xres_virtual,
215 plane->info.pos_x, plane->info.pos_y,
216 var->xres, var->yres, plane->color_mode);
217 if (fbdev->ctrl->set_scale != NULL)
218 r = fbdev->ctrl->set_scale(plane->idx,
219 var->xres, var->yres,
220 plane->info.out_width,
221 plane->info.out_height);
222
223 return r;
224}
225
226/*
227 * ---------------------------------------------------------------------------
228 * fbdev framework callbacks and the ioctl interface
229 * ---------------------------------------------------------------------------
230 */
231/* Called each time the omapfb device is opened */
232static int omapfb_open(struct fb_info *info, int user)
233{
234 return 0;
235}
236
237static void omapfb_sync(struct fb_info *info);
238
239/* Called when the omapfb device is closed. We make sure that any pending
240 * gfx DMA operations have completed before we return. */
241static int omapfb_release(struct fb_info *info, int user)
242{
243 omapfb_sync(info);
244 return 0;
245}
246
247/* Store a single color palette entry into a pseudo palette or the hardware
248 * palette if one is available. For now we support only 16bpp and thus store
249 * the entry only in the pseudo palette.
250 */
251static int _setcolreg(struct fb_info *info, u_int regno, u_int red, u_int green,
252 u_int blue, u_int transp, int update_hw_pal)
253{
254 struct omapfb_plane_struct *plane = info->par;
255 struct omapfb_device *fbdev = plane->fbdev;
256 struct fb_var_screeninfo *var = &info->var;
257 int r = 0;
258
259 switch (plane->color_mode) {
260 case OMAPFB_COLOR_YUV422:
261 case OMAPFB_COLOR_YUV420:
262 case OMAPFB_COLOR_YUY422:
263 r = -EINVAL;
264 break;
265 case OMAPFB_COLOR_CLUT_8BPP:
266 case OMAPFB_COLOR_CLUT_4BPP:
267 case OMAPFB_COLOR_CLUT_2BPP:
268 case OMAPFB_COLOR_CLUT_1BPP:
269 if (fbdev->ctrl->setcolreg)
270 r = fbdev->ctrl->setcolreg(regno, red, green, blue,
271 transp, update_hw_pal);
272 /* Fallthrough */
273 case OMAPFB_COLOR_RGB565:
274 case OMAPFB_COLOR_RGB444:
275 if (r != 0)
276 break;
277
278 if (regno < 0) {
279 r = -EINVAL;
280 break;
281 }
282
283 if (regno < 16) {
284 u16 pal;
285 pal = ((red >> (16 - var->red.length)) <<
286 var->red.offset) |
287 ((green >> (16 - var->green.length)) <<
288 var->green.offset) |
289 (blue >> (16 - var->blue.length));
290 ((u32 *)(info->pseudo_palette))[regno] = pal;
291 }
292 break;
293 default:
294 BUG();
295 }
296 return r;
297}
298
299static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
300 u_int transp, struct fb_info *info)
301{
302 return _setcolreg(info, regno, red, green, blue, transp, 1);
303}
304
305static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
306{
307 int count, index, r;
308 u16 *red, *green, *blue, *transp;
309 u16 trans = 0xffff;
310
311 red = cmap->red;
312 green = cmap->green;
313 blue = cmap->blue;
314 transp = cmap->transp;
315 index = cmap->start;
316
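	/*
	 * Only the last entry passes update_hw_pal, so the hardware palette
	 * is reloaded once per cmap update instead of once per entry.
	 */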
317 for (count = 0; count < cmap->len; count++) {
318 if (transp)
319 trans = *transp++;
320 r = _setcolreg(info, index++, *red++, *green++, *blue++, trans,
321 count == cmap->len - 1);
322 if (r != 0)
323 return r;
324 }
325
326 return 0;
327}
328
329static int omapfb_update_full_screen(struct fb_info *fbi);
330
331static int omapfb_blank(int blank, struct fb_info *fbi)
332{
333 struct omapfb_plane_struct *plane = fbi->par;
334 struct omapfb_device *fbdev = plane->fbdev;
335 int do_update = 0;
336 int r = 0;
337
338 omapfb_rqueue_lock(fbdev);
339 switch (blank) {
340 case VESA_NO_BLANKING:
341 if (fbdev->state == OMAPFB_SUSPENDED) {
342 if (fbdev->ctrl->resume)
343 fbdev->ctrl->resume();
344 fbdev->panel->enable(fbdev->panel);
345 fbdev->state = OMAPFB_ACTIVE;
346 if (fbdev->ctrl->get_update_mode() ==
347 OMAPFB_MANUAL_UPDATE)
348 do_update = 1;
349 }
350 break;
351 case VESA_POWERDOWN:
352 if (fbdev->state == OMAPFB_ACTIVE) {
353 fbdev->panel->disable(fbdev->panel);
354 if (fbdev->ctrl->suspend)
355 fbdev->ctrl->suspend();
356 fbdev->state = OMAPFB_SUSPENDED;
357 }
358 break;
359 default:
360 r = -EINVAL;
361 }
362 omapfb_rqueue_unlock(fbdev);
363
364 if (r == 0 && do_update)
365 r = omapfb_update_full_screen(fbi);
366
367 return r;
368}
369
370static void omapfb_sync(struct fb_info *fbi)
371{
372 struct omapfb_plane_struct *plane = fbi->par;
373 struct omapfb_device *fbdev = plane->fbdev;
374
375 omapfb_rqueue_lock(fbdev);
376 if (fbdev->ctrl->sync)
377 fbdev->ctrl->sync();
378 omapfb_rqueue_unlock(fbdev);
379}
380
381/*
382 * Set fb_info.fix fields and also update fbdev.
383 * When calling this, fb_info.var must already be set up.
384 */
385static void set_fb_fix(struct fb_info *fbi)
386{
387 struct fb_fix_screeninfo *fix = &fbi->fix;
388 struct fb_var_screeninfo *var = &fbi->var;
389 struct omapfb_plane_struct *plane = fbi->par;
390 struct omapfb_mem_region *rg;
391 int bpp;
392
393 rg = &plane->fbdev->mem_desc.region[plane->idx];
394 fbi->screen_base = (char __iomem *)rg->vaddr;
395 fix->smem_start = rg->paddr;
396 fix->smem_len = rg->size;
397
398 fix->type = FB_TYPE_PACKED_PIXELS;
399 bpp = var->bits_per_pixel;
400 if (var->nonstd)
401 fix->visual = FB_VISUAL_PSEUDOCOLOR;
402 else switch (var->bits_per_pixel) {
403 case 16:
404 case 12:
405 fix->visual = FB_VISUAL_TRUECOLOR;
406 /* 12bpp is stored in 16 bits */
407 bpp = 16;
408 break;
409 case 1:
410 case 2:
411 case 4:
412 case 8:
413 fix->visual = FB_VISUAL_PSEUDOCOLOR;
414 break;
415 }
416 fix->accel = FB_ACCEL_OMAP1610;
417 fix->line_length = var->xres_virtual * bpp / 8;
418}
419
420static int set_color_mode(struct omapfb_plane_struct *plane,
421 struct fb_var_screeninfo *var)
422{
423 switch (var->nonstd) {
424 case 0:
425 break;
426 case OMAPFB_COLOR_YUV422:
427 var->bits_per_pixel = 16;
428 plane->color_mode = var->nonstd;
429 return 0;
430 case OMAPFB_COLOR_YUV420:
431 var->bits_per_pixel = 12;
432 plane->color_mode = var->nonstd;
433 return 0;
434 case OMAPFB_COLOR_YUY422:
435 var->bits_per_pixel = 16;
436 plane->color_mode = var->nonstd;
437 return 0;
438 default:
439 return -EINVAL;
440 }
441
442 switch (var->bits_per_pixel) {
443 case 1:
444 plane->color_mode = OMAPFB_COLOR_CLUT_1BPP;
445 return 0;
446 case 2:
447 plane->color_mode = OMAPFB_COLOR_CLUT_2BPP;
448 return 0;
449 case 4:
450 plane->color_mode = OMAPFB_COLOR_CLUT_4BPP;
451 return 0;
452 case 8:
453 plane->color_mode = OMAPFB_COLOR_CLUT_8BPP;
454 return 0;
455 case 12:
456 var->bits_per_pixel = 16;
457 plane->color_mode = OMAPFB_COLOR_RGB444;
458 return 0;
459 case 16:
460 plane->color_mode = OMAPFB_COLOR_RGB565;
461 return 0;
462 default:
463 return -EINVAL;
464 }
465}
466
467/*
468 * Check the values in var against our capabilities and try to adjust
469 * out-of-bound values where possible.
470 */
471static int set_fb_var(struct fb_info *fbi,
472 struct fb_var_screeninfo *var)
473{
474 int bpp;
475 unsigned long max_frame_size;
476 unsigned long line_size;
477 int xres_min, xres_max;
478 int yres_min, yres_max;
479 struct omapfb_plane_struct *plane = fbi->par;
480 struct omapfb_device *fbdev = plane->fbdev;
481 struct lcd_panel *panel = fbdev->panel;
482
483 if (set_color_mode(plane, var) < 0)
484 return -EINVAL;
485
486 bpp = var->bits_per_pixel;
487 if (plane->color_mode == OMAPFB_COLOR_RGB444)
488 bpp = 16;
489
490 switch (var->rotate) {
491 case 0:
492 case 180:
493 xres_min = OMAPFB_PLANE_XRES_MIN;
494 xres_max = panel->x_res;
495 yres_min = OMAPFB_PLANE_YRES_MIN;
496 yres_max = panel->y_res;
497 if (cpu_is_omap15xx()) {
498 var->xres = panel->x_res;
499 var->yres = panel->y_res;
500 }
501 break;
502 case 90:
503 case 270:
504 xres_min = OMAPFB_PLANE_YRES_MIN;
505 xres_max = panel->y_res;
506 yres_min = OMAPFB_PLANE_XRES_MIN;
507 yres_max = panel->x_res;
508 if (cpu_is_omap15xx()) {
509 var->xres = panel->y_res;
510 var->yres = panel->x_res;
511 }
512 break;
513 default:
514 return -EINVAL;
515 }
516
517 if (var->xres < xres_min)
518 var->xres = xres_min;
519 if (var->yres < yres_min)
520 var->yres = yres_min;
521 if (var->xres > xres_max)
522 var->xres = xres_max;
523 if (var->yres > yres_max)
524 var->yres = yres_max;
525
526 if (var->xres_virtual < var->xres)
527 var->xres_virtual = var->xres;
528 if (var->yres_virtual < var->yres)
529 var->yres_virtual = var->yres;
530 max_frame_size = fbdev->mem_desc.region[plane->idx].size;
531 line_size = var->xres_virtual * bpp / 8;
532 if (line_size * var->yres_virtual > max_frame_size) {
533 /* Try to keep yres_virtual first */
534 line_size = max_frame_size / var->yres_virtual;
535 var->xres_virtual = line_size * 8 / bpp;
536 if (var->xres_virtual < var->xres) {
537 /* Still doesn't fit. Shrink yres_virtual too */
538 var->xres_virtual = var->xres;
539 line_size = var->xres * bpp / 8;
540 var->yres_virtual = max_frame_size / line_size;
541 }
542 /* Recheck this, as the virtual size changed. */
543 if (var->xres_virtual < var->xres)
544 var->xres = var->xres_virtual;
545 if (var->yres_virtual < var->yres)
546 var->yres = var->yres_virtual;
547 if (var->xres < xres_min || var->yres < yres_min)
548 return -EINVAL;
549 }
550 if (var->xres + var->xoffset > var->xres_virtual)
551 var->xoffset = var->xres_virtual - var->xres;
552 if (var->yres + var->yoffset > var->yres_virtual)
553 var->yoffset = var->yres_virtual - var->yres;
554 line_size = var->xres * bpp / 8;
555
556 if (plane->color_mode == OMAPFB_COLOR_RGB444) {
557 var->red.offset = 8; var->red.length = 4;
558 var->red.msb_right = 0;
559 var->green.offset = 4; var->green.length = 4;
560 var->green.msb_right = 0;
561 var->blue.offset = 0; var->blue.length = 4;
562 var->blue.msb_right = 0;
563 } else {
564 var->red.offset = 11; var->red.length = 5;
565 var->red.msb_right = 0;
566 var->green.offset = 5; var->green.length = 6;
567 var->green.msb_right = 0;
568 var->blue.offset = 0; var->blue.length = 5;
569 var->blue.msb_right = 0;
570 }
571
572 var->height = -1;
573 var->width = -1;
574 var->grayscale = 0;
575
576 /* pixclock in ps, the rest in pixclock */
577 var->pixclock = 10000000 / (panel->pixel_clock / 100);
578 var->left_margin = panel->hfp;
579 var->right_margin = panel->hbp;
580 var->upper_margin = panel->vfp;
581 var->lower_margin = panel->vbp;
582 var->hsync_len = panel->hsw;
583 var->vsync_len = panel->vsw;
584
585 /* TODO: get these from panel->config */
586 var->vmode = FB_VMODE_NONINTERLACED;
587 var->sync = 0;
588
589 return 0;
590}
591
592
593/* Set rotation (0, 90, 180, 270 degree), and switch to the new mode. */
594static void omapfb_rotate(struct fb_info *fbi, int rotate)
595{
596 struct omapfb_plane_struct *plane = fbi->par;
597 struct omapfb_device *fbdev = plane->fbdev;
598
599 omapfb_rqueue_lock(fbdev);
600 if (cpu_is_omap15xx() && rotate != fbi->var.rotate) {
601 struct fb_var_screeninfo *new_var = &fbdev->new_var;
602
603 memcpy(new_var, &fbi->var, sizeof(*new_var));
604 new_var->rotate = rotate;
605 if (set_fb_var(fbi, new_var) == 0 &&
606 memcmp(new_var, &fbi->var, sizeof(*new_var))) {
607 memcpy(&fbi->var, new_var, sizeof(*new_var));
608 ctrl_change_mode(fbi);
609 }
610 }
611 omapfb_rqueue_unlock(fbdev);
612}
613
614/*
615 * Set new x,y offsets in the virtual display for the visible area and switch
616 * to the new mode.
617 */
618static int omapfb_pan_display(struct fb_var_screeninfo *var,
619 struct fb_info *fbi)
620{
621 struct omapfb_plane_struct *plane = fbi->par;
622 struct omapfb_device *fbdev = plane->fbdev;
623 int r = 0;
624
625 omapfb_rqueue_lock(fbdev);
626 if (var->xoffset != fbi->var.xoffset ||
627 var->yoffset != fbi->var.yoffset) {
628 struct fb_var_screeninfo *new_var = &fbdev->new_var;
629
630 memcpy(new_var, &fbi->var, sizeof(*new_var));
631 new_var->xoffset = var->xoffset;
632 new_var->yoffset = var->yoffset;
633 if (set_fb_var(fbi, new_var))
634 r = -EINVAL;
635 else {
636 memcpy(&fbi->var, new_var, sizeof(*new_var));
637 ctrl_change_mode(fbi);
638 }
639 }
640 omapfb_rqueue_unlock(fbdev);
641
642 return r;
643}
644
645/* Set mirror to vertical axis and switch to the new mode. */
646static int omapfb_mirror(struct fb_info *fbi, int mirror)
647{
648 struct omapfb_plane_struct *plane = fbi->par;
649 struct omapfb_device *fbdev = plane->fbdev;
650 int r = 0;
651
652 omapfb_rqueue_lock(fbdev);
653 mirror = mirror ? 1 : 0;
654 if (cpu_is_omap15xx())
655 r = -EINVAL;
656 else if (mirror != plane->info.mirror) {
657 plane->info.mirror = mirror;
658 r = ctrl_change_mode(fbi);
659 }
660 omapfb_rqueue_unlock(fbdev);
661
662 return r;
663}
664
665/*
666 * Check the values in var and adjust any out-of-bound values if possible,
667 * otherwise return an error.
668 */
669static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
670{
671 struct omapfb_plane_struct *plane = fbi->par;
672 struct omapfb_device *fbdev = plane->fbdev;
673 int r;
674
675 omapfb_rqueue_lock(fbdev);
676 if (fbdev->ctrl->sync != NULL)
677 fbdev->ctrl->sync();
678 r = set_fb_var(fbi, var);
679 omapfb_rqueue_unlock(fbdev);
680
681 return r;
682}
683
684/*
685 * Switch to a new mode. The parameters for it have already been checked
686 * by omapfb_check_var.
687 */
688static int omapfb_set_par(struct fb_info *fbi)
689{
690 struct omapfb_plane_struct *plane = fbi->par;
691 struct omapfb_device *fbdev = plane->fbdev;
692 int r = 0;
693
694 omapfb_rqueue_lock(fbdev);
695 set_fb_fix(fbi);
696 r = ctrl_change_mode(fbi);
697 omapfb_rqueue_unlock(fbdev);
698
699 return r;
700}
701
702int omapfb_update_window_async(struct fb_info *fbi,
703 struct omapfb_update_window *win,
704 void (*callback)(void *),
705 void *callback_data)
706{
707 struct omapfb_plane_struct *plane = fbi->par;
708 struct omapfb_device *fbdev = plane->fbdev;
709 struct fb_var_screeninfo *var;
710
711 var = &fbi->var;
712 if (win->x >= var->xres || win->y >= var->yres ||
713 win->out_x > var->xres || win->out_y >= var->yres)
714 return -EINVAL;
715
716 if (!fbdev->ctrl->update_window ||
717 fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
718 return -ENODEV;
719
720 if (win->x + win->width >= var->xres)
721 win->width = var->xres - win->x;
722 if (win->y + win->height >= var->yres)
723 win->height = var->yres - win->y;
724 /* The out sizes should be cropped to the LCD size */
725 if (win->out_x + win->out_width > fbdev->panel->x_res)
726 win->out_width = fbdev->panel->x_res - win->out_x;
727 if (win->out_y + win->out_height > fbdev->panel->y_res)
728 win->out_height = fbdev->panel->y_res - win->out_y;
729 if (!win->width || !win->height || !win->out_width || !win->out_height)
730 return 0;
731
732 return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
733}
734EXPORT_SYMBOL(omapfb_update_window_async);
735
736static int omapfb_update_win(struct fb_info *fbi,
737 struct omapfb_update_window *win)
738{
739 struct omapfb_plane_struct *plane = fbi->par;
740 int ret;
741
742 omapfb_rqueue_lock(plane->fbdev);
743 ret = omapfb_update_window_async(fbi, win, NULL, 0);
744 omapfb_rqueue_unlock(plane->fbdev);
745
746 return ret;
747}
748
749static int omapfb_update_full_screen(struct fb_info *fbi)
750{
751 struct omapfb_plane_struct *plane = fbi->par;
752 struct omapfb_device *fbdev = plane->fbdev;
753 struct omapfb_update_window win;
754 int r;
755
756 if (!fbdev->ctrl->update_window ||
757 fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
758 return -ENODEV;
759
760 win.x = 0;
761 win.y = 0;
762 win.width = fbi->var.xres;
763 win.height = fbi->var.yres;
764 win.out_x = 0;
765 win.out_y = 0;
766 win.out_width = fbi->var.xres;
767 win.out_height = fbi->var.yres;
768 win.format = 0;
769
770 omapfb_rqueue_lock(fbdev);
771 r = fbdev->ctrl->update_window(fbi, &win, NULL, 0);
772 omapfb_rqueue_unlock(fbdev);
773
774 return r;
775}
776
777static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
778{
779 struct omapfb_plane_struct *plane = fbi->par;
780 struct omapfb_device *fbdev = plane->fbdev;
781 struct lcd_panel *panel = fbdev->panel;
782 struct omapfb_plane_info old_info;
783 int r = 0;
784
785 if (pi->pos_x + pi->out_width > panel->x_res ||
786 pi->pos_y + pi->out_height > panel->y_res)
787 return -EINVAL;
788
789 omapfb_rqueue_lock(fbdev);
790 if (pi->enabled && !fbdev->mem_desc.region[plane->idx].size) {
791 /*
792		 * This plane's memory was freed; it can't be enabled
793		 * until it's reallocated.
794 */
795 r = -EINVAL;
796 goto out;
797 }
798 old_info = plane->info;
799 plane->info = *pi;
800 if (pi->enabled) {
801 r = ctrl_change_mode(fbi);
802 if (r < 0) {
803 plane->info = old_info;
804 goto out;
805 }
806 }
807 r = fbdev->ctrl->enable_plane(plane->idx, pi->enabled);
808 if (r < 0) {
809 plane->info = old_info;
810 goto out;
811 }
812out:
813 omapfb_rqueue_unlock(fbdev);
814 return r;
815}
816
817static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
818{
819 struct omapfb_plane_struct *plane = fbi->par;
820
821 *pi = plane->info;
822 return 0;
823}
824
825static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
826{
827 struct omapfb_plane_struct *plane = fbi->par;
828 struct omapfb_device *fbdev = plane->fbdev;
829 struct omapfb_mem_region *rg = &fbdev->mem_desc.region[plane->idx];
830 size_t size;
831 int r = 0;
832
833 if (fbdev->ctrl->setup_mem == NULL)
834 return -ENODEV;
835 if (mi->type > OMAPFB_MEMTYPE_MAX)
836 return -EINVAL;
837
838 size = PAGE_ALIGN(mi->size);
839 omapfb_rqueue_lock(fbdev);
840 if (plane->info.enabled) {
841 r = -EBUSY;
842 goto out;
843 }
844 if (rg->size != size || rg->type != mi->type) {
845 struct fb_var_screeninfo *new_var = &fbdev->new_var;
846 unsigned long old_size = rg->size;
847 u8 old_type = rg->type;
848 unsigned long paddr;
849
850 rg->size = size;
851 rg->type = mi->type;
852 /*
853 * size == 0 is a special case, for which we
854 * don't check / adjust the screen parameters.
855 * This isn't a problem since the plane can't
856 * be reenabled unless its size is > 0.
857 */
858 if (old_size != size && size) {
859 if (size) {
860 memcpy(new_var, &fbi->var, sizeof(*new_var));
861 r = set_fb_var(fbi, new_var);
862 if (r < 0)
863 goto out;
864 }
865 }
866
867 if (fbdev->ctrl->sync)
868 fbdev->ctrl->sync();
869 r = fbdev->ctrl->setup_mem(plane->idx, size, mi->type, &paddr);
870 if (r < 0) {
871 /* Revert changes. */
872 rg->size = old_size;
873 rg->type = old_type;
874 goto out;
875 }
876 rg->paddr = paddr;
877
878 if (old_size != size) {
879 if (size) {
880 memcpy(&fbi->var, new_var, sizeof(fbi->var));
881 set_fb_fix(fbi);
882 } else {
883 /*
884				 * Set these explicitly to indicate that the plane
885				 * memory has been deallocated and that the other
886				 * screen parameters in var / fix are invalid.
887 */
888 fbi->fix.smem_start = 0;
889 fbi->fix.smem_len = 0;
890 }
891 }
892 }
893out:
894 omapfb_rqueue_unlock(fbdev);
895
896 return r;
897}
898
899static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
900{
901 struct omapfb_plane_struct *plane = fbi->par;
902 struct omapfb_device *fbdev = plane->fbdev;
903 struct omapfb_mem_region *rg;
904
905 rg = &fbdev->mem_desc.region[plane->idx];
906 memset(mi, 0, sizeof(*mi));
907 mi->size = rg->size;
908 mi->type = rg->type;
909
910 return 0;
911}
912
913static int omapfb_set_color_key(struct omapfb_device *fbdev,
914 struct omapfb_color_key *ck)
915{
916 int r;
917
918 if (!fbdev->ctrl->set_color_key)
919 return -ENODEV;
920
921 omapfb_rqueue_lock(fbdev);
922 r = fbdev->ctrl->set_color_key(ck);
923 omapfb_rqueue_unlock(fbdev);
924
925 return r;
926}
927
928static int omapfb_get_color_key(struct omapfb_device *fbdev,
929 struct omapfb_color_key *ck)
930{
931 int r;
932
933 if (!fbdev->ctrl->get_color_key)
934 return -ENODEV;
935
936 omapfb_rqueue_lock(fbdev);
937 r = fbdev->ctrl->get_color_key(ck);
938 omapfb_rqueue_unlock(fbdev);
939
940 return r;
941}
942
943static struct blocking_notifier_head omapfb_client_list[OMAPFB_PLANE_NUM];
944static int notifier_inited;
945
946static void omapfb_init_notifier(void)
947{
948 int i;
949
950 for (i = 0; i < OMAPFB_PLANE_NUM; i++)
951 BLOCKING_INIT_NOTIFIER_HEAD(&omapfb_client_list[i]);
952}
953
954int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb,
955 omapfb_notifier_callback_t callback,
956 void *callback_data)
957{
958 int r;
959
960	if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM)
961 return -EINVAL;
962
963 if (!notifier_inited) {
964 omapfb_init_notifier();
965 notifier_inited = 1;
966 }
967
968 omapfb_nb->nb.notifier_call = (int (*)(struct notifier_block *,
969 unsigned long, void *))callback;
970 omapfb_nb->data = callback_data;
971 r = blocking_notifier_chain_register(
972 &omapfb_client_list[omapfb_nb->plane_idx],
973 &omapfb_nb->nb);
974 if (r)
975 return r;
976 if (omapfb_dev != NULL &&
977 omapfb_dev->ctrl && omapfb_dev->ctrl->bind_client) {
978 omapfb_dev->ctrl->bind_client(omapfb_nb);
979 }
980
981 return 0;
982}
983EXPORT_SYMBOL(omapfb_register_client);
984
985int omapfb_unregister_client(struct omapfb_notifier_block *omapfb_nb)
986{
987 return blocking_notifier_chain_unregister(
988 &omapfb_client_list[omapfb_nb->plane_idx], &omapfb_nb->nb);
989}
990EXPORT_SYMBOL(omapfb_unregister_client);
991
992void omapfb_notify_clients(struct omapfb_device *fbdev, unsigned long event)
993{
994 int i;
995
996 if (!notifier_inited)
997 /* no client registered yet */
998 return;
999
1000 for (i = 0; i < OMAPFB_PLANE_NUM; i++)
1001 blocking_notifier_call_chain(&omapfb_client_list[i], event,
1002 fbdev->fb_info[i]);
1003}
1004EXPORT_SYMBOL(omapfb_notify_clients);
1005
1006static int omapfb_set_update_mode(struct omapfb_device *fbdev,
1007 enum omapfb_update_mode mode)
1008{
1009 int r;
1010
1011 omapfb_rqueue_lock(fbdev);
1012 r = fbdev->ctrl->set_update_mode(mode);
1013 omapfb_rqueue_unlock(fbdev);
1014
1015 return r;
1016}
1017
1018static enum omapfb_update_mode omapfb_get_update_mode(struct omapfb_device *fbdev)
1019{
1020 int r;
1021
1022 omapfb_rqueue_lock(fbdev);
1023 r = fbdev->ctrl->get_update_mode();
1024 omapfb_rqueue_unlock(fbdev);
1025
1026 return r;
1027}
1028
1029static void omapfb_get_caps(struct omapfb_device *fbdev, int plane,
1030 struct omapfb_caps *caps)
1031{
1032 memset(caps, 0, sizeof(*caps));
1033 fbdev->ctrl->get_caps(plane, caps);
1034 caps->ctrl |= fbdev->panel->get_caps(fbdev->panel);
1035}
1036
1037/* For lcd testing */
1038void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval)
1039{
1040 omapfb_rqueue_lock(fbdev);
1041 *(u16 *)fbdev->mem_desc.region[0].vaddr = pixval;
1042 if (fbdev->ctrl->get_update_mode() == OMAPFB_MANUAL_UPDATE) {
1043 struct omapfb_update_window win;
1044
1045 memset(&win, 0, sizeof(win));
1046 win.width = 2;
1047 win.height = 2;
1048 win.out_width = 2;
1049 win.out_height = 2;
1050 fbdev->ctrl->update_window(fbdev->fb_info[0], &win, NULL, 0);
1051 }
1052 omapfb_rqueue_unlock(fbdev);
1053}
1054EXPORT_SYMBOL(omapfb_write_first_pixel);
1055
1056/*
1057 * Ioctl interface. Part of the kernel mode frame buffer API is duplicated
1058 * here to be accessible by user mode code.
1059 */
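/*
 * Example (user space; assumes fd is an open omapfb frame buffer device):
 *
 *	struct omapfb_update_window win = {
 *		.x = 0, .y = 0, .width = 16, .height = 16,
 *		.out_x = 0, .out_y = 0, .out_width = 16, .out_height = 16,
 *	};
 *	ioctl(fd, OMAPFB_UPDATE_WINDOW, &win);
 */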
1060static int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd,
1061 unsigned long arg)
1062{
1063 struct omapfb_plane_struct *plane = fbi->par;
1064 struct omapfb_device *fbdev = plane->fbdev;
1065 struct fb_ops *ops = fbi->fbops;
1066 union {
1067 struct omapfb_update_window update_window;
1068 struct omapfb_plane_info plane_info;
1069 struct omapfb_mem_info mem_info;
1070 struct omapfb_color_key color_key;
1071 enum omapfb_update_mode update_mode;
1072 struct omapfb_caps caps;
1073 unsigned int mirror;
1074 int plane_out;
1075 int enable_plane;
1076 } p;
1077 int r = 0;
1078
1079 BUG_ON(!ops);
1080 switch (cmd) {
1081 case OMAPFB_MIRROR:
1082 if (get_user(p.mirror, (int __user *)arg))
1083 r = -EFAULT;
1084 else
1085 omapfb_mirror(fbi, p.mirror);
1086 break;
1087 case OMAPFB_SYNC_GFX:
1088 omapfb_sync(fbi);
1089 break;
1090 case OMAPFB_VSYNC:
1091 break;
1092 case OMAPFB_SET_UPDATE_MODE:
1093 if (get_user(p.update_mode, (int __user *)arg))
1094 r = -EFAULT;
1095 else
1096 r = omapfb_set_update_mode(fbdev, p.update_mode);
1097 break;
1098 case OMAPFB_GET_UPDATE_MODE:
1099 p.update_mode = omapfb_get_update_mode(fbdev);
1100 if (put_user(p.update_mode,
1101 (enum omapfb_update_mode __user *)arg))
1102 r = -EFAULT;
1103 break;
1104 case OMAPFB_UPDATE_WINDOW_OLD:
1105 if (copy_from_user(&p.update_window, (void __user *)arg,
1106 sizeof(struct omapfb_update_window_old)))
1107 r = -EFAULT;
1108 else {
1109 struct omapfb_update_window *u = &p.update_window;
1110 u->out_x = u->x;
1111 u->out_y = u->y;
1112 u->out_width = u->width;
1113 u->out_height = u->height;
1114 memset(u->reserved, 0, sizeof(u->reserved));
1115 r = omapfb_update_win(fbi, u);
1116 }
1117 break;
1118 case OMAPFB_UPDATE_WINDOW:
1119 if (copy_from_user(&p.update_window, (void __user *)arg,
1120 sizeof(p.update_window)))
1121 r = -EFAULT;
1122 else
1123 r = omapfb_update_win(fbi, &p.update_window);
1124 break;
1125 case OMAPFB_SETUP_PLANE:
1126 if (copy_from_user(&p.plane_info, (void __user *)arg,
1127 sizeof(p.plane_info)))
1128 r = -EFAULT;
1129 else
1130 r = omapfb_setup_plane(fbi, &p.plane_info);
1131 break;
1132 case OMAPFB_QUERY_PLANE:
1133 if ((r = omapfb_query_plane(fbi, &p.plane_info)) < 0)
1134 break;
1135 if (copy_to_user((void __user *)arg, &p.plane_info,
1136 sizeof(p.plane_info)))
1137 r = -EFAULT;
1138 break;
1139 case OMAPFB_SETUP_MEM:
1140 if (copy_from_user(&p.mem_info, (void __user *)arg,
1141 sizeof(p.mem_info)))
1142 r = -EFAULT;
1143 else
1144 r = omapfb_setup_mem(fbi, &p.mem_info);
1145 break;
1146 case OMAPFB_QUERY_MEM:
1147 if ((r = omapfb_query_mem(fbi, &p.mem_info)) < 0)
1148 break;
1149 if (copy_to_user((void __user *)arg, &p.mem_info,
1150 sizeof(p.mem_info)))
1151 r = -EFAULT;
1152 break;
1153 case OMAPFB_SET_COLOR_KEY:
1154 if (copy_from_user(&p.color_key, (void __user *)arg,
1155 sizeof(p.color_key)))
1156 r = -EFAULT;
1157 else
1158 r = omapfb_set_color_key(fbdev, &p.color_key);
1159 break;
1160 case OMAPFB_GET_COLOR_KEY:
1161 if ((r = omapfb_get_color_key(fbdev, &p.color_key)) < 0)
1162 break;
1163 if (copy_to_user((void __user *)arg, &p.color_key,
1164 sizeof(p.color_key)))
1165 r = -EFAULT;
1166 break;
1167 case OMAPFB_GET_CAPS:
1168 omapfb_get_caps(fbdev, plane->idx, &p.caps);
1169 if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
1170 r = -EFAULT;
1171 break;
1172 case OMAPFB_LCD_TEST:
1173 {
1174 int test_num;
1175
1176 if (get_user(test_num, (int __user *)arg)) {
1177 r = -EFAULT;
1178 break;
1179 }
1180 if (!fbdev->panel->run_test) {
1181 r = -EINVAL;
1182 break;
1183 }
1184 r = fbdev->panel->run_test(fbdev->panel, test_num);
1185 break;
1186 }
1187 case OMAPFB_CTRL_TEST:
1188 {
1189 int test_num;
1190
1191 if (get_user(test_num, (int __user *)arg)) {
1192 r = -EFAULT;
1193 break;
1194 }
1195 if (!fbdev->ctrl->run_test) {
1196 r = -EINVAL;
1197 break;
1198 }
1199 r = fbdev->ctrl->run_test(test_num);
1200 break;
1201 }
1202 default:
1203 r = -EINVAL;
1204 }
1205
1206 return r;
1207}
1208
1209static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
1210{
1211 struct omapfb_plane_struct *plane = info->par;
1212 struct omapfb_device *fbdev = plane->fbdev;
1213 int r;
1214
1215 omapfb_rqueue_lock(fbdev);
1216 r = fbdev->ctrl->mmap(info, vma);
1217 omapfb_rqueue_unlock(fbdev);
1218
1219 return r;
1220}
1221
1222/*
1223 * Callback table for the frame buffer framework. Some of these pointers
1224 * will be changed according to the current setting of fb_info->accel_flags.
1225 */
1226static struct fb_ops omapfb_ops = {
1227 .owner = THIS_MODULE,
1228 .fb_open = omapfb_open,
1229 .fb_release = omapfb_release,
1230 .fb_setcolreg = omapfb_setcolreg,
1231 .fb_setcmap = omapfb_setcmap,
1232 .fb_fillrect = cfb_fillrect,
1233 .fb_copyarea = cfb_copyarea,
1234 .fb_imageblit = cfb_imageblit,
1235 .fb_blank = omapfb_blank,
1236 .fb_ioctl = omapfb_ioctl,
1237 .fb_check_var = omapfb_check_var,
1238 .fb_set_par = omapfb_set_par,
1239 .fb_rotate = omapfb_rotate,
1240 .fb_pan_display = omapfb_pan_display,
1241};
1242
1243/*
1244 * ---------------------------------------------------------------------------
1245 * Sysfs interface
1246 * ---------------------------------------------------------------------------
1247 */
1248/* omapfbX sysfs entries */
1249static ssize_t omapfb_show_caps_num(struct device *dev,
1250 struct device_attribute *attr, char *buf)
1251{
1252 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1253 int plane;
1254 size_t size;
1255 struct omapfb_caps caps;
1256
1257 plane = 0;
1258 size = 0;
1259 while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
1260 omapfb_get_caps(fbdev, plane, &caps);
1261 size += snprintf(&buf[size], PAGE_SIZE - size,
1262 "plane#%d %#010x %#010x %#010x\n",
1263 plane, caps.ctrl, caps.plane_color, caps.wnd_color);
1264 plane++;
1265 }
1266 return size;
1267}
1268
1269static ssize_t omapfb_show_caps_text(struct device *dev,
1270 struct device_attribute *attr, char *buf)
1271{
1272 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1273 int i;
1274 struct omapfb_caps caps;
1275 int plane;
1276 size_t size;
1277
1278 plane = 0;
1279 size = 0;
1280 while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
1281 omapfb_get_caps(fbdev, plane, &caps);
1282 size += snprintf(&buf[size], PAGE_SIZE - size,
1283 "plane#%d:\n", plane);
1284 for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
1285 size < PAGE_SIZE; i++) {
1286 if (ctrl_caps[i].flag & caps.ctrl)
1287 size += snprintf(&buf[size], PAGE_SIZE - size,
1288 " %s\n", ctrl_caps[i].name);
1289 }
1290 size += snprintf(&buf[size], PAGE_SIZE - size,
1291 " plane colors:\n");
1292 for (i = 0; i < ARRAY_SIZE(color_caps) &&
1293 size < PAGE_SIZE; i++) {
1294 if (color_caps[i].flag & caps.plane_color)
1295 size += snprintf(&buf[size], PAGE_SIZE - size,
1296 " %s\n", color_caps[i].name);
1297 }
1298 size += snprintf(&buf[size], PAGE_SIZE - size,
1299 " window colors:\n");
1300 for (i = 0; i < ARRAY_SIZE(color_caps) &&
1301 size < PAGE_SIZE; i++) {
1302 if (color_caps[i].flag & caps.wnd_color)
1303 size += snprintf(&buf[size], PAGE_SIZE - size,
1304 " %s\n", color_caps[i].name);
1305 }
1306
1307 plane++;
1308 }
1309 return size;
1310}
1311
1312static DEVICE_ATTR(caps_num, 0444, omapfb_show_caps_num, NULL);
1313static DEVICE_ATTR(caps_text, 0444, omapfb_show_caps_text, NULL);
1314
1315/* panel sysfs entries */
1316static ssize_t omapfb_show_panel_name(struct device *dev,
1317 struct device_attribute *attr, char *buf)
1318{
1319 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1320
1321 return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
1322}
1323
1324static ssize_t omapfb_show_bklight_level(struct device *dev,
1325 struct device_attribute *attr,
1326 char *buf)
1327{
1328 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1329 int r;
1330
1331 if (fbdev->panel->get_bklight_level) {
1332 r = snprintf(buf, PAGE_SIZE, "%d\n",
1333 fbdev->panel->get_bklight_level(fbdev->panel));
1334 } else
1335 r = -ENODEV;
1336 return r;
1337}
1338
1339static ssize_t omapfb_store_bklight_level(struct device *dev,
1340 struct device_attribute *attr,
1341 const char *buf, size_t size)
1342{
1343 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1344 int r;
1345
1346 if (fbdev->panel->set_bklight_level) {
1347 unsigned int level;
1348
1349		if (sscanf(buf, "%10u", &level) == 1) {
1350 r = fbdev->panel->set_bklight_level(fbdev->panel,
1351 level);
1352 } else
1353 r = -EINVAL;
1354 } else
1355 r = -ENODEV;
1356 return r ? r : size;
1357}
1358
1359static ssize_t omapfb_show_bklight_max(struct device *dev,
1360 struct device_attribute *attr, char *buf)
1361{
1362 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1363 int r;
1364
1365	if (fbdev->panel->get_bklight_max) {
1366 r = snprintf(buf, PAGE_SIZE, "%d\n",
1367 fbdev->panel->get_bklight_max(fbdev->panel));
1368 } else
1369 r = -ENODEV;
1370 return r;
1371}
1372
1373static struct device_attribute dev_attr_panel_name =
1374 __ATTR(name, 0444, omapfb_show_panel_name, NULL);
1375static DEVICE_ATTR(backlight_level, 0664,
1376 omapfb_show_bklight_level, omapfb_store_bklight_level);
1377static DEVICE_ATTR(backlight_max, 0444, omapfb_show_bklight_max, NULL);
1378
1379static struct attribute *panel_attrs[] = {
1380 &dev_attr_panel_name.attr,
1381 &dev_attr_backlight_level.attr,
1382 &dev_attr_backlight_max.attr,
1383 NULL,
1384};
1385
1386static struct attribute_group panel_attr_grp = {
1387 .name = "panel",
1388 .attrs = panel_attrs,
1389};
1390
1391/* ctrl sysfs entries */
1392static ssize_t omapfb_show_ctrl_name(struct device *dev,
1393 struct device_attribute *attr, char *buf)
1394{
1395 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1396
1397 return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name);
1398}
1399
1400static struct device_attribute dev_attr_ctrl_name =
1401 __ATTR(name, 0444, omapfb_show_ctrl_name, NULL);
1402
1403static struct attribute *ctrl_attrs[] = {
1404 &dev_attr_ctrl_name.attr,
1405 NULL,
1406};
1407
1408static struct attribute_group ctrl_attr_grp = {
1409 .name = "ctrl",
1410 .attrs = ctrl_attrs,
1411};
1412
1413static int omapfb_register_sysfs(struct omapfb_device *fbdev)
1414{
1415 int r;
1416
1417 if ((r = device_create_file(fbdev->dev, &dev_attr_caps_num)))
1418 goto fail0;
1419
1420 if ((r = device_create_file(fbdev->dev, &dev_attr_caps_text)))
1421 goto fail1;
1422
1423 if ((r = sysfs_create_group(&fbdev->dev->kobj, &panel_attr_grp)))
1424 goto fail2;
1425
1426 if ((r = sysfs_create_group(&fbdev->dev->kobj, &ctrl_attr_grp)))
1427 goto fail3;
1428
1429 return 0;
1430fail3:
1431 sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
1432fail2:
1433 device_remove_file(fbdev->dev, &dev_attr_caps_text);
1434fail1:
1435 device_remove_file(fbdev->dev, &dev_attr_caps_num);
1436fail0:
1437 dev_err(fbdev->dev, "unable to register sysfs interface\n");
1438 return r;
1439}
1440
1441static void omapfb_unregister_sysfs(struct omapfb_device *fbdev)
1442{
1443 sysfs_remove_group(&fbdev->dev->kobj, &ctrl_attr_grp);
1444 sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
1445 device_remove_file(fbdev->dev, &dev_attr_caps_num);
1446 device_remove_file(fbdev->dev, &dev_attr_caps_text);
1447}
1448
1449/*
1450 * ---------------------------------------------------------------------------
1451 * LDM callbacks
1452 * ---------------------------------------------------------------------------
1453 */
1454/* Initialize the system fb_info object and set the default video mode.
1455 * The frame buffer memory has already been allocated by lcddma_init.
1456 */
1457static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
1458{
1459 struct fb_var_screeninfo *var = &info->var;
1460 struct fb_fix_screeninfo *fix = &info->fix;
1461 int r = 0;
1462
1463 info->fbops = &omapfb_ops;
1464 info->flags = FBINFO_FLAG_DEFAULT;
1465
1466 strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
1467
1468 info->pseudo_palette = fbdev->pseudo_palette;
1469
1470 var->accel_flags = def_accel ? FB_ACCELF_TEXT : 0;
1471 var->xres = def_vxres;
1472 var->yres = def_vyres;
1473 var->xres_virtual = def_vxres;
1474 var->yres_virtual = def_vyres;
1475 var->rotate = def_rotate;
1476 var->bits_per_pixel = fbdev->panel->bpp;
1477
1478 set_fb_var(info, var);
1479 set_fb_fix(info);
1480
1481 r = fb_alloc_cmap(&info->cmap, 16, 0);
1482 if (r != 0)
1483 dev_err(fbdev->dev, "unable to allocate color map memory\n");
1484
1485 return r;
1486}
1487
1488/* Release the fb_info object */
1489static void fbinfo_cleanup(struct omapfb_device *fbdev, struct fb_info *fbi)
1490{
1491 fb_dealloc_cmap(&fbi->cmap);
1492}
1493
1494static void planes_cleanup(struct omapfb_device *fbdev)
1495{
1496 int i;
1497
1498 for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
1499 if (fbdev->fb_info[i] == NULL)
1500 break;
1501 fbinfo_cleanup(fbdev, fbdev->fb_info[i]);
1502 framebuffer_release(fbdev->fb_info[i]);
1503 }
1504}
1505
1506static int planes_init(struct omapfb_device *fbdev)
1507{
1508 struct fb_info *fbi;
1509 int i;
1510 int r;
1511
1512 for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
1513 struct omapfb_plane_struct *plane;
1514 fbi = framebuffer_alloc(sizeof(struct omapfb_plane_struct),
1515 fbdev->dev);
1516 if (fbi == NULL) {
1517 dev_err(fbdev->dev,
1518 "unable to allocate memory for plane info\n");
1519 planes_cleanup(fbdev);
1520 return -ENOMEM;
1521 }
1522 plane = fbi->par;
1523 plane->idx = i;
1524 plane->fbdev = fbdev;
1525 plane->info.mirror = def_mirror;
1526 fbdev->fb_info[i] = fbi;
1527
1528 if ((r = fbinfo_init(fbdev, fbi)) < 0) {
1529 framebuffer_release(fbi);
1530 planes_cleanup(fbdev);
1531 return r;
1532 }
1533 plane->info.out_width = fbi->var.xres;
1534 plane->info.out_height = fbi->var.yres;
1535 }
1536 return 0;
1537}
1538
1539/*
1540 * Free driver resources. Can be called to roll back an aborted initialization
1541 * sequence.
1542 */
1543static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
1544{
1545 int i;
1546
1547 switch (state) {
1548 case OMAPFB_ACTIVE:
1549 for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
1550 unregister_framebuffer(fbdev->fb_info[i]);
1551 case 7:
1552 omapfb_unregister_sysfs(fbdev);
1553 case 6:
1554 fbdev->panel->disable(fbdev->panel);
1555 case 5:
1556 omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
1557 case 4:
1558 planes_cleanup(fbdev);
1559 case 3:
1560 ctrl_cleanup(fbdev);
1561 case 2:
1562 fbdev->panel->cleanup(fbdev->panel);
1563 case 1:
1564 dev_set_drvdata(fbdev->dev, NULL);
1565 kfree(fbdev);
1566 case 0:
1567 /* nothing to free */
1568 break;
1569 default:
1570 BUG();
1571 }
1572}
1573
1574static int omapfb_find_ctrl(struct omapfb_device *fbdev)
1575{
1576 struct omapfb_platform_data *conf;
1577 char name[17];
1578 int i;
1579
1580 conf = fbdev->dev->platform_data;
1581
1582 fbdev->ctrl = NULL;
1583
1584 strncpy(name, conf->lcd.ctrl_name, sizeof(name) - 1);
1585 name[sizeof(name) - 1] = '\0';
1586
1587 if (strcmp(name, "internal") == 0) {
1588 fbdev->ctrl = fbdev->int_ctrl;
1589 return 0;
1590 }
1591
1592 for (i = 0; i < ARRAY_SIZE(ctrls); i++) {
1593 dev_dbg(fbdev->dev, "ctrl %s\n", ctrls[i]->name);
1594 if (strcmp(ctrls[i]->name, name) == 0) {
1595 fbdev->ctrl = ctrls[i];
1596 break;
1597 }
1598 }
1599
1600 if (fbdev->ctrl == NULL) {
1601 dev_dbg(fbdev->dev, "ctrl %s not supported\n", name);
1602 return -1;
1603 }
1604
1605 return 0;
1606}
1607
1608static void check_required_callbacks(struct omapfb_device *fbdev)
1609{
1610#define _C(x) (fbdev->ctrl->x != NULL)
1611#define _P(x) (fbdev->panel->x != NULL)
1612 BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
1613 BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
1614 _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
1615 _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
1616 _P(get_caps)));
1617#undef _P
1618#undef _C
1619}
1620
1621/*
1622 * Called by LDM binding to probe and attach a new device.
1623 * Initialization sequence:
1624 * 1. allocate system omapfb_device structure
1625 * 2. select controller type according to platform configuration
1626 * init LCD panel
1627 * 3. init LCD controller and LCD DMA
1628 * 4. init system fb_info structure for all planes
1629 * 5. setup video mode for first plane and enable it
1630 * 6. enable LCD panel
1631 * 7. register sysfs attributes
1632 * OMAPFB_ACTIVE: register system fb_info structure for all planes
1633 */
1634static int omapfb_do_probe(struct platform_device *pdev,
1635 struct lcd_panel *panel)
1636{
1637 struct omapfb_device *fbdev = NULL;
1638 int init_state;
1639 unsigned long phz, hhz, vhz;
1640 unsigned long vram;
1641 int i;
1642 int r = 0;
1643
1644 init_state = 0;
1645
1646 if (pdev->num_resources != 0) {
1647 dev_err(&pdev->dev, "probed for an unknown device\n");
1648 r = -ENODEV;
1649 goto cleanup;
1650 }
1651
1652 if (pdev->dev.platform_data == NULL) {
1653 dev_err(&pdev->dev, "missing platform data\n");
1654 r = -ENOENT;
1655 goto cleanup;
1656 }
1657
1658 fbdev = kzalloc(sizeof(struct omapfb_device), GFP_KERNEL);
1659 if (fbdev == NULL) {
1660 dev_err(&pdev->dev,
1661 "unable to allocate memory for device info\n");
1662 r = -ENOMEM;
1663 goto cleanup;
1664 }
1665 init_state++;
1666
1667 fbdev->dev = &pdev->dev;
1668 fbdev->panel = panel;
1669 platform_set_drvdata(pdev, fbdev);
1670
1671 mutex_init(&fbdev->rqueue_mutex);
1672
1673#ifdef CONFIG_ARCH_OMAP1
1674 fbdev->int_ctrl = &omap1_int_ctrl;
1675#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1676 fbdev->ext_if = &omap1_ext_if;
1677#endif
1678#else /* OMAP2 */
1679 fbdev->int_ctrl = &omap2_int_ctrl;
1680#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
1681 fbdev->ext_if = &omap2_ext_if;
1682#endif
1683#endif
1684 if (omapfb_find_ctrl(fbdev) < 0) {
1685 dev_err(fbdev->dev,
1686 "LCD controller not found, board not supported\n");
1687 r = -ENODEV;
1688 goto cleanup;
1689 }
1690
1691 r = fbdev->panel->init(fbdev->panel, fbdev);
1692 if (r)
1693 goto cleanup;
1694
1695 pr_info("omapfb: configured for panel %s\n", fbdev->panel->name);
1696
1697 def_vxres = def_vxres ? : fbdev->panel->x_res;
1698 def_vyres = def_vyres ? : fbdev->panel->y_res;
1699
1700 init_state++;
1701
1702 r = ctrl_init(fbdev);
1703 if (r)
1704 goto cleanup;
1705 if (fbdev->ctrl->mmap != NULL)
1706 omapfb_ops.fb_mmap = omapfb_mmap;
1707 init_state++;
1708
1709 check_required_callbacks(fbdev);
1710
1711 r = planes_init(fbdev);
1712 if (r)
1713 goto cleanup;
1714 init_state++;
1715
1716#ifdef CONFIG_FB_OMAP_DMA_TUNE
1717 /* Set DMA priority for EMIFF access to highest */
1718 if (cpu_class_is_omap1())
1719 omap_set_dma_priority(0, OMAP_DMA_PORT_EMIFF, 15);
1720#endif
1721
1722 r = ctrl_change_mode(fbdev->fb_info[0]);
1723 if (r) {
1724 dev_err(fbdev->dev, "mode setting failed\n");
1725 goto cleanup;
1726 }
1727
1728 /* GFX plane is enabled by default */
1729 r = fbdev->ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
1730 if (r)
1731 goto cleanup;
1732
1733 omapfb_set_update_mode(fbdev, manual_update ?
1734 OMAPFB_MANUAL_UPDATE : OMAPFB_AUTO_UPDATE);
1735 init_state++;
1736
1737 r = fbdev->panel->enable(fbdev->panel);
1738 if (r)
1739 goto cleanup;
1740 init_state++;
1741
1742 r = omapfb_register_sysfs(fbdev);
1743 if (r)
1744 goto cleanup;
1745 init_state++;
1746
1747 vram = 0;
1748 for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
1749 r = register_framebuffer(fbdev->fb_info[i]);
1750 if (r != 0) {
1751 dev_err(fbdev->dev,
1752 "registering framebuffer %d failed\n", i);
1753 goto cleanup;
1754 }
1755 vram += fbdev->mem_desc.region[i].size;
1756 }
1757
1758 fbdev->state = OMAPFB_ACTIVE;
1759
1760 panel = fbdev->panel;
1761 phz = panel->pixel_clock * 1000;
1762 hhz = phz * 10 / (panel->hfp + panel->x_res + panel->hbp + panel->hsw);
1763 vhz = hhz / (panel->vfp + panel->y_res + panel->vbp + panel->vsw);
1764
1765 omapfb_dev = fbdev;
1766
1767 pr_info("omapfb: Framebuffer initialized. Total vram %lu planes %d\n",
1768 vram, fbdev->mem_desc.region_cnt);
1769 pr_info("omapfb: Pixclock %lu kHz hfreq %lu.%lu kHz "
1770 "vfreq %lu.%lu Hz\n",
1771 phz / 1000, hhz / 10000, hhz % 10, vhz / 10, vhz % 10);
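To make the fixed-point scaling above concrete, a short worked example with hypothetical panel timings (not taken from any real panel):

/* Example, assuming pixel_clock = 6000 kHz, a 400-clock total line
 * (x_res + hfp + hbp + hsw) and a 250-line total frame
 * (y_res + vfp + vbp + vsw):
 *   phz = 6000 * 1000      = 6000000 Hz
 *   hhz = phz * 10 / 400   = 150000   (line rate, in 0.1 Hz units)
 *   vhz = hhz / 250        = 600      (refresh rate, in 0.1 Hz units)
 * which the pr_info() above reports as 6000 kHz, 15.0 kHz and 60.0 Hz.
 */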
1772
1773 return 0;
1774
1775cleanup:
1776 omapfb_free_resources(fbdev, init_state);
1777
1778 return r;
1779}
1780
1781static int omapfb_probe(struct platform_device *pdev)
1782{
1783 BUG_ON(fbdev_pdev != NULL);
1784
1785 /* Delay actual initialization until the LCD is registered */
1786 fbdev_pdev = pdev;
1787 if (fbdev_panel != NULL)
1788 omapfb_do_probe(fbdev_pdev, fbdev_panel);
1789 return 0;
1790}
1791
1792void omapfb_register_panel(struct lcd_panel *panel)
1793{
1794 BUG_ON(fbdev_panel != NULL);
1795
1796 fbdev_panel = panel;
1797 if (fbdev_pdev != NULL)
1798 omapfb_do_probe(fbdev_pdev, fbdev_panel);
1799}
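To make the deferred-probe handshake above concrete, here is a minimal sketch of a panel driver registering itself. It is illustrative only: the struct lcd_panel field and callback names are inferred from how this file uses them, the cleanup/disable/get_caps signatures are assumptions, and the header path is the one used by the OMAP2 interface code further down.

#include <asm/arch/omapfb.h>	/* assumed to declare struct lcd_panel */

static int example_panel_init(struct lcd_panel *panel,
			      struct omapfb_device *fbdev)
{
	return 0;			/* nothing to set up in this sketch */
}

static void example_panel_cleanup(struct lcd_panel *panel) { }
static int example_panel_enable(struct lcd_panel *panel) { return 0; }
static void example_panel_disable(struct lcd_panel *panel) { }
static unsigned long example_panel_get_caps(struct lcd_panel *panel)
{
	return 0;			/* signature assumed; no extra caps */
}

static struct lcd_panel example_panel = {
	.name		= "example",
	.x_res		= 480,
	.y_res		= 640,
	.bpp		= 16,
	.pixel_clock	= 6000,		/* kHz, hypothetical value */
	.hsw = 2, .hfp = 2, .hbp = 28,
	.vsw = 2, .vfp = 2, .vbp = 1,
	.init		= example_panel_init,
	.cleanup	= example_panel_cleanup,
	.enable		= example_panel_enable,
	.disable	= example_panel_disable,
	.get_caps	= example_panel_get_caps,
};

static int __init example_panel_drv_init(void)
{
	/* Whichever of this call and omapfb_probe() runs second
	 * ends up triggering omapfb_do_probe(). */
	omapfb_register_panel(&example_panel);
	return 0;
}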
1800
1801/* Called when the device is being detached from the driver */
1802static int omapfb_remove(struct platform_device *pdev)
1803{
1804 struct omapfb_device *fbdev = platform_get_drvdata(pdev);
1805 enum omapfb_state saved_state = fbdev->state;
1806
1807 /* FIXME: wait till completion of pending events */
1808
1809 fbdev->state = OMAPFB_DISABLED;
1810 omapfb_free_resources(fbdev, saved_state);
1811
1812 return 0;
1813}
1814
1815/* PM suspend */
1816static int omapfb_suspend(struct platform_device *pdev, pm_message_t mesg)
1817{
1818 struct omapfb_device *fbdev = platform_get_drvdata(pdev);
1819
1820 omapfb_blank(VESA_POWERDOWN, fbdev->fb_info[0]);
1821
1822 return 0;
1823}
1824
1825/* PM resume */
1826static int omapfb_resume(struct platform_device *pdev)
1827{
1828 struct omapfb_device *fbdev = platform_get_drvdata(pdev);
1829
1830 omapfb_blank(VESA_NO_BLANKING, fbdev->fb_info[0]);
1831 return 0;
1832}
1833
1834static struct platform_driver omapfb_driver = {
1835 .probe = omapfb_probe,
1836 .remove = omapfb_remove,
1837 .suspend = omapfb_suspend,
1838 .resume = omapfb_resume,
1839 .driver = {
1840 .name = MODULE_NAME,
1841 .owner = THIS_MODULE,
1842 },
1843};
1844
1845#ifndef MODULE
1846
1847/* Process kernel command line parameters */
1848static int __init omapfb_setup(char *options)
1849{
1850 char *this_opt = NULL;
1851 int r = 0;
1852
1853 pr_debug("omapfb: options %s\n", options);
1854
1855 if (!options || !*options)
1856 return 0;
1857
1858 while (!r && (this_opt = strsep(&options, ",")) != NULL) {
1859 if (!strncmp(this_opt, "accel", 5))
1860 def_accel = 1;
1861 else if (!strncmp(this_opt, "vram:", 5)) {
1862 char *suffix;
1863 unsigned long vram;
1864 vram = (simple_strtoul(this_opt + 5, &suffix, 0));
1865 switch (suffix[0]) {
1866 case '\0':
1867 break;
1868 case 'm':
1869 case 'M':
1870 vram *= 1024;
1871 /* Fall through */
1872 case 'k':
1873 case 'K':
1874 vram *= 1024;
1875 break;
1876 default:
1877 pr_debug("omapfb: invalid vram suffix %c\n",
1878 suffix[0]);
1879 r = -1;
1880 }
1881 def_vram[def_vram_cnt++] = vram;
1882 }
1883 else if (!strncmp(this_opt, "vxres:", 6))
1884 def_vxres = simple_strtoul(this_opt + 6, NULL, 0);
1885 else if (!strncmp(this_opt, "vyres:", 6))
1886 def_vyres = simple_strtoul(this_opt + 6, NULL, 0);
1887 else if (!strncmp(this_opt, "rotate:", 7))
1888 def_rotate = (simple_strtoul(this_opt + 7, NULL, 0));
1889 else if (!strncmp(this_opt, "mirror:", 7))
1890 def_mirror = (simple_strtoul(this_opt + 7, NULL, 0));
1891 else if (!strncmp(this_opt, "manual_update", 13))
1892 manual_update = 1;
1893 else {
1894 pr_debug("omapfb: invalid option\n");
1895 r = -1;
1896 }
1897 }
1898
1899 return r;
1900}
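As a usage note, the option names parsed above can be passed on the kernel command line; the values below are made up, and the "video=omapfb:" prefix assumes the standard fbdev option syntax that fb_get_options() handles in omapfb_init():

/* Hypothetical boot arguments:
 *   video=omapfb:accel,vram:2M,vxres:800,vyres:600,rotate:90,manual_update
 * "2M" is scaled to bytes by the 'm'/'k' suffix handling above.
 */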
1901
1902#endif
1903
1904/* Register both the driver and the device */
1905static int __init omapfb_init(void)
1906{
1907#ifndef MODULE
1908 char *option;
1909
1910 if (fb_get_options("omapfb", &option))
1911 return -ENODEV;
1912 omapfb_setup(option);
1913#endif
1914 /* Register the driver with LDM */
1915 if (platform_driver_register(&omapfb_driver)) {
1916 pr_debug("failed to register omapfb driver\n");
1917 return -ENODEV;
1918 }
1919
1920 return 0;
1921}
1922
1923static void __exit omapfb_cleanup(void)
1924{
1925 platform_driver_unregister(&omapfb_driver);
1926}
1927
1928module_param_named(accel, def_accel, uint, 0664);
1929module_param_array_named(vram, def_vram, ulong, &def_vram_cnt, 0664);
1930module_param_named(vxres, def_vxres, long, 0664);
1931module_param_named(vyres, def_vyres, long, 0664);
1932module_param_named(rotate, def_rotate, uint, 0664);
1933module_param_named(mirror, def_mirror, uint, 0664);
1934module_param_named(manual_update, manual_update, bool, 0664);
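For the modular build the same knobs are exposed as module parameters; a hypothetical invocation (parameter names from the module_param lines above, values made up, and assuming the module is built as omapfb.ko):

/* Example:
 *   modprobe omapfb accel=1 vxres=800 vyres=600 rotate=90 \
 *           manual_update=1 vram=2097152,1048576
 * Unlike the boot option parser above, the vram= array takes plain
 * byte counts with no 'k'/'M' suffix handling.
 */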
1935
1936module_init(omapfb_init);
1937module_exit(omapfb_cleanup);
1938
1939MODULE_DESCRIPTION("TI OMAP framebuffer driver");
1940MODULE_AUTHOR("Imre Deak <imre.deak@nokia.com>");
1941MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
new file mode 100644
index 000000000000..2b4269813b22
--- /dev/null
+++ b/drivers/video/omap/rfbi.c
@@ -0,0 +1,588 @@
1/*
2 * OMAP2 Remote Frame Buffer Interface support
3 *
4 * Copyright (C) 2005 Nokia Corporation
5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 * Imre Deak <imre.deak@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/i2c.h>
25#include <linux/err.h>
26#include <linux/interrupt.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29
30#include <asm/arch/omapfb.h>
31
32#include "dispc.h"
33
34/* To work around an RFBI transfer rate limitation */
35#define OMAP_RFBI_RATE_LIMIT 1
36
37#define RFBI_BASE 0x48050800
38#define RFBI_REVISION 0x0000
39#define RFBI_SYSCONFIG 0x0010
40#define RFBI_SYSSTATUS 0x0014
41#define RFBI_CONTROL 0x0040
42#define RFBI_PIXEL_CNT 0x0044
43#define RFBI_LINE_NUMBER 0x0048
44#define RFBI_CMD 0x004c
45#define RFBI_PARAM 0x0050
46#define RFBI_DATA 0x0054
47#define RFBI_READ 0x0058
48#define RFBI_STATUS 0x005c
49#define RFBI_CONFIG0 0x0060
50#define RFBI_ONOFF_TIME0 0x0064
51#define RFBI_CYCLE_TIME0 0x0068
52#define RFBI_DATA_CYCLE1_0 0x006c
53#define RFBI_DATA_CYCLE2_0 0x0070
54#define RFBI_DATA_CYCLE3_0 0x0074
55#define RFBI_VSYNC_WIDTH 0x0090
56#define RFBI_HSYNC_WIDTH 0x0094
57
58#define DISPC_BASE 0x48050400
59#define DISPC_CONTROL 0x0040
60
61static struct {
62 u32 base;
63 void (*lcdc_callback)(void *data);
64 void *lcdc_callback_data;
65 unsigned long l4_khz;
66 int bits_per_cycle;
67 struct omapfb_device *fbdev;
68 struct clk *dss_ick;
69 struct clk *dss1_fck;
70 unsigned tearsync_pin_cnt;
71 unsigned tearsync_mode;
72} rfbi;
73
74static inline void rfbi_write_reg(int idx, u32 val)
75{
76 __raw_writel(val, rfbi.base + idx);
77}
78
79static inline u32 rfbi_read_reg(int idx)
80{
81 return __raw_readl(rfbi.base + idx);
82}
83
84static int rfbi_get_clocks(void)
85{
86 if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) {
87 dev_err(rfbi.fbdev->dev, "can't get dss_ick");
88 return PTR_ERR(rfbi.dss_ick);
89 }
90
91 if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) {
92 dev_err(rfbi.fbdev->dev, "can't get dss1_fck");
93 clk_put(rfbi.dss_ick);
94 return PTR_ERR(rfbi.dss1_fck);
95 }
96
97 return 0;
98}
99
100static void rfbi_put_clocks(void)
101{
102 clk_put(rfbi.dss1_fck);
103 clk_put(rfbi.dss_ick);
104}
105
106static void rfbi_enable_clocks(int enable)
107{
108 if (enable) {
109 clk_enable(rfbi.dss_ick);
110 clk_enable(rfbi.dss1_fck);
111 } else {
112 clk_disable(rfbi.dss1_fck);
113 clk_disable(rfbi.dss_ick);
114 }
115}
116
117
118#ifdef VERBOSE
119static void rfbi_print_timings(void)
120{
121 u32 l;
122 u32 time;
123
124 l = rfbi_read_reg(RFBI_CONFIG0);
125 time = 1000000000 / rfbi.l4_khz;
126 if (l & (1 << 4))
127 time *= 2;
128
129 dev_dbg(rfbi.fbdev->dev, "Tick time %u ps\n", time);
130 l = rfbi_read_reg(RFBI_ONOFF_TIME0);
131 dev_dbg(rfbi.fbdev->dev,
132 "CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
133 "REONTIME %d, REOFFTIME %d\n",
134 l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
135 (l >> 20) & 0x0f, (l >> 24) & 0x3f);
136
137 l = rfbi_read_reg(RFBI_CYCLE_TIME0);
138 dev_dbg(rfbi.fbdev->dev,
139 "WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
140 "ACCESSTIME %d\n",
141 (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
142 (l >> 22) & 0x3f);
143}
144#else
145static void rfbi_print_timings(void) {}
146#endif
147
148static void rfbi_set_timings(const struct extif_timings *t)
149{
150 u32 l;
151
152 BUG_ON(!t->converted);
153
154 rfbi_enable_clocks(1);
155 rfbi_write_reg(RFBI_ONOFF_TIME0, t->tim[0]);
156 rfbi_write_reg(RFBI_CYCLE_TIME0, t->tim[1]);
157
158 l = rfbi_read_reg(RFBI_CONFIG0);
159 l &= ~(1 << 4);
160 l |= (t->tim[2] ? 1 : 0) << 4;
161 rfbi_write_reg(RFBI_CONFIG0, l);
162
163 rfbi_print_timings();
164 rfbi_enable_clocks(0);
165}
166
167static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
168{
169 *clk_period = 1000000000 / rfbi.l4_khz;
170 *max_clk_div = 2;
171}
172
173static int ps_to_rfbi_ticks(int time, int div)
174{
175 unsigned long tick_ps;
176 int ret;
177
178 /* Calculate in picosecs to yield more exact results */
179 tick_ps = 1000000000 / (rfbi.l4_khz) * div;
180
181 ret = (time + tick_ps - 1) / tick_ps;
182
183 return ret;
184}
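A quick worked example of the ceiling conversion above, with made-up numbers:

/* Example, assuming an L4 clock of 110 MHz (l4_khz = 110000) and div = 1:
 *   tick_ps = 1000000000 / 110000 * 1 = 9090 ps per tick
 *   ps_to_rfbi_ticks(25000, 1) = (25000 + 9090 - 1) / 9090 = 3 ticks
 * i.e. a 25 ns requirement is rounded up to the next whole tick.
 */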
185
186#ifdef OMAP_RFBI_RATE_LIMIT
187static unsigned long rfbi_get_max_tx_rate(void)
188{
189 unsigned long l4_rate, dss1_rate;
190 int min_l4_ticks = 0;
191 int i;
192
193 /* According to TI this can't be calculated so make the
194 * adjustments for a couple of known frequencies and warn for
195 * others.
196 */
197 static const struct {
198 unsigned long l4_clk; /* HZ */
199 unsigned long dss1_clk; /* HZ */
200 unsigned long min_l4_ticks;
201 } ftab[] = {
202 { 55, 132, 7, }, /* 7.86 MPix/s */
203 { 110, 110, 12, }, /* 9.16 MPix/s */
204 { 110, 132, 10, }, /* 11 Mpix/s */
205 { 120, 120, 10, }, /* 12 Mpix/s */
206 { 133, 133, 10, }, /* 13.3 Mpix/s */
207 };
208
209 l4_rate = rfbi.l4_khz / 1000;
210 dss1_rate = clk_get_rate(rfbi.dss1_fck) / 1000000;
211
212 for (i = 0; i < ARRAY_SIZE(ftab); i++) {
213 /* Use a window instead of an exact match, to account
214 * for different DPLL multiplier / divider pairs.
215 */
216 if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
217 abs(ftab[i].dss1_clk - dss1_rate) < 3) {
218 min_l4_ticks = ftab[i].min_l4_ticks;
219 break;
220 }
221 }
222 if (i == ARRAY_SIZE(ftab)) {
223 /* Can't be sure, so return the maximum without rate limiting.
224 * This might cause a problem only for the tearing
225 * synchronisation.
226 */
227 dev_err(rfbi.fbdev->dev,
228 "can't determine maximum RFBI transfer rate\n");
229 return rfbi.l4_khz * 1000;
230 }
231 return rfbi.l4_khz * 1000 / min_l4_ticks;
232}
233#else
234static int rfbi_get_max_tx_rate(void)
235{
236 return rfbi.l4_khz * 1000;
237}
238#endif
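Tying the table above to the returned value, a worked example with one of the known frequency pairs:

/* Example: with L4 at 110 MHz and dss1_fck at 132 MHz the table row
 * { 110, 132, 10 } matches, so the maximum transfer rate becomes
 *   110000 kHz * 1000 / 10 = 11000000 pixels/s (the "11 Mpix/s" row).
 * An unknown pair falls back to the unlimited l4_khz * 1000.
 */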
239
240
241static int rfbi_convert_timings(struct extif_timings *t)
242{
243 u32 l;
244 int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
245 int actim, recyc, wecyc;
246 int div = t->clk_div;
247
248 if (div <= 0 || div > 2)
249 return -1;
250
251 /* Make sure that after conversion it still holds that:
252 * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
253 * csoff > cson, csoff >= max(weoff, reoff), actim > reon
254 */
255 weon = ps_to_rfbi_ticks(t->we_on_time, div);
256 weoff = ps_to_rfbi_ticks(t->we_off_time, div);
257 if (weoff <= weon)
258 weoff = weon + 1;
259 if (weon > 0x0f)
260 return -1;
261 if (weoff > 0x3f)
262 return -1;
263
264 reon = ps_to_rfbi_ticks(t->re_on_time, div);
265 reoff = ps_to_rfbi_ticks(t->re_off_time, div);
266 if (reoff <= reon)
267 reoff = reon + 1;
268 if (reon > 0x0f)
269 return -1;
270 if (reoff > 0x3f)
271 return -1;
272
273 cson = ps_to_rfbi_ticks(t->cs_on_time, div);
274 csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
275 if (csoff <= cson)
276 csoff = cson + 1;
277 if (csoff < max(weoff, reoff))
278 csoff = max(weoff, reoff);
279 if (cson > 0x0f)
280 return -1;
281 if (csoff > 0x3f)
282 return -1;
283
284 l = cson;
285 l |= csoff << 4;
286 l |= weon << 10;
287 l |= weoff << 14;
288 l |= reon << 20;
289 l |= reoff << 24;
290
291 t->tim[0] = l;
292
293 actim = ps_to_rfbi_ticks(t->access_time, div);
294 if (actim <= reon)
295 actim = reon + 1;
296 if (actim > 0x3f)
297 return -1;
298
299 wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
300 if (wecyc < weoff)
301 wecyc = weoff;
302 if (wecyc > 0x3f)
303 return -1;
304
305 recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
306 if (recyc < reoff)
307 recyc = reoff;
308 if (recyc > 0x3f)
309 return -1;
310
311 cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
312 if (cs_pulse > 0x3f)
313 return -1;
314
315 l = wecyc;
316 l |= recyc << 6;
317 l |= cs_pulse << 12;
318 l |= actim << 22;
319
320 t->tim[1] = l;
321
322 t->tim[2] = div - 1;
323
324 t->converted = 1;
325
326 return 0;
327}
328
329static int rfbi_setup_tearsync(unsigned pin_cnt,
330 unsigned hs_pulse_time, unsigned vs_pulse_time,
331 int hs_pol_inv, int vs_pol_inv, int extif_div)
332{
333 int hs, vs;
334 int min;
335 u32 l;
336
337 if (pin_cnt != 1 && pin_cnt != 2)
338 return -EINVAL;
339
340 hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
341 vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
342 if (hs < 2)
343 return -EDOM;
344 if (pin_cnt == 2)
345 min = 2;
346 else
347 min = 4;
348 if (vs < min)
349 return -EDOM;
350 if (vs == hs)
351 return -EINVAL;
352 rfbi.tearsync_pin_cnt = pin_cnt;
353 dev_dbg(rfbi.fbdev->dev,
354 "setup_tearsync: pins %d hs %d vs %d hs_inv %d vs_inv %d\n",
355 pin_cnt, hs, vs, hs_pol_inv, vs_pol_inv);
356
357 rfbi_enable_clocks(1);
358 rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
359 rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
360
361 l = rfbi_read_reg(RFBI_CONFIG0);
362 if (hs_pol_inv)
363 l &= ~(1 << 21);
364 else
365 l |= 1 << 21;
366 if (vs_pol_inv)
367 l &= ~(1 << 20);
368 else
369 l |= 1 << 20;
rfbi_write_reg(RFBI_CONFIG0, l);	/* write back the polarity configuration */
370 rfbi_enable_clocks(0);
371
372 return 0;
373}
374
375static int rfbi_enable_tearsync(int enable, unsigned line)
376{
377 u32 l;
378
379 dev_dbg(rfbi.fbdev->dev, "tearsync %d line %d mode %d\n",
380 enable, line, rfbi.tearsync_mode);
381 if (line > (1 << 11) - 1)
382 return -EINVAL;
383
384 rfbi_enable_clocks(1);
385 l = rfbi_read_reg(RFBI_CONFIG0);
386 l &= ~(0x3 << 2);
387 if (enable) {
388 rfbi.tearsync_mode = rfbi.tearsync_pin_cnt;
389 l |= rfbi.tearsync_mode << 2;
390 } else
391 rfbi.tearsync_mode = 0;
392 rfbi_write_reg(RFBI_CONFIG0, l);
393 rfbi_write_reg(RFBI_LINE_NUMBER, line);
394 rfbi_enable_clocks(0);
395
396 return 0;
397}
398
399static void rfbi_write_command(const void *buf, unsigned int len)
400{
401 rfbi_enable_clocks(1);
402 if (rfbi.bits_per_cycle == 16) {
403 const u16 *w = buf;
404 BUG_ON(len & 1);
405 for (; len; len -= 2)
406 rfbi_write_reg(RFBI_CMD, *w++);
407 } else {
408 const u8 *b = buf;
409 BUG_ON(rfbi.bits_per_cycle != 8);
410 for (; len; len--)
411 rfbi_write_reg(RFBI_CMD, *b++);
412 }
413 rfbi_enable_clocks(0);
414}
415
416static void rfbi_read_data(void *buf, unsigned int len)
417{
418 rfbi_enable_clocks(1);
419 if (rfbi.bits_per_cycle == 16) {
420 u16 *w = buf;
421 BUG_ON(len & 1);
422 for (; len; len -= 2) {
423 rfbi_write_reg(RFBI_READ, 0);
424 *w++ = rfbi_read_reg(RFBI_READ);
425 }
426 } else {
427 u8 *b = buf;
428 BUG_ON(rfbi.bits_per_cycle != 8);
429 for (; len; len--) {
430 rfbi_write_reg(RFBI_READ, 0);
431 *b++ = rfbi_read_reg(RFBI_READ);
432 }
433 }
434 rfbi_enable_clocks(0);
435}
436
437static void rfbi_write_data(const void *buf, unsigned int len)
438{
439 rfbi_enable_clocks(1);
440 if (rfbi.bits_per_cycle == 16) {
441 const u16 *w = buf;
442 BUG_ON(len & 1);
443 for (; len; len -= 2)
444 rfbi_write_reg(RFBI_PARAM, *w++);
445 } else {
446 const u8 *b = buf;
447 BUG_ON(rfbi.bits_per_cycle != 8);
448 for (; len; len--)
449 rfbi_write_reg(RFBI_PARAM, *b++);
450 }
451 rfbi_enable_clocks(0);
452}
453
454static void rfbi_transfer_area(int width, int height,
455 void (callback)(void * data), void *data)
456{
457 u32 w;
458
459 BUG_ON(callback == NULL);
460
461 rfbi_enable_clocks(1);
462 omap_dispc_set_lcd_size(width, height);
463
464 rfbi.lcdc_callback = callback;
465 rfbi.lcdc_callback_data = data;
466
467 rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
468
469 w = rfbi_read_reg(RFBI_CONTROL);
470 w |= 1; /* enable */
471 if (!rfbi.tearsync_mode)
472 w |= 1 << 4; /* internal trigger, reset by HW */
473 rfbi_write_reg(RFBI_CONTROL, w);
474
475 omap_dispc_enable_lcd_out(1);
476}
477
478static inline void _stop_transfer(void)
479{
480 u32 w;
481
482 w = rfbi_read_reg(RFBI_CONTROL);
483 rfbi_write_reg(RFBI_CONTROL, w & ~(1 << 0));
484 rfbi_enable_clocks(0);
485}
486
487static void rfbi_dma_callback(void *data)
488{
489 _stop_transfer();
490 rfbi.lcdc_callback(rfbi.lcdc_callback_data);
491}
492
493static void rfbi_set_bits_per_cycle(int bpc)
494{
495 u32 l;
496
497 rfbi_enable_clocks(1);
498 l = rfbi_read_reg(RFBI_CONFIG0);
499 l &= ~(0x03 << 0);
500
501 switch (bpc) {
502 case 8:
503 break;
504 case 16:
505 l |= 3;
506 break;
507 default:
508 BUG();
509 }
510 rfbi_write_reg(RFBI_CONFIG0, l);
511 rfbi.bits_per_cycle = bpc;
512 rfbi_enable_clocks(0);
513}
514
515static int rfbi_init(struct omapfb_device *fbdev)
516{
517 u32 l;
518 int r;
519
520 rfbi.fbdev = fbdev;
521 rfbi.base = io_p2v(RFBI_BASE);
522
523 if ((r = rfbi_get_clocks()) < 0)
524 return r;
525 rfbi_enable_clocks(1);
526
527 rfbi.l4_khz = clk_get_rate(rfbi.dss_ick) / 1000;
528
529 /* Reset */
530 rfbi_write_reg(RFBI_SYSCONFIG, 1 << 1);
531 while (!(rfbi_read_reg(RFBI_SYSSTATUS) & (1 << 0)));
532
533 l = rfbi_read_reg(RFBI_SYSCONFIG);
534 /* Enable autoidle and smart-idle */
535 l |= (1 << 0) | (2 << 3);
536 rfbi_write_reg(RFBI_SYSCONFIG, l);
537
538 /* 16-bit interface, ITE trigger mode, 16-bit data */
539 l = (0x03 << 0) | (0x00 << 2) | (0x01 << 5) | (0x02 << 7);
540 l |= (0 << 9) | (1 << 20) | (1 << 21);
541 rfbi_write_reg(RFBI_CONFIG0, l);
542
543 rfbi_write_reg(RFBI_DATA_CYCLE1_0, 0x00000010);
544
545 l = rfbi_read_reg(RFBI_CONTROL);
546 /* Select CS0, clear bypass mode */
547 l = (0x01 << 2);
548 rfbi_write_reg(RFBI_CONTROL, l);
549
550 if ((r = omap_dispc_request_irq(rfbi_dma_callback, NULL)) < 0) {
551 dev_err(fbdev->dev, "can't get DISPC irq\n");
552 rfbi_enable_clocks(0);
553 return r;
554 }
555
556 l = rfbi_read_reg(RFBI_REVISION);
557 pr_info("omapfb: RFBI version %d.%d initialized\n",
558 (l >> 4) & 0x0f, l & 0x0f);
559
560 rfbi_enable_clocks(0);
561
562 return 0;
563}
564
565static void rfbi_cleanup(void)
566{
567 omap_dispc_free_irq();
568 rfbi_put_clocks();
569}
570
571const struct lcd_ctrl_extif omap2_ext_if = {
572 .init = rfbi_init,
573 .cleanup = rfbi_cleanup,
574 .get_clk_info = rfbi_get_clk_info,
575 .get_max_tx_rate = rfbi_get_max_tx_rate,
576 .set_bits_per_cycle = rfbi_set_bits_per_cycle,
577 .convert_timings = rfbi_convert_timings,
578 .set_timings = rfbi_set_timings,
579 .write_command = rfbi_write_command,
580 .read_data = rfbi_read_data,
581 .write_data = rfbi_write_data,
582 .transfer_area = rfbi_transfer_area,
583 .setup_tearsync = rfbi_setup_tearsync,
584 .enable_tearsync = rfbi_enable_tearsync,
585
586 .max_transmit_size = (u32) ~0,
587};
588
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
new file mode 100644
index 000000000000..81dbcf53cf0e
--- /dev/null
+++ b/drivers/video/omap/sossi.c
@@ -0,0 +1,686 @@
1/*
2 * OMAP1 Special OptimiSed Screen Interface support
3 *
4 * Copyright (C) 2004-2005 Nokia Corporation
5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#include <linux/module.h>
22#include <linux/mm.h>
23#include <linux/clk.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26
27#include <asm/arch/dma.h>
28#include <asm/arch/omapfb.h>
29
30#include "lcdc.h"
31
32#define MODULE_NAME "omapfb-sossi"
33
34#define OMAP_SOSSI_BASE 0xfffbac00
35#define SOSSI_ID_REG 0x00
36#define SOSSI_INIT1_REG 0x04
37#define SOSSI_INIT2_REG 0x08
38#define SOSSI_INIT3_REG 0x0c
39#define SOSSI_FIFO_REG 0x10
40#define SOSSI_REOTABLE_REG 0x14
41#define SOSSI_TEARING_REG 0x18
42#define SOSSI_INIT1B_REG 0x1c
43#define SOSSI_FIFOB_REG 0x20
44
45#define DMA_GSCR 0xfffedc04
46#define DMA_LCD_CCR 0xfffee3c2
47#define DMA_LCD_CTRL 0xfffee3c4
48#define DMA_LCD_LCH_CTRL 0xfffee3ea
49
50#define CONF_SOSSI_RESET_R (1 << 23)
51
52#define RD_ACCESS 0
53#define WR_ACCESS 1
54
55#define SOSSI_MAX_XMIT_BYTES (512 * 1024)
56
57static struct {
58 void __iomem *base;
59 struct clk *fck;
60 unsigned long fck_hz;
61 spinlock_t lock;
62 int bus_pick_count;
63 int bus_pick_width;
64 int tearsync_mode;
65 int tearsync_line;
66 void (*lcdc_callback)(void *data);
67 void *lcdc_callback_data;
68 int vsync_dma_pending;
69 /* timing for read and write access */
70 int clk_div;
71 u8 clk_tw0[2];
72 u8 clk_tw1[2];
73 /*
74 * if last_access is the same as current we don't have to change
75 * the timings
76 */
77 int last_access;
78
79 struct omapfb_device *fbdev;
80} sossi;
81
82static inline u32 sossi_read_reg(int reg)
83{
84 return readl(sossi.base + reg);
85}
86
87static inline u16 sossi_read_reg16(int reg)
88{
89 return readw(sossi.base + reg);
90}
91
92static inline u8 sossi_read_reg8(int reg)
93{
94 return readb(sossi.base + reg);
95}
96
97static inline void sossi_write_reg(int reg, u32 value)
98{
99 writel(value, sossi.base + reg);
100}
101
102static inline void sossi_write_reg16(int reg, u16 value)
103{
104 writew(value, sossi.base + reg);
105}
106
107static inline void sossi_write_reg8(int reg, u8 value)
108{
109 writeb(value, sossi.base + reg);
110}
111
112static void sossi_set_bits(int reg, u32 bits)
113{
114 sossi_write_reg(reg, sossi_read_reg(reg) | bits);
115}
116
117static void sossi_clear_bits(int reg, u32 bits)
118{
119 sossi_write_reg(reg, sossi_read_reg(reg) & ~bits);
120}
121
122#define HZ_TO_PS(x) (1000000000 / (x / 1000))
123
124static u32 ps_to_sossi_ticks(u32 ps, int div)
125{
126 u32 clk_period = HZ_TO_PS(sossi.fck_hz) * div;
127 return (clk_period + ps - 1) / clk_period;
128}
129
130static int calc_rd_timings(struct extif_timings *t)
131{
132 u32 tw0, tw1;
133 int reon, reoff, recyc, actim;
134 int div = t->clk_div;
135
136 /*
137 * Make sure that after conversion it still holds that:
138 * reoff > reon, recyc >= reoff, actim > reon
139 */
140 reon = ps_to_sossi_ticks(t->re_on_time, div);
141 /* reon will be exactly one sossi tick */
142 if (reon > 1)
143 return -1;
144
145 reoff = ps_to_sossi_ticks(t->re_off_time, div);
146
147 if (reoff <= reon)
148 reoff = reon + 1;
149
150 tw0 = reoff - reon;
151 if (tw0 > 0x10)
152 return -1;
153
154 recyc = ps_to_sossi_ticks(t->re_cycle_time, div);
155 if (recyc <= reoff)
156 recyc = reoff + 1;
157
158 tw1 = recyc - tw0;
159 /* values less than 3 result in the SOSSI block resetting itself */
160 if (tw1 < 3)
161 tw1 = 3;
162 if (tw1 > 0x40)
163 return -1;
164
165 actim = ps_to_sossi_ticks(t->access_time, div);
166 if (actim < reoff)
167 actim++;
168 /*
169 * access time (data hold time) will be exactly one sossi
170 * tick
171 */
172 if (actim - reoff > 1)
173 return -1;
174
175 t->tim[0] = tw0 - 1;
176 t->tim[1] = tw1 - 1;
177
178 return 0;
179}
180
181static int calc_wr_timings(struct extif_timings *t)
182{
183 u32 tw0, tw1;
184 int weon, weoff, wecyc;
185 int div = t->clk_div;
186
187 /*
188 * Make sure that after conversion it still holds that:
189 * weoff > weon, wecyc >= weoff
190 */
191 weon = ps_to_sossi_ticks(t->we_on_time, div);
192 /* weon will be exactly one sossi tick */
193 if (weon > 1)
194 return -1;
195
196 weoff = ps_to_sossi_ticks(t->we_off_time, div);
197 if (weoff <= weon)
198 weoff = weon + 1;
199 tw0 = weoff - weon;
200 if (tw0 > 0x10)
201 return -1;
202
203 wecyc = ps_to_sossi_ticks(t->we_cycle_time, div);
204 if (wecyc <= weoff)
205 wecyc = weoff + 1;
206
207 tw1 = wecyc - tw0;
208 /* values less than 3 result in the SOSSI block resetting itself */
209 if (tw1 < 3)
210 tw1 = 3;
211 if (tw1 > 0x40)
212 return -1;
213
214 t->tim[2] = tw0 - 1;
215 t->tim[3] = tw1 - 1;
216
217 return 0;
218}
219
220static void _set_timing(int div, int tw0, int tw1)
221{
222 u32 l;
223
224#ifdef VERBOSE
225 dev_dbg(sossi.fbdev->dev, "Using TW0 = %d, TW1 = %d, div = %d\n",
226 tw0 + 1, tw1 + 1, div);
227#endif
228
229 clk_set_rate(sossi.fck, sossi.fck_hz / div);
230 clk_enable(sossi.fck);
231 l = sossi_read_reg(SOSSI_INIT1_REG);
232 l &= ~((0x0f << 20) | (0x3f << 24));
233 l |= (tw0 << 20) | (tw1 << 24);
234 sossi_write_reg(SOSSI_INIT1_REG, l);
235 clk_disable(sossi.fck);
236}
237
238static void _set_bits_per_cycle(int bus_pick_count, int bus_pick_width)
239{
240 u32 l;
241
242 l = sossi_read_reg(SOSSI_INIT3_REG);
243 l &= ~0x3ff;
244 l |= ((bus_pick_count - 1) << 5) | ((bus_pick_width - 1) & 0x1f);
245 sossi_write_reg(SOSSI_INIT3_REG, l);
246}
247
248static void _set_tearsync_mode(int mode, unsigned line)
249{
250 u32 l;
251
252 l = sossi_read_reg(SOSSI_TEARING_REG);
253 l &= ~(((1 << 11) - 1) << 15);
254 l |= line << 15;
255 l &= ~(0x3 << 26);
256 l |= mode << 26;
257 sossi_write_reg(SOSSI_TEARING_REG, l);
258 if (mode)
259 sossi_set_bits(SOSSI_INIT2_REG, 1 << 6); /* TE logic */
260 else
261 sossi_clear_bits(SOSSI_INIT2_REG, 1 << 6);
262}
263
264static inline void set_timing(int access)
265{
266 if (access != sossi.last_access) {
267 sossi.last_access = access;
268 _set_timing(sossi.clk_div,
269 sossi.clk_tw0[access], sossi.clk_tw1[access]);
270 }
271}
272
273static void sossi_start_transfer(void)
274{
275 /* WE */
276 sossi_clear_bits(SOSSI_INIT2_REG, 1 << 4);
277 /* CS active low */
278 sossi_clear_bits(SOSSI_INIT1_REG, 1 << 30);
279}
280
281static void sossi_stop_transfer(void)
282{
283 /* WE */
284 sossi_set_bits(SOSSI_INIT2_REG, 1 << 4);
285 /* CS active low */
286 sossi_set_bits(SOSSI_INIT1_REG, 1 << 30);
287}
288
289static void wait_end_of_write(void)
290{
291 /* Before reading, make sure that no write is still in progress */
292 while (!(sossi_read_reg(SOSSI_INIT2_REG) & (1 << 3)));
293}
294
295static void send_data(const void *data, unsigned int len)
296{
297 while (len >= 4) {
298 sossi_write_reg(SOSSI_FIFO_REG, *(const u32 *) data);
299 len -= 4;
300 data += 4;
301 }
302 while (len >= 2) {
303 sossi_write_reg16(SOSSI_FIFO_REG, *(const u16 *) data);
304 len -= 2;
305 data += 2;
306 }
307 while (len) {
308 sossi_write_reg8(SOSSI_FIFO_REG, *(const u8 *) data);
309 len--;
310 data++;
311 }
312}
313
314static void set_cycles(unsigned int len)
315{
316 unsigned long nr_cycles = len / (sossi.bus_pick_width / 8);
317
318 BUG_ON((nr_cycles - 1) & ~0x3ffff);
319
320 sossi_clear_bits(SOSSI_INIT1_REG, 0x3ffff);
321 sossi_set_bits(SOSSI_INIT1_REG, (nr_cycles - 1) & 0x3ffff);
322}
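For scale, a short example of the cycle count this produces, using a hypothetical transfer size:

/* Example: a 320x240 update on a 16-bit bus (bus_pick_width = 16)
 * transfers len = 320 * 240 * 2 = 153600 bytes, i.e.
 * nr_cycles = 153600 / (16 / 8) = 76800 cycles, well inside the
 * 18-bit (0x3ffff) limit checked by the BUG_ON() above.
 */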
323
324static int sossi_convert_timings(struct extif_timings *t)
325{
326 int r = 0;
327 int div = t->clk_div;
328
329 t->converted = 0;
330
331 if (div <= 0 || div > 8)
332 return -1;
333
334 /* no CS on SOSSI, so ignore cson, csoff, cs_pulsewidth */
335 if ((r = calc_rd_timings(t)) < 0)
336 return r;
337
338 if ((r = calc_wr_timings(t)) < 0)
339 return r;
340
341 t->tim[4] = div;
342
343 t->converted = 1;
344
345 return 0;
346}
347
348static void sossi_set_timings(const struct extif_timings *t)
349{
350 BUG_ON(!t->converted);
351
352 sossi.clk_tw0[RD_ACCESS] = t->tim[0];
353 sossi.clk_tw1[RD_ACCESS] = t->tim[1];
354
355 sossi.clk_tw0[WR_ACCESS] = t->tim[2];
356 sossi.clk_tw1[WR_ACCESS] = t->tim[3];
357
358 sossi.clk_div = t->tim[4];
359}
360
361static void sossi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
362{
363 *clk_period = HZ_TO_PS(sossi.fck_hz);
364 *max_clk_div = 8;
365}
366
367static void sossi_set_bits_per_cycle(int bpc)
368{
369 int bus_pick_count, bus_pick_width;
370
371 /*
372 * We set the bus_pick_count explicitly as well, although
373 * with remapping/reordering disabled it will be calculated by HW
374 * as (32 / bus_pick_width).
375 */
376 switch (bpc) {
377 case 8:
378 bus_pick_count = 4;
379 bus_pick_width = 8;
380 break;
381 case 16:
382 bus_pick_count = 2;
383 bus_pick_width = 16;
384 break;
385 default:
386 BUG();
387 return;
388 }
389 sossi.bus_pick_width = bus_pick_width;
390 sossi.bus_pick_count = bus_pick_count;
391}
392
393static int sossi_setup_tearsync(unsigned pin_cnt,
394 unsigned hs_pulse_time, unsigned vs_pulse_time,
395 int hs_pol_inv, int vs_pol_inv, int div)
396{
397 int hs, vs;
398 u32 l;
399
400 if (pin_cnt != 1 || div < 1 || div > 8)
401 return -EINVAL;
402
403 hs = ps_to_sossi_ticks(hs_pulse_time, div);
404 vs = ps_to_sossi_ticks(vs_pulse_time, div);
405 if (vs < 8 || vs <= hs || vs >= (1 << 12))
406 return -EDOM;
407 vs /= 8;
408 vs--;
409 if (hs > 8)
410 hs = 8;
411 if (hs)
412 hs--;
413
414 dev_dbg(sossi.fbdev->dev,
415 "setup_tearsync: hs %d vs %d hs_inv %d vs_inv %d\n",
416 hs, vs, hs_pol_inv, vs_pol_inv);
417
418 clk_enable(sossi.fck);
419 l = sossi_read_reg(SOSSI_TEARING_REG);
420 l &= ~((1 << 15) - 1);
421 l |= vs << 3;
422 l |= hs;
423 if (hs_pol_inv)
424 l |= 1 << 29;
425 else
426 l &= ~(1 << 29);
427 if (vs_pol_inv)
428 l |= 1 << 28;
429 else
430 l &= ~(1 << 28);
431 sossi_write_reg(SOSSI_TEARING_REG, l);
432 clk_disable(sossi.fck);
433
434 return 0;
435}
436
437static int sossi_enable_tearsync(int enable, unsigned line)
438{
439 int mode;
440
441 dev_dbg(sossi.fbdev->dev, "tearsync %d line %d\n", enable, line);
442 if (line >= 1 << 11)
443 return -EINVAL;
444 if (enable) {
445 if (line)
446 mode = 2; /* HS or VS */
447 else
448 mode = 3; /* VS only */
449 } else
450 mode = 0;
451 sossi.tearsync_line = line;
452 sossi.tearsync_mode = mode;
453
454 return 0;
455}
456
457static void sossi_write_command(const void *data, unsigned int len)
458{
459 clk_enable(sossi.fck);
460 set_timing(WR_ACCESS);
461 _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
462 /* CMD#/DATA */
463 sossi_clear_bits(SOSSI_INIT1_REG, 1 << 18);
464 set_cycles(len);
465 sossi_start_transfer();
466 send_data(data, len);
467 sossi_stop_transfer();
468 wait_end_of_write();
469 clk_disable(sossi.fck);
470}
471
472static void sossi_write_data(const void *data, unsigned int len)
473{
474 clk_enable(sossi.fck);
475 set_timing(WR_ACCESS);
476 _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
477 /* CMD#/DATA */
478 sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
479 set_cycles(len);
480 sossi_start_transfer();
481 send_data(data, len);
482 sossi_stop_transfer();
483 wait_end_of_write();
484 clk_disable(sossi.fck);
485}
486
487static void sossi_transfer_area(int width, int height,
488 void (callback)(void *data), void *data)
489{
490 BUG_ON(callback == NULL);
491
492 sossi.lcdc_callback = callback;
493 sossi.lcdc_callback_data = data;
494
495 clk_enable(sossi.fck);
496 set_timing(WR_ACCESS);
497 _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
498 _set_tearsync_mode(sossi.tearsync_mode, sossi.tearsync_line);
499 /* CMD#/DATA */
500 sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
501 set_cycles(width * height * sossi.bus_pick_width / 8);
502
503 sossi_start_transfer();
504 if (sossi.tearsync_mode) {
505 /*
506 * Wait for the sync signal and start the transfer only
507 * then. We don't seem to be able to use HW sync DMA for
508 * this since LCD DMA shows huge latencies, as if it
509 * would ignore some of the DMA requests from SoSSI.
510 */
511 unsigned long flags;
512
513 spin_lock_irqsave(&sossi.lock, flags);
514 sossi.vsync_dma_pending++;
515 spin_unlock_irqrestore(&sossi.lock, flags);
516 } else
517 /* Just start the transfer right away. */
518 omap_enable_lcd_dma();
519}
520
521static void sossi_dma_callback(void *data)
522{
523 omap_stop_lcd_dma();
524 sossi_stop_transfer();
525 clk_disable(sossi.fck);
526 sossi.lcdc_callback(sossi.lcdc_callback_data);
527}
528
529static void sossi_read_data(void *data, unsigned int len)
530{
531 clk_enable(sossi.fck);
532 set_timing(RD_ACCESS);
533 _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
534 /* CMD#/DATA */
535 sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
536 set_cycles(len);
537 sossi_start_transfer();
538 while (len >= 4) {
539 *(u32 *) data = sossi_read_reg(SOSSI_FIFO_REG);
540 len -= 4;
541 data += 4;
542 }
543 while (len >= 2) {
544 *(u16 *) data = sossi_read_reg16(SOSSI_FIFO_REG);
545 len -= 2;
546 data += 2;
547 }
548 while (len) {
549 *(u8 *) data = sossi_read_reg8(SOSSI_FIFO_REG);
550 len--;
551 data++;
552 }
553 sossi_stop_transfer();
554 clk_disable(sossi.fck);
555}
556
557static irqreturn_t sossi_match_irq(int irq, void *data)
558{
559 unsigned long flags;
560
561 spin_lock_irqsave(&sossi.lock, flags);
562 if (sossi.vsync_dma_pending) {
563 sossi.vsync_dma_pending--;
564 omap_enable_lcd_dma();
565 }
566 spin_unlock_irqrestore(&sossi.lock, flags);
567 return IRQ_HANDLED;
568}
569
570static int sossi_init(struct omapfb_device *fbdev)
571{
572 u32 l, k;
573 struct clk *fck;
574 struct clk *dpll1out_ck;
575 int r;
576
577 sossi.base = (void __iomem *)IO_ADDRESS(OMAP_SOSSI_BASE);
578 sossi.fbdev = fbdev;
579 spin_lock_init(&sossi.lock);
580
581 dpll1out_ck = clk_get(fbdev->dev, "ck_dpll1out");
582 if (IS_ERR(dpll1out_ck)) {
583 dev_err(fbdev->dev, "can't get DPLL1OUT clock\n");
584 return PTR_ERR(dpll1out_ck);
585 }
586 /*
587 * We need the parent clock rate, which we might divide further
588 * depending on the timing requirements of the controller. See
589 * _set_timings.
590 */
591 sossi.fck_hz = clk_get_rate(dpll1out_ck);
592 clk_put(dpll1out_ck);
593
594 fck = clk_get(fbdev->dev, "ck_sossi");
595 if (IS_ERR(fck)) {
596 dev_err(fbdev->dev, "can't get SoSSI functional clock\n");
597 return PTR_ERR(fck);
598 }
599 sossi.fck = fck;
600
601 /* Reset and enable the SoSSI module */
602 l = omap_readl(MOD_CONF_CTRL_1);
603 l |= CONF_SOSSI_RESET_R;
604 omap_writel(l, MOD_CONF_CTRL_1);
605 l &= ~CONF_SOSSI_RESET_R;
606 omap_writel(l, MOD_CONF_CTRL_1);
607
608 clk_enable(sossi.fck);
609 l = omap_readl(ARM_IDLECT2);
610 l &= ~(1 << 8); /* DMACK_REQ */
611 omap_writel(l, ARM_IDLECT2);
612
613 l = sossi_read_reg(SOSSI_INIT2_REG);
614 /* Enable and reset the SoSSI block */
615 l |= (1 << 0) | (1 << 1);
616 sossi_write_reg(SOSSI_INIT2_REG, l);
617 /* Take SoSSI out of reset */
618 l &= ~(1 << 1);
619 sossi_write_reg(SOSSI_INIT2_REG, l);
620
621 sossi_write_reg(SOSSI_ID_REG, 0);
622 l = sossi_read_reg(SOSSI_ID_REG);
623 k = sossi_read_reg(SOSSI_ID_REG);
624
625 if (l != 0x55555555 || k != 0xaaaaaaaa) {
626 dev_err(fbdev->dev,
627 "invalid SoSSI sync pattern: %08x, %08x\n", l, k);
628 r = -ENODEV;
629 goto err;
630 }
631
632 if ((r = omap_lcdc_set_dma_callback(sossi_dma_callback, NULL)) < 0) {
633 dev_err(fbdev->dev, "can't get LCDC IRQ\n");
634 r = -ENODEV;
635 goto err;
636 }
637
638 l = sossi_read_reg(SOSSI_ID_REG); /* Component code */
639 l = sossi_read_reg(SOSSI_ID_REG);
640 dev_info(fbdev->dev, "SoSSI version %d.%d initialized\n",
641 l >> 16, l & 0xffff);
642
643 l = sossi_read_reg(SOSSI_INIT1_REG);
644 l |= (1 << 19); /* DMA_MODE */
645 l &= ~(1 << 31); /* REORDERING */
646 sossi_write_reg(SOSSI_INIT1_REG, l);
647
648 if ((r = request_irq(INT_1610_SoSSI_MATCH, sossi_match_irq,
649 IRQT_FALLING,
650 "sossi_match", sossi.fbdev->dev)) < 0) {
651 dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n");
652 goto err;
653 }
654
655 clk_disable(sossi.fck);
656 return 0;
657
658err:
659 clk_disable(sossi.fck);
660 clk_put(sossi.fck);
661 return r;
662}
663
664static void sossi_cleanup(void)
665{
666 omap_lcdc_free_dma_callback();
667 clk_put(sossi.fck);
668}
669
670struct lcd_ctrl_extif omap1_ext_if = {
671 .init = sossi_init,
672 .cleanup = sossi_cleanup,
673 .get_clk_info = sossi_get_clk_info,
674 .convert_timings = sossi_convert_timings,
675 .set_timings = sossi_set_timings,
676 .set_bits_per_cycle = sossi_set_bits_per_cycle,
677 .setup_tearsync = sossi_setup_tearsync,
678 .enable_tearsync = sossi_enable_tearsync,
679 .write_command = sossi_write_command,
680 .read_data = sossi_read_data,
681 .write_data = sossi_write_data,
682 .transfer_area = sossi_transfer_area,
683
684 .max_transmit_size = SOSSI_MAX_XMIT_BYTES,
685};
686
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index e64f8b5d0056..8503e733a172 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -52,7 +52,7 @@ struct fb_info_platinum {
52 struct { 52 struct {
53 __u8 red, green, blue; 53 __u8 red, green, blue;
54 } palette[256]; 54 } palette[256];
55 u32 pseudo_palette[17]; 55 u32 pseudo_palette[16];
56 56
57 volatile struct cmap_regs __iomem *cmap_regs; 57 volatile struct cmap_regs __iomem *cmap_regs;
58 unsigned long cmap_regs_phys; 58 unsigned long cmap_regs_phys;
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 0a04483aa3e0..10c0cc6e93fc 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -24,7 +24,7 @@
24 * License. See the file COPYING in the main directory of this archive for 24 * License. See the file COPYING in the main directory of this archive for
25 * more details. 25 * more details.
26 * 26 *
27 * 27 *
28 */ 28 */
29 29
30#include <linux/module.h> 30#include <linux/module.h>
@@ -58,7 +58,7 @@
58#endif 58#endif
59 59
60/* 60/*
61 * Driver data 61 * Driver data
62 */ 62 */
63static char *mode __devinitdata = NULL; 63static char *mode __devinitdata = NULL;
64 64
@@ -82,12 +82,12 @@ struct pm2fb_par
82{ 82{
83 pm2type_t type; /* Board type */ 83 pm2type_t type; /* Board type */
84 unsigned char __iomem *v_regs;/* virtual address of p_regs */ 84 unsigned char __iomem *v_regs;/* virtual address of p_regs */
85 u32 memclock; /* memclock */ 85 u32 memclock; /* memclock */
86 u32 video; /* video flags before blanking */ 86 u32 video; /* video flags before blanking */
87 u32 mem_config; /* MemConfig reg at probe */ 87 u32 mem_config; /* MemConfig reg at probe */
88 u32 mem_control; /* MemControl reg at probe */ 88 u32 mem_control; /* MemControl reg at probe */
89 u32 boot_address; /* BootAddress reg at probe */ 89 u32 boot_address; /* BootAddress reg at probe */
90 u32 palette[16]; 90 u32 palette[16];
91}; 91};
92 92
93/* 93/*
@@ -95,12 +95,12 @@ struct pm2fb_par
95 * if we don't use modedb. 95 * if we don't use modedb.
96 */ 96 */
97static struct fb_fix_screeninfo pm2fb_fix __devinitdata = { 97static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
98 .id = "", 98 .id = "",
99 .type = FB_TYPE_PACKED_PIXELS, 99 .type = FB_TYPE_PACKED_PIXELS,
100 .visual = FB_VISUAL_PSEUDOCOLOR, 100 .visual = FB_VISUAL_PSEUDOCOLOR,
101 .xpanstep = 1, 101 .xpanstep = 1,
102 .ypanstep = 1, 102 .ypanstep = 1,
103 .ywrapstep = 0, 103 .ywrapstep = 0,
104 .accel = FB_ACCEL_3DLABS_PERMEDIA2, 104 .accel = FB_ACCEL_3DLABS_PERMEDIA2,
105}; 105};
106 106
@@ -109,26 +109,26 @@ static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
109 */ 109 */
110static struct fb_var_screeninfo pm2fb_var __devinitdata = { 110static struct fb_var_screeninfo pm2fb_var __devinitdata = {
111 /* "640x480, 8 bpp @ 60 Hz */ 111 /* "640x480, 8 bpp @ 60 Hz */
112 .xres = 640, 112 .xres = 640,
113 .yres = 480, 113 .yres = 480,
114 .xres_virtual = 640, 114 .xres_virtual = 640,
115 .yres_virtual = 480, 115 .yres_virtual = 480,
116 .bits_per_pixel =8, 116 .bits_per_pixel = 8,
117 .red = {0, 8, 0}, 117 .red = {0, 8, 0},
118 .blue = {0, 8, 0}, 118 .blue = {0, 8, 0},
119 .green = {0, 8, 0}, 119 .green = {0, 8, 0},
120 .activate = FB_ACTIVATE_NOW, 120 .activate = FB_ACTIVATE_NOW,
121 .height = -1, 121 .height = -1,
122 .width = -1, 122 .width = -1,
123 .accel_flags = 0, 123 .accel_flags = 0,
124 .pixclock = 39721, 124 .pixclock = 39721,
125 .left_margin = 40, 125 .left_margin = 40,
126 .right_margin = 24, 126 .right_margin = 24,
127 .upper_margin = 32, 127 .upper_margin = 32,
128 .lower_margin = 11, 128 .lower_margin = 11,
129 .hsync_len = 96, 129 .hsync_len = 96,
130 .vsync_len = 2, 130 .vsync_len = 2,
131 .vmode = FB_VMODE_NONINTERLACED 131 .vmode = FB_VMODE_NONINTERLACED
132}; 132};
133 133
134/* 134/*
@@ -166,7 +166,7 @@ static inline u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
166 pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff); 166 pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
167 index = PM2VR_RD_INDEXED_DATA; 167 index = PM2VR_RD_INDEXED_DATA;
168 break; 168 break;
169 } 169 }
170 mb(); 170 mb();
171 return pm2_RD(p, index); 171 return pm2_RD(p, index);
172} 172}
@@ -182,7 +182,7 @@ static inline void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
182 pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff); 182 pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
183 index = PM2VR_RD_INDEXED_DATA; 183 index = PM2VR_RD_INDEXED_DATA;
184 break; 184 break;
185 } 185 }
186 wmb(); 186 wmb();
187 pm2_WR(p, index, v); 187 pm2_WR(p, index, v);
188 wmb(); 188 wmb();
@@ -197,7 +197,7 @@ static inline void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
197} 197}
198 198
199#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT 199#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
200#define WAIT_FIFO(p,a) 200#define WAIT_FIFO(p, a)
201#else 201#else
202static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a) 202static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
203{ 203{
@@ -209,7 +209,7 @@ static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
209/* 209/*
210 * partial products for the supported horizontal resolutions. 210 * partial products for the supported horizontal resolutions.
211 */ 211 */
212#define PACKPP(p0,p1,p2) (((p2) << 6) | ((p1) << 3) | (p0)) 212#define PACKPP(p0, p1, p2) (((p2) << 6) | ((p1) << 3) | (p0))
213static const struct { 213static const struct {
214 u16 width; 214 u16 width;
215 u16 pp; 215 u16 pp;
@@ -357,7 +357,7 @@ static void reset_card(struct pm2fb_par* p)
357static void reset_config(struct pm2fb_par* p) 357static void reset_config(struct pm2fb_par* p)
358{ 358{
359 WAIT_FIFO(p, 52); 359 WAIT_FIFO(p, 52);
360 pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG)& 360 pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG) &
361 ~(PM2F_VGA_ENABLE|PM2F_VGA_FIXED)); 361 ~(PM2F_VGA_ENABLE|PM2F_VGA_FIXED));
362 pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L)); 362 pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L));
363 pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L)); 363 pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L));
@@ -367,7 +367,7 @@ static void reset_config(struct pm2fb_par* p)
367 pm2_WR(p, PM2R_RASTERIZER_MODE, 0); 367 pm2_WR(p, PM2R_RASTERIZER_MODE, 0);
368 pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB); 368 pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB);
369 pm2_WR(p, PM2R_LB_READ_FORMAT, 0); 369 pm2_WR(p, PM2R_LB_READ_FORMAT, 0);
370 pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0); 370 pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0);
371 pm2_WR(p, PM2R_LB_READ_MODE, 0); 371 pm2_WR(p, PM2R_LB_READ_MODE, 0);
372 pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0); 372 pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0);
373 pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0); 373 pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0);
@@ -535,7 +535,7 @@ static void set_video(struct pm2fb_par* p, u32 video) {
535 vsync = video; 535 vsync = video;
536 536
537 DPRINTK("video = 0x%x\n", video); 537 DPRINTK("video = 0x%x\n", video);
538 538
539 /* 539 /*
540 * The hardware cursor needs +vsync to recognise vert retrace. 540 * The hardware cursor needs +vsync to recognise vert retrace.
541 * We may not be using the hardware cursor, but the X Glint 541 * We may not be using the hardware cursor, but the X Glint
@@ -574,9 +574,9 @@ static void set_video(struct pm2fb_par* p, u32 video) {
574 */ 574 */
575 575
576/** 576/**
577 * pm2fb_check_var - Optional function. Validates a var passed in. 577 * pm2fb_check_var - Optional function. Validates a var passed in.
578 * @var: frame buffer variable screen structure 578 * @var: frame buffer variable screen structure
579 * @info: frame buffer structure that represents a single frame buffer 579 * @info: frame buffer structure that represents a single frame buffer
580 * 580 *
581 * Checks to see if the hardware supports the state requested by 581 * Checks to see if the hardware supports the state requested by
582 * var passed in. 582 * var passed in.
@@ -615,23 +615,23 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
615 615
616 var->xres = (var->xres + 15) & ~15; /* could sometimes be 8 */ 616 var->xres = (var->xres + 15) & ~15; /* could sometimes be 8 */
617 lpitch = var->xres * ((var->bits_per_pixel + 7)>>3); 617 lpitch = var->xres * ((var->bits_per_pixel + 7)>>3);
618 618
619 if (var->xres < 320 || var->xres > 1600) { 619 if (var->xres < 320 || var->xres > 1600) {
620 DPRINTK("width not supported: %u\n", var->xres); 620 DPRINTK("width not supported: %u\n", var->xres);
621 return -EINVAL; 621 return -EINVAL;
622 } 622 }
623 623
624 if (var->yres < 200 || var->yres > 1200) { 624 if (var->yres < 200 || var->yres > 1200) {
625 DPRINTK("height not supported: %u\n", var->yres); 625 DPRINTK("height not supported: %u\n", var->yres);
626 return -EINVAL; 626 return -EINVAL;
627 } 627 }
628 628
629 if (lpitch * var->yres_virtual > info->fix.smem_len) { 629 if (lpitch * var->yres_virtual > info->fix.smem_len) {
630 DPRINTK("no memory for screen (%ux%ux%u)\n", 630 DPRINTK("no memory for screen (%ux%ux%u)\n",
631 var->xres, var->yres_virtual, var->bits_per_pixel); 631 var->xres, var->yres_virtual, var->bits_per_pixel);
632 return -EINVAL; 632 return -EINVAL;
633 } 633 }
634 634
635 if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) { 635 if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
636 DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock)); 636 DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock));
637 return -EINVAL; 637 return -EINVAL;
@@ -672,17 +672,17 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
672 break; 672 break;
673 } 673 }
674 var->height = var->width = -1; 674 var->height = var->width = -1;
675 675
676 var->accel_flags = 0; /* Can't mmap if this is on */ 676 var->accel_flags = 0; /* Can't mmap if this is on */
677 677
678 DPRINTK("Checking graphics mode at %dx%d depth %d\n", 678 DPRINTK("Checking graphics mode at %dx%d depth %d\n",
679 var->xres, var->yres, var->bits_per_pixel); 679 var->xres, var->yres, var->bits_per_pixel);
680 return 0; 680 return 0;
681} 681}
682 682
683/** 683/**
684 * pm2fb_set_par - Alters the hardware state. 684 * pm2fb_set_par - Alters the hardware state.
685 * @info: frame buffer structure that represents a single frame buffer 685 * @info: frame buffer structure that represents a single frame buffer
686 * 686 *
687 * Using the fb_var_screeninfo in fb_info we set the resolution of the 687 * Using the fb_var_screeninfo in fb_info we set the resolution of the
688 * this particular framebuffer. 688 * this particular framebuffer.
@@ -709,7 +709,7 @@ static int pm2fb_set_par(struct fb_info *info)
709 clear_palette(par); 709 clear_palette(par);
710 if ( par->memclock ) 710 if ( par->memclock )
711 set_memclock(par, par->memclock); 711 set_memclock(par, par->memclock);
712 712
713 width = (info->var.xres_virtual + 7) & ~7; 713 width = (info->var.xres_virtual + 7) & ~7;
714 height = info->var.yres_virtual; 714 height = info->var.yres_virtual;
715 depth = (info->var.bits_per_pixel + 7) & ~7; 715 depth = (info->var.bits_per_pixel + 7) & ~7;
@@ -722,7 +722,7 @@ static int pm2fb_set_par(struct fb_info *info)
722 DPRINTK("pixclock too high (%uKHz)\n", pixclock); 722 DPRINTK("pixclock too high (%uKHz)\n", pixclock);
723 return -EINVAL; 723 return -EINVAL;
724 } 724 }
725 725
726 hsstart = to3264(info->var.right_margin, depth, data64); 726 hsstart = to3264(info->var.right_margin, depth, data64);
727 hsend = hsstart + to3264(info->var.hsync_len, depth, data64); 727 hsend = hsstart + to3264(info->var.hsync_len, depth, data64);
728 hbend = hsend + to3264(info->var.left_margin, depth, data64); 728 hbend = hsend + to3264(info->var.left_margin, depth, data64);
@@ -737,7 +737,7 @@ static int pm2fb_set_par(struct fb_info *info)
737 base = to3264(info->var.yoffset * xres + info->var.xoffset, depth, 1); 737 base = to3264(info->var.yoffset * xres + info->var.xoffset, depth, 1);
738 if (data64) 738 if (data64)
739 video |= PM2F_DATA_64_ENABLE; 739 video |= PM2F_DATA_64_ENABLE;
740 740
741 if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) { 741 if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) {
742 if (lowhsync) { 742 if (lowhsync) {
743 DPRINTK("ignoring +hsync, using -hsync.\n"); 743 DPRINTK("ignoring +hsync, using -hsync.\n");
@@ -778,9 +778,9 @@ static int pm2fb_set_par(struct fb_info *info)
 		WAIT_FIFO(par, 1);
 		pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0);
 	}
 
 	set_aperture(par, depth);
 
 	mb();
 	WAIT_FIFO(par, 19);
 	pm2_RDAC_WR(par, PM2I_RD_COLOR_KEY_CONTROL,
@@ -847,22 +847,22 @@ static int pm2fb_set_par(struct fb_info *info)
 	set_pixclock(par, pixclock);
 	DPRINTK("Setting graphics mode at %dx%d depth %d\n",
 		info->var.xres, info->var.yres, info->var.bits_per_pixel);
 	return 0;
 }
 
 /**
  * pm2fb_setcolreg - Sets a color register.
  * @regno: index of the color register to set
  * @red: The red value which can be up to 16 bits wide
  * @green: The green value which can be up to 16 bits wide
  * @blue: The blue value which can be up to 16 bits wide.
  * @transp: If supported the alpha value which can be up to 16 bits wide.
  * @info: frame buffer info structure
  *
  * Set a single color register. The values supplied have a 16 bit
  * magnitude which needs to be scaled in this function for the hardware.
  * Pretty much a direct lift from tdfxfb.c.
  *
  * Returns negative errno on error, or zero on success.
  */
 static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -906,7 +906,7 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 	 * (blue << blue.offset) | (transp << transp.offset)
 	 * RAMDAC does not exist
 	 */
-#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16)
+#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF -(val)) >> 16)
 	switch (info->fix.visual) {
 	case FB_VISUAL_TRUECOLOR:
 	case FB_VISUAL_PSEUDOCOLOR:
@@ -916,9 +916,9 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 		transp = CNVT_TOHW(transp, info->var.transp.length);
 		break;
 	case FB_VISUAL_DIRECTCOLOR:
 		/* example here assumes 8 bit DAC. Might be different
 		 * for your hardware */
 		red = CNVT_TOHW(red, 8);
 		green = CNVT_TOHW(green, 8);
 		blue = CNVT_TOHW(blue, 8);
 		/* hey, there is bug in transp handling... */
@@ -940,11 +940,11 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 	switch (info->var.bits_per_pixel) {
 	case 8:
 		break;
 	case 16:
 	case 24:
 	case 32:
 		par->palette[regno] = v;
 		break;
 	}
 	return 0;
@@ -956,15 +956,15 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 }
 
 /**
  * pm2fb_pan_display - Pans the display.
  * @var: frame buffer variable screen structure
  * @info: frame buffer structure that represents a single frame buffer
  *
  * Pan (or wrap, depending on the `vmode' field) the display using the
  * `xoffset' and `yoffset' fields of the `var' structure.
  * If the values don't fit, return -EINVAL.
  *
  * Returns negative errno on error, or zero on success.
  *
  */
 static int pm2fb_pan_display(struct fb_var_screeninfo *var,
@@ -980,24 +980,24 @@ static int pm2fb_pan_display(struct fb_var_screeninfo *var,
 	depth = (depth > 32) ? 32 : depth;
 	base = to3264(var->yoffset * xres + var->xoffset, depth, 1);
 	WAIT_FIFO(p, 1);
 	pm2_WR(p, PM2R_SCREEN_BASE, base);
 	return 0;
 }
 
 /**
  * pm2fb_blank - Blanks the display.
  * @blank_mode: the blank mode we want.
  * @info: frame buffer structure that represents a single frame buffer
  *
  * Blank the screen if blank_mode != 0, else unblank. Return 0 if
  * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
  * video mode which doesn't support it. Implements VESA suspend
  * and powerdown modes on hardware that supports disabling hsync/vsync:
  *   blank_mode == 2: suspend vsync
  *   blank_mode == 3: suspend hsync
  *   blank_mode == 4: powerdown
  *
  * Returns negative errno on error, or zero on success.
  *
  */
 static int pm2fb_blank(int blank_mode, struct fb_info *info)
@@ -1071,7 +1071,7 @@ static void pm2fb_block_op(struct fb_info* info, int copy,
 	pm2_WR(par, PM2R_RECTANGLE_ORIGIN, (y << 16) | x);
 	pm2_WR(par, PM2R_RECTANGLE_SIZE, (h << 16) | w);
 	wmb();
-	pm2_WR(par, PM2R_RENDER,PM2F_RENDER_RECTANGLE |
+	pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE |
 		(x<xsrc ? PM2F_INCREASE_X : 0) |
 		(y<ysrc ? PM2F_INCREASE_Y : 0) |
 		(copy ? 0 : PM2F_RENDER_FASTFILL));
@@ -1234,7 +1234,7 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
 	DPRINTK("Adjusting register base for big-endian.\n");
 #endif
 	DPRINTK("Register base at 0x%lx\n", pm2fb_fix.mmio_start);
 
 	/* Registers - request region and map it. */
 	if ( !request_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len,
 				"pm2fb regbase") ) {
@@ -1317,17 +1317,17 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
 	}
 
 	info->fbops = &pm2fb_ops;
 	info->fix = pm2fb_fix;
 	info->pseudo_palette = default_par->palette;
 	info->flags = FBINFO_DEFAULT |
 			FBINFO_HWACCEL_YPAN |
 			FBINFO_HWACCEL_COPYAREA |
 			FBINFO_HWACCEL_FILLRECT;
 
 	if (!mode)
 		mode = "640x480@60";
 
 	err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
 	if (!err || err == 4)
 		info->var = pm2fb_var;
 
@@ -1348,8 +1348,8 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
 	return 0;
 
  err_exit_all:
 	fb_dealloc_cmap(&info->cmap);
  err_exit_both:
 	iounmap(info->screen_base);
 	release_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len);
  err_exit_mmio:
@@ -1374,7 +1374,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
 	struct pm2fb_par *par = info->par;
 
 	unregister_framebuffer(info);
 
 	iounmap(info->screen_base);
 	release_mem_region(fix->smem_start, fix->smem_len);
 	iounmap(par->v_regs);
@@ -1402,9 +1402,9 @@ static struct pci_device_id pm2fb_id_table[] = {
 
 static struct pci_driver pm2fb_driver = {
 	.name = "pm2fb",
 	.id_table = pm2fb_id_table,
 	.probe = pm2fb_probe,
 	.remove = __devexit_p(pm2fb_remove),
 };
 
 MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
@@ -1423,7 +1423,7 @@ static int __init pm2fb_setup(char *options)
 	if (!options || !*options)
 		return 0;
 
 	while ((this_opt = strsep(&options, ",")) != NULL) {
 		if (!*this_opt)
 			continue;
 		if(!strcmp(this_opt, "lowhsync")) {
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index b52e883f0a52..5b3f54c0918e 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -77,7 +77,7 @@ static struct fb_fix_screeninfo pm3fb_fix __devinitdata = {
 	.xpanstep = 1,
 	.ypanstep = 1,
 	.ywrapstep = 0,
-	.accel = FB_ACCEL_NONE,
+	.accel = FB_ACCEL_3DLABS_PERMEDIA3,
 };
 
 /*
@@ -185,6 +185,238 @@ static inline int pm3fb_shift_bpp(unsigned bpp, int v)
 	return 0;
 }
 
+/* acceleration */
+static int pm3fb_sync(struct fb_info *info)
+{
+	struct pm3_par *par = info->par;
+
+	PM3_WAIT(par, 2);
+	PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
+	PM3_WRITE_REG(par, PM3Sync, 0);
+	mb();
+	do {
+		while ((PM3_READ_REG(par, PM3OutFIFOWords)) == 0);
+		rmb();
+	} while ((PM3_READ_REG(par, PM3OutputFifo)) != PM3Sync_Tag);
+
+	return 0;
+}
+
+static void pm3fb_init_engine(struct fb_info *info)
+{
+	struct pm3_par *par = info->par;
+	const u32 width = (info->var.xres_virtual + 7) & ~7;
+
+	PM3_WAIT(par, 50);
+	PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
+	PM3_WRITE_REG(par, PM3StatisticMode, 0x0);
+	PM3_WRITE_REG(par, PM3DeltaMode, 0x0);
+	PM3_WRITE_REG(par, PM3RasterizerMode, 0x0);
+	PM3_WRITE_REG(par, PM3ScissorMode, 0x0);
+	PM3_WRITE_REG(par, PM3LineStippleMode, 0x0);
+	PM3_WRITE_REG(par, PM3AreaStippleMode, 0x0);
+	PM3_WRITE_REG(par, PM3GIDMode, 0x0);
+	PM3_WRITE_REG(par, PM3DepthMode, 0x0);
+	PM3_WRITE_REG(par, PM3StencilMode, 0x0);
+	PM3_WRITE_REG(par, PM3StencilData, 0x0);
+	PM3_WRITE_REG(par, PM3ColorDDAMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCoordMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureIndexMode0, 0x0);
+	PM3_WRITE_REG(par, PM3TextureIndexMode1, 0x0);
+	PM3_WRITE_REG(par, PM3TextureReadMode, 0x0);
+	PM3_WRITE_REG(par, PM3LUTMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureFilterMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureApplicationMode, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeColorMode1, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode1, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeColorMode0, 0x0);
+	PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode0, 0x0);
+	PM3_WRITE_REG(par, PM3FogMode, 0x0);
+	PM3_WRITE_REG(par, PM3ChromaTestMode, 0x0);
+	PM3_WRITE_REG(par, PM3AlphaTestMode, 0x0);
+	PM3_WRITE_REG(par, PM3AntialiasMode, 0x0);
+	PM3_WRITE_REG(par, PM3YUVMode, 0x0);
+	PM3_WRITE_REG(par, PM3AlphaBlendColorMode, 0x0);
+	PM3_WRITE_REG(par, PM3AlphaBlendAlphaMode, 0x0);
+	PM3_WRITE_REG(par, PM3DitherMode, 0x0);
+	PM3_WRITE_REG(par, PM3LogicalOpMode, 0x0);
+	PM3_WRITE_REG(par, PM3RouterMode, 0x0);
+	PM3_WRITE_REG(par, PM3Window, 0x0);
+
+	PM3_WRITE_REG(par, PM3Config2D, 0x0);
+
+	PM3_WRITE_REG(par, PM3SpanColorMask, 0xffffffff);
+
+	PM3_WRITE_REG(par, PM3XBias, 0x0);
+	PM3_WRITE_REG(par, PM3YBias, 0x0);
+	PM3_WRITE_REG(par, PM3DeltaControl, 0x0);
+
+	PM3_WRITE_REG(par, PM3BitMaskPattern, 0xffffffff);
+
+	PM3_WRITE_REG(par, PM3FBDestReadEnables,
+			PM3FBDestReadEnables_E(0xff) |
+			PM3FBDestReadEnables_R(0xff) |
+			PM3FBDestReadEnables_ReferenceAlpha(0xff));
+	PM3_WRITE_REG(par, PM3FBDestReadBufferAddr0, 0x0);
+	PM3_WRITE_REG(par, PM3FBDestReadBufferOffset0, 0x0);
+	PM3_WRITE_REG(par, PM3FBDestReadBufferWidth0,
+			PM3FBDestReadBufferWidth_Width(width));
+
+	PM3_WRITE_REG(par, PM3FBDestReadMode,
+			PM3FBDestReadMode_ReadEnable |
+			PM3FBDestReadMode_Enable0);
+	PM3_WRITE_REG(par, PM3FBSourceReadBufferAddr, 0x0);
+	PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset, 0x0);
+	PM3_WRITE_REG(par, PM3FBSourceReadBufferWidth,
+			PM3FBSourceReadBufferWidth_Width(width));
+	PM3_WRITE_REG(par, PM3FBSourceReadMode,
+			PM3FBSourceReadMode_Blocking |
+			PM3FBSourceReadMode_ReadEnable);
+
+	PM3_WAIT(par, 2);
+	{
+		unsigned long rm = 1;
+		switch (info->var.bits_per_pixel) {
+		case 8:
+			PM3_WRITE_REG(par, PM3PixelSize,
+					PM3PixelSize_GLOBAL_8BIT);
+			break;
+		case 16:
+			PM3_WRITE_REG(par, PM3PixelSize,
+					PM3PixelSize_GLOBAL_16BIT);
+			break;
+		case 32:
+			PM3_WRITE_REG(par, PM3PixelSize,
+					PM3PixelSize_GLOBAL_32BIT);
+			break;
+		default:
+			DPRINTK(1, "Unsupported depth %d\n",
+				info->var.bits_per_pixel);
+			break;
+		}
+		PM3_WRITE_REG(par, PM3RasterizerMode, rm);
+	}
+
+	PM3_WAIT(par, 20);
+	PM3_WRITE_REG(par, PM3FBSoftwareWriteMask, 0xffffffff);
+	PM3_WRITE_REG(par, PM3FBHardwareWriteMask, 0xffffffff);
+	PM3_WRITE_REG(par, PM3FBWriteMode,
+			PM3FBWriteMode_WriteEnable |
+			PM3FBWriteMode_OpaqueSpan |
+			PM3FBWriteMode_Enable0);
+	PM3_WRITE_REG(par, PM3FBWriteBufferAddr0, 0x0);
+	PM3_WRITE_REG(par, PM3FBWriteBufferOffset0, 0x0);
+	PM3_WRITE_REG(par, PM3FBWriteBufferWidth0,
+			PM3FBWriteBufferWidth_Width(width));
+
+	PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 0x0);
+	{
+		/* size in lines of FB */
+		unsigned long sofb = info->screen_size /
+			info->fix.line_length;
+		if (sofb > 4095)
+			PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 4095);
+		else
+			PM3_WRITE_REG(par, PM3SizeOfFramebuffer, sofb);
+
+		switch (info->var.bits_per_pixel) {
+		case 8:
+			PM3_WRITE_REG(par, PM3DitherMode,
+					(1 << 10) | (2 << 3));
+			break;
+		case 16:
+			PM3_WRITE_REG(par, PM3DitherMode,
+					(1 << 10) | (1 << 3));
+			break;
+		case 32:
+			PM3_WRITE_REG(par, PM3DitherMode,
+					(1 << 10) | (0 << 3));
+			break;
+		default:
+			DPRINTK(1, "Unsupported depth %d\n",
+				info->current_par->depth);
+			break;
+		}
+	}
+
+	PM3_WRITE_REG(par, PM3dXDom, 0x0);
+	PM3_WRITE_REG(par, PM3dXSub, 0x0);
+	PM3_WRITE_REG(par, PM3dY, (1 << 16));
+	PM3_WRITE_REG(par, PM3StartXDom, 0x0);
+	PM3_WRITE_REG(par, PM3StartXSub, 0x0);
+	PM3_WRITE_REG(par, PM3StartY, 0x0);
+	PM3_WRITE_REG(par, PM3Count, 0x0);
+
+/* Disable LocalBuffer. better safe than sorry */
+	PM3_WRITE_REG(par, PM3LBDestReadMode, 0x0);
+	PM3_WRITE_REG(par, PM3LBDestReadEnables, 0x0);
+	PM3_WRITE_REG(par, PM3LBSourceReadMode, 0x0);
+	PM3_WRITE_REG(par, PM3LBWriteMode, 0x0);
+
+	pm3fb_sync(info);
+}
+
+static void pm3fb_fillrect (struct fb_info *info,
+				const struct fb_fillrect *region)
+{
+	struct pm3_par *par = info->par;
+	struct fb_fillrect modded;
+	int vxres, vyres;
+	u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ?
+		((u32*)info->pseudo_palette)[region->color] : region->color;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+	if ((info->flags & FBINFO_HWACCEL_DISABLED) ||
+		region->rop != ROP_COPY ) {
+		cfb_fillrect(info, region);
+		return;
+	}
+
+	vxres = info->var.xres_virtual;
+	vyres = info->var.yres_virtual;
+
+	memcpy(&modded, region, sizeof(struct fb_fillrect));
+
+	if(!modded.width || !modded.height ||
+	   modded.dx >= vxres || modded.dy >= vyres)
+		return;
+
+	if(modded.dx + modded.width > vxres)
+		modded.width = vxres - modded.dx;
+	if(modded.dy + modded.height > vyres)
+		modded.height = vyres - modded.dy;
+
+	if(info->var.bits_per_pixel == 8)
+		color |= color << 8;
+	if(info->var.bits_per_pixel <= 16)
+		color |= color << 16;
+
+	PM3_WAIT(par, 4);
+
+	PM3_WRITE_REG(par, PM3Config2D,
+			PM3Config2D_UseConstantSource |
+			PM3Config2D_ForegroundROPEnable |
+			(PM3Config2D_ForegroundROP(0x3)) | /* Ox3 is GXcopy */
+			PM3Config2D_FBWriteEnable);
+
+	PM3_WRITE_REG(par, PM3ForegroundColor, color);
+
+	PM3_WRITE_REG(par, PM3RectanglePosition,
+			(PM3RectanglePosition_XOffset(modded.dx)) |
+			(PM3RectanglePosition_YOffset(modded.dy)));
+
+	PM3_WRITE_REG(par, PM3Render2D,
+			PM3Render2D_XPositive |
+			PM3Render2D_YPositive |
+			PM3Render2D_Operation_Normal |
+			PM3Render2D_SpanOperation |
+			(PM3Render2D_Width(modded.width)) |
+			(PM3Render2D_Height(modded.height)));
+}
+/* end of acceleration functions */
+
 /* write the mode to registers */
 static void pm3fb_write_mode(struct fb_info *info)
 {
@@ -380,8 +612,6 @@ static void pm3fb_write_mode(struct fb_info *info)
 /*
  * hardware independent functions
  */
-int pm3fb_init(void);
-
 static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
 	u32 lpitch;
@@ -528,6 +758,7 @@ static int pm3fb_set_par(struct fb_info *info)
 	pm3fb_clear_colormap(par, 0, 0, 0);
 	PM3_WRITE_DAC_REG(par, PM3RD_CursorMode,
 			PM3RD_CursorMode_CURSOR_DISABLE);
+	pm3fb_init_engine(info);
 	pm3fb_write_mode(info);
 	return 0;
 }
@@ -675,10 +906,11 @@ static struct fb_ops pm3fb_ops = {
 	.fb_set_par = pm3fb_set_par,
 	.fb_setcolreg = pm3fb_setcolreg,
 	.fb_pan_display = pm3fb_pan_display,
-	.fb_fillrect = cfb_fillrect,
+	.fb_fillrect = pm3fb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
 	.fb_blank = pm3fb_blank,
+	.fb_sync = pm3fb_sync,
 };
 
 /* ------------------------------------------------------------------------- */
@@ -847,7 +1079,8 @@ static int __devinit pm3fb_probe(struct pci_dev *dev,
 
 	info->fix = pm3fb_fix;
 	info->pseudo_palette = par->palette;
-	info->flags = FBINFO_DEFAULT;/* | FBINFO_HWACCEL_YPAN;*/
+	info->flags = FBINFO_DEFAULT |
+			FBINFO_HWACCEL_FILLRECT;/* | FBINFO_HWACCEL_YPAN;*/
 
 	/*
 	 * This should give a reasonable default video mode. The following is
@@ -935,35 +1168,12 @@ static struct pci_driver pm3fb_driver = {
 
 MODULE_DEVICE_TABLE(pci, pm3fb_id_table);
 
-#ifndef MODULE
-	/*
-	 *  Setup
-	 */
-
-/*
- * Only necessary if your driver takes special options,
- * otherwise we fall back on the generic fb_setup().
- */
-static int __init pm3fb_setup(char *options)
+static int __init pm3fb_init(void)
 {
-	/* Parse user speficied options (`video=pm3fb:') */
-	return 0;
-}
-#endif /* MODULE */
-
-int __init pm3fb_init(void)
-{
-	/*
-	 *  For kernel boot options (in 'video=pm3fb:<options>' format)
-	 */
 #ifndef MODULE
-	char *option = NULL;
-
-	if (fb_get_options("pm3fb", &option))
+	if (fb_get_options("pm3fb", NULL))
 		return -ENODEV;
-	pm3fb_setup(option);
 #endif
-
 	return pci_register_driver(&pm3fb_driver);
 }
 
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 08b7ffbbbbd8..3972aa8cf859 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -812,6 +812,7 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
 
 static int ps3fbd(void *arg)
 {
+	set_freezable();
 	while (!kthread_should_stop()) {
 		try_to_freeze();
 		set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 2ba959a83eb0..0f88c30f94f8 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -333,24 +333,25 @@ static int pvr2fb_setcolreg(unsigned int regno, unsigned int red,
 			((blue & 0xf800) >> 11);
 
 		pvr2fb_set_pal_entry(par, regno, tmp);
-		((u16*)(info->pseudo_palette))[regno] = tmp;
 		break;
 	case 24: /* RGB 888 */
 		red >>= 8; green >>= 8; blue >>= 8;
-		((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
+		tmp = (red << 16) | (green << 8) | blue;
 		break;
 	case 32: /* ARGB 8888 */
 		red >>= 8; green >>= 8; blue >>= 8;
 		tmp = (transp << 24) | (red << 16) | (green << 8) | blue;
 
 		pvr2fb_set_pal_entry(par, regno, tmp);
-		((u32*)(info->pseudo_palette))[regno] = tmp;
 		break;
 	default:
 		pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel);
 		return 1;
 	}
 
+	if (regno < 16)
+		((u32*)(info->pseudo_palette))[regno] = tmp;
+
 	return 0;
 }
 
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index 48536c3e58a4..4beac1df617b 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -95,7 +95,7 @@ static int __init q40fb_probe(struct platform_device *dev)
 	/* mapped in q40/config.c */
 	q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR;
 
-	info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
+	info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
 	if (!info)
 		return -ENOMEM;
 
diff --git a/drivers/video/riva/riva_hw.c b/drivers/video/riva/riva_hw.c
index 70bfd78eca81..13307703a9f0 100644
--- a/drivers/video/riva/riva_hw.c
+++ b/drivers/video/riva/riva_hw.c
@@ -1223,6 +1223,8 @@ static int CalcVClock
             }
         }
     }
+
+    /* non-zero: M/N/P/clock values assigned. zero: error (not set) */
     return (DeltaOld != 0xFFFFFFFF);
 }
 /*
@@ -1240,7 +1242,10 @@ int CalcStateExt
     int dotClock
 )
 {
-    int pixelDepth, VClk, m, n, p;
+    int pixelDepth;
+    int uninitialized_var(VClk),uninitialized_var(m),
+        uninitialized_var(n), uninitialized_var(p);
+
     /*
      * Save mode parameters.
      */
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index ebb6756aea08..4fb16240c04d 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -752,7 +752,7 @@ static int __init sgivwfb_probe(struct platform_device *dev)
 	struct fb_info *info;
 	char *monitor;
 
-	info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 256, &dev->dev);
+	info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 16, &dev->dev);
 	if (!info)
 		return -ENOMEM;
 	par = info->par;
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index d5e2d9c27847..d53bf6945f0c 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -479,7 +479,7 @@ struct sis_video_info {
 	struct fb_var_screeninfo default_var;
 
 	struct fb_fix_screeninfo sisfb_fix;
-	u32 pseudo_palette[17];
+	u32 pseudo_palette[16];
 
 	struct sisfb_monitor {
 		u16 hmin;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 93d07ef85276..e8ccace01252 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1405,12 +1405,18 @@ sisfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
 		}
 		break;
 	case 16:
+		if (regno >= 16)
+			break;
+
 		((u32 *)(info->pseudo_palette))[regno] =
 			(red & 0xf800) |
 			((green & 0xfc00) >> 5) |
 			((blue & 0xf800) >> 11);
 		break;
 	case 32:
+		if (regno >= 16)
+			break;
+
 		red >>= 8;
 		green >>= 8;
 		blue >>= 8;
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 5c0dab628099..89facb73edfc 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1634,7 +1634,7 @@ tgafb_register(struct device *dev)
 		FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT;
 	info->fbops = &tgafb_ops;
 	info->screen_base = par->tga_fb_base;
-	info->pseudo_palette = (void *)(par + 1);
+	info->pseudo_palette = par->palette;
 
 	/* This should give a reasonable default video mode. */
 	if (tga_bus_pci) {
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 55e8aa450bfa..c699864b6f4a 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -976,7 +976,7 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 		return 1;
 
 
-	if (bpp==8) {
+	if (bpp == 8) {
 		t_outb(0xFF,0x3C6);
 		t_outb(regno,0x3C8);
 
@@ -984,19 +984,21 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 		t_outb(green>>10,0x3C9);
 		t_outb(blue>>10,0x3C9);
 
-	} else if (bpp == 16) { /* RGB 565 */
-		u32 col;
-
-		col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
-			((blue & 0xF800) >> 11);
-		col |= col << 16;
-		((u32 *)(info->pseudo_palette))[regno] = col;
-	} else if (bpp == 32) /* ARGB 8888 */
-		((u32*)info->pseudo_palette)[regno] =
-			((transp & 0xFF00) <<16) |
-			((red & 0xFF00) << 8) |
-			((green & 0xFF00)) |
-			((blue & 0xFF00)>>8);
+	} else if (regno < 16) {
+		if (bpp == 16) { /* RGB 565 */
+			u32 col;
+
+			col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
+				((blue & 0xF800) >> 11);
+			col |= col << 16;
+			((u32 *)(info->pseudo_palette))[regno] = col;
+		} else if (bpp == 32) /* ARGB 8888 */
+			((u32*)info->pseudo_palette)[regno] =
+				((transp & 0xFF00) <<16) |
+				((red & 0xFF00) << 8) |
+				((green & 0xFF00)) |
+				((blue & 0xFF00)>>8);
+	}
 
 // debug("exit\n");
 	return 0;
diff --git a/drivers/video/tx3912fb.c b/drivers/video/tx3912fb.c
index 07389ba01eff..e6f7c78da68b 100644
--- a/drivers/video/tx3912fb.c
+++ b/drivers/video/tx3912fb.c
@@ -291,7 +291,7 @@ int __init tx3912fb_init(void)
 	fb_info.fbops = &tx3912fb_ops;
 	fb_info.var = tx3912fb_var;
 	fb_info.fix = tx3912fb_fix;
-	fb_info.pseudo_palette = pseudo_palette;
+	fb_info.pseudo_palette = cfb8;
 	fb_info.flags = FBINFO_DEFAULT;
 
 	/* Clear the framebuffer */
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 30c0b948852b..4c3a63308df1 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -68,26 +68,26 @@ static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
 
 /* CRT timing register sets */
 
-struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
-struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
-struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
-struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
 
-struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
-struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
-struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
-struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
-struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
 
-struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
-struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
-struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
 
-struct svga_timing_regs vt8623_timing_regs = {
+static struct svga_timing_regs vt8623_timing_regs = {
 	vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
 	vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
 	vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
@@ -903,7 +903,7 @@ static void __exit vt8623fb_cleanup(void)
 
 /* Driver Initialisation */
 
-int __init vt8623fb_init(void)
+static int __init vt8623fb_init(void)
 {
 
 #ifndef MODULE
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index f5c5b760ed7b..c6332108f1c5 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -805,6 +805,7 @@ static int w1_control(void *data)
 	struct w1_master *dev, *n;
 	int have_to_wait = 0;
 
+	set_freezable();
 	while (!kthread_should_stop() || have_to_wait) {
 		have_to_wait = 0;
 